/*
 * arch/x86/include/asm/vm86.h
 */
#ifndef _ASM_X86_VM86_H
#define _ASM_X86_VM86_H
#include <asm/ptrace.h>
#include <uapi/asm/vm86.h>
/*
 * This is the (kernel) stack-layout when we have done a "SAVE_ALL" from vm86
 * mode - the main change is that the old segment descriptors aren't
 * useful any more and are forced to be zero by the kernel (and the
 * hardware when a trap occurs), and the real segment descriptors are
 * at the end of the structure. Look at ptrace.h to see the "normal"
 * setup. For the user-space layout, see 'struct vm86_regs' in
 * <uapi/asm/vm86.h>.
 */
/*
 * Kernel-side register frame for a task running in vm86 mode.
 *
 * Layout is ABI: it mirrors what lands on the kernel stack after a
 * SAVE_ALL from vm86 mode (see comment above) — do not reorder fields.
 */
struct kernel_vm86_regs {
/*
 * normal regs, with special meaning for the segment descriptors..
 */
struct pt_regs pt;
/*
 * these are specific to v86 mode:
 * Each segment register is paired with a __*h pad so the pair occupies
 * 32 bits, matching the stack slots the CPU pushes on a vm86 trap.
 */
unsigned short es, __esh;
unsigned short ds, __dsh;
unsigned short fs, __fsh;
unsigned short gs, __gsh;
};
/*
 * Per-task vm86 state, kept while a task is in (or has used) vm86 mode.
 * Allocated on demand and freed via free_vm86() (see below).
 */
struct vm86 {
/* user-space vm86plus_struct this state was loaded from / is saved to */
struct vm86plus_struct __user *vm86_info;
/* 32-bit-mode registers saved on entry, restored when leaving vm86 */
struct pt_regs regs32;
/* virtual EFLAGS as seen by the vm86 task */
unsigned long v86flags;
/* mask of EFLAGS bits the vm86 task may modify */
unsigned long v86mask;
/* saved thread sp0, restored by save_v86_state() */
unsigned long saved_sp0;
unsigned long flags;
/* NOTE(review): screen_bitmap looks legacy/unused in newer kernels — verify */
unsigned long screen_bitmap;
unsigned long cpu_type;
/* interrupts to reflect back to the 32-bit caller instead of emulating */
struct revectored_struct int_revectored;
struct revectored_struct int21_revectored;
struct vm86plus_info_struct vm86plus;
};
#ifdef CONFIG_VM86
/* Handle a general-protection fault raised while in vm86 mode. */
void handle_vm86_fault(struct kernel_vm86_regs *, long);
/* Handle trap 'trapno' from vm86 mode; returns 0 if fully handled. */
int handle_vm86_trap(struct kernel_vm86_regs *, long, int);
/* Leave vm86 mode: copy state back to user space and restore 32-bit regs. */
void save_v86_state(struct kernel_vm86_regs *, int);
struct task_struct;
/* Release any vm86-related IRQs held by the exiting task. */
void release_vm86_irqs(struct task_struct *);
/*
 * Free the thread's on-demand vm86 state, if any.
 * 't' is a struct thread_struct *; evaluated once via __t.
 */
#define free_vm86(t) do { \
struct thread_struct *__t = (t); \
if (__t->vm86 != NULL) { \
kfree(__t->vm86); \
__t->vm86 = NULL; \
} \
} while (0)
#else
/* !CONFIG_VM86: no-op stubs so callers compile without #ifdefs. */
#define handle_vm86_fault(a, b)
#define release_vm86_irqs(a)
static inline int handle_vm86_trap(struct kernel_vm86_regs *a, long b, int c)
{
return 0;
}
static inline void save_v86_state(struct kernel_vm86_regs *a, int b) { }
#define free_vm86(t) do { } while(0)
#endif /* CONFIG_VM86 */
#endif /* _ASM_X86_VM86_H */