mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-25 22:05:23 +07:00
8242c6c84a
The goal is to integrate the SYSENTER and SYSCALL32 entry paths with the INT80 path. SYSENTER clobbers ESP and EIP. SYSCALL32 clobbers ECX (and, invisibly, R11). SYSRETL (long mode to compat mode) clobbers ECX and, invisibly, R11. SYSEXIT (which we only need for native 32-bit) clobbers ECX and EDX. This means that we'll need to provide ESP to the kernel in a register (I chose ECX, since it's only needed for SYSENTER) and we need to provide the args that normally live in ECX and EDX in memory. The epilogue needs to restore ECX and EDX, since user code relies on regs being preserved. We don't need to do anything special about EIP, since the kernel already knows where we are. The kernel will eventually need to know where int $0x80 lands, so add a vdso_image entry for it. The only user-visible effect of this code is that ptrace-induced changes to ECX and EDX during fast syscalls will be lost. This is already the case for the SYSENTER path. Signed-off-by: Andy Lutomirski <luto@kernel.org> Cc: Andy Lutomirski <luto@amacapital.net> Cc: Borislav Petkov <bp@alien8.de> Cc: Brian Gerst <brgerst@gmail.com> Cc: Denys Vlasenko <dvlasenk@redhat.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: linux-kernel@vger.kernel.org Link: http://lkml.kernel.org/r/b860925adbee2d2627a0671fbfe23a7fd04127f8.1444091584.git.luto@kernel.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
49 lines · 1.0 KiB · C
#ifndef _ASM_X86_VDSO_H
#define _ASM_X86_VDSO_H

#include <asm/page_types.h>
#include <linux/linkage.h>
#include <linux/init.h>

#ifndef __ASSEMBLER__

#include <linux/mm_types.h>

/*
 * Description of a prebuilt vDSO blob and the locations of the symbols
 * the kernel needs to find inside it.  The sym_* fields are offsets of
 * vDSO symbols relative to the start of the image text.
 */
struct vdso_image {
	void *data;			/* The vDSO blob itself */
	unsigned long size;		/* Always a multiple of PAGE_SIZE */

	/* text_mapping.pages is big enough for data/size page pointers */
	struct vm_special_mapping text_mapping;

	unsigned long alt, alt_len;	/* Alternatives patch area in the blob */

	long sym_vvar_start;		/* Negative offset to the vvar area */

	long sym_vvar_page;
	long sym_hpet_page;
	long sym_VDSO32_NOTE_MASK;
	long sym___kernel_sigreturn;
	long sym___kernel_rt_sigreturn;
	long sym___kernel_vsyscall;
	long sym_int80_landing_pad;	/* Where "int $0x80" returns to in the vDSO */
};

#ifdef CONFIG_X86_64
extern const struct vdso_image vdso_image_64;
#endif

#ifdef CONFIG_X86_X32
extern const struct vdso_image vdso_image_x32;
#endif

#if defined CONFIG_X86_32 || defined CONFIG_COMPAT
extern const struct vdso_image vdso_image_32;
#endif

/* One-time setup (e.g. alternatives patching) for a vDSO image. */
extern void __init init_vdso_image(const struct vdso_image *image);

#endif /* __ASSEMBLER__ */

#endif /* _ASM_X86_VDSO_H */