mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-26 04:05:18 +07:00
bf904d2762
The SYSCALL64 trampoline has a couple of nice properties:

 - The usual sequence of SWAPGS followed by two GS-relative accesses to
   set up RSP is somewhat slow because the GS-relative accesses need to
   wait for SWAPGS to finish. The trampoline approach allows
   RIP-relative accesses to set up RSP, which avoids the stall.

 - The trampoline avoids any percpu access before CR3 is set up, which
   means that no percpu memory needs to be mapped in the user page
   tables. This prevents using Meltdown to read any percpu memory
   outside the cpu_entry_area and prevents using timing leaks to
   directly locate the percpu areas.

The downsides of using a trampoline may outweigh the upsides, however.
It adds an extra non-contiguous I$ cache line to system calls, and it
forces an indirect jump to transfer control back to the normal kernel
text after CR3 is set up. The latter is because x86 lacks a 64-bit
direct jump instruction that could jump from the trampoline to the
entry text. With retpolines enabled, the indirect jump is extremely
slow.

Change the code to map the percpu TSS into the user page tables to
allow the non-trampoline SYSCALL64 path to work under PTI. This does
not add a new direct information leak, since the TSS is readable by
Meltdown from the cpu_entry_area alias regardless. It does allow a
timing attack to locate the percpu area, but KASLR is more or less a
lost cause against local attack on CPUs vulnerable to Meltdown
regardless. As far as I'm concerned, on current hardware, KASLR is
only useful to mitigate remote attacks that try to attack the kernel
without first gaining RCE against a vulnerable user process.

On Skylake, with CONFIG_RETPOLINE=y and KPTI on, this reduces syscall
overhead from ~237ns to ~228ns.

There is a possible alternative approach: move the trampoline within 2G
of the entry text and make a separate copy for each CPU. This would
allow a direct jump to rejoin the normal entry path. There are pros and
cons to this approach:

 + It avoids a pipeline stall.

 - It executes from an extra page and reads from another extra page
   during the syscall. The latter is because it needs to use a relative
   addressing mode to find sp1 -- it's the same *cacheline*, but
   accessed using an alias, so it's an extra TLB entry.

 - Slightly more memory. This would be one page per CPU for a simple
   implementation and 64-ish bytes per CPU or one page per node for a
   more complex implementation.

 - More code complexity.

The current approach is chosen for simplicity, and because the
alternative does not provide a significant benefit, it is not worth the
extra complexity.

[ tglx: Added the alternative discussion to the changelog ]

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: https://lkml.kernel.org/r/8c7c6e483612c3e4e10ca89495dc160b1aa66878.1536015544.git.luto@kernel.org
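As a rough illustration of the numbers above: syscall overhead of this
kind can be measured from user space with a tight loop around a cheap
syscall. The sketch below is illustrative only, not part of this
commit; the iteration count and the choice of SYS_getpid are arbitrary.

/* Hypothetical micro-benchmark: average cost of a syscall round trip. */
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	enum { ITERS = 10 * 1000 * 1000 };
	struct timespec start, end;
	double ns;
	long i;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (i = 0; i < ITERS; i++)
		syscall(SYS_getpid);	/* near-minimal syscall: mostly entry/exit cost */
	clock_gettime(CLOCK_MONOTONIC, &end);

	ns = (end.tv_sec - start.tv_sec) * 1e9 + (end.tv_nsec - start.tv_nsec);
	printf("%.1f ns per syscall\n", ns / ITERS);
	return 0;
}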
80 lines
2.2 KiB
C
// SPDX-License-Identifier: GPL-2.0

#ifndef _ASM_X86_CPU_ENTRY_AREA_H
#define _ASM_X86_CPU_ENTRY_AREA_H

#include <linux/percpu-defs.h>
#include <asm/processor.h>
#include <asm/intel_ds.h>

/*
 * cpu_entry_area is a percpu region that contains things needed by the CPU
 * and early entry/exit code. Real types aren't used for all fields here
 * to avoid circular header dependencies.
 *
 * Every field is a virtual alias of some other allocated backing store.
 * There is no direct allocation of a struct cpu_entry_area.
 */
struct cpu_entry_area {
	char gdt[PAGE_SIZE];

	/*
	 * The GDT is just below entry_stack and thus serves (on x86_64) as
	 * a read-only guard page.
	 */
	struct entry_stack_page entry_stack_page;

	/*
	 * On x86_64, the TSS is mapped RO. On x86_32, it's mapped RW because
	 * we need task switches to work, and task switches write to the TSS.
	 */
	struct tss_struct tss;

#ifdef CONFIG_X86_64
	/*
	 * Exception stacks used for IST entries.
	 *
	 * In the future, this should have a separate slot for each stack
	 * with guard pages between them.
	 */
	char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
#endif
#ifdef CONFIG_CPU_SUP_INTEL
	/*
	 * Per-CPU debug store for Intel performance monitoring. Wastes a
	 * full page at the moment.
	 */
	struct debug_store cpu_debug_store;
	/*
	 * The actual PEBS/BTS buffers must be mapped to user space.
	 * Reserve enough fixmap PTEs.
	 */
	struct debug_store_buffers cpu_debug_buffers;
#endif
};

#define CPU_ENTRY_AREA_SIZE	(sizeof(struct cpu_entry_area))
#define CPU_ENTRY_AREA_TOT_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)

DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);

extern void setup_cpu_entry_areas(void);
extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);

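/*
 * Illustrative sketch (adapted from arch/x86/mm/cpu_entry_area.c, not
 * part of this header): setup code uses cea_set_pte() to alias percpu
 * backing pages into the fixed cpu_entry_area layout, one PTE per page:
 *
 *	static void __init
 *	cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
 *	{
 *		for (; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
 *			cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
 *	}
 */
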
#define CPU_ENTRY_AREA_RO_IDT		CPU_ENTRY_AREA_BASE
#define CPU_ENTRY_AREA_PER_CPU		(CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)

#define CPU_ENTRY_AREA_RO_IDT_VADDR	((void *)CPU_ENTRY_AREA_RO_IDT)

#define CPU_ENTRY_AREA_MAP_SIZE			\
	(CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE)

extern struct cpu_entry_area *get_cpu_entry_area(int cpu);

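/*
 * Sketch of the out-of-line definition (it lives in
 * arch/x86/mm/cpu_entry_area.c; shown here for illustration only).
 * Because every CPU gets a fixed-size slot at a fixed virtual address,
 * the lookup is pure arithmetic on the macros above:
 *
 *	struct cpu_entry_area *get_cpu_entry_area(int cpu)
 *	{
 *		unsigned long va = CPU_ENTRY_AREA_PER_CPU +
 *				   cpu * CPU_ENTRY_AREA_SIZE;
 *		BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
 *
 *		return (struct cpu_entry_area *) va;
 *	}
 */
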
static inline struct entry_stack *cpu_entry_stack(int cpu)
{
	return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
}

#endif /* _ASM_X86_CPU_ENTRY_AREA_H */