mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-26 00:35:35 +07:00
2a594d4ccf
The debug IST stack is actually two separate debug stacks to handle #DB recursion. This is required because the CPU starts always at top of stack on exception entry, which means on #DB recursion the second #DB would overwrite the stack of the first.

The low level entry code therefore adjusts the top of stack on entry so a secondary #DB starts from a different stack page. But the stack pages are adjacent without a guard page between them.

Split the debug stack into 3 stacks which are separated by guard pages. The 3rd stack is never mapped into the cpu_entry_area and is only there to catch triple #DB nesting:

    --- top of DB_stack   <- Initial stack
    --- end of DB_stack
        guard page

    --- top of DB1_stack  <- Top of stack after entering first #DB
    --- end of DB1_stack
        guard page

    --- top of DB2_stack  <- Top of stack after entering second #DB
    --- end of DB2_stack
        guard page

If DB2 would not act as the final guard hole, a second #DB would point the top of #DB stack to the stack below #DB1 which would be valid and not catch the not so desired triple nesting.

The backing store does not allocate any memory for DB2 and its guard page as it is not going to be mapped into the cpu_entry_area.

 - Adjust the low level entry code so it adjusts top of #DB with the offset between the stacks instead of exception stack size.

 - Make the dumpstack code aware of the new stacks.

 - Adjust the in_debug_stack() implementation and move it into the NMI code where it belongs. As this is NMI hotpath code, it just checks the full area between top of DB_stack and bottom of DB1_stack without checking for the guard page. That's correct because the NMI cannot hit a stackpointer pointing to the guard page between DB and DB1 stack. Even if it would, then the NMI operation still is unaffected, but the resume of the debug exception on the topmost DB stack will crash by touching the guard page.

  [ bp: Make exception_stack_names static const char * const ]

Suggested-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Baoquan He <bhe@redhat.com>
Cc: "Chang S. Bae" <chang.seok.bae@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dominik Brodowski <linux@dominikbrodowski.net>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: linux-doc@vger.kernel.org
Cc: Masahiro Yamada <yamada.masahiro@socionext.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Qian Cai <cai@lca.pw>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: x86-ml <x86@kernel.org>
Link: https://lkml.kernel.org/r/20190414160145.439944544@linutronix.de
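For readers who want the described layout and the relaxed NMI-path check in one place, here is a minimal, self-contained C sketch. It is not kernel code: the struct, the page-size constant and the names (db_stacks_model, model_in_debug_stack) are illustrative assumptions that only mirror the region ordering and the single range comparison described above; the real definitions live in asm/cpu_entry_area.h and the NMI code.

```c
#include <stdio.h>

/* Illustrative size: one page per stack and per guard hole (assumption). */
#define MODEL_PAGE_SIZE 4096UL

/*
 * Hypothetical model of the split #DB stack area, lowest address first.
 * Each stack has a guard hole below it; DB2 is never mapped and only
 * exists so that a third nested #DB hits a hole instead of valid stack.
 */
struct db_stacks_model {
	unsigned char DB2_guard[MODEL_PAGE_SIZE];
	unsigned char DB2_stack[MODEL_PAGE_SIZE];	/* never mapped */
	unsigned char DB1_guard[MODEL_PAGE_SIZE];
	unsigned char DB1_stack[MODEL_PAGE_SIZE];	/* second #DB lands here */
	unsigned char DB_guard[MODEL_PAGE_SIZE];
	unsigned char DB_stack[MODEL_PAGE_SIZE];	/* initial #DB stack */
};

/*
 * The relaxed check described above: treat everything from the bottom of
 * DB1_stack up to the top of DB_stack as "debug stack", without carving
 * out the guard page in between.
 */
static int model_in_debug_stack(const struct db_stacks_model *s,
				unsigned long sp)
{
	unsigned long bot = (unsigned long)s->DB1_stack;
	unsigned long top = (unsigned long)(s->DB_stack + MODEL_PAGE_SIZE);

	return sp >= bot && sp < top;
}

int main(void)
{
	static struct db_stacks_model m;

	/* A pointer into the initial #DB stack is inside the range ... */
	printf("%d\n", model_in_debug_stack(&m, (unsigned long)&m.DB_stack[128]));
	/* ... while one below DB1_stack (e.g. in DB2_stack) is not. */
	printf("%d\n", model_in_debug_stack(&m, (unsigned long)&m.DB2_stack[128]));
	return 0;
}
```

Compiled with a plain C compiler this prints 1 and 0; the point is only the ordering of the regions and the single range comparison, not the real sizes or mappings.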
208 lines
6.2 KiB
C
// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>

#include <asm/cpu_entry_area.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/desc.h>

static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
#endif

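/* Return the fixed cpu_entry_area virtual address reserved for @cpu. */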
struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

	return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);

void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
	unsigned long va = (unsigned long) cea_vaddr;
	pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);

	/*
	 * The cpu_entry_area is shared between the user and kernel
	 * page tables. All of its ptes can safely be global.
	 * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
	 * non-present PTEs, so be careful not to set it in that
	 * case to avoid confusion.
	 */
	if (boot_cpu_has(X86_FEATURE_PGE) &&
	    (pgprot_val(flags) & _PAGE_PRESENT))
		pte = pte_set_flags(pte, _PAGE_GLOBAL);

	set_pte_vaddr(va, pte);
}

static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
	for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}

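/*
 * Map the per-CPU Intel debug store into the cpu_entry_area. The buffer
 * area is only covered with non-present PTEs here to force PMD
 * allocation for not yet allocated per-CPU memory.
 */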
static void __init percpu_setup_debug_store(unsigned int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
	unsigned int npages;
	void *cea;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
	npages = sizeof(struct debug_store) / PAGE_SIZE;
	BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
			     PAGE_KERNEL);

	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
	/*
	 * Force the population of PMDs for not yet allocated per cpu
	 * memory like debug store buffers.
	 */
	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
	for (; npages; npages--, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);
#endif
}

#ifdef CONFIG_X86_64

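/*
 * Map a single named IST stack from the per-CPU backing store into its
 * guard-page separated slot in the cpu_entry_area.
 */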
#define cea_map_stack(name) do {					\
	npages = sizeof(estacks->name## _stack) / PAGE_SIZE;		\
	cea_map_percpu_pages(cea->estacks.name## _stack,		\
			estacks->name## _stack, npages, PAGE_KERNEL);	\
	} while (0)

static void __init percpu_setup_exception_stacks(unsigned int cpu)
{
	struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
	unsigned int npages;

	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);

	per_cpu(cea_exception_stacks, cpu) = &cea->estacks;

	/*
	 * The exceptions stack mappings in the per cpu area are protected
	 * by guard pages so each stack must be mapped separately. DB2 is
	 * not mapped; it just exists to catch triple nesting of #DB.
	 */
	cea_map_stack(DF);
	cea_map_stack(NMI);
	cea_map_stack(DB1);
	cea_map_stack(DB);
	cea_map_stack(MCE);
}
#else
static inline void percpu_setup_exception_stacks(unsigned int cpu) {}
#endif

/* Setup the fixmap mappings only once per-processor */
static void __init setup_cpu_entry_area(unsigned int cpu)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
#ifdef CONFIG_X86_64
	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
	pgprot_t gdt_prot = PAGE_KERNEL_RO;
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
	/*
	 * On native 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy. If the
	 * GDT is read-only, that will triple fault. The TSS cannot be
	 * read-only because the CPU writes to it on task switches.
	 *
	 * On Xen PV, the GDT must be read-only because the hypervisor
	 * requires it.
	 */
	pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
		PAGE_KERNEL_RO : PAGE_KERNEL;
	pgprot_t tss_prot = PAGE_KERNEL;
#endif

	cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);

	cea_map_percpu_pages(&cea->entry_stack_page,
			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
			     PAGE_KERNEL);

	/*
	 * The Intel SDM says (Volume 3, 7.2.1):
	 *
	 *  Avoid placing a page boundary in the part of the TSS that the
	 *  processor reads during a task switch (the first 104 bytes). The
	 *  processor may not correctly perform address translations if a
	 *  boundary occurs in this area. During a task switch, the processor
	 *  reads and writes into the first 104 bytes of each TSS (using
	 *  contiguous physical addresses beginning with the physical address
	 *  of the first byte of the TSS). So, after TSS access begins, if
	 *  part of the 104 bytes is not physically contiguous, the processor
	 *  will access incorrect information without generating a page-fault
	 *  exception.
	 *
	 * There are also a lot of errata involving the TSS spanning a page
	 * boundary. Assert that we're not doing that.
	 */
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
	cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
			     sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
	per_cpu(cpu_entry_area, cpu) = cea;
#endif

	percpu_setup_exception_stacks(cpu);

	percpu_setup_debug_store(cpu);
}

static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
	unsigned long start, end;

	BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

	start = CPU_ENTRY_AREA_BASE;
	end = start + CPU_ENTRY_AREA_MAP_SIZE;

	/* Careful here: start + PMD_SIZE might wrap around */
	for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
		populate_extra_pte(start);
#endif
}

void __init setup_cpu_entry_areas(void)
{
	unsigned int cpu;

	setup_cpu_entry_area_ptes();

	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);

	/*
	 * This is the last essential update to swapper_pgdir which needs
	 * to be synchronized to initial_page_table on 32bit.
	 */
	sync_initial_page_table();
}