mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
commit 15301a5707
Łukasz Daniluk reported that on a RHEL kernel his machine would lock up
after enabling the function tracer. I asked him to bisect the functions
within available_filter_functions, which he did, and it came down to
three: _paravirt_nop(), _paravirt_ident_32() and _paravirt_ident_64().

It was found that this is only an issue when noreplace-paravirt is added
to the kernel command line. This means that those functions are most
likely called within critical sections of the function tracer, and must
not be traced.

In newer kernels _paravirt_nop() is defined within gcc asm(), and is no
longer an issue. But both _paravirt_ident_{32,64}() cause the following
splat when they are traced:

 mm/pgtable-generic.c:33: bad pmd ffff8800d2435150(0000000001d00054)
 mm/pgtable-generic.c:33: bad pmd ffff8800d3624190(0000000001d00070)
 mm/pgtable-generic.c:33: bad pmd ffff8800d36a5110(0000000001d00054)
 mm/pgtable-generic.c:33: bad pmd ffff880118eb1450(0000000001d00054)
 NMI watchdog: BUG: soft lockup - CPU#2 stuck for 22s! [systemd-journal:469]
 Modules linked in: e1000e
 CPU: 2 PID: 469 Comm: systemd-journal Not tainted 4.6.0-rc4-test+ #513
 Hardware name: Hewlett-Packard HP Compaq Pro 6300 SFF/339A, BIOS K01 v02.05 05/07/2012
 task: ffff880118f740c0 ti: ffff8800d4aec000 task.ti: ffff8800d4aec000
 RIP: 0010:[<ffffffff81134148>] [<ffffffff81134148>] queued_spin_lock_slowpath+0x118/0x1a0
 RSP: 0018:ffff8800d4aefb90 EFLAGS: 00000246
 RAX: 0000000000000000 RBX: 0000000000000000 RCX: ffff88011eb16d40
 RDX: ffffffff82485760 RSI: 000000001f288820 RDI: ffffea0000008030
 RBP: ffff8800d4aefb90 R08: 00000000000c0000 R09: 0000000000000000
 R10: ffffffff821c8e0e R11: 0000000000000000 R12: ffff880000200fb8
 R13: 00007f7a4e3f7000 R14: ffffea000303f600 R15: ffff8800d4b562e0
 FS: 00007f7a4e3d7840(0000) GS:ffff88011eb00000(0000) knlGS:0000000000000000
 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
 CR2: 00007f7a4e3f7000 CR3: 00000000d3e71000 CR4: 00000000001406e0
 Call Trace:
  _raw_spin_lock+0x27/0x30
  handle_pte_fault+0x13db/0x16b0
  handle_mm_fault+0x312/0x670
  __do_page_fault+0x1b1/0x4e0
  do_page_fault+0x22/0x30
  page_fault+0x28/0x30
  __vfs_read+0x28/0xe0
  vfs_read+0x86/0x130
  SyS_read+0x46/0xa0
  entry_SYSCALL_64_fastpath+0x1e/0xa8
 Code: 12 48 c1 ea 0c 83 e8 01 83 e2 30 48 98 48 81 c2 40 6d 01 00 48 03 14 c5 80 6a 5d 82 48 89 0a 8b 41 08 85 c0 75 09 f3 90 8b 41 08 <85> c0 74 f7 4c 8b 09 4d 85 c9 74 08 41 0f 18 09 eb 02 f3 90 8b

Reported-by: Łukasz Daniluk <lukasz.daniluk@intel.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
466 lines
11 KiB
C
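
The change itself is small: the two paravirt identity helpers are marked
notrace (the kernel's shorthand for the compiler's no_instrument_function
attribute), so the function tracer never places an mcount/fentry hook in
them; the nop stub is already immune because it is emitted from a toplevel
asm() block. For quick reference, the affected definitions as they appear
further down in this file:

u32 notrace _paravirt_ident_32(u32 x)
{
        return x;
}

u64 notrace _paravirt_ident_64(u64 x)
{
        return x;
}
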
/*  Paravirtualization interfaces
    Copyright (C) 2006 Rusty Russell IBM Corporation

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc
*/

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/efi.h>
#include <linux/bcd.h>
#include <linux/highmem.h>
#include <linux/kprobes.h>

#include <asm/bug.h>
#include <asm/paravirt.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/time.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/delay.h>
#include <asm/fixmap.h>
#include <asm/apic.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/special_insns.h>

/*
 * nop stub, which must not clobber anything *including the stack* to
 * avoid confusing the entry prologues.
 */
extern void _paravirt_nop(void);
asm (".pushsection .entry.text, \"ax\"\n"
     ".global _paravirt_nop\n"
     "_paravirt_nop:\n\t"
     "ret\n\t"
     ".size _paravirt_nop, . - _paravirt_nop\n\t"
     ".type _paravirt_nop, @function\n\t"
     ".popsection");

/* identity function, which can be inlined */
u32 notrace _paravirt_ident_32(u32 x)
{
        return x;
}

u64 notrace _paravirt_ident_64(u64 x)
{
        return x;
}

void __init default_banner(void)
{
        printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
               pv_info.name);
}

/* Undefined instruction for dealing with missing ops pointers. */
static const unsigned char ud2a[] = { 0x0f, 0x0b };

struct branch {
        unsigned char opcode;
        u32 delta;
} __attribute__((packed));

unsigned paravirt_patch_call(void *insnbuf,
                             const void *target, u16 tgt_clobbers,
                             unsigned long addr, u16 site_clobbers,
                             unsigned len)
{
        struct branch *b = insnbuf;
        unsigned long delta = (unsigned long)target - (addr+5);

        if (tgt_clobbers & ~site_clobbers)
                return len;     /* target would clobber too much for this site */
        if (len < 5)
                return len;     /* call too long for patch site */

        b->opcode = 0xe8;       /* call */
        b->delta = delta;
        BUILD_BUG_ON(sizeof(*b) != 5);

        return 5;
}

unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
                            unsigned long addr, unsigned len)
{
        struct branch *b = insnbuf;
        unsigned long delta = (unsigned long)target - (addr+5);

        if (len < 5)
                return len;     /* call too long for patch site */

        b->opcode = 0xe9;       /* jmp */
        b->delta = delta;

        return 5;
}

/* Neat trick to map patch type back to the call within the
 * corresponding structure. */
static void *get_call_destination(u8 type)
{
        struct paravirt_patch_template tmpl = {
                .pv_init_ops = pv_init_ops,
                .pv_time_ops = pv_time_ops,
                .pv_cpu_ops = pv_cpu_ops,
                .pv_irq_ops = pv_irq_ops,
                .pv_mmu_ops = pv_mmu_ops,
#ifdef CONFIG_PARAVIRT_SPINLOCKS
                .pv_lock_ops = pv_lock_ops,
#endif
        };
        return *((void **)&tmpl + type);
}

unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
                                unsigned long addr, unsigned len)
{
        void *opfunc = get_call_destination(type);
        unsigned ret;

        if (opfunc == NULL)
                /* If there's no function, patch it with a ud2a (BUG) */
                ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
        else if (opfunc == _paravirt_nop)
                ret = 0;

        /* identity functions just return their single argument */
        else if (opfunc == _paravirt_ident_32)
                ret = paravirt_patch_ident_32(insnbuf, len);
        else if (opfunc == _paravirt_ident_64)
                ret = paravirt_patch_ident_64(insnbuf, len);

        else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
                 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64))
                /* If operation requires a jmp, then jmp */
                ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
        else
                /* Otherwise call the function; assume target could
                   clobber any caller-save reg */
                ret = paravirt_patch_call(insnbuf, opfunc, CLBR_ANY,
                                          addr, clobbers, len);

        return ret;
}

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
                              const char *start, const char *end)
{
        unsigned insn_len = end - start;

        if (insn_len > len || start == NULL)
                insn_len = len;
        else
                memcpy(insnbuf, start, insn_len);

        return insn_len;
}

static void native_flush_tlb(void)
{
        __native_flush_tlb();
}

/*
 * Global pages have to be flushed a bit differently. Not a real
 * performance problem because this does not happen often.
 */
static void native_flush_tlb_global(void)
{
        __native_flush_tlb_global();
}

static void native_flush_tlb_single(unsigned long addr)
{
        __native_flush_tlb_single(addr);
}

struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;

static u64 native_steal_clock(int cpu)
{
        return 0;
}

/* These are in entry.S */
extern void native_iret(void);
extern void native_usergs_sysret64(void);

static struct resource reserve_ioports = {
        .start = 0,
        .end = IO_SPACE_LIMIT,
        .name = "paravirt-ioport",
        .flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

/*
 * Reserve the whole legacy IO space to prevent any legacy drivers
 * from wasting time probing for their hardware. This is a fairly
 * brute-force approach to disabling all non-virtual drivers.
 *
 * Note that this must be called very early to have any effect.
 */
int paravirt_disable_iospace(void)
{
        return request_resource(&ioport_resource, &reserve_ioports);
}

static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;

static inline void enter_lazy(enum paravirt_lazy_mode mode)
{
        BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);

        this_cpu_write(paravirt_lazy_mode, mode);
}

static void leave_lazy(enum paravirt_lazy_mode mode)
{
        BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode);

        this_cpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
}

void paravirt_enter_lazy_mmu(void)
{
        enter_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_leave_lazy_mmu(void)
{
        leave_lazy(PARAVIRT_LAZY_MMU);
}

void paravirt_flush_lazy_mmu(void)
{
        preempt_disable();

        if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
                arch_leave_lazy_mmu_mode();
                arch_enter_lazy_mmu_mode();
        }

        preempt_enable();
}

void paravirt_start_context_switch(struct task_struct *prev)
{
        BUG_ON(preemptible());

        if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
                arch_leave_lazy_mmu_mode();
                set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
        }
        enter_lazy(PARAVIRT_LAZY_CPU);
}

void paravirt_end_context_switch(struct task_struct *next)
{
        BUG_ON(preemptible());

        leave_lazy(PARAVIRT_LAZY_CPU);

        if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
                arch_enter_lazy_mmu_mode();
}

enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
{
        if (in_interrupt())
                return PARAVIRT_LAZY_NONE;

        return this_cpu_read(paravirt_lazy_mode);
}

struct pv_info pv_info = {
        .name = "bare hardware",
        .kernel_rpl = 0,
        .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */

#ifdef CONFIG_X86_64
        .extra_user_64bit_cs = __USER_CS,
#endif
};

struct pv_init_ops pv_init_ops = {
        .patch = native_patch,
};

struct pv_time_ops pv_time_ops = {
        .sched_clock = native_sched_clock,
        .steal_clock = native_steal_clock,
};

__visible struct pv_irq_ops pv_irq_ops = {
        .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
        .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
        .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
        .irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
        .safe_halt = native_safe_halt,
        .halt = native_halt,
#ifdef CONFIG_X86_64
        .adjust_exception_frame = paravirt_nop,
#endif
};

__visible struct pv_cpu_ops pv_cpu_ops = {
        .cpuid = native_cpuid,
        .get_debugreg = native_get_debugreg,
        .set_debugreg = native_set_debugreg,
        .clts = native_clts,
        .read_cr0 = native_read_cr0,
        .write_cr0 = native_write_cr0,
        .read_cr4 = native_read_cr4,
        .read_cr4_safe = native_read_cr4_safe,
        .write_cr4 = native_write_cr4,
#ifdef CONFIG_X86_64
        .read_cr8 = native_read_cr8,
        .write_cr8 = native_write_cr8,
#endif
        .wbinvd = native_wbinvd,
        .read_msr = native_read_msr,
        .write_msr = native_write_msr,
        .read_msr_safe = native_read_msr_safe,
        .write_msr_safe = native_write_msr_safe,
        .read_pmc = native_read_pmc,
        .load_tr_desc = native_load_tr_desc,
        .set_ldt = native_set_ldt,
        .load_gdt = native_load_gdt,
        .load_idt = native_load_idt,
        .store_idt = native_store_idt,
        .store_tr = native_store_tr,
        .load_tls = native_load_tls,
#ifdef CONFIG_X86_64
        .load_gs_index = native_load_gs_index,
#endif
        .write_ldt_entry = native_write_ldt_entry,
        .write_gdt_entry = native_write_gdt_entry,
        .write_idt_entry = native_write_idt_entry,

        .alloc_ldt = paravirt_nop,
        .free_ldt = paravirt_nop,

        .load_sp0 = native_load_sp0,

#ifdef CONFIG_X86_64
        .usergs_sysret64 = native_usergs_sysret64,
#endif
        .iret = native_iret,
        .swapgs = native_swapgs,

        .set_iopl_mask = native_set_iopl_mask,
        .io_delay = native_io_delay,

        .start_context_switch = paravirt_nop,
        .end_context_switch = paravirt_nop,
};

/* At this point, native_get/set_debugreg has real function entries */
NOKPROBE_SYMBOL(native_get_debugreg);
NOKPROBE_SYMBOL(native_set_debugreg);
NOKPROBE_SYMBOL(native_load_idt);

#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
/* 32-bit pagetable entries */
#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
#else
/* 64-bit pagetable entries */
#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
#endif

struct pv_mmu_ops pv_mmu_ops = {

        .read_cr2 = native_read_cr2,
        .write_cr2 = native_write_cr2,
        .read_cr3 = native_read_cr3,
        .write_cr3 = native_write_cr3,

        .flush_tlb_user = native_flush_tlb,
        .flush_tlb_kernel = native_flush_tlb_global,
        .flush_tlb_single = native_flush_tlb_single,
        .flush_tlb_others = native_flush_tlb_others,

        .pgd_alloc = __paravirt_pgd_alloc,
        .pgd_free = paravirt_nop,

        .alloc_pte = paravirt_nop,
        .alloc_pmd = paravirt_nop,
        .alloc_pud = paravirt_nop,
        .release_pte = paravirt_nop,
        .release_pmd = paravirt_nop,
        .release_pud = paravirt_nop,

        .set_pte = native_set_pte,
        .set_pte_at = native_set_pte_at,
        .set_pmd = native_set_pmd,
        .set_pmd_at = native_set_pmd_at,
        .pte_update = paravirt_nop,

        .ptep_modify_prot_start = __ptep_modify_prot_start,
        .ptep_modify_prot_commit = __ptep_modify_prot_commit,

#if CONFIG_PGTABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
        .set_pte_atomic = native_set_pte_atomic,
        .pte_clear = native_pte_clear,
        .pmd_clear = native_pmd_clear,
#endif
        .set_pud = native_set_pud,

        .pmd_val = PTE_IDENT,
        .make_pmd = PTE_IDENT,

#if CONFIG_PGTABLE_LEVELS == 4
        .pud_val = PTE_IDENT,
        .make_pud = PTE_IDENT,

        .set_pgd = native_set_pgd,
#endif
#endif /* CONFIG_PGTABLE_LEVELS >= 3 */

        .pte_val = PTE_IDENT,
        .pgd_val = PTE_IDENT,

        .make_pte = PTE_IDENT,
        .make_pgd = PTE_IDENT,

        .dup_mmap = paravirt_nop,
        .exit_mmap = paravirt_nop,
        .activate_mm = paravirt_nop,

        .lazy_mode = {
                .enter = paravirt_nop,
                .leave = paravirt_nop,
                .flush = paravirt_nop,
        },

        .set_fixmap = native_set_fixmap,
};

EXPORT_SYMBOL_GPL(pv_time_ops);
EXPORT_SYMBOL(pv_cpu_ops);
EXPORT_SYMBOL(pv_mmu_ops);
EXPORT_SYMBOL_GPL(pv_info);
EXPORT_SYMBOL(pv_irq_ops);
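
For reference, paravirt_patch_call() and paravirt_patch_jmp() above emit a
single 5-byte x86 instruction: an opcode byte (0xe8 for call, 0xe9 for jmp)
followed by a 32-bit displacement measured from the end of the instruction,
which is why the delta is computed as target - (addr + 5). Below is a small,
self-contained user-space sketch of the same encoding; the buffer, addresses
and helper name are hypothetical, and only the arithmetic mirrors the kernel
code.

#include <stdint.h>
#include <stdio.h>

/* Same layout as struct branch above: one opcode byte plus a rel32. */
struct branch {
        unsigned char opcode;
        uint32_t delta;
} __attribute__((packed));

/* Encode "call target" at patch address addr, mirroring paravirt_patch_call():
 * the displacement is relative to the first byte after the 5-byte insn. */
static unsigned patch_call(void *insnbuf, unsigned long target, unsigned long addr)
{
        struct branch *b = insnbuf;

        b->opcode = 0xe8;                       /* call rel32 */
        b->delta = (uint32_t)(target - (addr + 5));
        return 5;
}

int main(void)
{
        unsigned char buf[5];
        unsigned long addr = 0x1000, target = 0x1234;   /* hypothetical addresses */

        patch_call(buf, target, addr);
        for (int i = 0; i < 5; i++)
                printf("%02x ", buf[i]);
        printf("\n");                           /* prints: e8 2f 02 00 00 */
        return 0;
}
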
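Similarly, get_call_destination() relies on struct paravirt_patch_template
being, in effect, a flat table of function pointers, so a patch-site type
number can be turned into the matching ops entry with plain pointer
arithmetic. Here is a standalone sketch of that indexing trick using two
hypothetical ops groups (demo_cpu_ops, demo_irq_ops); none of these names
exist in the kernel, and DEMO_PATCH() only approximates what PARAVIRT_PATCH()
does.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical ops groups; every member is a function pointer, so the
 * enclosing template behaves like a contiguous array of generic pointers. */
struct demo_cpu_ops { void (*halt)(void); void (*wbinvd)(void); };
struct demo_irq_ops { void (*irq_enable)(void); void (*irq_disable)(void); };

struct demo_template {
        struct demo_cpu_ops cpu;
        struct demo_irq_ops irq;
};

/* Counterpart of PARAVIRT_PATCH(): member offset scaled to a pointer-sized slot. */
#define DEMO_PATCH(field) (offsetof(struct demo_template, field) / sizeof(void *))

static void demo_halt(void)        { puts("halt"); }
static void demo_wbinvd(void)      { puts("wbinvd"); }
static void demo_irq_enable(void)  { puts("irq_enable"); }
static void demo_irq_disable(void) { puts("irq_disable"); }

static struct demo_cpu_ops cpu_ops = { demo_halt, demo_wbinvd };
static struct demo_irq_ops irq_ops = { demo_irq_enable, demo_irq_disable };

/* Same shape as get_call_destination(): build the template on the stack and
 * index into it as an array of void pointers. */
static void *demo_call_destination(unsigned type)
{
        struct demo_template tmpl = { .cpu = cpu_ops, .irq = irq_ops };

        return *((void **)&tmpl + type);
}

int main(void)
{
        void (*fn)(void) =
                (void (*)(void))demo_call_destination(DEMO_PATCH(irq.irq_enable));

        fn();           /* prints "irq_enable" */
        return 0;
}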