mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git · synced 2024-12-21 11:37:47 +07:00
commit b52e0a7c4e
The following change fixes the x86 implementation of trigger_all_cpu_backtrace(), which was previously (accidentally, as far as I can tell) disabled so that it always returned false, as on architectures that do not implement this function.

trigger_all_cpu_backtrace(), as defined in include/linux/nmi.h, should call arch_trigger_all_cpu_backtrace() if available, or return false if the underlying arch doesn't implement this function.

x86 did provide a suitable arch_trigger_all_cpu_backtrace() implementation, but it wasn't actually being used because it was declared in asm/nmi.h, which linux/nmi.h doesn't include. Also, linux/nmi.h couldn't easily be fixed by including asm/nmi.h, because that file is not available on all architectures.

I am proposing to fix this by moving the x86 definition of arch_trigger_all_cpu_backtrace() to asm/irq.h.

Tested via:

    echo l > /proc/sysrq-trigger

Before the change, this uses a fallback implementation which shows backtraces on active CPUs (using smp_call_function_interrupt()). After the change, this shows NMI backtraces on all CPUs.

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1370518875-1346-1-git-send-email-walken@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
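For context, the trigger_all_cpu_backtrace() wrapper in include/linux/nmi.h at the time looked roughly like the sketch below (a paraphrase, not the verbatim header). Because the x86 arch_trigger_all_cpu_backtrace() was declared in asm/nmi.h, which linux/nmi.h does not include, the #ifdef always fell through to the return-false stub:

    #ifdef arch_trigger_all_cpu_backtrace
    static inline bool trigger_all_cpu_backtrace(void)
    {
            /* The arch hook is visible here: kick NMIs at every CPU. */
            arch_trigger_all_cpu_backtrace();
            return true;
    }
    #else
    static inline bool trigger_all_cpu_backtrace(void)
    {
            /* No arch hook visible at preprocessing time, so the
             * x86 implementation below was never reached. */
            return false;
    }
    #endif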
92 lines · 2.1 KiB · C
/*
 * HW NMI watchdog support
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Arch specific calls to support NMI watchdog
 *
 * Bits copied from original nmi.c file
 *
 */
#include <asm/apic.h>
#include <asm/nmi.h>

#include <linux/cpumask.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/delay.h>

#ifdef CONFIG_HARDLOCKUP_DETECTOR
u64 hw_nmi_get_sample_period(int watchdog_thresh)
{
        return (u64)(cpu_khz) * 1000 * watchdog_thresh;
}
#endif

#ifdef arch_trigger_all_cpu_backtrace
/* For reliability, we're prepared to waste bits here. */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;

/* "in progress" flag of arch_trigger_all_cpu_backtrace */
static unsigned long backtrace_flag;

void arch_trigger_all_cpu_backtrace(void)
{
        int i;

        if (test_and_set_bit(0, &backtrace_flag))
                /*
                 * If there is already a trigger_all_cpu_backtrace() in progress
                 * (backtrace_flag == 1), don't output double cpu dump infos.
                 */
                return;

        cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);

        printk(KERN_INFO "sending NMI to all CPUs:\n");
        apic->send_IPI_all(NMI_VECTOR);

        /* Wait for up to 10 seconds for all CPUs to do the backtrace */
        for (i = 0; i < 10 * 1000; i++) {
                if (cpumask_empty(to_cpumask(backtrace_mask)))
                        break;
                mdelay(1);
        }

        clear_bit(0, &backtrace_flag);
        smp_mb__after_clear_bit();
}

static int __kprobes
arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
{
        int cpu;

        cpu = smp_processor_id();

        if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
                static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;

                arch_spin_lock(&lock);
                printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
                show_regs(regs);
                arch_spin_unlock(&lock);
                cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
                return NMI_HANDLED;
        }

        return NMI_DONE;
}

static int __init register_trigger_all_cpu_backtrace(void)
{
        register_nmi_handler(NMI_LOCAL, arch_trigger_all_cpu_backtrace_handler,
                                0, "arch_bt");
        return 0;
}
early_initcall(register_trigger_all_cpu_backtrace);
#endif
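The fix itself lands outside this file. Below is a minimal sketch of the arch/x86/include/asm/irq.h change the commit message describes, assuming the declaration sits under the local APIC config option (the exact guard and placement may differ in the real commit):

    /* arch/x86/include/asm/irq.h (sketch) */
    #ifdef CONFIG_X86_LOCAL_APIC
    void arch_trigger_all_cpu_backtrace(void);
    /* The self-referential define is what makes the
     * "#ifdef arch_trigger_all_cpu_backtrace" tests in linux/nmi.h
     * (and in hw_nmi.c above) finally see the x86 implementation. */
    #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
    #endif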