printk() takes some locks and therefore cannot be used safely in NMI
context. The chance of a deadlock is real, especially when printing
stacks from all CPUs. This particular problem has been addressed on
x86 by commit a9edc88093 ("x86/nmi: Perform a safe NMI stack trace on
all CPUs").
The patchset brings two big advantages. First, it makes NMI backtraces
safe on all architectures for free. Second, it makes all NMI messages
almost safe on all architectures (the temporary buffer is limited, so
the number of messages printed in NMI context should still be kept to
a minimum).
Note that several messages are already printed in NMI context:
WARN_ON(in_nmi()), BUG_ON(in_nmi()), and anything printed from MCE
handlers. These are not easy to avoid.
This patch reuses most of that x86 code and makes it generic, so it
covers all messages on every architecture that supports NMI.
The alternative printk_func is set when entering NMI context and is
reset when leaving it. It stores the message in a per-CPU buffer and
queues IRQ work to copy it into the main ring buffer in a safe context.
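A minimal sketch of that writer side, assuming a per-CPU struct
nmi_seq_buf whose length is tracked in an atomic_t (the buffer size is
arbitrary here; printk_func and vprintk_default are the per-CPU
print-function pointer and the default implementation that printk
already provides). This is an illustration of the mechanism, not the
exact patch:

#include <linux/atomic.h>
#include <linux/irq_work.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/printk.h>

#define NMI_LOG_BUF_LEN 4096    /* illustrative size of the per-CPU buffer */

struct nmi_seq_buf {
        atomic_t        len;    /* length of the data written so far */
        struct irq_work work;   /* IRQ work that flushes the buffer */
        unsigned char   buffer[NMI_LOG_BUF_LEN];
};
static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);

/*
 * NMIs are not nested, so there is always at most one writer per CPU.
 * The buffer may be flushed from another CPU, though, so the length
 * update must be atomic.
 */
static int vprintk_nmi(const char *fmt, va_list args)
{
        struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
        size_t len;
        int add;

again:
        len = atomic_read(&s->len);
        if (len >= sizeof(s->buffer))
                return 0;       /* the temporary buffer is full */

        /* Pair with the flusher: old data must be read before a reset. */
        if (!len)
                smp_rmb();

        add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args);

        /*
         * The cmpxchg is an implicit memory barrier that publishes the
         * data before the new length. If a flusher reset the buffer in
         * the meantime, simply try again.
         */
        if (atomic_cmpxchg(&s->len, len, len + add) != len)
                goto again;

        /* Have the message copied to the main ring buffer later. */
        if (add)
                irq_work_queue(&s->work);
        return add;
}

/* Switch the per-CPU printk_func pointer around NMI handling. */
void printk_nmi_enter(void)
{
        this_cpu_write(printk_func, vprintk_nmi);
}

void printk_nmi_exit(void)
{
        this_cpu_write(printk_func, vprintk_default);
}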
__printk_nmi_flush() copies all available messages and resets the
buffer. A simple cmpxchg operation is then enough to stay synchronized
with writers. A spinlock is additionally used to synchronize with
other flushers.
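Continuing the same sketch, the flusher could look roughly like this
(print_seq_line() is a hypothetical helper; the real patch contains
extra sanity checks that are omitted here):

static DEFINE_RAW_SPINLOCK(read_lock); /* one flusher at a time */

/* Hypothetical helper: print buffer[start..end] via the regular printk(). */
static void print_seq_line(struct nmi_seq_buf *s, size_t start, size_t end)
{
        printk("%.*s", (int)(end - start + 1), s->buffer + start);
}

static void __printk_nmi_flush(struct irq_work *work)
{
        struct nmi_seq_buf *s = container_of(work, struct nmi_seq_buf, work);
        unsigned long flags;
        size_t i = 0, last_i, len, size;

        /*
         * The lock has two jobs: one reader must flush all available
         * messages to keep the lockless writer synchronization simple,
         * and lines from different CPUs must not get interleaved.
         */
        raw_spin_lock_irqsave(&read_lock, flags);
more:
        len = atomic_read(&s->len);
        if (!len)
                goto out;       /* someone else flushed the buffer already */

        /* Make sure the data were written before @len was updated. */
        smp_rmb();

        size = min(len, sizeof(s->buffer));

        /* Copy the messages out line by line. */
        for (last_i = i; i < size; i++) {
                if (s->buffer[i] == '\n') {
                        print_seq_line(s, last_i, i);
                        last_i = i + 1;
                }
        }
        /* A trailing line without '\n' gets one appended. */
        if (last_i < size) {
                print_seq_line(s, last_i, size - 1);
                pr_cont("\n");
        }

        /*
         * Reset the buffer. The cmpxchg fails if a writer appended new
         * data in the meantime; flush the remainder in that case.
         */
        if (atomic_cmpxchg(&s->len, len, 0) != len)
                goto more;
out:
        raw_spin_unlock_irqrestore(&read_lock, flags);
}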
We no longer use seq_buf because it depends on an external lock. It
would be hard to make all of its supported operations safe for
lockless use, and it would be confusing and error prone to make only
some of them safe.
The code is put into a separate printk/nmi.c, as suggested by Steven
Rostedt. It needs a per-CPU buffer and is compiled only on
architectures that call nmi_enter(). This is achieved by the new
HAVE_NMI Kconfig flag.
The exceptions are the MN10300 and Xtensa architectures; NMI handling
there needs to be cleaned up first, so let's do that separately.
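The Kconfig side of that is tiny; a sketch of the idea, with the
hidden flag in arch/Kconfig and one illustrative user:

config HAVE_NMI
        bool

# in the Kconfig of an architecture that really calls nmi_enter(),
# e.g. arch/x86/Kconfig:
config X86
        def_bool y
        ...
        select HAVE_NMI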
The patch is heavily based on the draft from Peter Zijlstra, see
https://lkml.org/lkml/2015/6/10/327
[arnd@arndb.de: printk-nmi: use %zu format string for size_t]
[akpm@linux-foundation.org: min_t->min - all types are size_t here]
Signed-off-by: Petr Mladek <pmladek@suse.com>
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Jan Kara <jack@suse.cz>
Acked-by: Russell King <rmk+kernel@arm.linux.org.uk> [arm part]
Cc: Daniel Thompson <daniel.thompson@linaro.org>
Cc: Jiri Kosina <jkosina@suse.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: David Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
/*
 *  NMI backtrace support
 *
 * Gratuitously copied from arch/x86/kernel/apic/hw_nmi.c by Russell King,
 * with the following header:
 *
 *  HW NMI watchdog support
 *
 *  started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 *  Arch specific calls to support NMI watchdog
 *
 *  Bits copied from original nmi.c file
 */
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>

#ifdef arch_trigger_all_cpu_backtrace
/* For reliability, we're prepared to waste bits here. */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;

/* "in progress" flag of arch_trigger_all_cpu_backtrace */
static unsigned long backtrace_flag;

/*
 * When raise() is called, it will be passed a pointer to the
 * backtrace_mask. Architectures that call nmi_cpu_backtrace()
 * directly from their raise() functions may rely on the mask
 * they are passed being updated as a side effect of this call.
 */
void nmi_trigger_all_cpu_backtrace(bool include_self,
                                   void (*raise)(cpumask_t *mask))
{
        int i, this_cpu = get_cpu();

        if (test_and_set_bit(0, &backtrace_flag)) {
                /*
                 * If there is already a trigger_all_cpu_backtrace() in
                 * progress (backtrace_flag == 1), don't output duplicate
                 * cpu dump info.
                 */
                put_cpu();
                return;
        }

        cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
        if (!include_self)
                cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));

        if (!cpumask_empty(to_cpumask(backtrace_mask))) {
                pr_info("Sending NMI to %s CPUs:\n",
                        (include_self ? "all" : "other"));
                raise(to_cpumask(backtrace_mask));
        }

        /* Wait for up to 10 seconds for all CPUs to do the backtrace */
        for (i = 0; i < 10 * 1000; i++) {
                if (cpumask_empty(to_cpumask(backtrace_mask)))
                        break;
                mdelay(1);
                touch_softlockup_watchdog();
        }

        /*
         * Force flush any remote buffers that might be stuck in IRQ context
         * and therefore could not run their irq_work.
         */
        printk_nmi_flush();

        clear_bit_unlock(0, &backtrace_flag);
        put_cpu();
}

bool nmi_cpu_backtrace(struct pt_regs *regs)
{
        int cpu = smp_processor_id();

        if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
                pr_warn("NMI backtrace for cpu %d\n", cpu);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();
                cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
                return true;
        }

        return false;
}
NOKPROBE_SYMBOL(nmi_cpu_backtrace);
#endif
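For context, an architecture opts into the generic helper above
(lib/nmi_backtrace.c) by implementing arch_trigger_all_cpu_backtrace()
with a raise() callback that sends the cross-CPU interrupt, and by
calling nmi_cpu_backtrace() from that interrupt's handler. A rough
sketch of how the arm port wires this up (smp_cross_call() and
IPI_CPU_BACKTRACE are arm internals):

/* arch/arm/kernel/smp.c (abridged) */
static void raise_nmi(cpumask_t *mask)
{
        smp_cross_call(mask, IPI_CPU_BACKTRACE);
}

void arch_trigger_all_cpu_backtrace(bool include_self)
{
        nmi_trigger_all_cpu_backtrace(include_self, raise_nmi);
}

/* The IPI handler on each targeted CPU then ends up calling  */
/* nmi_cpu_backtrace(regs) to dump that CPU's stack.          */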