commit 719f6a7040
Commit 42a0bb3f71 ("printk/nmi: generic solution for safe printk in NMI") made printk() store messages into a temporary buffer in NMI context. The buffer is per-CPU, so its size is rather limited. That works well for NMI backtraces, but longer logs may also be printed in NMI context, for example lockdep warnings or ftrace_dump_on_oops output.

The temporary buffer avoids deadlocks on logbuf_lock. It is also needed to avoid races with the other temporary buffer that is used when PRINTK_SAFE_CONTEXT is entered. But the main buffer can be used in NMI if the lock is available and we did not interrupt PRINTK_SAFE_CONTEXT.

The lock is checked using raw_spin_is_locked(). This might cause false negatives when the lock is taken on another CPU while this CPU is in the safe context for other reasons. Note that the safe context is also used to take the console semaphore and when calling console drivers. For this reason, we do the check once in printk_nmi_enter(); that keeps the handling consistent for the entire NMI handler and avoids reshuffling of the messages.

The patch also defines a special printk context that allows printk_deferred() to be used in NMI. Note that we cannot flush the messages to the consoles there because console drivers might take many other internal locks.

The newly created vprintk_deferred() disables preemption only around the irq_work handling, where it is needed to keep the two per-CPU variables consistent. There is no reason to disable preemption around vprintk_emit().

Finally, the patch puts back explicit serialization of the NMI backtraces from different CPUs. It had been removed by commit a9edc88093 ("x86/nmi: Perform a safe NMI stack trace on all CPUs") because the flushing of the temporary per-CPU buffers was already serialized.

Link: http://lkml.kernel.org/r/1493912763-24873-1-git-send-email-pmladek@suse.com
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <rmk+kernel@arm.linux.org.uk>
Cc: Daniel Thompson <daniel.thompson@linaro.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: x86@kernel.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-kernel@vger.kernel.org
Suggested-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Acked-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Petr Mladek <pmladek@suse.com>
/*
 * NMI backtrace support
 *
 * Gratuitously copied from arch/x86/kernel/apic/hw_nmi.c by Russell King,
 * with the following header:
 *
 *  HW NMI watchdog support
 *
 *  started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 *  Arch specific calls to support NMI watchdog
 *
 *  Bits copied from original nmi.c file
 */
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/sched/debug.h>

#ifdef arch_trigger_cpumask_backtrace
/* For reliability, we're prepared to waste bits here. */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;

/* "in progress" flag of arch_trigger_cpumask_backtrace */
static unsigned long backtrace_flag;

/*
 * When raise() is called it will be passed a pointer to the
 * backtrace_mask. Architectures that call nmi_cpu_backtrace()
 * directly from their raise() functions may rely on the mask
 * they are passed being updated as a side effect of this call.
 */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
				   bool exclude_self,
				   void (*raise)(cpumask_t *mask))
{
	int i, this_cpu = get_cpu();

	if (test_and_set_bit(0, &backtrace_flag)) {
		/*
		 * If there is already a trigger_all_cpu_backtrace() in progress
		 * (backtrace_flag == 1), don't output double cpu dump infos.
		 */
		put_cpu();
		return;
	}

	cpumask_copy(to_cpumask(backtrace_mask), mask);
	if (exclude_self)
		cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));

	/*
	 * Don't try to send an NMI to this cpu; it may work on some
	 * architectures, but on others it may not, and we'll get
	 * information at least as useful just by doing a dump_stack() here.
	 * Note that nmi_cpu_backtrace(NULL) will clear the cpu bit.
	 */
	if (cpumask_test_cpu(this_cpu, to_cpumask(backtrace_mask)))
		nmi_cpu_backtrace(NULL);

	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
		pr_info("Sending NMI from CPU %d to CPUs %*pbl:\n",
			this_cpu, nr_cpumask_bits, to_cpumask(backtrace_mask));
		raise(to_cpumask(backtrace_mask));
	}

	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(to_cpumask(backtrace_mask)))
			break;
		mdelay(1);
		touch_softlockup_watchdog();
	}

	/*
	 * Force flush any remote buffers that might be stuck in IRQ context
	 * and therefore could not run their irq_work.
	 */
	printk_safe_flush();

	clear_bit_unlock(0, &backtrace_flag);
	put_cpu();
}

bool nmi_cpu_backtrace(struct pt_regs *regs)
{
	static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
	int cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
		arch_spin_lock(&lock);
		if (regs && cpu_in_idle(instruction_pointer(regs))) {
			pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n",
				cpu, instruction_pointer(regs));
		} else {
			pr_warn("NMI backtrace for cpu %d\n", cpu);
			if (regs)
				show_regs(regs);
			else
				dump_stack();
		}
		arch_spin_unlock(&lock);
		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
		return true;
	}

	return false;
}
NOKPROBE_SYMBOL(nmi_cpu_backtrace);
#endif
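For context, an architecture hooks into this helper by defining arch_trigger_cpumask_backtrace() with a raise() callback that sends NMIs, and by calling nmi_cpu_backtrace() from its NMI handler. Below is a minimal sketch modeled on the x86 wiring in arch/x86/kernel/apic/hw_nmi.c (the handler name nmi_cpu_backtrace_handler is illustrative):

	static void nmi_raise_cpu_backtrace(cpumask_t *mask)
	{
		/* Send an NMI IPI to every CPU still set in the mask. */
		apic->send_IPI_mask(mask, NMI_VECTOR);
	}

	void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
	{
		nmi_trigger_cpumask_backtrace(mask, exclude_self,
					      nmi_raise_cpu_backtrace);
	}

	/* Called from the arch NMI path on each CPU that takes the IPI. */
	static int nmi_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
	{
		/* Dumps the stack and clears this CPU's bit in backtrace_mask. */
		if (nmi_cpu_backtrace(regs))
			return NMI_HANDLED;

		return NMI_DONE;
	}

Clearing the CPU's bit in backtrace_mask is what lets the 10-second wait loop in nmi_trigger_cpumask_backtrace() terminate early once every targeted CPU has reported in.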