linux_dsm_epyc7002/arch/x86/kernel/dumpstack.c
Josh Poimboeuf e18bcccd1a x86/dumpstack: Convert show_trace_log_lvl() to use the new unwinder
Convert show_trace_log_lvl() to use the new unwinder.  dump_trace() has
been deprecated.

show_trace_log_lvl() is special compared to other users of the unwinder.
It's the only place where both reliable *and* unreliable addresses are
needed.  With frame pointers enabled, most callers of the unwinder don't
want to know about unreliable addresses.  But in this case, when we're
dumping the stack to the console because something presumably went
wrong, the unreliable addresses are useful:

- They show stale data on the stack which can provide useful clues.

- If something goes wrong with the unwinder, or if frame pointers are
  corrupt or missing, all the stack addresses still get shown.

So in order to show all addresses on the stack, and at the same time
figure out which addresses are reliable, we have to do the scanning and
the unwinding in parallel.

The scanning is done with the help of get_stack_info() to traverse the
stacks.  The unwinding is done separately by the new unwinder.

In theory we could simplify show_trace_log_lvl() by instead pushing some
of this logic into the unwind code.  But then we would need some kind of
"fake" frame logic in the unwinder which would add a lot of complexity
and wouldn't be worth it in order to support only one user.

Another benefit of this approach is that once we have a DWARF unwinder,
we should be able to just plug it in with minimal impact to this code.

Another change here is that callers of show_trace_log_lvl() don't need
to provide the 'bp' argument.  The unwinder already finds the relevant
frame pointer by unwinding until it reaches the first frame after the
provided stack pointer.

Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nilay Vaish <nilayvaish@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/703b5998604c712a1f801874b43f35d6dac52ede.1474045023.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2016-09-20 08:29:34 +02:00

/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 */
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/utsname.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/sysfs.h>

#include <asm/stacktrace.h>
#include <asm/unwind.h>

int panic_on_unrecovered_nmi;
int panic_on_io_nmi;
unsigned int code_bytes = 64;
int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
static int die_counter;
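
/*
 * Check whether @stack points into @task's stack.  If so, fill in @info
 * with the stack's bounds and mark it as a task stack.
 */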
bool in_task_stack(unsigned long *stack, struct task_struct *task,
		   struct stack_info *info)
{
	unsigned long *begin = task_stack_page(task);
	unsigned long *end   = task_stack_page(task) + THREAD_SIZE;

	if (stack < begin || stack >= end)
		return false;

	info->type	= STACK_TYPE_TASK;
	info->begin	= begin;
	info->end	= end;
	info->next_sp	= NULL;

	return true;
}
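
/*
 * Print a single stack entry.  Addresses not confirmed by the unwinder
 * are prefixed with "? ".  %pB decodes the symbol on the assumption that
 * the address is a return address, so the printed name refers to the
 * call site.
 */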
static void printk_stack_address(unsigned long address, int reliable,
				 char *log_lvl)
{
	touch_nmi_watchdog();
	printk("%s [<%p>] %s%pB\n",
	       log_lvl, (void *)address, reliable ? "" : "? ",
	       (void *)address);
}

void printk_address(unsigned long address)
{
	pr_cont(" [<%p>] %pS\n", (void *)address, (void *)address);
}

/*
 * x86-64 can have up to three kernel stacks:
 * process stack
 * interrupt stack
 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
 */

unsigned long
print_context_stack(struct task_struct *task,
		    unsigned long *stack, unsigned long bp,
		    const struct stacktrace_ops *ops, void *data,
		    struct stack_info *info, int *graph)
{
	struct stack_frame *frame = (struct stack_frame *)bp;

	/*
	 * If we overflowed the stack into a guard page, jump back to the
	 * bottom of the usable stack.
	 */
	if ((unsigned long)task_stack_page(task) - (unsigned long)stack <
	    PAGE_SIZE)
		stack = (unsigned long *)task_stack_page(task);

	while (on_stack(info, stack, sizeof(*stack))) {
		unsigned long addr = *stack;

		if (__kernel_text_address(addr)) {
			unsigned long real_addr;
			int reliable = 0;

			if ((unsigned long) stack == bp + sizeof(long)) {
				reliable = 1;
				frame = frame->next_frame;
				bp = (unsigned long) frame;
			}

			/*
			 * When function graph tracing is enabled for a
			 * function, its return address on the stack is
			 * replaced with the address of an ftrace handler
			 * (return_to_handler).  In that case, before printing
			 * the "real" address, we want to print the handler
			 * address as an "unreliable" hint that function graph
			 * tracing was involved.
			 */
			real_addr = ftrace_graph_ret_addr(task, graph, addr,
							  stack);
			if (real_addr != addr)
				ops->address(data, addr, 0);

			ops->address(data, real_addr, reliable);
		}
		stack++;
	}

	return bp;
}
EXPORT_SYMBOL_GPL(print_context_stack);

unsigned long
print_context_stack_bp(struct task_struct *task,
		       unsigned long *stack, unsigned long bp,
		       const struct stacktrace_ops *ops, void *data,
		       struct stack_info *info, int *graph)
{
	struct stack_frame *frame = (struct stack_frame *)bp;
	unsigned long *retp = &frame->return_address;

	while (on_stack(info, stack, sizeof(*stack) * 2)) {
		unsigned long addr = *retp;
		unsigned long real_addr;

		if (!__kernel_text_address(addr))
			break;

		real_addr = ftrace_graph_ret_addr(task, graph, addr, retp);
		if (ops->address(data, real_addr, 1))
			break;

		frame = frame->next_frame;
		retp = &frame->return_address;
	}

	return (unsigned long)frame;
}
EXPORT_SYMBOL_GPL(print_context_stack_bp);
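
/*
 * show_trace_log_lvl() scans the stack pages directly and runs the
 * unwinder alongside, using it only to decide which of the text
 * addresses it finds are reliable return addresses; everything else is
 * printed as a '?' hint.
 */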
void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
			unsigned long *stack, char *log_lvl)
{
	struct unwind_state state;
	struct stack_info stack_info = {0};
	unsigned long visit_mask = 0;
	int graph_idx = 0;

	printk("%sCall Trace:\n", log_lvl);

	unwind_start(&state, task, regs, stack);

	/*
	 * Iterate through the stacks, starting with the current stack pointer.
	 * Each stack has a pointer to the next one.
	 *
	 * x86-64 can have several stacks:
	 * - task stack
	 * - interrupt stack
	 * - HW exception stacks (double fault, nmi, debug, mce)
	 *
	 * x86-32 can have up to three stacks:
	 * - task stack
	 * - softirq stack
	 * - hardirq stack
	 */
	for (; stack; stack = stack_info.next_sp) {
		const char *str_begin, *str_end;

		/*
		 * If we overflowed the task stack into a guard page, jump back
		 * to the bottom of the usable stack.
		 */
		if (task_stack_page(task) - (void *)stack < PAGE_SIZE)
			stack = task_stack_page(task);

		if (get_stack_info(stack, task, &stack_info, &visit_mask))
			break;

		stack_type_str(stack_info.type, &str_begin, &str_end);
		if (str_begin)
			printk("%s <%s> ", log_lvl, str_begin);

		/*
		 * Scan the stack, printing any text addresses we find.  At the
		 * same time, follow proper stack frames with the unwinder.
		 *
		 * Addresses found during the scan which are not reported by
		 * the unwinder are considered to be additional clues which are
		 * sometimes useful for debugging and are prefixed with '?'.
		 * This also serves as a failsafe option in case the unwinder
		 * goes off in the weeds.
		 */
		for (; stack < stack_info.end; stack++) {
			unsigned long real_addr;
			int reliable = 0;
			unsigned long addr = *stack;
			unsigned long *ret_addr_p =
				unwind_get_return_address_ptr(&state);

			if (!__kernel_text_address(addr))
				continue;

			if (stack == ret_addr_p)
				reliable = 1;

			/*
			 * When function graph tracing is enabled for a
			 * function, its return address on the stack is
			 * replaced with the address of an ftrace handler
			 * (return_to_handler).  In that case, before printing
			 * the "real" address, we want to print the handler
			 * address as an "unreliable" hint that function graph
			 * tracing was involved.
			 */
			real_addr = ftrace_graph_ret_addr(task, &graph_idx,
							  addr, stack);
			if (real_addr != addr)
				printk_stack_address(addr, 0, log_lvl);

			printk_stack_address(real_addr, reliable, log_lvl);

			if (!reliable)
				continue;

			/*
			 * Get the next frame from the unwinder.  No need to
			 * check for an error: if anything goes wrong, the rest
			 * of the addresses will just be printed as unreliable.
			 */
			unwind_next_frame(&state);
		}

		if (str_end)
			printk("%s <%s> ", log_lvl, str_end);
	}
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	task = task ? : current;

	/*
	 * Stack frames below this one aren't interesting.  Don't show them
	 * if we're printing for %current.
	 */
	if (!sp && task == current)
		sp = get_stack_pointer(current, NULL);

	show_stack_log_lvl(task, NULL, sp, "");
}

void show_stack_regs(struct pt_regs *regs)
{
	show_stack_log_lvl(current, regs, NULL, "");
}
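
/*
 * Oopses are serialized by die_lock so that output from concurrent
 * oopses on different CPUs doesn't interleave.  A CPU that oopses while
 * already owning the lock (a nested oops) is let straight through,
 * tracked by die_nest_count.
 */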
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

unsigned long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}
EXPORT_SYMBOL_GPL(oops_begin);
NOKPROBE_SYMBOL(oops_begin);
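
/*
 * rewind_stack_do_exit() is implemented in the entry assembly: it resets
 * the stack pointer to the top of the task stack and then calls
 * do_exit(), so it works even from an IST stack or with almost no stack
 * space left.
 */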
void __noreturn rewind_stack_do_exit(int signr);

void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	if (!signr)
		return;
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");

	/*
	 * We're not going to return, but we might be on an IST stack or
	 * have very little stack space left.  Rewind the stack and kill
	 * the task.
	 */
	rewind_stack_do_exit(signr);
}
NOKPROBE_SYMBOL(oops_end);
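
/*
 * Print the oops banner, registers and module list.  Returns 1 if a die
 * notifier claimed the event (NOTIFY_STOP), in which case the caller
 * skips delivering the fatal signal.
 */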
int __die(const char *str, struct pt_regs *regs, long err)
{
#ifdef CONFIG_X86_32
	unsigned short ss;
	unsigned long sp;
#endif
	printk(KERN_DEFAULT
	       "%s: %04lx [#%d]%s%s%s%s\n", str, err & 0xffff, ++die_counter,
	       IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : "",
	       IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
	       debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
	       IS_ENABLED(CONFIG_KASAN) ? " KASAN" : "");

	if (notify_die(DIE_OOPS, str, regs, err,
		       current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);
#ifdef CONFIG_X86_32
	if (user_mode(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
	} else {
		sp = kernel_stack_pointer(regs);
		savesegment(ss, ss);
	}
	printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
	print_symbol("%s", regs->ip);
	printk(" SS:ESP %04x:%08lx\n", ss, sp);
#else
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP ");
	printk_address(regs->ip);
	printk(" RSP <%016lx>\n", regs->sp);
#endif
	return 0;
}
NOKPROBE_SYMBOL(__die);

/*
 * This is gone through when something in the kernel has done something bad
 * and is about to be terminated:
 */
void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	if (!user_mode(regs))
		report_bug(regs->ip, regs);

	if (__die(str, regs, err))
		sig = 0;

	oops_end(flags, regs, sig);
}
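
/* "kstack=N" boot parameter: how many words of each stack to print in dumps. */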
static int __init kstack_setup(char *s)
{
	ssize_t ret;
	unsigned long val;

	if (!s)
		return -EINVAL;

	ret = kstrtoul(s, 0, &val);
	if (ret)
		return ret;
	kstack_depth_to_print = val;
	return 0;
}
early_param("kstack", kstack_setup);
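
/*
 * "code_bytes=N" boot parameter: how many bytes of code to dump around
 * the faulting instruction in the "Code:" line (capped at 8192).
 */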
static int __init code_bytes_setup(char *s)
{
	ssize_t ret;
	unsigned long val;

	if (!s)
		return -EINVAL;

	ret = kstrtoul(s, 0, &val);
	if (ret)
		return ret;

	code_bytes = val;
	if (code_bytes > 8192)
		code_bytes = 8192;

	return 1;
}
__setup("code_bytes=", code_bytes_setup);