x86/dumpstack: Convert show_trace_log_lvl() to use the new unwinder

Convert show_trace_log_lvl() to use the new unwinder.  dump_trace() has
been deprecated.

show_trace_log_lvl() is special compared to other users of the unwinder.
It's the only place where both reliable *and* unreliable addresses are
needed.  With frame pointers enabled, most callers of the unwinder don't
want to know about unreliable addresses.  But in this case, when we're
dumping the stack to the console because something presumably went
wrong, the unreliable addresses are useful:

- They show stale data on the stack which can provide useful clues.

- If something goes wrong with the unwinder, or if frame pointers are
  corrupt or missing, all the stack addresses still get shown.

So in order to show all addresses on the stack, and at the same time
figure out which addresses are reliable, we have to do the scanning and
the unwinding in parallel.

The scanning is done with the help of get_stack_info() to traverse the
stacks.  The unwinding is done separately by the new unwinder.

In theory we could simplify show_trace_log_lvl() by instead pushing some
of this logic into the unwind code.  But then we would need some kind of
"fake" frame logic in the unwinder which would add a lot of complexity
and wouldn't be worth it in order to support only one user.

Another benefit of this approach is that once we have a DWARF unwinder,
we should be able to just plug it in with minimal impact to this code.

Another change here is that callers of show_trace_log_lvl() don't need
to provide the 'bp' argument.  The unwinder already finds the relevant
frame pointer by unwinding until it reaches the first frame after the
provided stack pointer.

Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nilay Vaish <nilayvaish@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/703b5998604c712a1f801874b43f35d6dac52ede.1474045023.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
Josh Poimboeuf 2016-09-16 14:18:16 -05:00 committed by Ingo Molnar
parent ec2ad9ccf1
commit e18bcccd1a
4 changed files with 108 additions and 48 deletions

View File

@@ -119,13 +119,11 @@ get_stack_pointer(struct task_struct *task, struct pt_regs *regs)
return (unsigned long *)task->thread.sp; return (unsigned long *)task->thread.sp;
} }
extern void void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, char *log_lvl);
unsigned long *stack, unsigned long bp, char *log_lvl);
extern void void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *sp, char *log_lvl);
unsigned long *sp, unsigned long bp, char *log_lvl);
extern unsigned int code_bytes; extern unsigned int code_bytes;

View File

@@ -17,7 +17,7 @@
#include <linux/sysfs.h> #include <linux/sysfs.h>
#include <asm/stacktrace.h> #include <asm/stacktrace.h>
#include <asm/unwind.h>
int panic_on_unrecovered_nmi; int panic_on_unrecovered_nmi;
int panic_on_io_nmi; int panic_on_io_nmi;
@@ -142,56 +142,120 @@ print_context_stack_bp(struct task_struct *task,
} }
EXPORT_SYMBOL_GPL(print_context_stack_bp); EXPORT_SYMBOL_GPL(print_context_stack_bp);
static int print_trace_stack(void *data, const char *name) void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, char *log_lvl)
{ {
printk("%s <%s> ", (char *)data, name); struct unwind_state state;
return 0; struct stack_info stack_info = {0};
} unsigned long visit_mask = 0;
int graph_idx = 0;
/*
* Print one address/symbol entries per line.
*/
static int print_trace_address(void *data, unsigned long addr, int reliable)
{
printk_stack_address(addr, reliable, data);
return 0;
}
static const struct stacktrace_ops print_trace_ops = {
.stack = print_trace_stack,
.address = print_trace_address,
.walk_stack = print_context_stack,
};
void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
unsigned long *stack, unsigned long bp, char *log_lvl)
{
printk("%sCall Trace:\n", log_lvl); printk("%sCall Trace:\n", log_lvl);
dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
unwind_start(&state, task, regs, stack);
/*
* Iterate through the stacks, starting with the current stack pointer.
* Each stack has a pointer to the next one.
*
* x86-64 can have several stacks:
* - task stack
* - interrupt stack
* - HW exception stacks (double fault, nmi, debug, mce)
*
* x86-32 can have up to three stacks:
* - task stack
* - softirq stack
* - hardirq stack
*/
for (; stack; stack = stack_info.next_sp) {
const char *str_begin, *str_end;
/*
* If we overflowed the task stack into a guard page, jump back
* to the bottom of the usable stack.
*/
if (task_stack_page(task) - (void *)stack < PAGE_SIZE)
stack = task_stack_page(task);
if (get_stack_info(stack, task, &stack_info, &visit_mask))
break;
stack_type_str(stack_info.type, &str_begin, &str_end);
if (str_begin)
printk("%s <%s> ", log_lvl, str_begin);
/*
* Scan the stack, printing any text addresses we find. At the
* same time, follow proper stack frames with the unwinder.
*
* Addresses found during the scan which are not reported by
* the unwinder are considered to be additional clues which are
* sometimes useful for debugging and are prefixed with '?'.
* This also serves as a failsafe option in case the unwinder
* goes off in the weeds.
*/
for (; stack < stack_info.end; stack++) {
unsigned long real_addr;
int reliable = 0;
unsigned long addr = *stack;
unsigned long *ret_addr_p =
unwind_get_return_address_ptr(&state);
if (!__kernel_text_address(addr))
continue;
if (stack == ret_addr_p)
reliable = 1;
/*
* When function graph tracing is enabled for a
* function, its return address on the stack is
* replaced with the address of an ftrace handler
* (return_to_handler). In that case, before printing
* the "real" address, we want to print the handler
* address as an "unreliable" hint that function graph
* tracing was involved.
*/
real_addr = ftrace_graph_ret_addr(task, &graph_idx,
addr, stack);
if (real_addr != addr)
printk_stack_address(addr, 0, log_lvl);
printk_stack_address(real_addr, reliable, log_lvl);
if (!reliable)
continue;
/*
* Get the next frame from the unwinder. No need to
* check for an error: if anything goes wrong, the rest
* of the addresses will just be printed as unreliable.
*/
unwind_next_frame(&state);
}
if (str_end)
printk("%s <%s> ", log_lvl, str_end);
}
} }
void show_stack(struct task_struct *task, unsigned long *sp) void show_stack(struct task_struct *task, unsigned long *sp)
{ {
unsigned long bp = 0;
task = task ? : current; task = task ? : current;
/* /*
* Stack frames below this one aren't interesting. Don't show them * Stack frames below this one aren't interesting. Don't show them
* if we're printing for %current. * if we're printing for %current.
*/ */
if (!sp && task == current) { if (!sp && task == current)
sp = get_stack_pointer(current, NULL); sp = get_stack_pointer(current, NULL);
bp = (unsigned long)get_frame_pointer(current, NULL);
}
show_stack_log_lvl(task, NULL, sp, bp, ""); show_stack_log_lvl(current, NULL, sp, "");
} }
void show_stack_regs(struct pt_regs *regs) void show_stack_regs(struct pt_regs *regs)
{ {
show_stack_log_lvl(current, regs, NULL, 0, ""); show_stack_log_lvl(current, regs, NULL, "");
} }
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;

View File

@@ -156,9 +156,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
} }
EXPORT_SYMBOL(dump_trace); EXPORT_SYMBOL(dump_trace);
void void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *sp, char *log_lvl)
unsigned long *sp, unsigned long bp, char *log_lvl)
{ {
unsigned long *stack; unsigned long *stack;
int i; int i;
@@ -181,7 +180,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
touch_nmi_watchdog(); touch_nmi_watchdog();
} }
pr_cont("\n"); pr_cont("\n");
show_trace_log_lvl(task, regs, sp, bp, log_lvl); show_trace_log_lvl(task, regs, sp, log_lvl);
put_task_stack(task); put_task_stack(task);
} }
@@ -205,7 +204,7 @@ void show_regs(struct pt_regs *regs)
u8 *ip; u8 *ip;
pr_emerg("Stack:\n"); pr_emerg("Stack:\n");
show_stack_log_lvl(current, regs, NULL, 0, KERN_EMERG); show_stack_log_lvl(current, regs, NULL, KERN_EMERG);
pr_emerg("Code:"); pr_emerg("Code:");

View File

@@ -209,9 +209,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
} }
EXPORT_SYMBOL(dump_trace); EXPORT_SYMBOL(dump_trace);
void void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *sp, char *log_lvl)
unsigned long *sp, unsigned long bp, char *log_lvl)
{ {
unsigned long *irq_stack_end; unsigned long *irq_stack_end;
unsigned long *irq_stack; unsigned long *irq_stack;
@@ -255,7 +254,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
} }
pr_cont("\n"); pr_cont("\n");
show_trace_log_lvl(task, regs, sp, bp, log_lvl); show_trace_log_lvl(task, regs, sp, log_lvl);
put_task_stack(task); put_task_stack(task);
} }
@@ -278,7 +277,7 @@ void show_regs(struct pt_regs *regs)
u8 *ip; u8 *ip;
printk(KERN_DEFAULT "Stack:\n"); printk(KERN_DEFAULT "Stack:\n");
show_stack_log_lvl(current, regs, NULL, 0, KERN_DEFAULT); show_stack_log_lvl(current, regs, NULL, KERN_DEFAULT);
printk(KERN_DEFAULT "Code: "); printk(KERN_DEFAULT "Code: ");