linux_dsm_epyc7002/kernel/stacktrace.c


/*
 * kernel/stacktrace.c
 *
 * Stack trace management functions
 *
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/stacktrace.h>
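
/* Print the saved trace to the kernel log, one symbol per line, indented by @spaces. */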
void print_stack_trace(struct stack_trace *trace, int spaces)
{
	int i;

	if (WARN_ON(!trace->entries))
		return;

	for (i = 0; i < trace->nr_entries; i++)
		printk("%*c%pS\n", 1 + spaces, ' ', (void *)trace->entries[i]);
}
EXPORT_SYMBOL_GPL(print_stack_trace);
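
/*
 * Format the saved trace into @buf, one symbol per line.  Like snprintf(),
 * the return value is the length that would have been generated, so the
 * output is silently truncated once @size is exhausted.
 */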
int snprint_stack_trace(char *buf, size_t size,
			struct stack_trace *trace, int spaces)
{
	int i;
	int generated;
	int total = 0;

	if (WARN_ON(!trace->entries))
		return 0;

	for (i = 0; i < trace->nr_entries; i++) {
		generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ',
				     (void *)trace->entries[i]);

		total += generated;

		/* Assume that generated isn't a negative number */
		if (generated >= size) {
			buf += size;
			size = 0;
		} else {
			buf += generated;
			size -= generated;
		}
	}

	return total;
}
EXPORT_SYMBOL_GPL(snprint_stack_trace);

/*
 * Architectures that do not implement save_stack_trace_*()
 * get these weak aliases and once-per-bootup warnings
 * (whenever this facility is utilized - for example by procfs):
 */
__weak void
save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
WARN_ONCE(1, KERN_INFO "save_stack_trace_tsk() not implemented yet.\n");
}
__weak void
save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
WARN_ONCE(1, KERN_INFO "save_stack_trace_regs() not implemented yet.\n");
}
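
/*
 * Weak fallback for architectures without reliable stack trace support
 * (CONFIG_HAVE_RELIABLE_STACKTRACE); callers simply get -ENOSYS here.
 */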
__weak int
save_stack_trace_tsk_reliable(struct task_struct *tsk,
			      struct stack_trace *trace)
{
WARN_ONCE(1, KERN_INFO "save_stack_tsk_reliable() not implemented yet.\n");
return -ENOSYS;
}