mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-05 08:16:49 +07:00
61c1917f47
The current print_context_stack helper that does the stack walking job is good for usual stacktraces as it walks through all the stack and reports even addresses that look unreliable, which is nice when we don't have frame pointers for example. But we have users like perf that only require reliable stacktraces, and those may want a more adapted stack walker, so lets make this function a callback in stacktrace_ops that users can tune for their needs. Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: Paul Mackerras <paulus@samba.org> LKML-Reference: <1261024834-5336-1-git-send-regression-fweisbec@gmail.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
159 lines
3.7 KiB
C
/*
|
|
* Stack trace management functions
|
|
*
|
|
* Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
|
|
*/
|
|
#include <linux/sched.h>
|
|
#include <linux/stacktrace.h>
|
|
#include <linux/module.h>
|
|
#include <linux/uaccess.h>
|
|
#include <asm/stacktrace.h>
|
|
|
|
/*
 * stacktrace_ops .warning callback: deliberately a no-op — a saved
 * stacktrace has nowhere useful to report walker warnings.
 */
static void save_stack_warning(void *data, char *msg)
{
}
|
|
|
|
/*
 * stacktrace_ops .warning_symbol callback: deliberately a no-op,
 * same rationale as save_stack_warning().
 */
static void
save_stack_warning_symbol(void *data, char *msg, unsigned long symbol)
{
}
|
|
|
|
/*
 * stacktrace_ops .stack callback, invoked when the walker crosses into a
 * differently-named stack (e.g. an IRQ stack).  NOTE(review): the return
 * value's meaning is defined by dump_trace(), which is not visible here;
 * returning 0 appears to let the walk continue — confirm against
 * arch/x86/kernel/dumpstack*.c.
 */
static int save_stack_stack(void *data, char *name)
{
	return 0;
}
|
|
|
|
static void save_stack_address(void *data, unsigned long addr, int reliable)
|
|
{
|
|
struct stack_trace *trace = data;
|
|
if (!reliable)
|
|
return;
|
|
if (trace->skip > 0) {
|
|
trace->skip--;
|
|
return;
|
|
}
|
|
if (trace->nr_entries < trace->max_entries)
|
|
trace->entries[trace->nr_entries++] = addr;
|
|
}
|
|
|
|
static void
|
|
save_stack_address_nosched(void *data, unsigned long addr, int reliable)
|
|
{
|
|
struct stack_trace *trace = (struct stack_trace *)data;
|
|
if (!reliable)
|
|
return;
|
|
if (in_sched_functions(addr))
|
|
return;
|
|
if (trace->skip > 0) {
|
|
trace->skip--;
|
|
return;
|
|
}
|
|
if (trace->nr_entries < trace->max_entries)
|
|
trace->entries[trace->nr_entries++] = addr;
|
|
}
|
|
|
|
static const struct stacktrace_ops save_stack_ops = {
|
|
.warning = save_stack_warning,
|
|
.warning_symbol = save_stack_warning_symbol,
|
|
.stack = save_stack_stack,
|
|
.address = save_stack_address,
|
|
.walk_stack = print_context_stack,
|
|
};
|
|
|
|
static const struct stacktrace_ops save_stack_ops_nosched = {
|
|
.warning = save_stack_warning,
|
|
.warning_symbol = save_stack_warning_symbol,
|
|
.stack = save_stack_stack,
|
|
.address = save_stack_address_nosched,
|
|
.walk_stack = print_context_stack,
|
|
};
|
|
|
|
/*
|
|
* Save stack-backtrace addresses into a stack_trace buffer.
|
|
*/
|
|
void save_stack_trace(struct stack_trace *trace)
|
|
{
|
|
dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace);
|
|
if (trace->nr_entries < trace->max_entries)
|
|
trace->entries[trace->nr_entries++] = ULONG_MAX;
|
|
}
|
|
EXPORT_SYMBOL_GPL(save_stack_trace);
|
|
|
|
void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp)
|
|
{
|
|
dump_trace(current, NULL, NULL, bp, &save_stack_ops, trace);
|
|
if (trace->nr_entries < trace->max_entries)
|
|
trace->entries[trace->nr_entries++] = ULONG_MAX;
|
|
}
|
|
|
|
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
|
|
{
|
|
dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
|
|
if (trace->nr_entries < trace->max_entries)
|
|
trace->entries[trace->nr_entries++] = ULONG_MAX;
|
|
}
|
|
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
|
|
|
|
/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */
|
|
|
|
/*
 * In-memory layout of a frame-pointer-based user-space stack frame:
 * the saved caller frame pointer followed by the return address.
 * Field order and types must match what user code pushes — do not
 * reorder or pad.
 */
struct stack_frame {
	const void __user *next_fp;	/* saved caller's frame pointer */
	unsigned long ret_addr;		/* return address into the caller */
};
|
|
|
|
/*
 * Copy one stack_frame from user address @fp into @frame without
 * sleeping (page faults are disabled around the copy).
 *
 * Returns 1 on success, 0 if @fp is not a valid user range or the
 * copy faults.
 */
static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ok;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	pagefault_disable();
	ok = __copy_from_user_inatomic(frame, fp, sizeof(*frame)) ? 0 : 1;
	pagefault_enable();

	return ok;
}
|
|
|
|
static inline void __save_stack_trace_user(struct stack_trace *trace)
|
|
{
|
|
const struct pt_regs *regs = task_pt_regs(current);
|
|
const void __user *fp = (const void __user *)regs->bp;
|
|
|
|
if (trace->nr_entries < trace->max_entries)
|
|
trace->entries[trace->nr_entries++] = regs->ip;
|
|
|
|
while (trace->nr_entries < trace->max_entries) {
|
|
struct stack_frame frame;
|
|
|
|
frame.next_fp = NULL;
|
|
frame.ret_addr = 0;
|
|
if (!copy_stack_frame(fp, &frame))
|
|
break;
|
|
if ((unsigned long)fp < regs->sp)
|
|
break;
|
|
if (frame.ret_addr) {
|
|
trace->entries[trace->nr_entries++] =
|
|
frame.ret_addr;
|
|
}
|
|
if (fp == frame.next_fp)
|
|
break;
|
|
fp = frame.next_fp;
|
|
}
|
|
}
|
|
|
|
void save_stack_trace_user(struct stack_trace *trace)
|
|
{
|
|
/*
|
|
* Trace user stack if we are not a kernel thread
|
|
*/
|
|
if (current->mm) {
|
|
__save_stack_trace_user(trace);
|
|
}
|
|
if (trace->nr_entries < trace->max_entries)
|
|
trace->entries[trace->nr_entries++] = ULONG_MAX;
|
|
}
|
|
|