/*
 * Stack trace management functions
 *
 * Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
 */
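/*
 * MIPS backend for the generic stack trace interface declared in
 * <linux/stacktrace.h>: save_stack_trace() and save_stack_trace_tsk().
 */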
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <asm/stacktrace.h>

/*
 * Save stack-backtrace addresses into a stack_trace buffer:
 */
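/*
 * Fallback scanner: walk every word on the kernel stack from reg29 up to
 * kstack_end() and record anything that looks like a kernel text address.
 * This is a heuristic and may pick up stale return addresses; scheduler
 * functions are filtered out unless savesched is set.
 */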
static void save_raw_context_stack(struct stack_trace *trace,
        unsigned long reg29, int savesched)
{
        unsigned long *sp = (unsigned long *)reg29;
        unsigned long addr;

        while (!kstack_end(sp)) {
                addr = *sp++;
                if (__kernel_text_address(addr) &&
                    (savesched || !in_sched_functions(addr))) {
                        if (trace->skip > 0)
                                trace->skip--;
                        else
                                trace->entries[trace->nr_entries++] = addr;
                        if (trace->nr_entries >= trace->max_entries)
                                break;
                }
        }
}

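/*
 * With CONFIG_KALLSYMS the trace is built by walking frames with
 * unwind_stack(), starting from the epc/ra/sp snapshot in *regs.  If
 * raw_show_trace is set, or the PC is not a kernel text address, fall
 * back to the raw stack scan above (after checking that sp still points
 * into tsk's stack page).  Without CONFIG_KALLSYMS only the raw scan is
 * available.
 */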
static void save_context_stack(struct stack_trace *trace,
        struct task_struct *tsk, struct pt_regs *regs, int savesched)
{
        unsigned long sp = regs->regs[29];
#ifdef CONFIG_KALLSYMS
        unsigned long ra = regs->regs[31];
        unsigned long pc = regs->cp0_epc;

        if (raw_show_trace || !__kernel_text_address(pc)) {
                unsigned long stack_page =
                        (unsigned long)task_stack_page(tsk);
                if (stack_page && sp >= stack_page &&
                    sp <= stack_page + THREAD_SIZE - 32)
                        save_raw_context_stack(trace, sp, savesched);
                return;
        }
        do {
                if (savesched || !in_sched_functions(pc)) {
                        if (trace->skip > 0)
                                trace->skip--;
                        else
                                trace->entries[trace->nr_entries++] = pc;
                        if (trace->nr_entries >= trace->max_entries)
                                break;
                }
                pc = unwind_stack(tsk, &sp, pc, &ra);
        } while (pc);
#else
        save_raw_context_stack(trace, sp, savesched);
#endif
}

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
        save_stack_trace_tsk(current, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

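/*
 * For a task other than current, build a minimal pt_regs from the saved
 * thread context (reg29 as the stack pointer, reg31 as the resume PC,
 * with ra cleared); for current, capture the live registers with
 * prepare_frametrace().  Entries from scheduler functions are kept only
 * when tracing current (tsk == current is passed as savesched).
 */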
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
        struct pt_regs dummyregs;
        struct pt_regs *regs = &dummyregs;

        WARN_ON(trace->nr_entries || !trace->max_entries);

        if (tsk != current) {
                regs->regs[29] = tsk->thread.reg29;
                regs->regs[31] = 0;
                regs->cp0_epc = tsk->thread.reg31;
        } else
                prepare_frametrace(regs);
        save_context_stack(trace, tsk, regs, tsk == current);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

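/*
 * Illustrative usage sketch (not part of this file): with CONFIG_STACKTRACE
 * enabled, a caller could capture and print a backtrace roughly like this,
 * using the stack_trace fields from <linux/stacktrace.h>.  The buffer size
 * and variable names below are arbitrary.
 *
 *	unsigned long entries[16];
 *	struct stack_trace trace = {
 *		.entries     = entries,
 *		.max_entries = ARRAY_SIZE(entries),
 *		.skip        = 0,
 *	};
 *
 *	save_stack_trace(&trace);
 *	print_stack_trace(&trace, 0);
 */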