mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-30 23:36:44 +07:00
667f0cee3e
Like x86, when the function graph tracer is enabled, emit the ftrace stub as well as the program counter it will be transformed back into. We duplicate a lot of similar stack walking logic in 3 or 4 spots, so eventually we should consolidate things like x86 does. Thanks to Frederic Weisbecker for pointing this out. Signed-off-by: David S. Miller <davem@davemloft.net>
86 lines
2.0 KiB
C
86 lines
2.0 KiB
C
#include <linux/sched.h>
|
|
#include <linux/stacktrace.h>
|
|
#include <linux/thread_info.h>
|
|
#include <linux/ftrace.h>
|
|
#include <linux/module.h>
|
|
#include <asm/ptrace.h>
|
|
#include <asm/stacktrace.h>
|
|
|
|
#include "kstack.h"
|
|
|
|
/*
 * __save_stack_trace - walk a sparc64 kernel stack and record return PCs.
 * @tp:         thread_info of the task whose stack is walked
 * @trace:      output; entries[] is filled up to max_entries, honoring skip
 * @skip_sched: when true, drop PCs inside scheduler functions
 *              (used for blocked-task traces via save_stack_trace_tsk)
 */
static void __save_stack_trace(struct thread_info *tp,
			       struct stack_trace *trace,
			       bool skip_sched)
{
	unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	struct task_struct *t;
	int graph = 0;	/* how many ret_stack entries we have consumed */
#endif

	if (tp == current_thread_info()) {
		/*
		 * Tracing ourselves: flush register windows to memory so
		 * saved frame pointers on the stack are current, then read
		 * our own %fp directly.
		 */
		stack_trace_flush();
		__asm__ __volatile__("mov %%fp, %0" : "=r" (ksp));
	} else {
		/* Another (sleeping) task: use its saved kernel stack ptr. */
		ksp = tp->ksp;
	}

	/* sparc64 stack pointers are biased; un-bias to get a real address. */
	fp = ksp + STACK_BIAS;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	t = tp->task;
#endif
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		if (!kstack_valid(tp, fp))
			break;

		sf = (struct sparc_stackf *) fp;
		/* A trap frame, if present, sits just above the stack frame. */
		regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(tp, regs)) {
			/* Stop once we reach a trap taken from user mode. */
			if (!(regs->tstate & TSTATE_PRIV))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}

		if (trace->skip > 0)
			trace->skip--;
		else if (!skip_sched || !in_sched_functions(pc)) {
			trace->entries[trace->nr_entries++] = pc;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			/*
			 * The function graph tracer rewrites return addresses
			 * to return_to_handler (the saved PC is the call site,
			 * hence the +8 to match).  Emit the original return
			 * address from ret_stack as well, so the trace shows
			 * where the stub will return to.
			 */
			if ((pc + 8UL) == (unsigned long) &return_to_handler) {
				int index = t->curr_ret_stack;
				if (t->ret_stack && index >= graph) {
					pc = t->ret_stack[index - graph].ret;
					if (trace->nr_entries <
					    trace->max_entries)
						trace->entries[trace->nr_entries++] = pc;
					graph++;
				}
			}
#endif
		}
	} while (trace->nr_entries < trace->max_entries);
}
|
|
|
|
void save_stack_trace(struct stack_trace *trace)
|
|
{
|
|
__save_stack_trace(current_thread_info(), trace, false);
|
|
}
|
|
EXPORT_SYMBOL_GPL(save_stack_trace);
|
|
|
|
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
|
|
{
|
|
struct thread_info *tp = task_thread_info(tsk);
|
|
|
|
__save_stack_trace(tp, trace, true);
|
|
}
|
|
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
|