linux_dsm_epyc7002/arch/microblaze/kernel/ftrace.c
Josh Poimboeuf 9a7c348ba6 ftrace: Add return address pointer to ftrace_ret_stack
Storing this value will help prevent unwinders from getting out of sync
with the function graph tracer ret_stack.  Now instead of needing a
stateful iterator, they can compare the return address pointer to find
the right ret_stack entry.

Note that an array of 50 ftrace_ret_stack structs is allocated for every
task.  So when an arch implements this, it will add either 200 or 400
bytes of memory usage per task (depending on whether it's a 32-bit or
64-bit platform).

Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Byungchul Park <byungchul.park@lge.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nilay Vaish <nilayvaish@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/a95cfcc39e8f26b89a430c56926af0bb217bc0a1.1471607358.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2016-08-24 12:15:14 +02:00

/*
 * Ftrace support for Microblaze.
 *
 * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2009 PetaLogix
 *
 * Based on MIPS and PowerPC ftrace code
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <asm/cacheflush.h>
#include <linux/ftrace.h>

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it in the stack of return addrs
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	int faulted, err;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long)
				&return_to_handler;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against a fault, even if it shouldn't
	 * happen. This tool is too intrusive to
	 * ignore such a protection.
	 */
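	/*
	 * The fault-protected sequence below does:
	 *   1: lwi  %0, %2, 0    - old = *parent (read the saved return address)
	 *   2: swi  %3, %2, 0    - *parent = return_hooker
	 *      addik %1, r0, 0   - faulted = 0
	 *   4: (fixup) brid 3b; addik %1, r0, 1
	 *                        - a fault at 1: or 2: sets faulted = 1 instead
	 */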
	asm volatile("	1:	lwi	%0, %2, 0;"		\
			"2:	swi	%3, %2, 0;"		\
			"	addik	%1, r0, 0;"		\
			"3:"					\
			"	.section .fixup, \"ax\";"	\
			"4:	brid	3b;"			\
			"	addik	%1, r0, 1;"		\
			"	.previous;"			\
			"	.section __ex_table,\"a\";"	\
			"	.word	1b,4b;"			\
			"	.word	2b,4b;"			\
			"	.previous;"			\
			: "=&r" (old), "=r" (faulted)
			: "r" (parent), "r" (return_hooker)
		);
	flush_dcache_range((u32)parent, (u32)parent + 4);
	flush_icache_range((u32)parent, (u32)parent + 4);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}
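	/*
	 * The trailing NULL below is the 'retp' argument described in the
	 * commit message above; microblaze does not record a return address
	 * pointer for stack unwinders here.
	 */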
	err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0, NULL);
	if (err == -EBUSY) {
		*parent = old;
		return;
	}

	trace.func = self_addr;
	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
/* Store value to addr - it is safe to do it in asm */
static int ftrace_modify_code(unsigned long addr, unsigned int value)
{
	int faulted = 0;

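	/*
	 * 1: swi %2, %1, 0     - *addr = value
	 *    addik %0, r0, 0   - faulted = 0
	 * 3: (fixup) brid 2b; addik %0, r0, 1
	 *                      - a fault at the store sets faulted = 1 instead
	 */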
	__asm__ __volatile__("	1:	swi	%2, %1, 0;"		\
				"	addik	%0, r0, 0;"		\
				"2:"					\
				"	.section .fixup, \"ax\";"	\
				"3:	brid	2b;"			\
				"	addik	%0, r0, 1;"		\
				"	.previous;"			\
				"	.section __ex_table,\"a\";"	\
				"	.word	1b,3b;"			\
				"	.previous;"			\
				: "=r" (faulted)
				: "r" (addr), "r" (value)
			);

	if (unlikely(faulted))
		return -EFAULT;

	flush_dcache_range(addr, addr + 4);
	flush_icache_range(addr, addr + 4);

	return 0;
}

#define MICROBLAZE_NOP 0x80000000	/* "or r0, r0, r0" */
#define MICROBLAZE_BRI 0xb800000C	/* "bri 12" - branch over the _mcount call */

static unsigned int recorded;	/* whether the original instructions were saved */
static unsigned int imm;	/* saved copy of the whole imm instruction */

/* There are two approaches to implementing the ftrace_make_nop function - see below */
#undef USE_FTRACE_NOP

#ifdef USE_FTRACE_NOP
static unsigned int bralid;	/* saved copy of the whole bralid instruction */
#endif

int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	/* we have this part of code which we are working with
	 * b000c000	imm	-16384
	 * b9fc8e30	bralid	r15, -29136	// c0008e30 <_mcount>
	 * 80000000	or	r0, r0, r0
	 *
	 * The first solution (!USE_FTRACE_NOP - could be called the branch solution)
	 * b800000c	bri	12 (0xC - jump over the bralid and its delay slot)
	 * b9fc8e30	bralid	r15, -29136	// c0008e30 <_mcount>
	 * 80000000	or	r0, r0, r0
	 * any other instruction
	 *
	 * The second solution (USE_FTRACE_NOP) - no jump, just nops
	 * 80000000	or	r0, r0, r0
	 * 80000000	or	r0, r0, r0
	 * 80000000	or	r0, r0, r0
	 */
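	/* USE_FTRACE_NOP is left undefined above, so the branch solution is used. */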
	int ret = 0;

	if (recorded == 0) {
		recorded = 1;
		imm = *(unsigned int *)rec->ip;
		pr_debug("%s: imm:0x%x\n", __func__, imm);
#ifdef USE_FTRACE_NOP
		bralid = *(unsigned int *)(rec->ip + 4);
		pr_debug("%s: bralid 0x%x\n", __func__, bralid);
#endif /* USE_FTRACE_NOP */
	}

#ifdef USE_FTRACE_NOP
	ret = ftrace_modify_code(rec->ip, MICROBLAZE_NOP);
	ret += ftrace_modify_code(rec->ip + 4, MICROBLAZE_NOP);
#else /* USE_FTRACE_NOP */
	ret = ftrace_modify_code(rec->ip, MICROBLAZE_BRI);
#endif /* USE_FTRACE_NOP */
	return ret;
}

/* ftrace_make_nop() is expected to have been called before this function */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	int ret;

	pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n",
		__func__, (unsigned int)addr, (unsigned int)rec->ip, imm);

	ret = ftrace_modify_code(rec->ip, imm);
#ifdef USE_FTRACE_NOP
	pr_debug("%s: bralid:0x%x\n", __func__, bralid);
	ret += ftrace_modify_code(rec->ip + 4, bralid);
#endif /* USE_FTRACE_NOP */
	return ret;
}
int __init ftrace_dyn_arch_init(void)
{
	return 0;
}
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned int upper = (unsigned int)func;
	unsigned int lower = (unsigned int)func;
	int ret = 0;

	/* build the imm/addik pair that loads func's address at the ftrace_call site */
	upper = 0xb0000000 + (upper >> 16); /* imm func_upper */
	lower = 0x32800000 + (lower & 0xFFFF); /* addik r20, r0, func_lower */
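	/*
	 * Example with an arbitrary address: func == 0xc0123456 gives
	 * upper == 0xb000c012 ("imm" carrying the upper halfword 0xc012) and
	 * lower == 0x32803456 ("addik r20, r0, 0x3456").
	 */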
pr_debug("%s: func=0x%x, ip=0x%x, upper=0x%x, lower=0x%x\n",
__func__, (unsigned int)func, (unsigned int)ip, upper, lower);
/* save upper and lower code */
ret = ftrace_modify_code(ip, upper);
ret += ftrace_modify_code(ip + 4, lower);
/* We just need to replace the rtsd r15, 8 with NOP */
ret += ftrace_modify_code((unsigned long)&ftrace_caller,
MICROBLAZE_NOP);
return ret;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
unsigned int old_jump;	/* saved copy of the jump-over instruction */
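
/*
 * ftrace_call_graph marks the instruction that jumps over the graph-tracer
 * call.  Enabling the graph caller NOPs that jump out (saving the original
 * word in old_jump); disabling writes the saved instruction back.
 */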
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned int ret;
	unsigned long ip = (unsigned long)(&ftrace_call_graph);

	old_jump = *(unsigned int *)ip; /* save jump over instruction */
	ret = ftrace_modify_code(ip, MICROBLAZE_NOP);

	pr_debug("%s: Replace instruction: 0x%x\n", __func__, old_jump);
	return ret;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned int ret;
	unsigned long ip = (unsigned long)(&ftrace_call_graph);

	ret = ftrace_modify_code(ip, old_jump);

	pr_debug("%s\n", __func__);
	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE */