/*
 * Ftrace header.  For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.txt
 */

#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H

#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/fs.h>

#include <asm/ftrace.h>

/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does. Or at least does enough to prevent any unwelcome side effects.
 */
#if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \
	!ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif

struct module;
struct ftrace_hash;

#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs);

/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * GLOBAL  - set manually by ftrace_ops user to denote the ftrace_ops
 *           is part of the global tracers sharing the same filter
 *           via set_ftrace_* debugfs files.
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * CONTROL - set manually by ftrace_ops user to denote the ftrace_ops
 *           could be controlled by the following calls:
 *             ftrace_function_local_enable
 *             ftrace_function_local_disable
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *           and passed to the callback. If this flag is set, but the
 *           architecture does not support passing regs
 *           (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *           ftrace_ops will fail to register, unless the next flag
 *           is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *           handler can handle an arch that does not save regs
 *           (the handler tests if regs == NULL), then it can set
 *           this flag instead. It will not fail registering the ftrace_ops,
 *           but the regs field will be NULL if the arch does not support
 *           passing regs to the handler.
 *           Note, if this flag is set, the SAVE_REGS flag will automatically
 *           get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
 *           that the callback has its own recursion protection. If it does
 *           not set this, then the ftrace infrastructure will add recursion
 *           protection for the caller.
 * STUB   - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *           register_ftrace_function() is called, it will initialize the ops).
 */
enum {
	FTRACE_OPS_FL_ENABLED			= 1 << 0,
	FTRACE_OPS_FL_GLOBAL			= 1 << 1,
	FTRACE_OPS_FL_DYNAMIC			= 1 << 2,
	FTRACE_OPS_FL_CONTROL			= 1 << 3,
	FTRACE_OPS_FL_SAVE_REGS			= 1 << 4,
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 5,
	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 6,
	FTRACE_OPS_FL_STUB			= 1 << 7,
	FTRACE_OPS_FL_INITIALIZED		= 1 << 8,
};

struct ftrace_ops {
	ftrace_func_t		func;
	struct ftrace_ops	*next;
	unsigned long		flags;
	int __percpu		*disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_hash	*notrace_hash;
	struct ftrace_hash	*filter_hash;
	struct mutex		regex_lock;
#endif
};

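/*
 * Example (illustrative sketch only, not part of this header): a minimal
 * callback registered through the ftrace_ops interface. The names
 * "my_callback" and "my_ops" are hypothetical:
 *
 *	static void notrace my_callback(unsigned long ip,
 *					unsigned long parent_ip,
 *					struct ftrace_ops *op,
 *					struct pt_regs *regs)
 *	{
 *		... called for every traced function; keep it minimal ...
 *	}
 *
 *	static struct ftrace_ops my_ops = {
 *		.func	= my_callback,
 *		.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 *	};
 *
 *	register_ftrace_function(&my_ops);	(start tracing)
 *	unregister_ftrace_function(&my_ops);	(stop tracing)
 */
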
extern int function_trace_stop;

/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,	/* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/**
 * ftrace_stop - stop function tracer.
 *
 * A quick way to stop the function tracer. Note this is an on/off switch;
 * it is not something that is recursive like preempt_disable.
 * This does not disable the calling of mcount, it only stops the
 * calling of functions from mcount.
 */
static inline void ftrace_stop(void)
{
	function_trace_stop = 1;
}

/**
 * ftrace_start - start the function tracer.
 *
 * This function is the inverse of ftrace_stop. This does not enable
 * the function tracing if the function tracer is disabled. This only
 * sets the function tracer flag to continue calling the functions
 * from mcount.
 */
static inline void ftrace_start(void)
{
	function_trace_stop = 0;
}

/*
 * The ftrace_ops must be static and should also
 * be read_mostly. These functions do modify read_mostly variables
 * so use them sparingly. Never free an ftrace_ops or modify the
 * next pointer after it has been registered. Even after unregistering
 * it, the next pointer may still be used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);

/**
 * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
 *
 * This function enables tracing on current cpu by decreasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
		return;

	(*this_cpu_ptr(ops->disabled))--;
}

/**
 * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu
 *
 * This function disables tracing on current cpu by increasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
		return;

	(*this_cpu_ptr(ops->disabled))++;
}

/**
 * ftrace_function_local_disabled - returns ftrace_ops disabled value
 *                                  on current cpu
 *
 * This function returns value of ftrace_ops::disabled on current cpu.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
{
	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
	return *this_cpu_ptr(ops->disabled);
}

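/*
 * Example (illustrative sketch only): pausing a CONTROL ftrace_ops on the
 * local cpu. "my_ops" is a hypothetical ftrace_ops registered with
 * FTRACE_OPS_FL_CONTROL; the preempt_disable() satisfies the requirement
 * stated in the kernel-doc above:
 *
 *	preempt_disable();
 *	ftrace_function_local_disable(&my_ops);
 *	... section that must not be traced by my_ops on this cpu ...
 *	ftrace_function_local_enable(&my_ops);
 *	preempt_enable();
 */
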
extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct pt_regs *regs);

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline int ftrace_nr_registered_ops(void)
{
	return 0;
}
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
static inline void ftrace_stop(void) { }
static inline void ftrace_start(void) { }
#endif /* CONFIG_FUNCTION_TRACER */

#ifdef CONFIG_STACK_TRACER
extern int stack_tracer_enabled;
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos);
#endif

struct ftrace_func_command {
	struct list_head	list;
	char			*name;
	int			(*func)(struct ftrace_hash *hash,
					char *func, char *cmd,
					char *params, int enable);
};

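/*
 * Example (illustrative sketch only): a hypothetical "mycmd" command that
 * could be hooked into the set_ftrace_filter syntax via
 * register_ftrace_command(), declared further below. All names here are
 * assumptions:
 *
 *	static int my_cmd_func(struct ftrace_hash *hash, char *func,
 *			       char *cmd, char *params, int enable)
 *	{
 *		... act on "func:mycmd:params" written to set_ftrace_filter ...
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_func,
 *	};
 *
 *	register_ftrace_command(&my_cmd);
 */
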
#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

void ftrace_bug(int err, unsigned long ip);

struct seq_file;

struct ftrace_probe_ops {
	void			(*func)(unsigned long ip,
					unsigned long parent_ip,
					void **data);
	int			(*init)(struct ftrace_probe_ops *ops,
					unsigned long ip, void **data);
	void			(*free)(struct ftrace_probe_ops *ops,
					unsigned long ip, void **data);
	int			(*print)(struct seq_file *m,
					 unsigned long ip,
					 struct ftrace_probe_ops *ops,
					 void *data);
};

extern int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			       void *data);
extern void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				 void *data);
extern void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
extern void unregister_ftrace_function_probe_all(char *glob);

extern int ftrace_text_reserved(void *start, void *end);

extern int ftrace_nr_registered_ops(void);

/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, '0-FTRACE_REF_MAX', is a counter of
 * the number of callbacks that have registered the function that
 * the dyn_ftrace descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS bit is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
	FTRACE_FL_ENABLED	= (1UL << 29),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 31)
};

#define FTRACE_FL_MASK		(0x7UL << 29)
#define FTRACE_REF_MAX		((1UL << 29) - 1)

struct dyn_ftrace {
	union {
		unsigned long		ip; /* address of mcount call-site */
		struct dyn_ftrace	*freelist;
	};
	unsigned long		flags;
	struct dyn_arch_ftrace	arch;
};

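/*
 * Example (illustrative sketch only): splitting a record's flags into the
 * reference counter and the state mask described above, given some
 * "struct dyn_ftrace *rec":
 *
 *	unsigned long refs = rec->flags & FTRACE_REF_MAX;
 *	bool enabled	   = rec->flags & FTRACE_FL_ENABLED;
 *	bool wants_regs	   = rec->flags & FTRACE_FL_REGS;
 *	bool saving_regs   = rec->flags & FTRACE_FL_REGS_EN;
 */
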
int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);

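/*
 * Example (illustrative sketch only): restricting a hypothetical "my_ops"
 * to a set of functions before registering it. The buffer is a glob, as
 * accepted by set_ftrace_filter; reset == 1 clears any previous filter:
 *
 *	static unsigned char pat[] = "kmalloc*";
 *
 *	ftrace_set_filter(&my_ops, pat, sizeof(pat) - 1, 1);
 *	register_ftrace_function(&my_ops);
 *	...
 *	unregister_ftrace_function(&my_ops);
 *	ftrace_free_filter(&my_ops);
 */
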
int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);

enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 *  IGNORE           - The function is already what we want it to be
 *  MAKE_CALL        - Start tracing the function
 *  MODIFY_CALL      - Stop saving regs for the function
 *  MODIFY_CALL_REGS - Start saving regs for the function
 *  MAKE_NOP         - Stop tracing the function
 */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MODIFY_CALL_REGS,
	FTRACE_UPDATE_MAKE_NOP,
};

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_HASH	= (1 << 3),
	FTRACE_ITER_HASH	= (1 << 4),
	FTRACE_ITER_ENABLED	= (1 << 5),
};

void arch_ftrace_update_code(int command);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);

#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))

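/*
 * Example (illustrative sketch only): walking every mcount record with the
 * iterator above, e.g. from arch code during a code-patching pass:
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		... inspect rec->ip and rec->flags ...
 *	}
 */
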
int ftrace_update_record(struct dyn_ftrace *rec, int enable);
int ftrace_test_record(struct dyn_ftrace *rec, int enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);

extern ftrace_func_t ftrace_trace_function;

int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		      struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void __init
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void *data);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif

/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);

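/*
 * Example (illustrative sketch only, not a real arch implementation): the
 * read/compare/write pattern the comment above asks an arch to follow.
 * MCOUNT_INSN_SIZE, the "expected"/"nop" buffers, and do_text_write() are
 * assumptions here, standing in for arch-specific details:
 *
 *	unsigned char cur[MCOUNT_INSN_SIZE];
 *
 *	if (probe_kernel_read(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
 *		return -EFAULT;
 *	if (memcmp(cur, expected, MCOUNT_INSN_SIZE) != 0)
 *		return -EINVAL;
 *	if (do_text_write((void *)rec->ip, nop, MCOUNT_INSN_SIZE))
 *		return -EPERM;
 *	return 0;
 */
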
/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the mcount call site record
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif

/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);

#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) {}
static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline int ftrace_text_reserved(void *start, void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}

/*
 * Again users of functions that have ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
#endif /* CONFIG_DYNAMIC_FTRACE */

loff_t ftrace_filter_lseek(struct file *file, loff_t offset, int whence);

/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}

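/*
 * Example (illustrative sketch only): the save/restore pair used around a
 * region that must run with ftrace_enabled cleared. The caller is assumed
 * to provide the synchronization noted above:
 *
 *	int saved = __ftrace_enabled_save();
 *	... code that must run without ftrace enabled ...
 *	__ftrace_enabled_restore(saved);
 */
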
#ifndef HAVE_ARCH_CALLER_ADDR
# ifdef CONFIG_FRAME_POINTER
#  define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
#  define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
#  define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
#  define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
#  define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
#  define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
#  define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
# else
#  define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
#  define CALLER_ADDR1 0UL
#  define CALLER_ADDR2 0UL
#  define CALLER_ADDR3 0UL
#  define CALLER_ADDR4 0UL
#  define CALLER_ADDR5 0UL
#  define CALLER_ADDR6 0UL
# endif
#endif /* ifndef HAVE_ARCH_CALLER_ADDR */

#ifdef CONFIG_IRQSOFF_TRACER
  extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
  extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
  static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
  static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_PREEMPT_TRACER
  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code out
 * of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif

/*
 * Structure that defines an entry function trace.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
};

2008-11-11 13:03:45 +07:00
|
|
|
/*
|
|
|
|
* Structure that defines a return function trace.
|
|
|
|
*/
|
2008-11-26 03:07:04 +07:00
|
|
|
struct ftrace_graph_ret {
|
2008-11-11 13:03:45 +07:00
|
|
|
unsigned long func; /* Current function */
|
|
|
|
unsigned long long calltime;
|
|
|
|
unsigned long long rettime;
|
2008-11-17 09:22:41 +07:00
|
|
|
/* Number of functions that overran the depth limit for current task */
|
|
|
|
unsigned long overrun;
|
2008-11-26 06:57:25 +07:00
|
|
|
int depth;
|
2008-11-11 13:03:45 +07:00
|
|
|
};
|
|
|
|
|
2010-04-03 00:01:22 +07:00
|
|
|
/* Type of the callback handlers for tracing function graph */
|
|
|
|
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
|
|
|
|
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
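/*
 * Sketch of a handler pair matching the typedefs above (hypothetical
 * names, not part of this header). The entry handler returns nonzero to
 * trace the function it was called for and 0 to skip it.
 */
#if 0
static int my_graph_entry(struct ftrace_graph_ent *ent)
{
	return ent->depth < 5;			/* only follow shallow call levels */
}

static void my_graph_return(struct ftrace_graph_ret *ret)
{
	/* time spent in ret->func, in trace-clock units */
	unsigned long long delta = ret->rettime - ret->calltime;
}
#endif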
|
|
|
|
|
2008-11-26 03:07:04 +07:00
|
|
|
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
2008-12-06 09:40:00 +07:00
|
|
|
|
2009-03-26 07:55:00 +07:00
|
|
|
/* for init task */
|
2009-04-08 12:05:43 +07:00
|
|
|
#define INIT_FTRACE_GRAPH .ret_stack = NULL,
|
2009-03-26 07:55:00 +07:00
|
|
|
|
2009-02-10 01:54:03 +07:00
|
|
|
/*
|
|
|
|
* Stack of return addresses for functions
|
|
|
|
* of a thread.
|
|
|
|
* Used in struct thread_info
|
|
|
|
*/
|
|
|
|
struct ftrace_ret_stack {
|
|
|
|
unsigned long ret;
|
|
|
|
unsigned long func;
|
|
|
|
unsigned long long calltime;
|
2009-03-25 10:17:58 +07:00
|
|
|
unsigned long long subtime;
|
function-graph: add stack frame test
In case gcc does something funny with the stack frames, or the return
from function code, we would like to detect that.
An arch may implement passing of a variable that is unique to the
function and can be saved on entering a function and can be tested
when exiting the function. Usually the frame pointer can be used for
this purpose.
This patch also implements this for x86, where it passes in the stack
frame of the parent function and tests that frame on exit.
There was a case in x86_32 with optimize for size (-Os) where, for a
few functions, gcc would align the stack frame and place a copy of the
return address into it. The function graph tracer modified the copy and
not the actual return address. On return from the function, it did not go
to the tracer hook, but returned to the parent. This broke the function
graph tracer, because the return of the parent (where gcc did not do
this funky manipulation) returned to the location that the child function
was supposed to. This caused strange kernel crashes.
This test detected the problem and pointed out where the issue was.
This modifies the parameters of one of the functions that the arch
specific code calls, so it includes changes to arch code to accommodate
the new prototype.
Note, I noticed that the parisc arch implements its own push_return_trace.
This is now a generic function and the ftrace_push_return_trace should be
used instead. This patch does not touch that code.
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2009-06-18 23:45:08 +07:00
|
|
|
unsigned long fp;
|
2009-02-10 01:54:03 +07:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Primary handler of a function return.
|
|
|
|
 * It relies on ftrace_return_to_handler.
|
|
|
|
* Defined in entry_32/64.S
|
|
|
|
*/
|
|
|
|
extern void return_to_handler(void);
|
|
|
|
|
|
|
|
extern int
|
2009-06-18 23:45:08 +07:00
|
|
|
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
|
|
|
|
unsigned long frame_pointer);
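/*
 * Sketch of how an arch uses this (simplified from the x86 helper the
 * changelog above refers to; the real code also consults
 * ftrace_graph_entry and pops the frame on rejection).
 * ftrace_push_return_trace() saves the real return address, then the
 * caller redirects the return site to return_to_handler().
 */
#if 0
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long old = *parent;		/* the real return address */
	int depth;				/* filled in by the push */

	if (ftrace_push_return_trace(old, self_addr, &depth,
				     frame_pointer) == -EBUSY)
		return;				/* stack full: leave the return alone */

	*parent = (unsigned long)&return_to_handler;	/* hijack the return */
}
#endif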
|
2009-02-10 01:54:03 +07:00
|
|
|
|
2008-12-06 09:40:00 +07:00
|
|
|
/*
|
|
|
|
* Sometimes we don't want to trace a function with the function
|
|
|
|
 * graph tracer but we still want it traced by the usual function
|
|
|
|
* tracer if the function graph tracer is not configured.
|
|
|
|
*/
|
|
|
|
#define __notrace_funcgraph notrace
|
|
|
|
|
2008-12-10 05:54:20 +07:00
|
|
|
/*
|
|
|
|
 * We want to know which function is an entrypoint of a hardirq.
|
|
|
|
 * That will help us mark them in the output.
|
|
|
|
*/
|
|
|
|
#define __irq_entry __attribute__((__section__(".irqentry.text")))
|
|
|
|
|
|
|
|
/* Limits of hardirq entrypoints */
|
|
|
|
extern char __irqentry_text_start[];
|
|
|
|
extern char __irqentry_text_end[];
|
|
|
|
|
2013-10-14 15:24:26 +07:00
|
|
|
#define FTRACE_NOTRACE_DEPTH 65536
|
2008-11-23 12:22:56 +07:00
|
|
|
#define FTRACE_RETFUNC_DEPTH 50
|
|
|
|
#define FTRACE_RETSTACK_ALLOC_SIZE 32
|
2008-11-26 06:57:25 +07:00
|
|
|
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
|
|
|
|
trace_func_graph_ent_t entryfunc);
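/*
 * Registration sketch (uses the hypothetical handlers sketched earlier):
 * register_ftrace_graph() installs both callbacks, taking the return
 * handler first, and returns a negative value on failure;
 * unregister_ftrace_graph() tears them down again.
 */
#if 0
static int my_tracer_start(void)
{
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}

static void my_tracer_stop(void)
{
	unregister_ftrace_graph();
}
#endif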
|
|
|
|
|
2008-12-03 11:50:02 +07:00
|
|
|
extern void ftrace_graph_stop(void);
|
|
|
|
|
2008-11-26 06:57:25 +07:00
|
|
|
/* The current handlers in use */
|
|
|
|
extern trace_func_graph_ret_t ftrace_graph_return;
|
|
|
|
extern trace_func_graph_ent_t ftrace_graph_entry;
|
2008-11-11 13:03:45 +07:00
|
|
|
|
2008-11-26 03:07:04 +07:00
|
|
|
extern void unregister_ftrace_graph(void);
|
2008-11-23 12:22:56 +07:00
|
|
|
|
2008-11-26 03:07:04 +07:00
|
|
|
extern void ftrace_graph_init_task(struct task_struct *t);
|
|
|
|
extern void ftrace_graph_exit_task(struct task_struct *t);
|
ftrace: Fix memory leak with function graph and cpu hotplug
When the function graph tracer starts, it needs to make a special
stack for each task to save the real return values of the tasks.
All running tasks have this stack created, as well as any new
tasks.
On CPU hotplug, the new idle task will allocate a stack as well
when init_idle() is called. The problem is that CPU hotplug does
not create a new idle_task. Instead it reuses the idle task that
existed when the CPU went down.
ftrace_graph_init_task() will add a new ret_stack to the task
that is given to it. Because a clone inherits the stack pointer of
its parent, that function does not check whether the task's
ret_stack is already set. When the CPU hotplug code starts a CPU up
again, it will allocate a new stack even though one already existed
for it.
The solution is to treat the idle_task specially. In fact, the
function_graph code already does, just not at init_idle().
Instead of using ftrace_graph_init_task() for the idle task,
which expects the task to be a clone, have a
separate ftrace_graph_init_idle_task(). Also, we will create a
per_cpu ret_stack that is used by the idle task. When we call
ftrace_graph_init_idle_task(), it will check if the idle task's
ret_stack is NULL; if it is, it will assign it the per_cpu
ret_stack.
Reported-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Suggested-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stable Tree <stable@kernel.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2011-02-11 09:26:13 +07:00
|
|
|
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
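/*
 * Sketch of the intended call site (per the changelog above, simplified):
 * init_idle() uses the _idle_ variant so a recycled idle task keeps its
 * existing ret_stack when its CPU comes back, instead of leaking a fresh
 * one.
 */
#if 0
void init_idle(struct task_struct *idle, int cpu)	/* simplified */
{
	/* ... normal idle-task setup ... */
	ftrace_graph_init_idle_task(idle, cpu);
}
#endif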
|
2008-12-05 05:51:23 +07:00
|
|
|
|
|
|
|
static inline int task_curr_ret_stack(struct task_struct *t)
|
|
|
|
{
|
|
|
|
return t->curr_ret_stack;
|
|
|
|
}
|
2008-12-06 09:43:41 +07:00
|
|
|
|
|
|
|
static inline void pause_graph_tracing(void)
|
|
|
|
{
|
|
|
|
atomic_inc(¤t->tracing_graph_pause);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void unpause_graph_tracing(void)
|
|
|
|
{
|
|
|
|
atomic_dec(¤t->tracing_graph_pause);
|
|
|
|
}
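/*
 * Usage sketch (hypothetical caller): bracket code that must not produce
 * graph-trace entries for the current task. The pause count is per-task
 * and atomic, so the pairs nest safely.
 */
#if 0
static void my_quiet_region(void)
{
	pause_graph_tracing();
	/* ... work the graph tracer should not record for current ... */
	unpause_graph_tracing();
}
#endif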
|
2009-03-26 07:55:00 +07:00
|
|
|
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */
|
2008-12-06 09:40:00 +07:00
|
|
|
|
|
|
|
#define __notrace_funcgraph
|
2008-12-10 05:54:20 +07:00
|
|
|
#define __irq_entry
|
2009-03-26 07:55:00 +07:00
|
|
|
#define INIT_FTRACE_GRAPH
|
2008-12-06 09:40:00 +07:00
|
|
|
|
2008-11-26 03:07:04 +07:00
|
|
|
static inline void ftrace_graph_init_task(struct task_struct *t) { }
|
|
|
|
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
|
2011-02-11 09:26:13 +07:00
|
|
|
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
|
2008-12-05 05:51:23 +07:00
|
|
|
|
2010-04-03 00:01:22 +07:00
|
|
|
static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
|
|
|
|
trace_func_graph_ent_t entryfunc)
|
|
|
|
{
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
static inline void unregister_ftrace_graph(void) { }
|
|
|
|
|
2008-12-05 05:51:23 +07:00
|
|
|
static inline int task_curr_ret_stack(struct task_struct *tsk)
|
|
|
|
{
|
|
|
|
return -1;
|
|
|
|
}
|
2008-12-06 09:43:41 +07:00
|
|
|
|
|
|
|
static inline void pause_graph_tracing(void) { }
|
|
|
|
static inline void unpause_graph_tracing(void) { }
|
2009-03-26 07:55:00 +07:00
|
|
|
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
|
2008-11-11 13:03:45 +07:00
|
|
|
|
2008-12-04 03:36:57 +07:00
|
|
|
#ifdef CONFIG_TRACING
|
|
|
|
|
|
|
|
/* flags for current->trace */
|
|
|
|
enum {
|
|
|
|
TSK_TRACE_FL_TRACE_BIT = 0,
|
|
|
|
TSK_TRACE_FL_GRAPH_BIT = 1,
|
|
|
|
};
|
|
|
|
enum {
|
|
|
|
TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT,
|
|
|
|
TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT,
|
|
|
|
};
|
|
|
|
|
|
|
|
static inline void set_tsk_trace_trace(struct task_struct *tsk)
|
|
|
|
{
|
|
|
|
set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void clear_tsk_trace_trace(struct task_struct *tsk)
|
|
|
|
{
|
|
|
|
clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int test_tsk_trace_trace(struct task_struct *tsk)
|
|
|
|
{
|
|
|
|
return tsk->trace & TSK_TRACE_FL_TRACE;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void set_tsk_trace_graph(struct task_struct *tsk)
|
|
|
|
{
|
|
|
|
set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void clear_tsk_trace_graph(struct task_struct *tsk)
|
|
|
|
{
|
|
|
|
clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int test_tsk_trace_graph(struct task_struct *tsk)
|
|
|
|
{
|
|
|
|
return tsk->trace & TSK_TRACE_FL_GRAPH;
|
|
|
|
}
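/*
 * Usage sketch (hypothetical tracer-side code): a tracer marks the tasks
 * it has selected with the set/clear helpers above and tests the flag
 * cheaply in its hot path.
 */
#if 0
static void my_select_task(struct task_struct *t)
{
	set_tsk_trace_trace(t);			/* atomic set_bit on t->trace */
}

static int my_task_selected(struct task_struct *t)
{
	return test_tsk_trace_trace(t);		/* plain read of the flag */
}
#endif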
|
|
|
|
|
2010-04-19 00:08:41 +07:00
|
|
|
enum ftrace_dump_mode;
|
|
|
|
|
|
|
|
extern enum ftrace_dump_mode ftrace_dump_on_oops;
|
2009-03-05 16:28:45 +07:00
|
|
|
|
2013-06-15 03:21:43 +07:00
|
|
|
extern void disable_trace_on_warning(void);
|
|
|
|
extern int __disable_trace_on_warning;
|
|
|
|
|
tracing: add same level recursion detection
The tracing infrastructure allows for recursion. That is, an interrupt
may interrupt the act of tracing an event, and that interrupt may very well
perform its own trace. This is a recursive trace, and is fine to do.
The problem arises when there is a bug, and the utility doing the trace
calls something that recurses back into the tracer. This recursion is not
caused by an external event like an interrupt, but by code that is not
expected to recurse. The result could be a lockup.
This patch adds a bitmask to the task structure that keeps track
of the trace recursion. To find the interrupt depth, the following
algorithm is used:
level = hardirq_count() + softirq_count() + in_nmi;
Here, level will be the depth of interrupts and softirqs, and even handles
the nmi. Then the corresponding bit is set in the recursion bitmask.
If the bit was already set, we know we had a recursion at the same level
and we warn about it and fail the write to the buffer.
After the data has been committed to the buffer, we clear the bit.
No atomics are needed. The only races are with interrupts, and they reset
the bitmask before returning anyway.
[ Impact: detect same irq level trace recursion ]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2009-04-17 08:41:52 +07:00
|
|
|
#ifdef CONFIG_PREEMPT
|
|
|
|
#define INIT_TRACE_RECURSION .trace_recursion = 0,
|
|
|
|
#endif
|
|
|
|
|
2013-06-15 03:21:43 +07:00
|
|
|
#else /* CONFIG_TRACING */
|
|
|
|
static inline void disable_trace_on_warning(void) { }
|
2008-12-04 03:36:57 +07:00
|
|
|
#endif /* CONFIG_TRACING */
|
|
|
|
|
2009-04-17 08:41:52 +07:00
|
|
|
#ifndef INIT_TRACE_RECURSION
|
|
|
|
#define INIT_TRACE_RECURSION
|
|
|
|
#endif
|
2009-01-19 16:31:01 +07:00
|
|
|
|
2010-01-26 16:40:03 +07:00
|
|
|
#ifdef CONFIG_FTRACE_SYSCALLS
|
|
|
|
|
|
|
|
unsigned long arch_syscall_addr(int nr);
|
|
|
|
|
|
|
|
#endif /* CONFIG_FTRACE_SYSCALLS */
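/*
 * Sketch of a typical arch implementation (names vary per architecture,
 * and the type of sys_call_table differs as well, so this is illustrative
 * only): map a syscall number to the address of its handler by indexing
 * the arch's syscall table.
 */
#if 0
unsigned long arch_syscall_addr(int nr)
{
	return (unsigned long)sys_call_table[nr];
}
#endif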
|
|
|
|
|
2008-05-13 02:20:42 +07:00
|
|
|
#endif /* _LINUX_FTRACE_H */
|