tracing: Add trace_export support for event trace
Only function traces can be exported to other destinations currently. This patch exports event trace as well. Move the trace export related functions to the beginning of the file so other traces can call trace_process_export() to export.

Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Reviewed-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Tingwei Zhang <tingwei@codeaurora.org>
Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Link: https://lore.kernel.org/r/20201005071319.78508-4-alexander.shishkin@linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 8438f52114
commit 8ab7a2b705
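For illustration, here is a minimal sketch of a consumer of this interface: a hypothetical module that registers a trace_export carrying the new TRACE_EXPORT_EVENT flag, so its write() callback receives each committed event entry. Only struct trace_export, the flag bits, and register_ftrace_export()/unregister_ftrace_export() come from the patched kernel; the demo_* names are made up.

#include <linux/module.h>
#include <linux/trace.h>

/* Hypothetical sink: called with the raw entry copied out of the ring buffer. */
static void demo_export_write(struct trace_export *export, const void *entry,
                              unsigned int size)
{
        /* Forward @size bytes at @entry to some destination, e.g. an STM. */
}

static struct trace_export demo_export = {
        .write = demo_export_write,
        .flags = TRACE_EXPORT_EVENT,    /* the bit this patch adds */
};

static int __init demo_export_init(void)
{
        /* Returns 0 on success, -1 if .write is missing. */
        return register_ftrace_export(&demo_export);
}

static void __exit demo_export_exit(void)
{
        unregister_ftrace_export(&demo_export);
}

module_init(demo_export_init);
module_exit(demo_export_exit);
MODULE_LICENSE("GPL");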
--- a/include/linux/trace.h
+++ b/include/linux/trace.h
@@ -5,6 +5,7 @@
 #ifdef CONFIG_TRACING
 
 #define TRACE_EXPORT_FUNCTION	BIT(0)
+#define TRACE_EXPORT_EVENT	BIT(1)
 
 /*
  * The trace export - an export of Ftrace output. The trace_export
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -251,6 +251,138 @@ unsigned long long ns2usecs(u64 nsec)
 	return nsec;
 }
 
+static void
+trace_process_export(struct trace_export *export,
+	       struct ring_buffer_event *event, int flag)
+{
+	struct trace_entry *entry;
+	unsigned int size = 0;
+
+	if (export->flags & flag) {
+		entry = ring_buffer_event_data(event);
+		size = ring_buffer_event_length(event);
+		export->write(export, entry, size);
+	}
+}
+
+static DEFINE_MUTEX(ftrace_export_lock);
+
+static struct trace_export __rcu *ftrace_exports_list __read_mostly;
+
+static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
+static DEFINE_STATIC_KEY_FALSE(trace_event_exports_enabled);
+
+static inline void ftrace_exports_enable(struct trace_export *export)
+{
+	if (export->flags & TRACE_EXPORT_FUNCTION)
+		static_branch_inc(&trace_function_exports_enabled);
+
+	if (export->flags & TRACE_EXPORT_EVENT)
+		static_branch_inc(&trace_event_exports_enabled);
+}
+
+static inline void ftrace_exports_disable(struct trace_export *export)
+{
+	if (export->flags & TRACE_EXPORT_FUNCTION)
+		static_branch_dec(&trace_function_exports_enabled);
+
+	if (export->flags & TRACE_EXPORT_EVENT)
+		static_branch_dec(&trace_event_exports_enabled);
+}
+
+static void ftrace_exports(struct ring_buffer_event *event, int flag)
+{
+	struct trace_export *export;
+
+	preempt_disable_notrace();
+
+	export = rcu_dereference_raw_check(ftrace_exports_list);
+	while (export) {
+		trace_process_export(export, event, flag);
+		export = rcu_dereference_raw_check(export->next);
+	}
+
+	preempt_enable_notrace();
+}
+
+static inline void
+add_trace_export(struct trace_export **list, struct trace_export *export)
+{
+	rcu_assign_pointer(export->next, *list);
+	/*
+	 * We are entering export into the list but another
+	 * CPU might be walking that list. We need to make sure
+	 * the export->next pointer is valid before another CPU sees
+	 * the export pointer included into the list.
+	 */
+	rcu_assign_pointer(*list, export);
+}
+
+static inline int
+rm_trace_export(struct trace_export **list, struct trace_export *export)
+{
+	struct trace_export **p;
+
+	for (p = list; *p != NULL; p = &(*p)->next)
+		if (*p == export)
+			break;
+
+	if (*p != export)
+		return -1;
+
+	rcu_assign_pointer(*p, (*p)->next);
+
+	return 0;
+}
+
+static inline void
+add_ftrace_export(struct trace_export **list, struct trace_export *export)
+{
+	ftrace_exports_enable(export);
+
+	add_trace_export(list, export);
+}
+
+static inline int
+rm_ftrace_export(struct trace_export **list, struct trace_export *export)
+{
+	int ret;
+
+	ret = rm_trace_export(list, export);
+	ftrace_exports_disable(export);
+
+	return ret;
+}
+
+int register_ftrace_export(struct trace_export *export)
+{
+	if (WARN_ON_ONCE(!export->write))
+		return -1;
+
+	mutex_lock(&ftrace_export_lock);
+
+	add_ftrace_export(&ftrace_exports_list, export);
+
+	mutex_unlock(&ftrace_export_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(register_ftrace_export);
+
+int unregister_ftrace_export(struct trace_export *export)
+{
+	int ret;
+
+	mutex_lock(&ftrace_export_lock);
+
+	ret = rm_ftrace_export(&ftrace_exports_list, export);
+
+	mutex_unlock(&ftrace_export_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(unregister_ftrace_export);
+
 /* trace_flags holds trace_options default values */
 #define TRACE_DEFAULT_FLAGS						\
 	(FUNCTION_DEFAULT_FLAGS |					\
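A note on the hunk above: the two static keys keep the commit paths cheap when no exporter is registered. static_branch_inc()/static_branch_dec() count registrations per export kind, so the guarded branch stays patched out until the first matching exporter shows up. A generic, hedged sketch of that counting pattern (demo_* names are illustrative, not from this file):

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(demo_exports_enabled);

static void demo_register_exporter(void)
{
        static_branch_inc(&demo_exports_enabled);       /* one count per exporter */
}

static void demo_unregister_exporter(void)
{
        static_branch_dec(&demo_exports_enabled);
}

static void demo_hot_path(void)
{
        /* Compiles to a patched-out branch while the count is zero. */
        if (static_branch_unlikely(&demo_exports_enabled))
                ;       /* walk the exporter list here */
}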
@@ -2699,6 +2831,8 @@ void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
 	if (static_key_false(&tracepoint_printk_key.key))
 		output_printk(fbuffer);
 
+	if (static_branch_unlikely(&trace_event_exports_enabled))
+		ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
 	event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
 				    fbuffer->event, fbuffer->entry,
 				    fbuffer->flags, fbuffer->pc, fbuffer->regs);
@@ -2742,131 +2876,6 @@ trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
 	__buffer_unlock_commit(buffer, event);
 }
 
-static void
-trace_process_export(struct trace_export *export,
-	       struct ring_buffer_event *event, int flag)
-{
-	struct trace_entry *entry;
-	unsigned int size = 0;
-
-	if (export->flags & flag) {
-		entry = ring_buffer_event_data(event);
-		size = ring_buffer_event_length(event);
-		export->write(export, entry, size);
-	}
-}
-
-static DEFINE_MUTEX(ftrace_export_lock);
-
-static struct trace_export __rcu *ftrace_exports_list __read_mostly;
-
-static DEFINE_STATIC_KEY_FALSE(trace_function_exports_enabled);
-
-static inline void ftrace_exports_enable(struct trace_export *export)
-{
-	if (export->flags & TRACE_EXPORT_FUNCTION)
-		static_branch_inc(&trace_function_exports_enabled);
-}
-
-static inline void ftrace_exports_disable(struct trace_export *export)
-{
-	if (export->flags & TRACE_EXPORT_FUNCTION)
-		static_branch_dec(&trace_function_exports_enabled);
-}
-
-static void ftrace_exports(struct ring_buffer_event *event, int flag)
-{
-	struct trace_export *export;
-
-	preempt_disable_notrace();
-
-	export = rcu_dereference_raw_check(ftrace_exports_list);
-	while (export) {
-		trace_process_export(export, event, flag);
-		export = rcu_dereference_raw_check(export->next);
-	}
-
-	preempt_enable_notrace();
-}
-
-static inline void
-add_trace_export(struct trace_export **list, struct trace_export *export)
-{
-	rcu_assign_pointer(export->next, *list);
-	/*
-	 * We are entering export into the list but another
-	 * CPU might be walking that list. We need to make sure
-	 * the export->next pointer is valid before another CPU sees
-	 * the export pointer included into the list.
-	 */
-	rcu_assign_pointer(*list, export);
-}
-
-static inline int
-rm_trace_export(struct trace_export **list, struct trace_export *export)
-{
-	struct trace_export **p;
-
-	for (p = list; *p != NULL; p = &(*p)->next)
-		if (*p == export)
-			break;
-
-	if (*p != export)
-		return -1;
-
-	rcu_assign_pointer(*p, (*p)->next);
-
-	return 0;
-}
-
-static inline void
-add_ftrace_export(struct trace_export **list, struct trace_export *export)
-{
-	ftrace_exports_enable(export);
-
-	add_trace_export(list, export);
-}
-
-static inline int
-rm_ftrace_export(struct trace_export **list, struct trace_export *export)
-{
-	int ret;
-
-	ret = rm_trace_export(list, export);
-	ftrace_exports_disable(export);
-
-	return ret;
-}
-
-int register_ftrace_export(struct trace_export *export)
-{
-	if (WARN_ON_ONCE(!export->write))
-		return -1;
-
-	mutex_lock(&ftrace_export_lock);
-
-	add_ftrace_export(&ftrace_exports_list, export);
-
-	mutex_unlock(&ftrace_export_lock);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(register_ftrace_export);
-
-int unregister_ftrace_export(struct trace_export *export)
-{
-	int ret;
-
-	mutex_lock(&ftrace_export_lock);
-
-	ret = rm_ftrace_export(&ftrace_exports_list, export);
-
-	mutex_unlock(&ftrace_export_lock);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(unregister_ftrace_export);
-
 void
 trace_function(struct trace_array *tr,
 	       unsigned long ip, unsigned long parent_ip, unsigned long flags,