/*
 * trace task wakeup timings
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <trace/events/sched.h>
#include "trace.h"

static struct trace_array	*wakeup_trace;
static int __read_mostly	tracer_enabled;

static struct task_struct	*wakeup_task;
static int			wakeup_cpu;
static int			wakeup_current_cpu;
static unsigned			wakeup_prio = -1;
static int			wakeup_rt;
static int			wakeup_dl;
static int			tracing_dl = 0;

static arch_spinlock_t wakeup_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void wakeup_reset(struct trace_array *tr);
static void __wakeup_reset(struct trace_array *tr);

static int save_flags;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int wakeup_display_graph(struct trace_array *tr, int set);
# define is_graph(tr) ((tr)->trace_flags & TRACE_ITER_DISPLAY_GRAPH)
#else
static inline int wakeup_display_graph(struct trace_array *tr, int set)
{
	return 0;
}
# define is_graph(tr) false
#endif


#ifdef CONFIG_FUNCTION_TRACER

static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace);

static bool function_enabled;

/*
 * Prologue for the wakeup function tracers.
 *
 * Returns 1 if it is OK to continue, and preemption
 * is disabled and data->disabled is incremented.
 * 0 if the trace is to be ignored, and preemption
 * is not disabled and data->disabled is
 * kept the same.
 *
 * Note, this function is also used outside this ifdef but
 * inside the #ifdef of the function graph tracer below.
 * This is OK, since the function graph tracer is
 * dependent on the function tracer.
 */
static int
func_prolog_preempt_disable(struct trace_array *tr,
			    struct trace_array_cpu **data,
			    int *pc)
{
	long disabled;
	int cpu;

	if (likely(!wakeup_task))
		return 0;

	*pc = preempt_count();
	preempt_disable_notrace();

	cpu = raw_smp_processor_id();
	if (cpu != wakeup_current_cpu)
		goto out_enable;

	*data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&(*data)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	return 1;

out:
	atomic_dec(&(*data)->disabled);

out_enable:
	preempt_enable_notrace();
	return 0;
}
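
/*
 * Illustrative sketch of the expected caller pattern (mirroring
 * wakeup_tracer_call() below): on a non-zero return, the caller must
 * undo both the data->disabled increment and the preemption disable:
 *
 *	if (!func_prolog_preempt_disable(tr, &data, &pc))
 *		return;
 *	...record the event...
 *	atomic_dec(&data->disabled);
 *	preempt_enable_notrace();
 */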

/*
 * wakeup uses its own tracer function to keep the overhead down:
 */
static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
		   struct ftrace_ops *op, struct pt_regs *pt_regs)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_irq_save(flags);
	trace_function(tr, ip, parent_ip, flags, pc);
	local_irq_restore(flags);

	atomic_dec(&data->disabled);
	preempt_enable_notrace();
}

static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	int ret;

	/* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
	if (function_enabled || (!set && !(tr->trace_flags & TRACE_ITER_FUNCTION)))
		return 0;

	if (graph)
		ret = register_ftrace_graph(&wakeup_graph_return,
					    &wakeup_graph_entry);
	else
		ret = register_ftrace_function(tr->ops);

	if (!ret)
		function_enabled = true;

	return ret;
}

static void unregister_wakeup_function(struct trace_array *tr, int graph)
{
	if (!function_enabled)
		return;

	if (graph)
		unregister_ftrace_graph();
	else
		unregister_ftrace_function(tr->ops);

	function_enabled = false;
}

static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	if (!(mask & TRACE_ITER_FUNCTION))
		return 0;

	if (set)
		register_wakeup_function(tr, is_graph(tr), 1);
	else
		unregister_wakeup_function(tr, is_graph(tr));

	return 1;
}
#else
static int register_wakeup_function(struct trace_array *tr, int graph, int set)
{
	return 0;
}
static void unregister_wakeup_function(struct trace_array *tr, int graph) { }
static int wakeup_function_set(struct trace_array *tr, u32 mask, int set)
{
	return 0;
}
#endif /* CONFIG_FUNCTION_TRACER */

static int wakeup_flag_changed(struct trace_array *tr, u32 mask, int set)
{
	struct tracer *tracer = tr->current_trace;

	if (wakeup_function_set(tr, mask, set))
		return 0;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (mask & TRACE_ITER_DISPLAY_GRAPH)
		return wakeup_display_graph(tr, set);
#endif

	return trace_keep_overwrite(tracer, mask, set);
}
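
/*
 * Note: a flag flip is consumed by at most one handler above. Anything
 * not claimed by the function-tracer or display-graph paths falls
 * through to trace_keep_overwrite(), which guards against clearing the
 * overwrite mode while this latency tracer is active.
 */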

static int start_func_tracer(struct trace_array *tr, int graph)
{
	int ret;

	ret = register_wakeup_function(tr, graph, 0);

	if (!ret && tracing_is_enabled())
		tracer_enabled = 1;
	else
		tracer_enabled = 0;

	return ret;
}

static void stop_func_tracer(struct trace_array *tr, int graph)
{
	tracer_enabled = 0;

	unregister_wakeup_function(tr, graph);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int wakeup_display_graph(struct trace_array *tr, int set)
{
	if (!(is_graph(tr) ^ set))
		return 0;

	stop_func_tracer(tr, !set);

	wakeup_reset(wakeup_trace);
	tr->max_latency = 0;

	return start_func_tracer(tr, set);
}
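
/*
 * The XOR above is a no-change check: when the DISPLAY_GRAPH flag
 * already matches the requested state there is nothing to switch,
 * so we avoid needlessly resetting the buffers and the max latency.
 */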

static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc, ret = 0;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return 0;

	local_save_flags(flags);
	ret = __trace_graph_entry(tr, trace, flags, pc);
	atomic_dec(&data->disabled);
	preempt_enable_notrace();

	return ret;
}

static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = wakeup_trace;
	struct trace_array_cpu *data;
	unsigned long flags;
	int pc;

	if (!func_prolog_preempt_disable(tr, &data, &pc))
		return;

	local_save_flags(flags);
	__trace_graph_return(tr, trace, flags, pc);
	atomic_dec(&data->disabled);

	preempt_enable_notrace();
}

static void wakeup_trace_open(struct trace_iterator *iter)
{
	if (is_graph(iter->tr))
		graph_trace_open(iter);
}

static void wakeup_trace_close(struct trace_iterator *iter)
{
	if (iter->private)
		graph_trace_close(iter);
}

#define GRAPH_TRACER_FLAGS (TRACE_GRAPH_PRINT_PROC | \
			    TRACE_GRAPH_PRINT_ABS_TIME | \
			    TRACE_GRAPH_PRINT_DURATION)

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	/*
	 * In graph mode call the graph tracer output function,
	 * otherwise go with the TRACE_FN event handler.
	 */
	if (is_graph(iter->tr))
		return print_graph_function_flags(iter, GRAPH_TRACER_FLAGS);

	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_print_header(struct seq_file *s)
{
	if (is_graph(wakeup_trace))
		print_graph_headers_flags(s, GRAPH_TRACER_FLAGS);
	else
		trace_default_header(s);
}

static void
__trace_function(struct trace_array *tr,
		 unsigned long ip, unsigned long parent_ip,
		 unsigned long flags, int pc)
{
	if (is_graph(tr))
		trace_graph_function(tr, ip, parent_ip, flags, pc);
	else
		trace_function(tr, ip, parent_ip, flags, pc);
}
#else
#define __trace_function trace_function

static enum print_line_t wakeup_print_line(struct trace_iterator *iter)
{
	return TRACE_TYPE_UNHANDLED;
}

static void wakeup_trace_open(struct trace_iterator *iter) { }
static void wakeup_trace_close(struct trace_iterator *iter) { }

#ifdef CONFIG_FUNCTION_TRACER
static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
	return -1;
}
static void wakeup_graph_return(struct ftrace_graph_ret *trace) { }
static void wakeup_print_header(struct seq_file *s)
{
	trace_default_header(s);
}
#else
static void wakeup_print_header(struct seq_file *s)
{
	trace_latency_header(s);
}
#endif /* CONFIG_FUNCTION_TRACER */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

/*
 * Should this new latency be reported/recorded?
 */
static bool report_latency(struct trace_array *tr, cycle_t delta)
{
	if (tracing_thresh) {
		if (delta < tracing_thresh)
			return false;
	} else {
		if (delta <= tr->max_latency)
			return false;
	}
	return true;
}
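
/*
 * Put differently: when a threshold is set, every latency at or above
 * it is recorded; without one, only a new maximum is. For example,
 * with tracing_thresh unset and tr->max_latency at 50us, a 40us wakeup
 * is dropped while a 55us one becomes the new maximum.
 */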

static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	wakeup_current_cpu = cpu;
}
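
/*
 * This probe exists so the function tracers above never need to take
 * wakeup_lock just to learn which CPU the traced task sits on: they
 * only compare raw_smp_processor_id() with wakeup_current_cpu. Races
 * are tolerated here, since adding or missing a single function trace
 * entry is harmless.
 */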

static void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_context_switch;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = prev->pid;
	entry->prev_prio = prev->prio;
	entry->prev_state = prev->state;
	entry->next_pid = next->pid;
	entry->next_prio = next->prio;
	entry->next_state = next->state;
	entry->next_cpu = task_cpu(next);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}

static void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->prev_pid = curr->pid;
	entry->prev_prio = curr->prio;
	entry->prev_state = curr->state;
	entry->next_pid = wakee->pid;
	entry->next_prio = wakee->prio;
	entry->next_state = wakee->state;
	entry->next_cpu = task_cpu(wakee);

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
}
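
/*
 * Note that both records above reuse struct ctx_switch_entry: in the
 * TRACE_WAKE case the "prev" fields describe the currently running
 * task and the "next" fields describe the wakee.
 */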

static void notrace
probe_wakeup_sched_switch(void *ignore, bool preempt,
			  struct task_struct *prev, struct task_struct *next)
{
	struct trace_array_cpu *data;
	cycle_t T0, T1, delta;
	unsigned long flags;
	long disabled;
	int cpu;
	int pc;

	tracing_record_cmdline(prev);

	if (unlikely(!tracer_enabled))
		return;

	/*
	 * When we start a new trace, we set wakeup_task to NULL
	 * and then set tracer_enabled = 1. We want to make sure
	 * that another CPU does not see the tracer_enabled = 1
	 * and the wakeup_task with an older task, that might
	 * actually be the same as next.
	 */
	smp_rmb();

	if (next != wakeup_task)
		return;

	pc = preempt_count();

	/* disable local data, not wakeup_cpu data */
	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (likely(disabled != 1))
		goto out;

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);

	/* We could race with grabbing wakeup_lock */
	if (unlikely(!tracer_enabled || next != wakeup_task))
		goto out_unlock;

	/* The task we are waiting for is waking up */
	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);

	__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
	tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);

	T0 = data->preempt_timestamp;
	T1 = ftrace_now(cpu);
	delta = T1 - T0;

	if (!report_latency(wakeup_trace, delta))
		goto out_unlock;

	if (likely(!is_tracing_stopped())) {
		wakeup_trace->max_latency = delta;
		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
	}

out_unlock:
	__wakeup_reset(wakeup_trace);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}
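
/*
 * The measured wakeup latency is T1 - T0 above: the time from
 * data->preempt_timestamp, stamped in probe_wakeup() when the task
 * was woken, until ftrace_now() at the moment it is scheduled in.
 */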

static void __wakeup_reset(struct trace_array *tr)
{
	wakeup_cpu = -1;
	wakeup_prio = -1;
	tracing_dl = 0;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}

static void wakeup_reset(struct trace_array *tr)
{
	unsigned long flags;

	tracing_reset_online_cpus(&tr->trace_buffer);

	local_irq_save(flags);
	arch_spin_lock(&wakeup_lock);
	__wakeup_reset(tr);
	arch_spin_unlock(&wakeup_lock);
	local_irq_restore(flags);
}
2008-05-13 02:20:51 +07:00
|
|
|
static void
|
2015-06-09 16:13:36 +07:00
|
|
|
probe_wakeup(void *ignore, struct task_struct *p)
|
2008-05-13 02:20:42 +07:00
|
|
|
{
|
2009-01-22 05:17:04 +07:00
|
|
|
struct trace_array_cpu *data;
|
2008-05-13 02:20:42 +07:00
|
|
|
int cpu = smp_processor_id();
|
|
|
|
unsigned long flags;
|
|
|
|
long disabled;
|
2008-10-02 00:14:09 +07:00
|
|
|
int pc;
|
2008-05-13 02:20:42 +07:00
|
|
|
|
2008-07-18 23:16:17 +07:00
|
|
|
if (likely(!tracer_enabled))
|
|
|
|
return;
|
|
|
|
|
|
|
|
tracing_record_cmdline(p);
|
|
|
|
tracing_record_cmdline(current);
|
|
|
|
|
2013-11-07 20:43:42 +07:00
|
|
|
/*
|
|
|
|
* Semantic is like this:
|
|
|
|
* - wakeup tracer handles all tasks in the system, independently
|
|
|
|
* from their scheduling class;
|
|
|
|
* - wakeup_rt tracer handles tasks belonging to sched_dl and
|
|
|
|
* sched_rt class;
|
|
|
|
* - wakeup_dl handles tasks belonging to sched_dl class only.
|
|
|
|
*/
|
|
|
|
if (tracing_dl || (wakeup_dl && !dl_task(p)) ||
|
|
|
|
(wakeup_rt && !dl_task(p) && !rt_task(p)) ||
|
|
|
|
(!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
|
2008-05-13 02:20:42 +07:00
|
|
|
return;
|
|
|
|
|
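
	/*
	 * E.g. with the plain "wakeup" tracer (wakeup_rt == wakeup_dl == 0)
	 * the check above reduces to the priority test alone: the wakeup
	 * is traced only if the wakee outranks both the current wakeup_prio
	 * and the currently running task (a lower prio value means a higher
	 * priority, hence the >= comparisons).
	 */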

	pc = preempt_count();
	disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
	if (unlikely(disabled != 1))
		goto out;

	/* interrupts should be off from try_to_wake_up */
	arch_spin_lock(&wakeup_lock);

	/* check for races. */
	if (!tracer_enabled || tracing_dl ||
	    (!dl_task(p) && p->prio >= wakeup_prio))
		goto out_locked;

	/* reset the trace */
	__wakeup_reset(wakeup_trace);

	wakeup_cpu = task_cpu(p);
	wakeup_current_cpu = wakeup_cpu;
	wakeup_prio = p->prio;

	/*
	 * Once you start tracing a -deadline task, don't bother tracing
	 * another task until the first one wakes up.
	 */
	if (dl_task(p))
		tracing_dl = 1;
	else
		tracing_dl = 0;

	wakeup_task = p;
	get_task_struct(wakeup_task);

	local_save_flags(flags);

	data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
	data->preempt_timestamp = ftrace_now(cpu);
	tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);

	/*
	 * We must be careful in using CALLER_ADDR2. But since wake_up
	 * is not called by an assembly function (whereas schedule is)
	 * it should be safe to use it here.
	 */
	__trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

out_locked:
	arch_spin_unlock(&wakeup_lock);
out:
	atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
}

static void start_wakeup_tracer(struct trace_array *tr)
{
	int ret;

tracing: Let tracepoints have data passed to tracepoint callbacks
This patch adds data to be passed to tracepoint callbacks.
The created functions from DECLARE_TRACE() now need a mandatory data
parameter. For example:
DECLARE_TRACE(mytracepoint, int value, value)
Will create the register function:
int register_trace_mytracepoint((void(*)(void *data, int value))probe,
void *data);
As the first argument, all callbacks (probes) must take a (void *data)
parameter. So a callback for the above tracepoint will look like:
void myprobe(void *data, int value)
{
}
The callback may choose to ignore the data parameter.
This change allows callbacks to register a private data pointer along
with the function probe.
void mycallback(void *data, int value);
register_trace_mytracepoint(mycallback, mydata);
Then the mycallback() will receive the "mydata" as the first parameter
before the args.
A more detailed example:
DECLARE_TRACE(mytracepoint, TP_PROTO(int status), TP_ARGS(status));
/* In the C file */
DEFINE_TRACE(mytracepoint, TP_PROTO(int status), TP_ARGS(status));
[...]
trace_mytracepoint(status);
/* In a file registering this tracepoint */
int my_callback(void *data, int status)
{
struct my_struct *my_data = data;
[...]
}
[...]
my_data = kmalloc(sizeof(*my_data), GFP_KERNEL);
init_my_data(my_data);
register_trace_mytracepoint(my_callback, my_data);
The same callback can also be registered to the same tracepoint as long
as the data registered is different. Note, the data must also be used
to unregister the callback:
unregister_trace_mytracepoint(my_callback, my_data);
Because of the data parameter, tracepoints declared this way cannot be
declared with no args. That is:
DECLARE_TRACE(mytracepoint, TP_PROTO(void), TP_ARGS());
will cause an error.
If no arguments are needed, a new macro can be used instead:
DECLARE_TRACE_NOARGS(mytracepoint);
Since there are no arguments, the proto and args fields are left out.
This is part of a series to make the tracepoint footprint smaller:
text data bss dec hex filename
4913961 1088356 861512 6863829 68bbd5 vmlinux.orig
4914025 1088868 861512 6864405 68be15 vmlinux.class
4918492 1084612 861512 6864616 68bee8 vmlinux.tracepoint
Again, this patch increases the size of the kernel, but
lays the groundwork for decreasing it.
v5: Fixed net/core/drop_monitor.c to handle these updates.
v4: Moved the DECLARE_TRACE() and DECLARE_TRACE_NOARGS() out of the
#ifdef CONFIG_TRACEPOINTS, since the two are the same in both
cases. The __DECLARE_TRACE() is what changes.
Thanks to Frederic Weisbecker for pointing this out.
v3: Made all register_* functions require data to be passed and
all callbacks to take a void * parameter as its first argument.
This makes the calling functions comply with C standards.
Also added more comments to the modifications of DECLARE_TRACE().
v2: Made the DECLARE_TRACE() have the ability to pass arguments
and added a new DECLARE_TRACE_NOARGS() for tracepoints that
do not need any arguments.
Acked-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Acked-by: Masami Hiramatsu <mhiramat@redhat.com>
Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Neil Horman <nhorman@tuxdriver.com>
Cc: David S. Miller <davem@davemloft.net>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2010-04-21 04:04:50 +07:00
|
|
|
ret = register_trace_sched_wakeup(probe_wakeup, NULL);
|
2008-05-13 02:21:10 +07:00
|
|
|
if (ret) {
|
2008-07-18 23:16:17 +07:00
|
|
|
pr_info("wakeup trace: Couldn't activate tracepoint"
|
2008-05-13 02:21:10 +07:00
|
|
|
" probe to kernel_sched_wakeup\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2010-04-21 04:04:50 +07:00
|
|
|
ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
|
2008-05-13 02:21:10 +07:00
|
|
|
if (ret) {
|
2008-07-18 23:16:17 +07:00
|
|
|
pr_info("wakeup trace: Couldn't activate tracepoint"
|
2008-05-13 02:21:10 +07:00
|
|
|
" probe to kernel_sched_wakeup_new\n");
|
|
|
|
goto fail_deprobe;
|
|
|
|
}
|
|
|
|
|
2010-04-21 04:04:50 +07:00
|
|
|
ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
|
2008-05-13 02:21:10 +07:00
|
|
|
if (ret) {
|
2008-07-18 23:16:17 +07:00
|
|
|
pr_info("sched trace: Couldn't activate tracepoint"
|
2009-02-17 13:10:02 +07:00
|
|
|
" probe to kernel_sched_switch\n");
|
2008-05-13 02:21:10 +07:00
|
|
|
goto fail_deprobe_wake_new;
|
|
|
|
}
|
|
|
|
|
2010-04-21 04:04:50 +07:00
|
|
|
ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
|
tracing: do not grab lock in wakeup latency function tracing
The wakeup tracer, when enabled, has its own function tracer.
It only traces functions on the CPU that the task it is following is
currently on. If a task is woken on one CPU but then migrates to another CPU
before it wakes up, the latency tracer will then start tracing functions
on the other CPU.
To find which CPU the task is on, the wakeup function tracer performs
a task_cpu(wakeup_task). But to make sure the task does not disappear,
it grabs the wakeup_lock, which is also taken when the task wakes up.
By taking this lock, the function tracer does not need to worry about
the task being freed as it checks its cpu.
Jan Blunck found a problem with this approach on his 32 CPU box. When
a task is being traced by the wakeup tracer, all functions take this
lock. That means that on all 32 CPUs, each function call is taking
this one lock to see if the task is on that CPU. This lock has just
serialized all functions on all 32 CPUs. Needless to say, this caused
major issues on that box. It would even lockup.
This patch changes the wakeup latency tracer to insert a probe on the
migrate-task tracepoint. When a task changes the CPU it will run on, the
probe takes note. Now the wakeup function tracer no longer needs
to take the lock; it only compares the current CPU with a variable that
holds the CPU the task is currently on. We don't worry about races, since
it is OK to add or miss a function trace.
Reported-by: Jan Blunck <jblunck@suse.de>
Tested-by: Jan Blunck <jblunck@suse.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
2009-09-09 21:36:01 +07:00
|
|
|
if (ret) {
|
|
|
|
pr_info("wakeup trace: Couldn't activate tracepoint"
|
|
|
|
" probe to kernel_sched_migrate_task\n");
|
|
|
|
return;
|
|
|
|
}
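The migrate probe referenced above is tiny; a sketch of its body,
assuming a wakeup_current_cpu variable that the function tracer compares
against instead of taking the lock:
static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{
	if (task != wakeup_task)
		return;

	/* lockless by design: adding or missing a function trace is OK */
	wakeup_current_cpu = cpu;
}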
|
|
|
|
|
2008-05-13 02:20:42 +07:00
|
|
|
wakeup_reset(tr);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Don't let the tracer_enabled = 1 show up before
|
|
|
|
* the wakeup_task is reset. This may be overkill since
|
|
|
|
* wakeup_reset does a spin_unlock after setting the
|
|
|
|
* wakeup_task to NULL, but I want to be safe.
|
|
|
|
* This is a slow path anyway.
|
|
|
|
*/
|
|
|
|
smp_wmb();
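The write barrier above only orders the stores; a reader needs a
matching read barrier to observe that order. Schematically (a sketch of
the pairing, not the exact reader in this file):
/* writer (this path) */
wakeup_task = NULL;	/* done inside wakeup_reset() */
smp_wmb();
tracer_enabled = 1;

/* reader (sketch) */
if (tracer_enabled) {
	smp_rmb();
	/* cannot see a stale, pre-reset wakeup_task */
}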
|
|
|
|
|
2015-09-30 20:42:05 +07:00
|
|
|
if (start_func_tracer(tr, is_graph(tr)))
|
2010-09-23 19:00:53 +07:00
|
|
|
printk(KERN_ERR "failed to start wakeup tracer\n");
|
2008-07-11 07:58:13 +07:00
|
|
|
|
2008-05-13 02:20:42 +07:00
|
|
|
return;
|
2008-05-13 02:21:10 +07:00
|
|
|
fail_deprobe_wake_new:
|
2010-04-21 04:04:50 +07:00
|
|
|
unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
|
2008-05-13 02:21:10 +07:00
|
|
|
fail_deprobe:
|
2010-04-21 04:04:50 +07:00
|
|
|
unregister_trace_sched_wakeup(probe_wakeup, NULL);
|
2008-05-13 02:20:42 +07:00
|
|
|
}
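The registration sequence above uses the kernel's usual goto-unwind
idiom: each failure label undoes, in reverse order, exactly the
registrations that succeeded before it. In miniature (acquire_a,
acquire_b and their release counterparts are hypothetical):
static int setup(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		return ret;	/* nothing to undo yet */
	ret = acquire_b();
	if (ret)
		goto fail_a;	/* undo only a */
	return 0;
fail_a:
	release_a();
	return ret;
}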
|
|
|
|
|
2008-05-13 02:20:51 +07:00
|
|
|
static void stop_wakeup_tracer(struct trace_array *tr)
|
2008-05-13 02:20:42 +07:00
|
|
|
{
|
|
|
|
tracer_enabled = 0;
|
2015-09-30 20:42:05 +07:00
|
|
|
stop_func_tracer(tr, is_graph(tr));
|
2010-04-21 04:04:50 +07:00
|
|
|
unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
|
|
|
|
unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
|
|
|
|
unregister_trace_sched_wakeup(probe_wakeup, NULL);
|
|
|
|
unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
|
2008-05-13 02:20:42 +07:00
|
|
|
}
|
|
|
|
|
2014-01-14 19:06:29 +07:00
|
|
|
static bool wakeup_busy;
|
|
|
|
|
2009-01-22 04:24:46 +07:00
|
|
|
static int __wakeup_tracer_init(struct trace_array *tr)
|
2008-05-13 02:20:42 +07:00
|
|
|
{
|
2015-09-30 20:42:05 +07:00
|
|
|
save_flags = tr->trace_flags;
|
2013-03-15 02:03:53 +07:00
|
|
|
|
|
|
|
/* non-overwrite screws up the latency tracers */
|
2012-05-12 00:29:49 +07:00
|
|
|
set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
|
|
|
|
set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
|
2009-03-05 10:15:30 +07:00
|
|
|
|
2014-01-14 23:28:38 +07:00
|
|
|
tr->max_latency = 0;
|
2008-05-13 02:20:42 +07:00
|
|
|
wakeup_trace = tr;
|
2014-01-11 05:01:58 +07:00
|
|
|
ftrace_init_array_ops(tr, wakeup_tracer_call);
|
2008-11-08 10:36:02 +07:00
|
|
|
start_wakeup_tracer(tr);
|
2014-01-14 19:06:29 +07:00
|
|
|
|
|
|
|
wakeup_busy = true;
|
2008-11-16 11:57:26 +07:00
|
|
|
return 0;
|
2008-05-13 02:20:42 +07:00
|
|
|
}
|
|
|
|
|
2009-01-22 04:24:46 +07:00
|
|
|
static int wakeup_tracer_init(struct trace_array *tr)
|
|
|
|
{
|
2014-01-14 19:06:29 +07:00
|
|
|
if (wakeup_busy)
|
|
|
|
return -EBUSY;
|
|
|
|
|
2013-11-07 20:43:42 +07:00
|
|
|
wakeup_dl = 0;
|
2009-01-22 04:24:46 +07:00
|
|
|
wakeup_rt = 0;
|
|
|
|
return __wakeup_tracer_init(tr);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int wakeup_rt_tracer_init(struct trace_array *tr)
|
|
|
|
{
|
2014-01-14 19:06:29 +07:00
|
|
|
if (wakeup_busy)
|
|
|
|
return -EBUSY;
|
|
|
|
|
2013-11-07 20:43:42 +07:00
|
|
|
wakeup_dl = 0;
|
2009-01-22 04:24:46 +07:00
|
|
|
wakeup_rt = 1;
|
|
|
|
return __wakeup_tracer_init(tr);
|
|
|
|
}
|
|
|
|
|
2013-11-07 20:43:42 +07:00
|
|
|
static int wakeup_dl_tracer_init(struct trace_array *tr)
|
|
|
|
{
|
2014-01-14 19:06:29 +07:00
|
|
|
if (wakeup_busy)
|
|
|
|
return -EBUSY;
|
|
|
|
|
2013-11-07 20:43:42 +07:00
|
|
|
wakeup_dl = 1;
|
|
|
|
wakeup_rt = 0;
|
|
|
|
return __wakeup_tracer_init(tr);
|
|
|
|
}
|
|
|
|
|
2008-05-13 02:20:51 +07:00
|
|
|
static void wakeup_tracer_reset(struct trace_array *tr)
|
2008-05-13 02:20:42 +07:00
|
|
|
{
|
2013-03-15 02:03:53 +07:00
|
|
|
int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
|
|
|
|
int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
|
|
|
|
|
2008-11-08 10:36:02 +07:00
|
|
|
stop_wakeup_tracer(tr);
|
|
|
|
/* make sure we put back any tasks we are tracing */
|
|
|
|
wakeup_reset(tr);
|
2009-03-05 10:15:30 +07:00
|
|
|
|
2012-05-12 00:29:49 +07:00
|
|
|
set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
|
|
|
|
set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
|
2014-01-11 05:01:58 +07:00
|
|
|
ftrace_reset_array_ops(tr);
|
2014-01-14 19:06:29 +07:00
|
|
|
wakeup_busy = false;
|
2008-05-13 02:20:42 +07:00
|
|
|
}
|
|
|
|
|
ftrace: restructure tracing start/stop infrastructure
Impact: change where tracing is started up and stopped
Currently, when a new tracer is selected by echoing a tracer name into
the current_tracer file, the startup is only done if tracing_enabled is
set to one. If tracing_enabled is changed to zero (by echoing 0 into
the tracing_enabled file), a full shutdown is performed.
The full startup and shutdown of a tracer can be expensive, and the
user can lose traces when echoing 0 into the tracing_enabled file
because the process takes too long. There are also places where
the user would like to start and stop the tracer several times, and
doing the full startup and shutdown each time might be too expensive.
This patch performs the full startup and shutdown when a tracer is
selected. It also adds a way to do a quick start or stop of a tracer.
The quick version is just a flag that prevents the tracing from
taking place, but the overhead of the code is still there.
For example, the startup of a tracer may enable tracepoints, or enable
the function tracer. Stop and start just set a flag that has the tracer
ignore the calls when the tracepoint fires or the traced function
is called. The overhead of the tracer may still be present when
the tracer is stopped, but no tracing will occur. Setting the tracer
to the 'nop' tracer (or any other tracer) will perform the shutdown
of the tracer which will disable the tracepoint or disable the
function tracer.
The tracing_enabled file will simply start or stop tracing.
This change is all internal. The end result for the user should be the same
as before. If tracing_enabled is not set, no trace will happen.
If tracing_enabled is set, then the trace will happen. The tracing_enabled
variable is static between tracers. Enabling tracing_enabled and
switching to another tracer will keep tracing_enabled enabled. The same
is true when disabling tracing_enabled.
This patch will now provide a fast start/stop method to the users
for enabling or disabling tracing.
Note: struct tracer had two methods, start and stop, that were never
used. They were intended as hooks into reading the trace output, but
ended up not being necessary. They are now used to enable the start
and stop of each tracer, in case the tracer needs to do more than
just not write into the buffer. For example, the irqsoff tracer
must stop recording max latencies when tracing is stopped.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-11-06 04:05:44 +07:00
|
|
|
static void wakeup_tracer_start(struct trace_array *tr)
|
|
|
|
{
|
|
|
|
wakeup_reset(tr);
|
|
|
|
tracer_enabled = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void wakeup_tracer_stop(struct trace_array *tr)
|
|
|
|
{
|
|
|
|
tracer_enabled = 0;
|
2008-05-13 02:20:42 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static struct tracer wakeup_tracer __read_mostly =
|
|
|
|
{
|
|
|
|
.name = "wakeup",
|
|
|
|
.init = wakeup_tracer_init,
|
|
|
|
.reset = wakeup_tracer_reset,
|
2008-11-06 04:05:44 +07:00
|
|
|
.start = wakeup_tracer_start,
|
|
|
|
.stop = wakeup_tracer_stop,
|
2012-10-02 15:27:10 +07:00
|
|
|
.print_max = true,
|
2010-09-23 19:00:53 +07:00
|
|
|
.print_header = wakeup_print_header,
|
|
|
|
.print_line = wakeup_print_line,
|
2013-03-14 23:10:40 +07:00
|
|
|
.flag_changed = wakeup_flag_changed,
|
2008-05-13 02:20:44 +07:00
|
|
|
#ifdef CONFIG_FTRACE_SELFTEST
|
|
|
|
.selftest = trace_selftest_startup_wakeup,
|
|
|
|
#endif
|
2010-09-23 19:00:53 +07:00
|
|
|
.open = wakeup_trace_open,
|
|
|
|
.close = wakeup_trace_close,
|
2014-01-14 19:06:29 +07:00
|
|
|
.allow_instances = true,
|
2012-10-02 15:27:10 +07:00
|
|
|
.use_max_tr = true,
|
2008-05-13 02:20:42 +07:00
|
|
|
};
|
|
|
|
|
2009-01-22 04:24:46 +07:00
|
|
|
static struct tracer wakeup_rt_tracer __read_mostly =
|
|
|
|
{
|
|
|
|
.name = "wakeup_rt",
|
|
|
|
.init = wakeup_rt_tracer_init,
|
|
|
|
.reset = wakeup_tracer_reset,
|
|
|
|
.start = wakeup_tracer_start,
|
|
|
|
.stop = wakeup_tracer_stop,
|
2012-10-02 15:27:10 +07:00
|
|
|
.print_max = true,
|
2010-09-23 19:00:53 +07:00
|
|
|
.print_header = wakeup_print_header,
|
|
|
|
.print_line = wakeup_print_line,
|
2013-03-14 23:10:40 +07:00
|
|
|
.flag_changed = wakeup_flag_changed,
|
2009-01-22 04:24:46 +07:00
|
|
|
#ifdef CONFIG_FTRACE_SELFTEST
|
|
|
|
.selftest = trace_selftest_startup_wakeup,
|
|
|
|
#endif
|
2010-09-23 19:00:53 +07:00
|
|
|
.open = wakeup_trace_open,
|
|
|
|
.close = wakeup_trace_close,
|
2014-01-14 19:06:29 +07:00
|
|
|
.allow_instances = true,
|
2012-10-02 15:27:10 +07:00
|
|
|
.use_max_tr = true,
|
2009-01-22 04:24:46 +07:00
|
|
|
};
|
|
|
|
|
2013-11-07 20:43:42 +07:00
|
|
|
static struct tracer wakeup_dl_tracer __read_mostly =
|
|
|
|
{
|
|
|
|
.name = "wakeup_dl",
|
|
|
|
.init = wakeup_dl_tracer_init,
|
|
|
|
.reset = wakeup_tracer_reset,
|
|
|
|
.start = wakeup_tracer_start,
|
|
|
|
.stop = wakeup_tracer_stop,
|
|
|
|
.print_max = true,
|
|
|
|
.print_header = wakeup_print_header,
|
|
|
|
.print_line = wakeup_print_line,
|
|
|
|
.flag_changed = wakeup_flag_changed,
|
|
|
|
#ifdef CONFIG_FTRACE_SELFTEST
|
|
|
|
.selftest = trace_selftest_startup_wakeup,
|
|
|
|
#endif
|
|
|
|
.open = wakeup_trace_open,
|
|
|
|
.close = wakeup_trace_close,
|
|
|
|
.use_max_tr = true,
|
|
|
|
};
|
|
|
|
|
2008-05-13 02:20:42 +07:00
|
|
|
__init static int init_wakeup_tracer(void)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = register_tracer(&wakeup_tracer);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2009-01-22 04:24:46 +07:00
|
|
|
ret = register_tracer(&wakeup_rt_tracer);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2013-11-07 20:43:42 +07:00
|
|
|
ret = register_tracer(&wakeup_dl_tracer);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2008-05-13 02:20:42 +07:00
|
|
|
return 0;
|
|
|
|
}
|
2012-10-05 23:13:07 +07:00
|
|
|
core_initcall(init_wakeup_tracer);
|