/*
 * builtin-report.c
 *
 * Builtin report command: Analyze the perf.data input file,
 * look up and read DSOs and symbol information and display
 * a histogram of results, along various sorting keys.
 */
|
2009-05-27 14:10:38 +07:00
|
|
|
#include "builtin.h"
|
2009-05-26 14:17:18 +07:00
|
|
|
|
2009-06-03 04:37:05 +07:00
|
|
|
#include "util/util.h"
|
2013-01-22 16:09:46 +07:00
|
|
|
#include "util/cache.h"
|
2009-06-03 04:37:05 +07:00
|
|
|
|
2011-02-04 18:45:46 +07:00
|
|
|
#include "util/annotate.h"
|
2009-06-04 20:19:47 +07:00
|
|
|
#include "util/color.h"
|
2009-07-02 00:46:08 +07:00
|
|
|
#include <linux/list.h>
|
2009-07-01 22:28:37 +07:00
|
|
|
#include <linux/rbtree.h>
|
2009-05-29 00:55:04 +07:00
|
|
|
#include "util/symbol.h"
|
2009-06-26 21:28:01 +07:00
|
|
|
#include "util/callchain.h"
|
2009-07-01 05:01:20 +07:00
|
|
|
#include "util/strlist.h"
|
2009-08-07 18:55:24 +07:00
|
|
|
#include "util/values.h"
|
2009-05-18 22:45:42 +07:00
|
|
|
|
2009-05-26 14:17:18 +07:00
|
|
|
#include "perf.h"
|
2009-08-17 03:05:48 +07:00
|
|
|
#include "util/debug.h"
|
2011-03-06 07:40:06 +07:00
|
|
|
#include "util/evlist.h"
|
|
|
|
#include "util/evsel.h"
|
2009-06-25 22:05:54 +07:00
|
|
|
#include "util/header.h"
|
2009-12-12 06:24:02 +07:00
|
|
|
#include "util/session.h"
|
2011-11-28 17:30:20 +07:00
|
|
|
#include "util/tool.h"
|
2009-05-26 14:17:18 +07:00
|
|
|
|
|
|
|
#include "util/parse-options.h"
|
|
|
|
#include "util/parse-events.h"
|
|
|
|
|
2009-08-14 17:21:53 +07:00
|
|
|
#include "util/thread.h"
|
2009-09-24 23:02:49 +07:00
|
|
|
#include "util/sort.h"
|
2009-09-28 20:32:55 +07:00
|
|
|
#include "util/hist.h"
|
2013-10-15 21:27:32 +07:00
|
|
|
#include "util/data.h"
|
2012-10-16 06:33:38 +07:00
|
|
|
#include "arch/common.h"
|
2009-08-14 17:21:53 +07:00
|
|
|
|
2015-04-25 02:29:45 +07:00
|
|
|
#include "util/auxtrace.h"
|
|
|
|
|
2013-09-13 13:27:43 +07:00
|
|
|
#include <dlfcn.h>
|
2011-07-04 18:57:50 +07:00
|
|
|
#include <linux/bitmap.h>
|
|
|
|
|
2013-12-20 00:53:53 +07:00
|
|
|
/*
 * Per-invocation state for 'perf report': the event-processing tool
 * callbacks plus every knob parsed from the command line / perfconfig.
 */
struct report {
	struct perf_tool tool;			/* event delivery callbacks (must be first for container_of) */
	struct perf_session *session;		/* session for the perf.data being reported */
	bool force, use_tui, use_gtk, use_stdio; /* --force and browser-selection flags */
	bool hide_unresolved;			/* skip samples with no resolved symbol */
	bool dont_use_callchains;		/* suppress callchain processing even if data has it */
	bool show_full_info;			/* dump all header info */
	bool show_threads;			/* display per-thread read values */
	bool inverted_callchain;		/* caller-based (inverted) callchain display */
	bool mem_mode;				/* memory access profiling mode (-b mem) */
	bool header;				/* show header info before report */
	bool header_only;			/* show header info and exit */
	int max_stack;				/* --max-stack: cap callchain depth scanned per sample */
	struct perf_read_values show_threads_values; /* accumulated PERF_RECORD_READ values */
	const char *pretty_printing_style;	/* "raw" or normal style for thread values */
	const char *cpu_list;			/* -C/--cpu list, NULL when unset */
	const char *symbol_filter_str;		/* symbol filter applied to the first evsel */
	float min_percent;			/* --percent-limit threshold for hist output */
	u64 nr_entries;				/* total hist entries, for progress display */
	u64 queue_size;				/* report.queue-size: ordered-events memory cap */
	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); /* CPUs selected by cpu_list */
};
|
2011-07-04 18:57:50 +07:00
|
|
|
|
2013-12-20 00:53:53 +07:00
|
|
|
static int report__config(const char *var, const char *value, void *cb)
|
2013-01-22 16:09:46 +07:00
|
|
|
{
|
2014-06-05 16:00:20 +07:00
|
|
|
struct report *rep = cb;
|
|
|
|
|
2013-01-22 16:09:46 +07:00
|
|
|
if (!strcmp(var, "report.group")) {
|
|
|
|
symbol_conf.event_group = perf_config_bool(var, value);
|
|
|
|
return 0;
|
|
|
|
}
|
2013-05-14 09:09:06 +07:00
|
|
|
if (!strcmp(var, "report.percent-limit")) {
|
|
|
|
rep->min_percent = strtof(value, NULL);
|
|
|
|
return 0;
|
|
|
|
}
|
2013-01-22 16:09:46 +07:00
|
|
|
if (!strcmp(var, "report.children")) {
|
|
|
|
symbol_conf.cumulate_callchain = perf_config_bool(var, value);
|
|
|
|
return 0;
|
|
|
|
}
|
2014-06-05 16:00:20 +07:00
|
|
|
if (!strcmp(var, "report.queue-size")) {
|
|
|
|
rep->queue_size = perf_config_u64(var, value);
|
|
|
|
return 0;
|
|
|
|
}
|
2013-01-22 16:09:46 +07:00
|
|
|
|
|
|
|
return perf_default_config(var, value, cb);
|
|
|
|
}
|
|
|
|
|
2014-01-07 15:02:25 +07:00
|
|
|
/*
 * Callback invoked by hist_entry_iter for each added hist entry: bump
 * per-address annotation sample counts so 'annotate' views reflect this
 * sample. Which addresses get credited depends on the active mode
 * (branch / mem / cumulative / normal).
 *
 * Returns 0 on success or the first error from the inc_samples helpers.
 */
static int hist_iter__report_callback(struct hist_entry_iter *iter,
				      struct addr_location *al, bool single,
				      void *arg)
{
	int err = 0;
	struct report *rep = arg;
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct mem_info *mi;
	struct branch_info *bi;

	/* Annotation bookkeeping is pointless without an annotation UI. */
	if (!ui__has_annotation())
		return 0;

	if (sort__mode == SORT_MODE__BRANCH) {
		/* Credit both ends of the branch. */
		bi = he->branch_info;
		err = addr_map_symbol__inc_samples(&bi->from, evsel->idx);
		if (err)
			goto out;

		err = addr_map_symbol__inc_samples(&bi->to, evsel->idx);

	} else if (rep->mem_mode) {
		/* Credit the data address as well as the instruction. */
		mi = he->mem_info;
		err = addr_map_symbol__inc_samples(&mi->daddr, evsel->idx);
		if (err)
			goto out;

		err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);

	} else if (symbol_conf.cumulate_callchain) {
		/*
		 * In cumulative mode the same sample produces multiple
		 * entries (one per callchain node); only count the actual
		 * sampled location once, flagged by 'single'.
		 */
		if (single)
			err = hist_entry__inc_addr_samples(he, evsel->idx,
							   al->addr);
	} else {
		err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
	}

out:
	return err;
}
|
|
|
|
|
2011-11-28 17:30:20 +07:00
|
|
|
/*
 * perf_tool sample handler: resolve the sample to an addr_location,
 * apply the unresolved/CPU filters, then feed it into the histogram
 * via hist_entry_iter with ops matching the active report mode.
 *
 * Returns 0 (or a negative iter error) normally; -1 when the sample
 * could not be preprocessed at all.
 */
static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct report *rep = container_of(tool, struct report, tool);
	struct addr_location al;
	struct hist_entry_iter iter = {
		.evsel = evsel,
		.sample = sample,
		.hide_unresolved = rep->hide_unresolved,
		.add_entry_cb = hist_iter__report_callback,
	};
	int ret = 0;

	if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	/*
	 * Past this point al holds references (thread/map); bail out via
	 * out_put so addr_location__put() releases them.
	 */
	if (rep->hide_unresolved && al.sym == NULL)
		goto out_put;

	/* Honor -C/--cpu filtering. */
	if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
		goto out_put;

	/* Pick the iterator strategy for the current report mode. */
	if (sort__mode == SORT_MODE__BRANCH)
		iter.ops = &hist_iter_branch;
	else if (rep->mem_mode)
		iter.ops = &hist_iter_mem;
	else if (symbol_conf.cumulate_callchain)
		iter.ops = &hist_iter_cumulative;
	else
		iter.ops = &hist_iter_normal;

	/* Mark the DSO as hit so it shows up in build-id processing. */
	if (al.map != NULL)
		al.map->dso->hit = 1;

	ret = hist_entry_iter__add(&iter, &al, rep->max_stack, rep);
	if (ret < 0)
		pr_debug("problem adding hist entry, skipping event\n");
out_put:
	addr_location__put(&al);
	return ret;
}
|
2009-06-03 14:38:58 +07:00
|
|
|
|
2011-11-28 17:30:20 +07:00
|
|
|
/*
 * perf_tool handler for PERF_RECORD_READ events: when --show-threads is
 * active, accumulate the per-thread counter value for later display;
 * always dump the raw values at debug verbosity.
 *
 * Always returns 0 — read events are informational here.
 */
static int process_read_event(struct perf_tool *tool,
			      union perf_event *event,
			      struct perf_sample *sample __maybe_unused,
			      struct perf_evsel *evsel,
			      struct machine *machine __maybe_unused)
{
	struct report *rep = container_of(tool, struct report, tool);

	if (rep->show_threads) {
		/* evsel may be absent (e.g. unknown id); fall back to a placeholder. */
		const char *name = evsel ? perf_evsel__name(evsel) : "unknown";
		perf_read_values_add_value(&rep->show_threads_values,
					   event->read.pid, event->read.tid,
					   event->read.id,
					   name,
					   event->read.value);
	}

	dump_printf(": %d %d %s %" PRIu64 "\n", event->read.pid, event->read.tid,
		    evsel ? perf_evsel__name(evsel) : "FAIL",
		    event->read.value);

	return 0;
}
|
|
|
|
|
2012-06-12 02:48:41 +07:00
|
|
|
/* For pipe mode, sample_type is not currently set */
/*
 * Validate the requested report options against the sample_type actually
 * recorded in the perf.data file, and configure global callchain state
 * accordingly.
 *
 * Returns 0 on success, -EINVAL/-1 when an option needs data that was
 * not recorded (e.g. --sort parent or -b without the matching record
 * flags).
 */
static int report__setup_sample_type(struct report *rep)
{
	struct perf_session *session = rep->session;
	u64 sample_type = perf_evlist__combined_sample_type(session->evlist);
	bool is_pipe = perf_data_file__is_pipe(session->file);

	/* Pipe input can't be checked: sample_type is unknown there (see above). */
	if (!is_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
		if (sort__has_parent) {
			ui__error("Selected --sort parent, but no "
				    "callchain data. Did you call "
				    "'perf record' without -g?\n");
			return -EINVAL;
		}
		if (symbol_conf.use_callchain) {
			ui__error("Selected -g or --branch-history but no "
				  "callchain data. Did\n"
				  "you call 'perf record' without -g?\n");
			return -1;
		}
	} else if (!rep->dont_use_callchains &&
		   callchain_param.mode != CHAIN_NONE &&
		   !symbol_conf.use_callchain) {
			/* Callchain data is present: enable it by default. */
			symbol_conf.use_callchain = true;
			if (callchain_register_param(&callchain_param) < 0) {
				ui__error("Can't register callchain params.\n");
				return -EINVAL;
			}
	}

	if (symbol_conf.cumulate_callchain) {
		/* Silently ignore if callchain is missing */
		if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
			symbol_conf.cumulate_callchain = false;
			perf_hpp__cancel_cumulate();
		}
	}

	if (sort__mode == SORT_MODE__BRANCH) {
		if (!is_pipe &&
		    !(sample_type & PERF_SAMPLE_BRANCH_STACK)) {
			ui__error("Selected -b but no branch data. "
				  "Did you call perf record without -b?\n");
			return -1;
		}
	}

	/*
	 * Deduce how callchains were recorded from the sample flags:
	 * user regs + user stack => DWARF unwind, branch stack => LBR,
	 * otherwise frame pointers.
	 */
	if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) {
		if ((sample_type & PERF_SAMPLE_REGS_USER) &&
		    (sample_type & PERF_SAMPLE_STACK_USER))
			callchain_param.record_mode = CALLCHAIN_DWARF;
		else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
			callchain_param.record_mode = CALLCHAIN_LBR;
		else
			callchain_param.record_mode = CALLCHAIN_FP;
	}
	return 0;
}
|
2009-05-27 01:51:47 +07:00
|
|
|
|
2012-09-11 05:15:03 +07:00
|
|
|
/*
 * Signal handler: flag the session as done so the processing loop can
 * wind down gracefully instead of being killed mid-report.
 */
static void sig_handler(int sig __maybe_unused)
{
	session_done = 1;
}
|
|
|
|
|
2013-12-20 00:53:53 +07:00
|
|
|
/*
 * Print the "# Samples: ... of event '...'" header line(s) for one hists
 * to @fp, including event-group totals and the mem-mode weight/sort-order
 * variant. Returns the number of characters written.
 */
static size_t hists__fprintf_nr_sample_events(struct hists *hists, struct report *rep,
					      const char *evname, FILE *fp)
{
	size_t ret;
	char unit;
	unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
	u64 nr_events = hists->stats.total_period;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	char buf[512];
	size_t size = sizeof(buf);

	/* With relative filtering, report only the non-filtered totals. */
	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;
	}

	if (perf_evsel__is_group_event(evsel)) {
		struct perf_evsel *pos;

		/* Use the combined group description as the event name. */
		perf_evsel__group_desc(evsel, buf, size);
		evname = buf;

		/* Sum samples/period across all group members. */
		for_each_group_member(pos, evsel) {
			const struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
			} else {
				nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
				nr_events += pos_hists->stats.total_period;
			}
		}
	}

	/* Scale the sample count to a human unit (K/M/...) for display. */
	nr_samples = convert_unit(nr_samples, &unit);
	ret = fprintf(fp, "# Samples: %lu%c", nr_samples, unit);
	if (evname != NULL)
		ret += fprintf(fp, " of event '%s'", evname);

	if (rep->mem_mode) {
		ret += fprintf(fp, "\n# Total weight : %" PRIu64, nr_events);
		/* Fall back to the default mem sort order when --sort wasn't given. */
		ret += fprintf(fp, "\n# Sort order : %s", sort_order ? : default_mem_sort_order);
	} else
		ret += fprintf(fp, "\n# Event count (approx.): %" PRIu64, nr_events);

	return ret + fprintf(fp, "\n#\n");
}
|
|
|
|
|
2011-03-06 23:07:30 +07:00
|
|
|
/*
 * Plain-stdout (non-TUI/GTK) report output: print the lost-sample total,
 * then each (group-leader) evsel's header and histogram, an optional
 * usage hint, and per-thread values when --show-threads is active.
 *
 * Always returns 0.
 */
static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
					 struct report *rep,
					 const char *help)
{
	struct perf_evsel *pos;

	fprintf(stdout, "#\n# Total Lost Samples: %" PRIu64 "\n#\n", evlist->stats.total_lost_samples);
	evlist__for_each(evlist, pos) {
		struct hists *hists = evsel__hists(pos);
		const char *evname = perf_evsel__name(pos);

		/* Group members are folded into their leader's output. */
		if (symbol_conf.event_group &&
		    !perf_evsel__is_group_leader(pos))
			continue;

		hists__fprintf_nr_sample_events(hists, rep, evname, stdout);
		hists__fprintf(hists, true, 0, 0, rep->min_percent, stdout);
		fprintf(stdout, "\n\n");
	}

	/* Only hint about --sort when the user kept all the defaults. */
	if (sort_order == NULL &&
	    parent_pattern == default_parent_pattern)
		fprintf(stdout, "#\n# (%s)\n#\n", help);

	if (rep->show_threads) {
		bool style = !strcmp(rep->pretty_printing_style, "raw");
		perf_read_values_display(stdout, &rep->show_threads_values,
					 style);
		perf_read_values_destroy(&rep->show_threads_values);
	}

	return 0;
}
|
|
|
|
|
2014-01-08 20:10:00 +07:00
|
|
|
/*
 * Warn the user when kernel samples can't be resolved properly — either
 * no kernel map exists at all, or it was hit but the relocation
 * reference symbol is missing/zero (the kptr_restrict symptom).
 */
static void report__warn_kptr_restrict(const struct report *rep)
{
	struct map *kernel_map = rep->session->machines.host.vmlinux_maps[MAP__FUNCTION];
	struct kmap *kernel_kmap = kernel_map ? map__kmap(kernel_map) : NULL;

	if (kernel_map == NULL ||
	    (kernel_map->dso->hit &&
	     (kernel_kmap->ref_reloc_sym == NULL ||
	      kernel_kmap->ref_reloc_sym->addr == 0))) {
		const char *desc =
		    "As no suitable kallsyms nor vmlinux was found, kernel samples\n"
		    "can't be resolved.";

		/* Symbols exist but the reloc anchor is gone: different advice. */
		if (kernel_map) {
			const struct dso *kdso = kernel_map->dso;
			if (!RB_EMPTY_ROOT(&kdso->symbols[MAP__FUNCTION])) {
				desc = "If some relocation was applied (e.g. "
				       "kexec) symbols may be misresolved.";
			}
		}

		ui__warning(
"Kernel address maps (/proc/{kallsyms,modules}) were restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict before running 'perf record'.\n\n%s\n\n"
"Samples in kernel modules can't be resolved as well.\n\n",
			    desc);
	}
}
|
|
|
|
|
2014-01-08 22:22:07 +07:00
|
|
|
/*
 * Launch the GTK histogram browser. The GTK support lives in a
 * dlopen()ed module (perf_gtk_handle), so look the entry point up with
 * dlsym() at runtime.
 *
 * Returns the browser's result, or -1 when the GTK module isn't loaded.
 */
static int report__gtk_browse_hists(struct report *rep, const char *help)
{
	int (*hist_browser)(struct perf_evlist *evlist, const char *help,
			    struct hist_browser_timer *timer, float min_pcnt);

	hist_browser = dlsym(perf_gtk_handle, "perf_evlist__gtk_browse_hists");

	if (hist_browser == NULL) {
		ui__error("GTK browser not found!\n");
		return -1;
	}

	return hist_browser(rep->session->evlist, help, NULL, rep->min_percent);
}
|
|
|
|
|
|
|
|
static int report__browse_hists(struct report *rep)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct perf_session *session = rep->session;
|
|
|
|
struct perf_evlist *evlist = session->evlist;
|
|
|
|
const char *help = "For a higher level overview, try: perf report --sort comm,dso";
|
|
|
|
|
|
|
|
switch (use_browser) {
|
|
|
|
case 1:
|
|
|
|
ret = perf_evlist__tui_browse_hists(evlist, help, NULL,
|
|
|
|
rep->min_percent,
|
|
|
|
&session->header.env);
|
|
|
|
/*
|
|
|
|
* Usually "ret" is the last pressed key, and we only
|
|
|
|
* care if the key notifies us to switch data file.
|
|
|
|
*/
|
|
|
|
if (ret != K_SWITCH_INPUT_DATA)
|
|
|
|
ret = 0;
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
ret = report__gtk_browse_hists(rep, help);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
ret = perf_evlist__tty_browse_hists(evlist, rep, help);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2014-04-22 07:47:25 +07:00
|
|
|
static void report__collapse_hists(struct report *rep)
|
2014-01-09 00:45:24 +07:00
|
|
|
{
|
|
|
|
struct ui_progress prog;
|
|
|
|
struct perf_evsel *pos;
|
|
|
|
|
2014-04-22 07:47:25 +07:00
|
|
|
ui_progress__init(&prog, rep->nr_entries, "Merging related events...");
|
2014-01-09 00:45:24 +07:00
|
|
|
|
2014-01-10 20:37:27 +07:00
|
|
|
evlist__for_each(rep->session->evlist, pos) {
|
2014-10-09 23:13:41 +07:00
|
|
|
struct hists *hists = evsel__hists(pos);
|
2014-01-09 00:45:24 +07:00
|
|
|
|
|
|
|
if (pos->idx == 0)
|
|
|
|
hists->symbol_filter_str = rep->symbol_filter_str;
|
|
|
|
|
|
|
|
hists__collapse_resort(hists, &prog);
|
|
|
|
|
|
|
|
/* Non-group events are considered as leader */
|
|
|
|
if (symbol_conf.event_group &&
|
|
|
|
!perf_evsel__is_group_leader(pos)) {
|
2014-10-09 23:13:41 +07:00
|
|
|
struct hists *leader_hists = evsel__hists(pos->leader);
|
2014-01-09 00:45:24 +07:00
|
|
|
|
|
|
|
hists__match(leader_hists, hists);
|
|
|
|
hists__link(leader_hists, hists);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ui_progress__finish();
|
|
|
|
}
|
|
|
|
|
2014-12-22 11:44:10 +07:00
|
|
|
static void report__output_resort(struct report *rep)
|
|
|
|
{
|
|
|
|
struct ui_progress prog;
|
|
|
|
struct perf_evsel *pos;
|
|
|
|
|
|
|
|
ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");
|
|
|
|
|
|
|
|
evlist__for_each(rep->session->evlist, pos)
|
|
|
|
hists__output_resort(evsel__hists(pos), &prog);
|
|
|
|
|
|
|
|
ui_progress__finish();
|
|
|
|
}
|
|
|
|
|
2013-12-20 00:53:53 +07:00
|
|
|
/*
 * Main driver for 'perf report': process the events in the perf.data file,
 * collapse and sort the resulting histograms, and hand them to the selected
 * browser.  Returns 0 on success or a negative error from one of the
 * processing stages.
 */
static int __cmd_report(struct report *rep)
{
	int ret;
	struct perf_session *session = rep->session;
	struct perf_evsel *pos;
	struct perf_data_file *file = session->file;

	signal(SIGINT, sig_handler);

	/* Restrict processing to the user-supplied CPU list, if any. */
	if (rep->cpu_list) {
		ret = perf_session__cpu_bitmap(session, rep->cpu_list,
					       rep->cpu_bitmap);
		if (ret)
			return ret;
	}

	/* -T/--threads: prepare per-thread counter accumulation. */
	if (rep->show_threads)
		perf_read_values_init(&rep->show_threads_values);

	ret = report__setup_sample_type(rep);
	if (ret)
		return ret;

	/* Read and process every event in the data file. */
	ret = perf_session__process_events(session);
	if (ret)
		return ret;

	report__warn_kptr_restrict(rep);

	/* Pre-collapse entry count, used to size the progress bar. */
	evlist__for_each(session->evlist, pos)
		rep->nr_entries += evsel__hists(pos)->nr_entries;

	if (use_browser == 0) {
		if (verbose > 3)
			perf_session__fprintf(session, stdout);

		if (verbose > 2)
			perf_session__fprintf_dsos(session, stdout);

		/* -D/--dump-raw-trace: event statistics only, no histograms. */
		if (dump_trace) {
			perf_session__fprintf_nr_events(session, stdout);
			perf_evlist__fprintf_nr_events(session->evlist, stdout);
			return 0;
		}
	}

	report__collapse_hists(rep);

	/* Bail out early if the user interrupted the session (SIGINT). */
	if (session_done())
		return 0;

	/*
	 * recalculate number of entries after collapsing since it
	 * might be changed during the collapse phase.
	 */
	rep->nr_entries = 0;
	evlist__for_each(session->evlist, pos)
		rep->nr_entries += evsel__hists(pos)->nr_entries;

	if (rep->nr_entries == 0) {
		ui__error("The %s file has no samples!\n", file->path);
		return 0;
	}

	report__output_resort(rep);

	return report__browse_hists(rep);
}
|
|
|
|
|
2009-07-02 22:58:21 +07:00
|
|
|
static int
|
2014-04-08 01:55:24 +07:00
|
|
|
report_parse_callchain_opt(const struct option *opt, const char *arg, int unset)
|
2009-07-02 22:58:21 +07:00
|
|
|
{
|
2013-12-20 00:53:53 +07:00
|
|
|
struct report *rep = (struct report *)opt->value;
|
2009-07-03 01:14:33 +07:00
|
|
|
|
2010-01-05 20:54:45 +07:00
|
|
|
/*
|
|
|
|
* --no-call-graph
|
|
|
|
*/
|
|
|
|
if (unset) {
|
2011-11-17 21:19:04 +07:00
|
|
|
rep->dont_use_callchains = true;
|
2010-01-05 20:54:45 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-04-08 01:55:24 +07:00
|
|
|
return parse_callchain_report_opt(arg);
|
2009-07-02 22:58:21 +07:00
|
|
|
}
|
|
|
|
|
2012-12-07 12:48:05 +07:00
|
|
|
int
|
|
|
|
report_parse_ignore_callees_opt(const struct option *opt __maybe_unused,
|
|
|
|
const char *arg, int unset __maybe_unused)
|
|
|
|
{
|
|
|
|
if (arg) {
|
|
|
|
int err = regcomp(&ignore_callees_regex, arg, REG_EXTENDED);
|
|
|
|
if (err) {
|
|
|
|
char buf[BUFSIZ];
|
|
|
|
regerror(err, &ignore_callees_regex, buf, sizeof(buf));
|
|
|
|
pr_err("Invalid --ignore-callees regex: %s\n%s", arg, buf);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
have_ignore_callees = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-03-09 05:47:47 +07:00
|
|
|
static int
|
2012-09-11 05:15:03 +07:00
|
|
|
parse_branch_mode(const struct option *opt __maybe_unused,
|
|
|
|
const char *str __maybe_unused, int unset)
|
2012-03-09 05:47:47 +07:00
|
|
|
{
|
2013-04-01 18:35:20 +07:00
|
|
|
int *branch_mode = opt->value;
|
|
|
|
|
|
|
|
*branch_mode = !unset;
|
2012-03-09 05:47:47 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-05-14 09:09:04 +07:00
|
|
|
static int
|
|
|
|
parse_percent_limit(const struct option *opt, const char *str,
|
|
|
|
int unset __maybe_unused)
|
|
|
|
{
|
2013-12-20 00:53:53 +07:00
|
|
|
struct report *rep = opt->value;
|
2013-05-14 09:09:04 +07:00
|
|
|
|
|
|
|
rep->min_percent = strtof(str, NULL);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-09-11 05:15:03 +07:00
|
|
|
int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
|
2011-11-25 17:19:45 +07:00
|
|
|
{
|
2012-03-09 05:47:47 +07:00
|
|
|
struct perf_session *session;
|
2015-04-25 02:29:45 +07:00
|
|
|
struct itrace_synth_opts itrace_synth_opts = { .set = 0, };
|
2011-12-07 16:02:54 +07:00
|
|
|
struct stat st;
|
2012-03-09 05:47:47 +07:00
|
|
|
bool has_br_stack = false;
|
2013-04-01 18:35:20 +07:00
|
|
|
int branch_mode = -1;
|
2014-11-13 09:05:22 +07:00
|
|
|
bool branch_call_mode = false;
|
2011-11-25 17:19:45 +07:00
|
|
|
char callchain_default_opt[] = "fractal,0.5,callee";
|
|
|
|
const char * const report_usage[] = {
|
2011-12-12 22:16:56 +07:00
|
|
|
"perf report [<options>]",
|
2011-11-25 17:19:45 +07:00
|
|
|
NULL
|
|
|
|
};
|
2013-12-20 00:53:53 +07:00
|
|
|
struct report report = {
|
2011-11-28 17:30:20 +07:00
|
|
|
.tool = {
|
2011-11-25 17:19:45 +07:00
|
|
|
.sample = process_sample_event,
|
|
|
|
.mmap = perf_event__process_mmap,
|
2013-08-21 17:10:25 +07:00
|
|
|
.mmap2 = perf_event__process_mmap2,
|
2011-11-25 17:19:45 +07:00
|
|
|
.comm = perf_event__process_comm,
|
2012-10-07 01:44:59 +07:00
|
|
|
.exit = perf_event__process_exit,
|
|
|
|
.fork = perf_event__process_fork,
|
2011-11-25 17:19:45 +07:00
|
|
|
.lost = perf_event__process_lost,
|
|
|
|
.read = process_read_event,
|
|
|
|
.attr = perf_event__process_attr,
|
|
|
|
.tracing_data = perf_event__process_tracing_data,
|
|
|
|
.build_id = perf_event__process_build_id,
|
2015-04-25 02:29:45 +07:00
|
|
|
.id_index = perf_event__process_id_index,
|
|
|
|
.auxtrace_info = perf_event__process_auxtrace_info,
|
|
|
|
.auxtrace = perf_event__process_auxtrace,
|
2014-07-06 19:18:21 +07:00
|
|
|
.ordered_events = true,
|
2011-11-25 17:19:45 +07:00
|
|
|
.ordering_requires_timestamps = true,
|
|
|
|
},
|
perf report: Add --max-stack option to limit callchain stack scan
When callgraph data was included in the perf data file, it may take a
long time to scan all those data and merge them together especially if
the stored callchains are long and the perf data file itself is large,
like a Gbyte or so.
The callchain stack is currently limited to PERF_MAX_STACK_DEPTH (127).
This is a large value. Usually the callgraph data that developers are
most interested in are the first few levels, the rests are usually not
looked at.
This patch adds a new --max-stack option to perf-report to limit the
depth of callchain stack data to look at to reduce the time it takes for
perf-report to finish its processing. It trades the presence of trailing
stack information with faster speed.
The following table shows the elapsed time of doing perf-report on a
perf.data file of size 985,531,828 bytes.
--max_stack Elapsed Time Output data size
----------- ------------ ----------------
not set 88.0s 124,422,651
64 87.5s 116,303,213
32 87.2s 112,023,804
16 86.6s 94,326,380
8 59.9s 33,697,248
4 40.7s 10,116,637
-g none 27.1s 2,555,810
Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Acked-by: David Ahern <dsahern@gmail.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Aswin Chandramouleeswaran <aswin@hp.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/1382107129-2010-4-git-send-email-Waiman.Long@hp.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2013-10-18 21:38:48 +07:00
|
|
|
.max_stack = PERF_MAX_STACK_DEPTH,
|
2011-11-25 17:19:45 +07:00
|
|
|
.pretty_printing_style = "normal",
|
|
|
|
};
|
|
|
|
const struct option options[] = {
|
2012-10-30 10:56:02 +07:00
|
|
|
OPT_STRING('i', "input", &input_name, "file",
|
2009-05-26 14:17:18 +07:00
|
|
|
"input file name"),
|
2010-04-13 15:37:33 +07:00
|
|
|
OPT_INCR('v', "verbose", &verbose,
|
2009-05-27 05:46:14 +07:00
|
|
|
"be more verbose (show symbol address, etc)"),
|
2009-05-26 23:48:58 +07:00
|
|
|
OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
|
|
|
|
"dump raw trace in ASCII"),
|
2009-11-24 21:05:15 +07:00
|
|
|
OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
|
|
|
|
"file", "vmlinux pathname"),
|
2010-12-08 09:39:46 +07:00
|
|
|
OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
|
|
|
|
"file", "kallsyms pathname"),
|
2011-11-17 21:19:04 +07:00
|
|
|
OPT_BOOLEAN('f', "force", &report.force, "don't complain, do it"),
|
2009-11-24 21:05:15 +07:00
|
|
|
OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
|
2009-07-02 13:09:46 +07:00
|
|
|
"load module symbols - WARNING: use only with -k and LIVE kernel"),
|
2009-12-16 05:04:42 +07:00
|
|
|
OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
|
2009-07-11 22:18:37 +07:00
|
|
|
"Show a column with the number of samples"),
|
2011-11-17 21:19:04 +07:00
|
|
|
OPT_BOOLEAN('T', "threads", &report.show_threads,
|
2009-08-07 18:55:24 +07:00
|
|
|
"Show per-thread event counters"),
|
2011-11-17 21:19:04 +07:00
|
|
|
OPT_STRING(0, "pretty", &report.pretty_printing_style, "key",
|
2009-08-10 20:26:32 +07:00
|
|
|
"pretty printing style key: normal raw"),
|
2011-11-17 21:19:04 +07:00
|
|
|
OPT_BOOLEAN(0, "tui", &report.use_tui, "Use the TUI interface"),
|
2012-03-20 01:13:29 +07:00
|
|
|
OPT_BOOLEAN(0, "gtk", &report.use_gtk, "Use the GTK2 interface"),
|
2011-11-17 21:19:04 +07:00
|
|
|
OPT_BOOLEAN(0, "stdio", &report.use_stdio,
|
|
|
|
"Use the stdio interface"),
|
2013-12-09 17:02:49 +07:00
|
|
|
OPT_BOOLEAN(0, "header", &report.header, "Show data header."),
|
|
|
|
OPT_BOOLEAN(0, "header-only", &report.header_only,
|
|
|
|
"Show only data header."),
|
2009-05-28 15:52:00 +07:00
|
|
|
OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
|
2014-03-04 07:06:42 +07:00
|
|
|
"sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
|
|
|
|
" Please refer the man page for the complete list."),
|
2014-03-04 08:46:34 +07:00
|
|
|
OPT_STRING('F', "fields", &field_order, "key[,keys...]",
|
|
|
|
"output field(s): overhead, period, sample plus all of sort keys"),
|
2010-04-19 12:32:50 +07:00
|
|
|
OPT_BOOLEAN(0, "showcpuutilization", &symbol_conf.show_cpu_utilization,
|
|
|
|
"Show sample percentage for different cpu modes"),
|
2009-06-18 12:01:03 +07:00
|
|
|
OPT_STRING('p', "parent", &parent_pattern, "regex",
|
|
|
|
"regex filter to identify parent, see: '--sort parent'"),
|
2009-12-16 05:04:42 +07:00
|
|
|
OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
|
2009-06-18 19:32:19 +07:00
|
|
|
"Only display entries with parent-match"),
|
perf callchain: Support handling complete branch stacks as histograms
Currently branch stacks can be only shown as edge histograms for
individual branches. I never found this display particularly useful.
This implements an alternative mode that creates histograms over
complete branch traces, instead of individual branches, similar to how
normal callgraphs are handled. This is done by putting it in front of
the normal callgraph and then using the normal callgraph histogram
infrastructure to unify them.
This way in complex functions we can understand the control flow that
lead to a particular sample, and may even see some control flow in the
caller for short functions.
Example (simplified, of course for such simple code this is usually not
needed), please run this after the whole patchkit is in, as at this
point in the patch order there is no --branch-history, that will be
added in a patch after this one:
tcall.c:
volatile a = 10000, b = 100000, c;
__attribute__((noinline)) f2()
{
c = a / b;
}
__attribute__((noinline)) f1()
{
f2();
f2();
}
main()
{
int i;
for (i = 0; i < 1000000; i++)
f1();
}
% perf record -b -g ./tsrc/tcall
[ perf record: Woken up 1 times to write data ]
[ perf record: Captured and wrote 0.044 MB perf.data (~1923 samples) ]
% perf report --no-children --branch-history
...
54.91% tcall.c:6 [.] f2 tcall
|
|--65.53%-- f2 tcall.c:5
| |
| |--70.83%-- f1 tcall.c:11
| | f1 tcall.c:10
| | main tcall.c:18
| | main tcall.c:18
| | main tcall.c:17
| | main tcall.c:17
| | f1 tcall.c:13
| | f1 tcall.c:13
| | f2 tcall.c:7
| | f2 tcall.c:5
| | f1 tcall.c:12
| | f1 tcall.c:12
| | f2 tcall.c:7
| | f2 tcall.c:5
| | f1 tcall.c:11
| |
| --29.17%-- f1 tcall.c:12
| f1 tcall.c:12
| f2 tcall.c:7
| f2 tcall.c:5
| f1 tcall.c:11
| f1 tcall.c:10
| main tcall.c:18
| main tcall.c:18
| main tcall.c:17
| main tcall.c:17
| f1 tcall.c:13
| f1 tcall.c:13
| f2 tcall.c:7
| f2 tcall.c:5
| f1 tcall.c:12
The default output is unchanged.
This is only implemented in perf report, no change to record or anywhere
else.
This adds the basic code to report:
- add a new "branch" option to the -g option parser to enable this mode
- when the flag is set include the LBR into the callstack in machine.c.
The rest of the history code is unchanged and doesn't know the
difference between LBR entry and normal call entry.
- detect overlaps with the callchain
- remove small loop duplicates in the LBR
Current limitations:
- The LBR flags (mispredict etc.) are not shown in the history
and LBR entries have no special marker.
- It would be nice if annotate marked the LBR entries somehow
(e.g. with arrows)
v2: Various fixes.
v3: Merge further patches into this one. Fix white space.
v4: Improve manpage. Address review feedback.
v5: Rename functions. Better error message without -g. Fix crash without
-b.
v6: Rebase
v7: Rebase. Use NO_ENTRY in memset.
v8: Port to latest tip. Move add_callchain_ip to separate
patch. Skip initial entries in callchain. Minor cleanups.
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Link: http://lkml.kernel.org/r/1415844328-4884-3-git-send-email-andi@firstfloor.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2014-11-13 09:05:20 +07:00
|
|
|
OPT_CALLBACK_DEFAULT('g', "call-graph", &report, "output_type,min_percent[,print_limit],call_order[,branch]",
|
|
|
|
"Display callchains using output_type (graph, flat, fractal, or none) , min percent threshold, optional print limit, callchain order, key (function or address), add branches. "
|
2014-04-08 01:55:24 +07:00
|
|
|
"Default: fractal,0.5,callee,function", &report_parse_callchain_opt, callchain_default_opt),
|
2013-10-30 15:05:55 +07:00
|
|
|
OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
|
|
|
|
"Accumulate callchains of children and show total overhead as well"),
|
perf report: Add --max-stack option to limit callchain stack scan
When callgraph data was included in the perf data file, it may take a
long time to scan all those data and merge them together especially if
the stored callchains are long and the perf data file itself is large,
like a Gbyte or so.
The callchain stack is currently limited to PERF_MAX_STACK_DEPTH (127).
This is a large value. Usually the callgraph data that developers are
most interested in are the first few levels, the rests are usually not
looked at.
This patch adds a new --max-stack option to perf-report to limit the
depth of callchain stack data to look at to reduce the time it takes for
perf-report to finish its processing. It trades the presence of trailing
stack information with faster speed.
The following table shows the elapsed time of doing perf-report on a
perf.data file of size 985,531,828 bytes.
--max_stack Elapsed Time Output data size
----------- ------------ ----------------
not set 88.0s 124,422,651
64 87.5s 116,303,213
32 87.2s 112,023,804
16 86.6s 94,326,380
8 59.9s 33,697,248
4 40.7s 10,116,637
-g none 27.1s 2,555,810
Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Acked-by: David Ahern <dsahern@gmail.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Aswin Chandramouleeswaran <aswin@hp.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/1382107129-2010-4-git-send-email-Waiman.Long@hp.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2013-10-18 21:38:48 +07:00
|
|
|
OPT_INTEGER(0, "max-stack", &report.max_stack,
|
|
|
|
"Set the maximum stack depth when parsing the callchain, "
|
|
|
|
"anything beyond the specified depth will be ignored. "
|
|
|
|
"Default: " __stringify(PERF_MAX_STACK_DEPTH)),
|
2011-11-17 21:19:04 +07:00
|
|
|
OPT_BOOLEAN('G', "inverted", &report.inverted_callchain,
|
|
|
|
"alias for inverted call graph"),
|
2012-12-07 12:48:05 +07:00
|
|
|
OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
|
|
|
|
"ignore callees of these functions in call graphs",
|
|
|
|
report_parse_ignore_callees_opt),
|
2009-12-16 05:04:40 +07:00
|
|
|
OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
|
2009-07-01 05:01:20 +07:00
|
|
|
"only consider symbols in these dsos"),
|
2011-11-14 01:30:08 +07:00
|
|
|
OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
|
2009-07-01 05:01:21 +07:00
|
|
|
"only consider symbols in these comms"),
|
2015-03-24 22:52:41 +07:00
|
|
|
OPT_STRING(0, "pid", &symbol_conf.pid_list_str, "pid[,pid...]",
|
|
|
|
"only consider symbols in these pids"),
|
|
|
|
OPT_STRING(0, "tid", &symbol_conf.tid_list_str, "tid[,tid...]",
|
|
|
|
"only consider symbols in these tids"),
|
2009-12-16 05:04:40 +07:00
|
|
|
OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
|
2009-07-01 05:01:22 +07:00
|
|
|
"only consider these symbols"),
|
2012-03-16 15:50:54 +07:00
|
|
|
OPT_STRING(0, "symbol-filter", &report.symbol_filter_str, "filter",
|
|
|
|
"only show symbols that (partially) match with this filter"),
|
2009-12-16 05:04:40 +07:00
|
|
|
OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
|
2009-07-11 08:47:28 +07:00
|
|
|
"width[,width...]",
|
|
|
|
"don't try to adjust column width, use these fixed values"),
|
2015-03-13 19:51:54 +07:00
|
|
|
OPT_STRING_NOEMPTY('t', "field-separator", &symbol_conf.field_sep, "separator",
|
2009-07-11 08:47:28 +07:00
|
|
|
"separator for columns, no spaces will be added between "
|
|
|
|
"columns '.' is reserved."),
|
2011-11-17 21:19:04 +07:00
|
|
|
OPT_BOOLEAN('U', "hide-unresolved", &report.hide_unresolved,
|
2009-12-29 07:48:34 +07:00
|
|
|
"Only display entries resolved to a symbol"),
|
2010-12-10 03:27:07 +07:00
|
|
|
OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
|
|
|
|
"Look for files with symbols relative to this directory"),
|
2011-11-14 01:30:08 +07:00
|
|
|
OPT_STRING('C', "cpu", &report.cpu_list, "cpu",
|
2011-11-17 21:19:04 +07:00
|
|
|
"list of cpus to profile"),
|
|
|
|
OPT_BOOLEAN('I', "show-info", &report.show_full_info,
|
perf tools: Make perf.data more self-descriptive (v8)
The goal of this patch is to include more information about the host
environment into the perf.data so it is more self-descriptive. Overtime,
profiles are captured on various machines and it becomes hard to track
what was recorded, on what machine and when.
This patch provides a way to solve this by extending the perf.data file
with basic information about the host machine. To add those extensions,
we leverage the feature bits capabilities of the perf.data format. The
change is backward compatible with existing perf.data files.
We define the following useful new extensions:
- HEADER_HOSTNAME: the hostname
- HEADER_OSRELEASE: the kernel release number
- HEADER_ARCH: the hw architecture
- HEADER_CPUDESC: generic CPU description
- HEADER_NRCPUS: number of online/avail cpus
- HEADER_CMDLINE: perf command line
- HEADER_VERSION: perf version
- HEADER_TOPOLOGY: cpu topology
- HEADER_EVENT_DESC: full event description (attrs)
- HEADER_CPUID: easy-to-parse low level CPU identification
The small granularity for the entries is to make it easier to extend
without breaking backward compatiblity. Many entries are provided as
ASCII strings.
Perf report/script have been modified to print the basic information as
easy-to-parse ASCII strings. Extended information about CPU and NUMA
topology may be requested with the -I option.
Thanks to David Ahern for reviewing and testing the many versions of
this patch.
$ perf report --stdio
# ========
# captured on : Mon Sep 26 15:22:14 2011
# hostname : quad
# os release : 3.1.0-rc4-tip
# perf version : 3.1.0-rc4
# arch : x86_64
# nrcpus online : 4
# nrcpus avail : 4
# cpudesc : Intel(R) Core(TM)2 Quad CPU Q6600 @ 2.40GHz
# cpuid : GenuineIntel,6,15,11
# total memory : 8105360 kB
# cmdline : /home/eranian/perfmon/official/tip/build/tools/perf/perf record date
# event : name = cycles, type = 0, config = 0x0, config1 = 0x0, config2 = 0x0, excl_usr = 0, excl_kern = 0, id = { 29, 30, 31,
# HEADER_CPU_TOPOLOGY info available, use -I to display
# HEADER_NUMA_TOPOLOGY info available, use -I to display
# ========
#
...
$ perf report --stdio -I
# ========
# captured on : Mon Sep 26 15:22:14 2011
# hostname : quad
# os release : 3.1.0-rc4-tip
# perf version : 3.1.0-rc4
# arch : x86_64
# nrcpus online : 4
# nrcpus avail : 4
# cpudesc : Intel(R) Core(TM)2 Quad CPU Q6600 @ 2.40GHz
# cpuid : GenuineIntel,6,15,11
# total memory : 8105360 kB
# cmdline : /home/eranian/perfmon/official/tip/build/tools/perf/perf record date
# event : name = cycles, type = 0, config = 0x0, config1 = 0x0, config2 = 0x0, excl_usr = 0, excl_kern = 0, id = { 29, 30, 31,
# sibling cores : 0-3
# sibling threads : 0
# sibling threads : 1
# sibling threads : 2
# sibling threads : 3
# node0 meminfo : total = 8320608 kB, free = 7571024 kB
# node0 cpu list : 0-3
# ========
#
...
Reviewed-by: David Ahern <dsahern@gmail.com>
Tested-by: David Ahern <dsahern@gmail.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Andi Kleen <ak@linux.intel.com>
Link: http://lkml.kernel.org/r/20110930134040.GA5575@quad
Signed-off-by: Stephane Eranian <eranian@google.com>
[ committer notes: Use --show-info in the tools as was in the docs, rename
perf_header_fprintf_info to perf_file_section__fprintf_info, fixup
conflict with f69b64f7 "perf: Support setting the disassembler style" ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2011-09-30 20:40:40 +07:00
|
|
|
"Display extended information about perf.data file"),
|
2011-10-06 22:48:31 +07:00
|
|
|
OPT_BOOLEAN(0, "source", &symbol_conf.annotate_src,
|
|
|
|
"Interleave source code with assembly code (default)"),
|
|
|
|
OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
|
|
|
|
"Display raw encoding of assembly instructions (default)"),
|
2011-09-16 04:31:41 +07:00
|
|
|
OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
|
|
|
|
"Specify disassembler style (e.g. -M intel for intel syntax)"),
|
2011-10-06 02:10:06 +07:00
|
|
|
OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
|
|
|
|
"Show a column with the sum of periods"),
|
2013-01-22 16:09:45 +07:00
|
|
|
OPT_BOOLEAN(0, "group", &symbol_conf.event_group,
|
|
|
|
"Show event group information together"),
|
2013-04-01 18:35:20 +07:00
|
|
|
OPT_CALLBACK_NOOPT('b', "branch-stack", &branch_mode, "",
|
2014-11-13 09:05:22 +07:00
|
|
|
"use branch records for per branch histogram filling",
|
|
|
|
parse_branch_mode),
|
|
|
|
OPT_BOOLEAN(0, "branch-history", &branch_call_mode,
|
|
|
|
"add last branch records to call history"),
|
2012-09-04 17:32:30 +07:00
|
|
|
OPT_STRING(0, "objdump", &objdump_path, "path",
|
|
|
|
"objdump binary to use for disassembly and annotations"),
|
2013-03-25 16:18:18 +07:00
|
|
|
OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
|
|
|
|
"Disable symbol demangling"),
|
2014-09-13 11:15:05 +07:00
|
|
|
OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
|
|
|
|
"Enable kernel symbol demangling"),
|
2013-01-24 22:10:36 +07:00
|
|
|
OPT_BOOLEAN(0, "mem-mode", &report.mem_mode, "mem access profile"),
|
2013-05-14 09:09:04 +07:00
|
|
|
OPT_CALLBACK(0, "percent-limit", &report, "percent",
|
|
|
|
"Don't show entries under that percent", parse_percent_limit),
|
2014-01-14 09:52:48 +07:00
|
|
|
OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
|
2014-02-07 10:06:07 +07:00
|
|
|
"how to display percentage of filtered entries", parse_filter_percentage),
|
2015-04-25 02:29:45 +07:00
|
|
|
OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
|
|
|
|
"Instruction Tracing options",
|
|
|
|
itrace_parse_synth_opts),
|
2009-05-26 14:17:18 +07:00
|
|
|
OPT_END()
|
2011-11-25 17:19:45 +07:00
|
|
|
};
|
2013-10-15 21:27:32 +07:00
|
|
|
struct perf_data_file file = {
|
|
|
|
.mode = PERF_DATA_MODE_READ,
|
|
|
|
};
|
2014-10-10 02:16:00 +07:00
|
|
|
int ret = hists__init();
|
|
|
|
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
2009-05-26 14:17:18 +07:00
|
|
|
|
2013-12-20 00:53:53 +07:00
|
|
|
perf_config(report__config, &report);
|
2013-01-22 16:09:46 +07:00
|
|
|
|
2009-12-16 05:04:40 +07:00
|
|
|
argc = parse_options(argc, argv, options, report_usage, 0);
|
|
|
|
|
2011-11-17 21:19:04 +07:00
|
|
|
if (report.use_stdio)
|
2010-08-21 20:38:16 +07:00
|
|
|
use_browser = 0;
|
2011-11-17 21:19:04 +07:00
|
|
|
else if (report.use_tui)
|
2010-08-21 20:38:16 +07:00
|
|
|
use_browser = 1;
|
2012-03-20 01:13:29 +07:00
|
|
|
else if (report.use_gtk)
|
|
|
|
use_browser = 2;
|
2010-08-21 20:38:16 +07:00
|
|
|
|
2011-11-17 21:19:04 +07:00
|
|
|
if (report.inverted_callchain)
|
2011-06-07 22:49:46 +07:00
|
|
|
callchain_param.order = ORDER_CALLER;
|
|
|
|
|
2012-10-30 10:56:02 +07:00
|
|
|
if (!input_name || !strlen(input_name)) {
|
2011-12-07 16:02:54 +07:00
|
|
|
if (!fstat(STDIN_FILENO, &st) && S_ISFIFO(st.st_mode))
|
2012-10-30 10:56:02 +07:00
|
|
|
input_name = "-";
|
2011-12-07 16:02:54 +07:00
|
|
|
else
|
2012-10-30 10:56:02 +07:00
|
|
|
input_name = "perf.data";
|
2011-12-07 16:02:54 +07:00
|
|
|
}
|
2013-02-03 13:38:21 +07:00
|
|
|
|
2013-10-15 21:27:32 +07:00
|
|
|
file.path = input_name;
|
|
|
|
file.force = report.force;
|
|
|
|
|
2013-02-03 13:38:21 +07:00
|
|
|
repeat:
|
2013-10-15 21:27:32 +07:00
|
|
|
session = perf_session__new(&file, false, &report.tool);
|
2012-03-09 05:47:47 +07:00
|
|
|
if (session == NULL)
|
2014-09-24 08:33:37 +07:00
|
|
|
return -1;
|
2012-03-09 05:47:47 +07:00
|
|
|
|
2014-06-05 16:00:20 +07:00
|
|
|
if (report.queue_size) {
|
|
|
|
ordered_events__set_alloc_size(&session->ordered_events,
|
|
|
|
report.queue_size);
|
|
|
|
}
|
|
|
|
|
2015-04-25 02:29:45 +07:00
|
|
|
session->itrace_synth_opts = &itrace_synth_opts;
|
|
|
|
|
2012-03-09 05:47:47 +07:00
|
|
|
report.session = session;
|
|
|
|
|
|
|
|
has_br_stack = perf_header__has_feat(&session->header,
|
|
|
|
HEADER_BRANCH_STACK);
|
2011-12-07 16:02:54 +07:00
|
|
|
|
2014-11-13 09:05:22 +07:00
|
|
|
/*
|
|
|
|
* Branch mode is a tristate:
|
|
|
|
* -1 means default, so decide based on the file having branch data.
|
|
|
|
* 0/1 means the user chose a mode.
|
|
|
|
*/
|
|
|
|
if (((branch_mode == -1 && has_br_stack) || branch_mode == 1) &&
|
2015-02-15 09:33:37 +07:00
|
|
|
!branch_call_mode) {
|
2013-04-01 18:35:20 +07:00
|
|
|
sort__mode = SORT_MODE__BRANCH;
|
2013-10-30 15:05:55 +07:00
|
|
|
symbol_conf.cumulate_callchain = false;
|
|
|
|
}
|
2014-11-13 09:05:22 +07:00
|
|
|
if (branch_call_mode) {
|
2014-11-18 08:58:54 +07:00
|
|
|
callchain_param.key = CCKEY_ADDRESS;
|
2014-11-13 09:05:22 +07:00
|
|
|
callchain_param.branch_callstack = 1;
|
|
|
|
symbol_conf.use_callchain = true;
|
|
|
|
callchain_register_param(&callchain_param);
|
|
|
|
if (sort_order == NULL)
|
|
|
|
sort_order = "srcline,symbol,dso";
|
|
|
|
}
|
2012-03-09 05:47:47 +07:00
|
|
|
|
2013-01-24 22:10:36 +07:00
|
|
|
if (report.mem_mode) {
|
2013-04-01 18:35:20 +07:00
|
|
|
if (sort__mode == SORT_MODE__BRANCH) {
|
2013-12-20 12:11:12 +07:00
|
|
|
pr_err("branch and mem mode incompatible\n");
|
2013-01-24 22:10:36 +07:00
|
|
|
goto error;
|
|
|
|
}
|
2013-04-03 19:26:11 +07:00
|
|
|
sort__mode = SORT_MODE__MEMORY;
|
2013-10-30 15:05:55 +07:00
|
|
|
symbol_conf.cumulate_callchain = false;
|
2013-01-24 22:10:36 +07:00
|
|
|
}
|
2012-03-09 05:47:48 +07:00
|
|
|
|
2013-11-01 14:33:13 +07:00
|
|
|
if (setup_sorting() < 0) {
|
2014-03-04 08:46:34 +07:00
|
|
|
if (sort_order)
|
|
|
|
parse_options_usage(report_usage, options, "s", 1);
|
|
|
|
if (field_order)
|
|
|
|
parse_options_usage(sort_order ? NULL : report_usage,
|
|
|
|
options, "F", 1);
|
2013-11-01 14:33:13 +07:00
|
|
|
goto error;
|
|
|
|
}
|
2012-09-14 15:35:28 +07:00
|
|
|
|
2015-05-09 22:19:43 +07:00
|
|
|
/* Force tty output for header output and per-thread stat. */
|
|
|
|
if (report.header || report.header_only || report.show_threads)
|
2013-12-09 17:02:49 +07:00
|
|
|
use_browser = 0;
|
|
|
|
|
2013-11-01 14:33:12 +07:00
|
|
|
if (strcmp(input_name, "-") != 0)
|
|
|
|
setup_browser(true);
|
2014-04-16 09:04:51 +07:00
|
|
|
else
|
2013-11-01 14:33:12 +07:00
|
|
|
use_browser = 0;
|
|
|
|
|
2013-12-09 17:02:49 +07:00
|
|
|
if (report.header || report.header_only) {
|
|
|
|
perf_session__fprintf_info(session, stdout,
|
|
|
|
report.show_full_info);
|
|
|
|
if (report.header_only)
|
|
|
|
return 0;
|
|
|
|
} else if (use_browser == 0) {
|
|
|
|
fputs("# To display the perf.data header info, please use --header/--header-only options.\n#\n",
|
|
|
|
stdout);
|
|
|
|
}
|
|
|
|
|
2010-05-12 09:18:06 +07:00
|
|
|
/*
|
2013-03-28 21:34:10 +07:00
|
|
|
* Only in the TUI browser we are doing integrated annotation,
|
2010-05-12 09:18:06 +07:00
|
|
|
* so don't allocate extra space that won't be used in the stdio
|
|
|
|
* implementation.
|
|
|
|
*/
|
2014-03-18 13:32:26 +07:00
|
|
|
if (ui__has_annotation()) {
|
2011-02-04 18:45:46 +07:00
|
|
|
symbol_conf.priv_size = sizeof(struct annotation);
|
2013-08-08 18:32:22 +07:00
|
|
|
machines__set_symbol_filter(&session->machines,
|
|
|
|
symbol__annotate_init);
|
2010-08-06 05:28:27 +07:00
|
|
|
/*
|
|
|
|
* For searching by name on the "Browse map details".
|
|
|
|
* providing it only in verbose mode not to bloat too
|
|
|
|
* much struct symbol.
|
|
|
|
*/
|
|
|
|
if (verbose) {
|
|
|
|
/*
|
|
|
|
* XXX: Need to provide a less kludgy way to ask for
|
|
|
|
* more space per symbol, the u32 is for the index on
|
|
|
|
* the ui browser.
|
|
|
|
* See symbol__browser_index.
|
|
|
|
*/
|
|
|
|
symbol_conf.priv_size += sizeof(u32);
|
|
|
|
symbol_conf.sort_by_name = true;
|
|
|
|
}
|
|
|
|
}
|
2009-12-16 05:04:40 +07:00
|
|
|
|
2014-08-12 13:40:45 +07:00
|
|
|
if (symbol__init(&session->header.env) < 0)
|
2012-03-09 05:47:47 +07:00
|
|
|
goto error;
|
2009-05-26 14:17:18 +07:00
|
|
|
|
2012-03-16 15:50:55 +07:00
|
|
|
if (argc) {
|
|
|
|
/*
|
|
|
|
* Special case: if there's an argument left then assume that
|
|
|
|
* it's a symbol filter:
|
|
|
|
*/
|
|
|
|
if (argc > 1)
|
|
|
|
usage_with_options(report_usage, options);
|
|
|
|
|
|
|
|
report.symbol_filter_str = argv[0];
|
|
|
|
}
|
2009-06-04 21:24:37 +07:00
|
|
|
|
2013-04-03 19:26:19 +07:00
|
|
|
sort__setup_elide(stdout);
|
2009-07-01 05:01:20 +07:00
|
|
|
|
2012-03-09 05:47:47 +07:00
|
|
|
ret = __cmd_report(&report);
|
2013-02-03 13:38:21 +07:00
|
|
|
if (ret == K_SWITCH_INPUT_DATA) {
|
|
|
|
perf_session__delete(session);
|
|
|
|
goto repeat;
|
|
|
|
} else
|
|
|
|
ret = 0;
|
|
|
|
|
2012-03-09 05:47:47 +07:00
|
|
|
error:
|
|
|
|
perf_session__delete(session);
|
|
|
|
return ret;
|
2009-05-26 14:17:18 +07:00
|
|
|
}
|