#ifndef __PERF_EVSEL_H
#define __PERF_EVSEL_H 1

#include <linux/list.h>
#include <stdbool.h>
#include <stddef.h>
#include <linux/perf_event.h>
#include <linux/types.h>
#include "xyarray.h"
#include "symbol.h"
#include "cpumap.h"
#include "counts.h"

struct perf_evsel;

/*
 * Per fd, to map back from PERF_SAMPLE_ID to evsel, only used when there is
 * more than one entry in the evlist.
 */
struct perf_sample_id {
	struct hlist_node	node;
	u64			id;
	struct perf_evsel	*evsel;
	int			idx;
	int			cpu;
	pid_t			tid;

	/* Holds total ID period value for PERF_SAMPLE_READ processing. */
	u64			period;
};
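
/*
 * Illustrative sketch, not a declaration from this header: given a sample
 * carrying PERF_SAMPLE_ID, a tool conceptually hashes the id, walks the
 * matching hlist of perf_sample_id nodes and takes sid->evsel, e.g.:
 *
 *	struct perf_sample_id *sid;
 *
 *	hlist_for_each_entry(sid, head, node) {
 *		if (sid->id == sample_id)
 *			return sid->evsel;
 *	}
 */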

struct cgroup_sel;

/*
 * The 'struct perf_evsel_config_term' is used to pass event
 * specific configuration data to perf_evsel__config routine.
 * It is allocated within event parsing and attached to
 * perf_evsel::config_terms list head.
 */
enum {
	PERF_EVSEL__CONFIG_TERM_PERIOD,
	PERF_EVSEL__CONFIG_TERM_FREQ,
	PERF_EVSEL__CONFIG_TERM_TIME,
	PERF_EVSEL__CONFIG_TERM_CALLGRAPH,
	PERF_EVSEL__CONFIG_TERM_STACK_USER,
	PERF_EVSEL__CONFIG_TERM_INHERIT,
	PERF_EVSEL__CONFIG_TERM_MAX_STACK,
	PERF_EVSEL__CONFIG_TERM_OVERWRITE,
	PERF_EVSEL__CONFIG_TERM_DRV_CFG,
	PERF_EVSEL__CONFIG_TERM_BRANCH,
	PERF_EVSEL__CONFIG_TERM_MAX,
};

struct perf_evsel_config_term {
	struct list_head	list;
	int			type;
	union {
		u64		period;
		u64		freq;
		bool		time;
		char		*callgraph;
		char		*drv_cfg;
		u64		stack_user;
		int		max_stack;
		bool		inherit;
		bool		overwrite;
		char		*branch;
	} val;
};
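
/*
 * Illustrative sketch, not part of this header: during event parsing a term
 * is typically heap allocated, tagged with one of the
 * PERF_EVSEL__CONFIG_TERM_* values above and queued on evsel->config_terms,
 * e.g. for a per-event period:
 *
 *	struct perf_evsel_config_term *term = zalloc(sizeof(*term));
 *
 *	if (term) {
 *		term->type = PERF_EVSEL__CONFIG_TERM_PERIOD;
 *		term->val.period = 1000;
 *		list_add_tail(&term->list, &evsel->config_terms);
 *	}
 */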

/** struct perf_evsel - event selector
 *
 * @evlist - evlist this evsel is in, if it is in one.
 * @node - To insert it into evlist->entries or in other list_heads, say in
 *         the event parsing routines.
 * @name - Can be set to retain the original event name passed by the user,
 *         so that when showing results in tools such as 'perf stat', we
 *         show the name used, not some alias.
 * @id_pos: the position of the event id (PERF_SAMPLE_ID or
 *          PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of
 *          struct sample_event
 * @is_pos: the position (counting backwards) of the event id (PERF_SAMPLE_ID
 *          or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 *          sample_id_all is used there is an id sample appended to non-sample
 *          events
 * @priv:   And what is in its containing unnamed union are tool specific
 */
struct perf_evsel {
	struct list_head	node;
	struct perf_evlist	*evlist;
	struct perf_event_attr	attr;
	char			*filter;
	struct xyarray		*fd;
	struct xyarray		*sample_id;
	u64			*id;
	struct perf_counts	*counts;
	struct perf_counts	*prev_raw_counts;
	int			idx;
	u32			ids;
	char			*name;
	double			scale;
	const char		*unit;
	struct event_format	*tp_format;
	off_t			id_offset;
	void			*priv;
	u64			db_id;
	struct cgroup_sel	*cgrp;
	void			*handler;
	struct cpu_map		*cpus;
	struct cpu_map		*own_cpus;
	struct thread_map	*threads;
	unsigned int		sample_size;
	int			id_pos;
	int			is_pos;
	bool			snapshot;
	bool			supported;
	bool			needs_swap;
	bool			no_aux_samples;
	bool			immediate;
	bool			system_wide;
	bool			tracking;
	bool			per_pkg;
	bool			precise_max;
	bool			ignore_missing_thread;
	/* parse modifier helper */
	int			exclude_GH;
	int			nr_members;
	int			sample_read;
	unsigned long		*per_pkg_mask;
	struct perf_evsel	*leader;
	char			*group_name;
	bool			cmdline_group_boundary;
	struct list_head	config_terms;
	int			bpf_fd;
	bool			merged_stat;
	const char		*metric_expr;
	const char		*metric_name;
	struct perf_evsel	**metric_events;
	bool			collect_stat;
};

union u64_swap {
	u64 val64;
	u32 val32[2];
};

struct cpu_map;
struct target;
struct thread_map;
struct record_opts;

static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
	return evsel->cpus;
}

static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel)
{
	return perf_evsel__cpus(evsel)->nr;
}
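
/*
 * Example (sketch only): the two helpers above are typically used together
 * to walk the CPUs an evsel is bound to, e.g.:
 *
 *	for (cpu = 0; cpu < perf_evsel__nr_cpus(evsel); cpu++)
 *		do_something(perf_evsel__cpus(evsel)->map[cpu]);
 *
 * where do_something() stands in for whatever per-CPU work the caller needs.
 */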

void perf_counts_values__scale(struct perf_counts_values *count,
			       bool scale, s8 *pscaled);

void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
				struct perf_counts_values *count);

int perf_evsel__object_config(size_t object_size,
			      int (*init)(struct perf_evsel *evsel),
			      void (*fini)(struct perf_evsel *evsel));

struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx);

static inline struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr)
{
	return perf_evsel__new_idx(attr, 0);
}
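
/*
 * Illustrative sketch (assumes a populated thread_map; error handling
 * trimmed): allocate an evsel for a hardware event and open it on the
 * threads of interest:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *	};
 *	struct perf_evsel *evsel = perf_evsel__new(&attr);
 *
 *	if (evsel && perf_evsel__open_per_thread(evsel, threads) < 0)
 *		perf_evsel__delete(evsel);
 */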

struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx);

/*
 * Returns pointer with encoded error via <linux/err.h> interface.
 */
static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name)
{
	return perf_evsel__newtp_idx(sys, name, 0);
}
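
/*
 * Example (sketch): since the returned pointer encodes errors, callers are
 * expected to check it with the <linux/err.h> helpers rather than comparing
 * against NULL:
 *
 *	struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch");
 *
 *	if (IS_ERR(evsel))
 *		return PTR_ERR(evsel);
 */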

struct perf_evsel *perf_evsel__new_cycles(void);

struct event_format *event_format__new(const char *sys, const char *name);

void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx);
void perf_evsel__exit(struct perf_evsel *evsel);
void perf_evsel__delete(struct perf_evsel *evsel);

struct callchain_param;

void perf_evsel__config(struct perf_evsel *evsel,
			struct record_opts *opts,
			struct callchain_param *callchain);
void perf_evsel__config_callchain(struct perf_evsel *evsel,
				  struct record_opts *opts,
				  struct callchain_param *callchain);

int __perf_evsel__sample_size(u64 sample_type);
void perf_evsel__calc_id_pos(struct perf_evsel *evsel);

bool perf_evsel__is_cache_op_valid(u8 type, u8 op);

#define PERF_EVSEL__MAX_ALIASES 8

extern const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				       [PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
					      [PERF_EVSEL__MAX_ALIASES];
extern const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX];
extern const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX];
int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size);
const char *perf_evsel__name(struct perf_evsel *evsel);

const char *perf_evsel__group_name(struct perf_evsel *evsel);
int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size);

int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);

void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit);
void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit);

#define perf_evsel__set_sample_bit(evsel, bit) \
	__perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_##bit)

#define perf_evsel__reset_sample_bit(evsel, bit) \
	__perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)
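
/*
 * Example: the wrappers above only paste the PERF_SAMPLE_ prefix onto the
 * bit name, so
 *
 *	perf_evsel__set_sample_bit(evsel, CPU);
 *
 * expands to __perf_evsel__set_sample_bit(evsel, PERF_SAMPLE_CPU).
 */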

void perf_evsel__set_sample_id(struct perf_evsel *evsel,
			       bool use_sample_identifier);

int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter);
int perf_evsel__append_tp_filter(struct perf_evsel *evsel, const char *filter);
int perf_evsel__append_addr_filter(struct perf_evsel *evsel,
				   const char *filter);
int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			     const char *filter);
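
/*
 * Illustrative sketch (error handling trimmed): a filter string is staged in
 * evsel->filter by the set/append helpers and only pushed to the already
 * opened fds by the apply step:
 *
 *	perf_evsel__set_filter(evsel, "common_pid != 1");
 *	perf_evsel__apply_filter(evsel, ncpus, nthreads, evsel->filter);
 */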
|
2015-12-03 16:06:40 +07:00
|
|
|
int perf_evsel__enable(struct perf_evsel *evsel);
|
2015-12-03 16:06:41 +07:00
|
|
|
int perf_evsel__disable(struct perf_evsel *evsel);
|
2012-09-27 01:07:39 +07:00
|
|
|
|
2011-01-12 08:42:19 +07:00
|
|
|
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
|
perf tools: Enable grouping logic for parsed events
This patch adds a functionality that allows to create event groups
based on the way they are specified on the command line. Adding
functionality to the '{}' group syntax introduced in earlier patch.
The current '--group/-g' option behaviour remains intact. If you
specify it for record/stat/top command, all the specified events
become members of a single group with the first event as a group
leader.
With the new '{}' group syntax you can create group like:
# perf record -e '{cycles,faults}' ls
resulting in single event group containing 'cycles' and 'faults'
events, with cycles event as group leader.
All groups are created with regards to threads and cpus. Thus
recording an event group within a 2 threads on server with
4 CPUs will create 8 separate groups.
Examples (first event in brackets is group leader):
# 1 group (cpu-clock,task-clock)
perf record --group -e cpu-clock,task-clock ls
perf record -e '{cpu-clock,task-clock}' ls
# 2 groups (cpu-clock,task-clock) (minor-faults,major-faults)
perf record -e '{cpu-clock,task-clock},{minor-faults,major-faults}' ls
# 1 group (cpu-clock,task-clock,minor-faults,major-faults)
perf record --group -e cpu-clock,task-clock -e minor-faults,major-faults ls
perf record -e '{cpu-clock,task-clock,minor-faults,major-faults}' ls
# 2 groups (cpu-clock,task-clock) (minor-faults,major-faults)
perf record -e '{cpu-clock,task-clock} -e '{minor-faults,major-faults}' \
-e instructions ls
# 1 group
# (cpu-clock,task-clock,minor-faults,major-faults,instructions)
perf record --group -e cpu-clock,task-clock \
-e minor-faults,major-faults -e instructions ls perf record -e
'{cpu-clock,task-clock,minor-faults,major-faults,instructions}' ls
It's possible to use standard event modifier for a group, which spans
over all events in the group and updates each event modifier settings,
for example:
# perf record -r '{faults:k,cache-references}:p'
resulting in ':kp' modifier being used for 'faults' and ':p' modifier
being used for 'cache-references' event.
Reviewed-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ulrich Drepper <drepper@gmail.com>
Link: http://lkml.kernel.org/n/tip-ho42u0wcr8mn1otkalqi13qp@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2012-08-08 17:22:36 +07:00
|
|
|
struct cpu_map *cpus);
|
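A hedged sketch (not from the patch) of what the '{}' grouping boils
down to at the syscall level: the group leader is opened with
group_fd = -1 and every other member passes the leader's fd, once per
cpu/thread combination; the *_attr variables are illustrative:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_counter(struct perf_event_attr *attr, pid_t pid, int cpu,
			int group_fd)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, 0);
}

/*
 * leader = open_counter(&cpu_clock_attr,  pid, cpu, -1);
 * member = open_counter(&task_clock_attr, pid, cpu, leader);
 */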
2011-01-12 08:42:19 +07:00
|
|
|
int perf_evsel__open_per_thread(struct perf_evsel *evsel,
|
2012-08-08 17:22:36 +07:00
|
|
|
struct thread_map *threads);
|
2011-01-12 08:42:19 +07:00
|
|
|
int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
|
2012-08-08 17:22:36 +07:00
|
|
|
struct thread_map *threads);
|
2011-10-25 19:42:19 +07:00
|
|
|
void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads);
|
2011-01-04 02:48:12 +07:00
|
|
|
|
2012-09-12 05:24:23 +07:00
|
|
|
struct perf_sample;
|
|
|
|
|
2012-09-27 06:22:00 +07:00
|
|
|
void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
|
2012-09-12 05:24:23 +07:00
|
|
|
const char *name);
|
|
|
|
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
|
|
|
|
const char *name);
|
|
|
|
|
2012-09-27 06:22:00 +07:00
|
|
|
static inline char *perf_evsel__strval(struct perf_evsel *evsel,
|
|
|
|
struct perf_sample *sample,
|
|
|
|
const char *name)
|
|
|
|
{
|
|
|
|
return perf_evsel__rawptr(evsel, sample, name);
|
|
|
|
}
|
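A hedged usage sketch of the accessors above, pulling typed fields out
of a tracepoint sample; the sched_switch field names are illustrative:

static void tool__show_switch(struct perf_evsel *evsel,
			      struct perf_sample *sample)
{
	const char *next_comm = perf_evsel__strval(evsel, sample, "next_comm");
	u64 next_pid = perf_evsel__intval(evsel, sample, "next_pid");

	/* assumes <stdio.h> and <inttypes.h> for fprintf() and PRIu64 */
	fprintf(stderr, "switch to %s (%" PRIu64 ")\n", next_comm, next_pid);
}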
|
|
|
|
2012-09-18 21:21:50 +07:00
|
|
|
struct format_field;
|
|
|
|
|
2016-05-31 22:47:46 +07:00
|
|
|
u64 format_field__intval(struct format_field *field, struct perf_sample *sample, bool needs_swap);
|
|
|
|
|
2012-09-18 21:21:50 +07:00
|
|
|
struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name);
|
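A hedged sketch of the format_field path: resolve a field once and
reuse it for every sample instead of repeated name lookups. The
"prev_pid" name and the evsel->needs_swap member are taken as given
here:

	struct format_field *field = perf_evsel__field(evsel, "prev_pid");
	u64 prev_pid = field ?
		format_field__intval(field, sample, evsel->needs_swap) : 0;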
|
|
|
|
2011-01-04 01:49:44 +07:00
|
|
|
#define perf_evsel__match(evsel, t, c) \
|
|
|
|
(evsel->attr.type == PERF_TYPE_##t && \
|
|
|
|
evsel->attr.config == PERF_COUNT_##c)
|
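A hedged usage note for the matcher above, e.g. testing for the
hardware cycles event:

	if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES))
		/* attr is PERF_TYPE_HARDWARE / PERF_COUNT_HW_CPU_CYCLES */ ;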
|
|
|
|
2012-09-06 22:46:55 +07:00
|
|
|
static inline bool perf_evsel__match2(struct perf_evsel *e1,
|
|
|
|
struct perf_evsel *e2)
|
|
|
|
{
|
|
|
|
return (e1->attr.type == e2->attr.type) &&
|
|
|
|
(e1->attr.config == e2->attr.config);
|
|
|
|
}
|
|
|
|
|
2013-08-22 06:47:26 +07:00
|
|
|
#define perf_evsel__cmp(a, b) \
|
|
|
|
((a) && \
|
|
|
|
(b) && \
|
|
|
|
(a)->attr.type == (b)->attr.type && \
|
|
|
|
(a)->attr.config == (b)->attr.config)
|
|
|
|
|
2015-06-26 16:29:18 +07:00
|
|
|
int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
|
|
|
|
struct perf_counts_values *count);
|
|
|
|
|
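A hedged sketch of reading one counter through perf_evsel__read(); it
assumes the evsel was opened on the given cpu/thread indexes and the
usual val/ena/run layout from counts.h:

static u64 tool__read_one(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_counts_values count = { .val = 0 };

	if (perf_evsel__read(evsel, cpu, thread, &count) < 0)
		return 0;

	return count.val;
}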
2011-01-04 02:45:52 +07:00
|
|
|
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
|
|
|
|
int cpu, int thread, bool scale);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* perf_evsel__read_on_cpu - Read out the results on a CPU and thread
|
|
|
|
*
|
|
|
|
* @evsel - event selector to read value
|
|
|
|
* @cpu - CPU of interest
|
|
|
|
* @thread - thread of interest
|
|
|
|
*/
|
|
|
|
static inline int perf_evsel__read_on_cpu(struct perf_evsel *evsel,
|
|
|
|
int cpu, int thread)
|
|
|
|
{
|
|
|
|
return __perf_evsel__read_on_cpu(evsel, cpu, thread, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
|
|
|
|
*
|
|
|
|
* @evsel - event selector to read value
|
|
|
|
* @cpu - CPU of interest
|
|
|
|
* @thread - thread of interest
|
|
|
|
*/
|
|
|
|
static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
|
|
|
|
int cpu, int thread)
|
|
|
|
{
|
|
|
|
return __perf_evsel__read_on_cpu(evsel, cpu, thread, true);
|
|
|
|
}
|
|
|
|
|
2012-08-02 22:23:46 +07:00
|
|
|
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
|
2012-09-26 22:48:18 +07:00
|
|
|
struct perf_sample *sample);
|
2012-08-15 02:42:15 +07:00
|
|
|
|
|
|
|
static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel)
|
|
|
|
{
|
|
|
|
return list_entry(evsel->node.next, struct perf_evsel, node);
|
|
|
|
}
|
2012-11-14 03:27:28 +07:00
|
|
|
|
2013-11-14 01:56:40 +07:00
|
|
|
static inline struct perf_evsel *perf_evsel__prev(struct perf_evsel *evsel)
|
|
|
|
{
|
|
|
|
return list_entry(evsel->node.prev, struct perf_evsel, node);
|
|
|
|
}
|
|
|
|
|
2013-03-05 12:53:26 +07:00
|
|
|
/**
|
|
|
|
* perf_evsel__is_group_leader - Return whether given evsel is a leader event
|
|
|
|
*
|
|
|
|
* @evsel - evsel selector to be tested
|
|
|
|
*
|
|
|
|
* Return %true if @evsel is a group leader or a stand-alone event
|
|
|
|
*/
|
2012-11-29 13:38:30 +07:00
|
|
|
static inline bool perf_evsel__is_group_leader(const struct perf_evsel *evsel)
|
2012-11-14 03:27:28 +07:00
|
|
|
{
|
2012-11-29 13:38:30 +07:00
|
|
|
return evsel->leader == evsel;
|
2012-11-14 03:27:28 +07:00
|
|
|
}
|
2012-12-11 04:17:08 +07:00
|
|
|
|
2013-03-05 12:53:26 +07:00
|
|
|
/**
|
|
|
|
* perf_evsel__is_group_event - Return whether given evsel is a group event
|
|
|
|
*
|
|
|
|
* @evsel - evsel selector to be tested
|
|
|
|
*
|
|
|
|
* Return %true iff event group view is enabled and @evsel is an actual group
|
|
|
|
* leader which has other members in the group
|
|
|
|
*/
|
|
|
|
static inline bool perf_evsel__is_group_event(struct perf_evsel *evsel)
|
|
|
|
{
|
|
|
|
if (!symbol_conf.event_group)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return perf_evsel__is_group_leader(evsel) && evsel->nr_members > 1;
|
|
|
|
}
|
|
|
|
|
2016-07-07 21:51:47 +07:00
|
|
|
bool perf_evsel__is_function_event(struct perf_evsel *evsel);
|
2014-03-02 22:56:40 +07:00
|
|
|
|
perf tools: Introduce bpf-output event
Commit a43eec304259 ("bpf: introduce bpf_perf_event_output() helper")
adds a helper to enable a BPF program to output data to a perf ring
buffer through a new type of perf event, PERF_COUNT_SW_BPF_OUTPUT. This
patch enables perf to create events of that type. Now a perf user can
use the following cmdline to receive output data from BPF programs:
# perf record -a -e bpf-output/no-inherit,name=evt/ \
-e ./test_bpf_output.c/map:channel.event=evt/ ls /
# perf script
perf 1560 [004] 347747.086295: evt: ffffffff811fd201 sys_write ...
perf 1560 [004] 347747.086300: evt: ffffffff811fd201 sys_write ...
perf 1560 [004] 347747.086315: evt: ffffffff811fd201 sys_write ...
...
Test result:
# cat test_bpf_output.c
/************************ BEGIN **************************/
#include <uapi/linux/bpf.h>
struct bpf_map_def {
unsigned int type;
unsigned int key_size;
unsigned int value_size;
unsigned int max_entries;
};
#define SEC(NAME) __attribute__((section(NAME), used))
static u64 (*ktime_get_ns)(void) =
(void *)BPF_FUNC_ktime_get_ns;
static int (*trace_printk)(const char *fmt, int fmt_size, ...) =
(void *)BPF_FUNC_trace_printk;
static int (*get_smp_processor_id)(void) =
(void *)BPF_FUNC_get_smp_processor_id;
static int (*perf_event_output)(void *, struct bpf_map_def *, int, void *, unsigned long) =
(void *)BPF_FUNC_perf_event_output;
struct bpf_map_def SEC("maps") channel = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(u32),
.max_entries = __NR_CPUS__,
};
SEC("func_write=sys_write")
int func_write(void *ctx)
{
struct {
u64 ktime;
int cpuid;
} __attribute__((packed)) output_data;
char error_data[] = "Error: failed to output: %d\n";
output_data.cpuid = get_smp_processor_id();
output_data.ktime = ktime_get_ns();
int err = perf_event_output(ctx, &channel, get_smp_processor_id(),
&output_data, sizeof(output_data));
if (err)
trace_printk(error_data, sizeof(error_data), err);
return 0;
}
char _license[] SEC("license") = "GPL";
int _version SEC("version") = LINUX_VERSION_CODE;
/************************ END ***************************/
# perf record -a -e bpf-output/no-inherit,name=evt/ \
-e ./test_bpf_output.c/map:channel.event=evt/ ls /
# perf script | grep ls
ls 2242 [003] 347851.557563: evt: ffffffff811fd201 sys_write ...
ls 2242 [003] 347851.557571: evt: ffffffff811fd201 sys_write ...
Signed-off-by: Wang Nan <wangnan0@huawei.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Brendan Gregg <brendan.d.gregg@gmail.com>
Cc: Cody P Schafer <dev@codyps.com>
Cc: He Kuang <hekuang@huawei.com>
Cc: Jeremie Galarneau <jeremie.galarneau@efficios.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kirill Smelkov <kirr@nexedi.com>
Cc: Li Zefan <lizefan@huawei.com>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Zefan Li <lizefan@huawei.com>
Cc: pi3orama@163.com
Link: http://lkml.kernel.org/r/1456132275-98875-11-git-send-email-wangnan0@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-02-22 16:10:37 +07:00
|
|
|
static inline bool perf_evsel__is_bpf_output(struct perf_evsel *evsel)
|
|
|
|
{
|
|
|
|
struct perf_event_attr *attr = &evsel->attr;
|
|
|
|
|
|
|
|
return (attr->config == PERF_COUNT_SW_BPF_OUTPUT) &&
|
|
|
|
(attr->type == PERF_TYPE_SOFTWARE);
|
|
|
|
}
|
|
|
|
|
2012-12-11 04:17:08 +07:00
|
|
|
struct perf_attr_details {
|
|
|
|
bool freq;
|
|
|
|
bool verbose;
|
2013-02-07 03:20:02 +07:00
|
|
|
bool event_group;
|
2015-04-02 20:47:10 +07:00
|
|
|
bool force;
|
perf evlist: Add --trace-fields option to show trace fields
To use dynamic sort keys, it might be good to add an option to see the
list of field names.
$ perf evlist -i perf.data.sched
sched:sched_switch
sched:sched_stat_wait
sched:sched_stat_sleep
sched:sched_stat_iowait
sched:sched_stat_runtime
sched:sched_process_fork
sched:sched_wakeup
sched:sched_wakeup_new
sched:sched_migrate_task
# Tip: use 'perf evlist --trace-fields' to show fields for tracepoint events
$ perf evlist -i perf.data.sched --trace-fields
sched:sched_switch: trace_fields: prev_comm,prev_pid,prev_prio,prev_state,next_comm,next_pid,next_prio
sched:sched_stat_wait: trace_fields: comm,pid,delay
sched:sched_stat_sleep: trace_fields: comm,pid,delay
sched:sched_stat_iowait: trace_fields: comm,pid,delay
sched:sched_stat_runtime: trace_fields: comm,pid,runtime,vruntime
sched:sched_process_fork: trace_fields: parent_comm,parent_pid,child_comm,child_pid
sched:sched_wakeup: trace_fields: comm,pid,prio,success,target_cpu
sched:sched_wakeup_new: trace_fields: comm,pid,prio,success,target_cpu
sched:sched_migrate_task: trace_fields: comm,pid,prio,orig_cpu,dest_cpu
Committer notes:
For another file, in verbose mode:
# perf evlist -v --trace-fields
sched:sched_switch: type: 2, size: 112, config: 0x10b, { sample_period, sample_freq }: 1, sample_type: IP|TID|TIME|CPU|PERIOD|RAW, disabled: 1, inherit: 1, mmap: 1, comm: 1, task: 1, sample_id_all: 1, exclude_guest: 1, mmap2: 1, comm_exec: 1, trace_fields: prev_comm,prev_pid,prev_prio,prev_state,next_comm,next_pid,next_prio
#
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/r/1452125549-1511-5-git-send-email-namhyung@kernel.org
[ Replaced 'trace_fields=' with 'trace_fields: ' to make the output consistent in -v mode ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2016-01-07 07:12:29 +07:00
|
|
|
bool trace_fields;
|
2012-12-11 04:17:08 +07:00
|
|
|
};
|
|
|
|
|
|
|
|
int perf_evsel__fprintf(struct perf_evsel *evsel,
|
|
|
|
struct perf_attr_details *details, FILE *fp);
|
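A hedged sketch of how a tool such as 'perf evlist --trace-fields -v'
might fill the struct above before printing each event; the 'verbose'
variable is illustrative:

	struct perf_attr_details details = {
		.verbose      = verbose > 0,
		.trace_fields = true,
	};

	perf_evsel__fprintf(evsel, &details, stdout);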
2012-12-14 00:16:30 +07:00
|
|
|
|
2016-04-13 01:16:15 +07:00
|
|
|
#define EVSEL__PRINT_IP (1<<0)
|
|
|
|
#define EVSEL__PRINT_SYM (1<<1)
|
|
|
|
#define EVSEL__PRINT_DSO (1<<2)
|
|
|
|
#define EVSEL__PRINT_SYMOFFSET (1<<3)
|
|
|
|
#define EVSEL__PRINT_ONELINE (1<<4)
|
|
|
|
#define EVSEL__PRINT_SRCLINE (1<<5)
|
|
|
|
#define EVSEL__PRINT_UNKNOWN_AS_ADDR (1<<6)
|
2016-11-16 13:06:28 +07:00
|
|
|
#define EVSEL__PRINT_CALLCHAIN_ARROW (1<<7)
|
2016-11-24 08:11:13 +07:00
|
|
|
#define EVSEL__PRINT_SKIP_IGNORED (1<<8)
|
2016-04-13 01:16:15 +07:00
|
|
|
|
2016-04-15 03:45:51 +07:00
|
|
|
struct callchain_cursor;
|
|
|
|
|
2016-04-15 03:53:49 +07:00
|
|
|
int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment,
|
|
|
|
unsigned int print_opts,
|
2016-04-15 03:45:51 +07:00
|
|
|
struct callchain_cursor *cursor, FILE *fp);
|
|
|
|
|
|
|
|
int sample__fprintf_sym(struct perf_sample *sample, struct addr_location *al,
|
|
|
|
int left_alignment, unsigned int print_opts,
|
|
|
|
struct callchain_cursor *cursor, FILE *fp);
|
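A hedged sketch of a typical flag combination for the printers above,
roughly what a script-style tool might pass once sample, al and cursor
have been resolved:

	unsigned int print_opts = EVSEL__PRINT_IP | EVSEL__PRINT_SYM |
				  EVSEL__PRINT_DSO | EVSEL__PRINT_SKIP_IGNORED;

	sample__fprintf_sym(sample, al, 8, print_opts, cursor, stdout);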
2016-04-13 01:16:15 +07:00
|
|
|
|
2012-12-14 00:16:30 +07:00
|
|
|
bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
|
|
|
|
char *msg, size_t msgsize);
|
2013-11-13 02:46:16 +07:00
|
|
|
int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
|
2012-12-14 01:10:58 +07:00
|
|
|
int err, char *msg, size_t size);
|
2013-01-22 16:09:29 +07:00
|
|
|
|
|
|
|
static inline int perf_evsel__group_idx(struct perf_evsel *evsel)
|
|
|
|
{
|
|
|
|
return evsel->idx - evsel->leader->idx;
|
|
|
|
}
|
2013-01-22 16:09:44 +07:00
|
|
|
|
|
|
|
#define for_each_group_member(_evsel, _leader) \
|
|
|
|
for ((_evsel) = list_entry((_leader)->node.next, struct perf_evsel, node); \
|
|
|
|
(_evsel) && (_evsel)->leader == (_leader); \
|
|
|
|
(_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node))
|
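A hedged usage sketch of the iterator above; it relies on group members
being kept adjacent to their leader in the evlist:

static int group_size(struct perf_evsel *leader)
{
	struct perf_evsel *pos;
	int n = 1;	/* the leader itself */

	for_each_group_member(pos, leader)
		n++;

	return n;
}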
|
|
|
|
2016-04-18 20:35:03 +07:00
|
|
|
static inline bool perf_evsel__has_branch_callstack(const struct perf_evsel *evsel)
|
2015-01-06 01:23:05 +07:00
|
|
|
{
|
|
|
|
return evsel->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
|
|
|
|
}
|
perf tools: Merge all perf_event_attr print functions
Currently there are 3 (that I found) different and incomplete
implementations of printing perf_event_attr.
This is quite silly. Merge the lot.
While this patch does not retain the exact form, all printing that I
found is debug output and thus it should not be critical.
Also, I cannot find a single print_event_desc() caller.
Pre:
$ perf record -vv -e cycles -- sleep 1
------------------------------------------------------------
perf_event_attr:
type 0
size 104
config 0
sample_period 4000
sample_freq 4000
sample_type 0x107
read_format 0
disabled 1 inherit 1
pinned 0 exclusive 0
exclude_user 0 exclude_kernel 0
exclude_hv 0 exclude_idle 0
mmap 1 comm 1
mmap2 1 comm_exec 1
freq 1 inherit_stat 0
enable_on_exec 1 task 1
watermark 0 precise_ip 0
mmap_data 0 sample_id_all 1
exclude_host 0 exclude_guest 1
excl.callchain_kern 0 excl.callchain_user 0
wakeup_events 0
wakeup_watermark 0
bp_type 0
bp_addr 0
config1 0
bp_len 0
config2 0
branch_sample_type 0
sample_regs_user 0
sample_stack_user 0
sample_regs_intr 0
------------------------------------------------------------
$ perf evlist -vv
cycles: sample_freq=4000, size: 104, sample_type: IP|TID|TIME|PERIOD,
disabled: 1, inherit: 1, mmap: 1, mmap2: 1, comm: 1, comm_exec: 1,
freq: 1, enable_on_exec: 1, task: 1, sample_id_all: 1, exclude_guest: 1
Post:
$ ./perf record -vv -e cycles -- sleep 1
------------------------------------------------------------
perf_event_attr:
size 112
{ sample_period, sample_freq } 4000
sample_type IP|TID|TIME|PERIOD
disabled 1
inherit 1
mmap 1
comm 1
freq 1
enable_on_exec 1
task 1
sample_id_all 1
exclude_guest 1
mmap2 1
comm_exec 1
------------------------------------------------------------
$ ./perf evlist -vv
cycles: size: 112, { sample_period, sample_freq }: 4000, sample_type:
IP|TID|TIME|PERIOD, disabled: 1, inherit: 1, mmap: 1, comm: 1, freq:
1, enable_on_exec: 1, task: 1, sample_id_all: 1, exclude_guest: 1,
mmap2: 1, comm_exec: 1
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Adrian Hunter <adrian.hunter@intel.com>
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: David Ahern <dsahern@gmail.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20150407091150.644238729@infradead.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2015-04-07 16:09:54 +07:00
|
|
|
|
|
|
|
typedef int (*attr__fprintf_f)(FILE *, const char *, const char *, void *);
|
|
|
|
|
|
|
|
int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
|
|
|
|
attr__fprintf_f attr__fprintf, void *priv);
|
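A hedged sketch of a minimal attr__fprintf_f callback, printing one
"name value" pair per line in the spirit of the perf record -vv output
shown above:

static int attr__fprintf_simple(FILE *fp, const char *name,
				const char *val, void *priv)
{
	(void)priv;	/* unused in this sketch */
	return fprintf(fp, "  %-24s %s\n", name, val);
}

/* perf_event_attr__fprintf(stderr, &evsel->attr, attr__fprintf_simple, NULL); */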
|
|
|
|
2016-06-30 13:14:19 +07:00
|
|
|
char *perf_evsel__env_arch(struct perf_evsel *evsel);
|
|
|
|
|
2011-01-04 01:39:04 +07:00
|
|
|
#endif /* __PERF_EVSEL_H */
|