2012-11-20 05:21:03 +07:00
|
|
|
#include <linux/hw_breakpoint.h>
|
2009-05-26 16:10:09 +07:00
|
|
|
#include "util.h"
|
2009-09-05 02:39:51 +07:00
|
|
|
#include "../perf.h"
|
2011-01-12 05:56:53 +07:00
|
|
|
#include "evlist.h"
|
2011-01-04 01:39:04 +07:00
|
|
|
#include "evsel.h"
|
2009-05-26 16:10:09 +07:00
|
|
|
#include "parse-options.h"
|
|
|
|
#include "parse-events.h"
|
|
|
|
#include "exec_cmd.h"
|
2014-10-07 22:08:49 +07:00
|
|
|
#include "string.h"
|
2010-03-26 05:59:00 +07:00
|
|
|
#include "symbol.h"
|
2009-07-22 01:16:29 +07:00
|
|
|
#include "cache.h"
|
2009-09-12 12:52:51 +07:00
|
|
|
#include "header.h"
|
2014-08-14 09:22:36 +07:00
|
|
|
#include "debug.h"
|
2013-12-09 23:14:23 +07:00
|
|
|
#include <api/fs/debugfs.h>
|
2012-06-15 13:31:39 +07:00
|
|
|
#include "parse-events-bison.h"
|
2012-06-15 13:31:40 +07:00
|
|
|
#define YY_EXTRA_TYPE int
|
2012-03-16 02:09:15 +07:00
|
|
|
#include "parse-events-flex.h"
|
2012-03-16 02:09:18 +07:00
|
|
|
#include "pmu.h"
|
2013-08-27 09:41:53 +07:00
|
|
|
#include "thread_map.h"
|
2012-03-16 02:09:15 +07:00
|
|
|
|
|
|
|
#define MAX_NAME_LEN 100
|
/*
 * Mapping of a symbolic event name (and an optional shorthand alias)
 * used when parsing -e event strings.
 */
struct event_symbol {
	const char	*symbol;	/* canonical name, e.g. "cpu-cycles" */
	const char	*alias;		/* alternate spelling, "" when none */
};
|
#ifdef PARSER_DEBUG
/* bison-generated trace switch; non-zero enables parser debugging */
extern int parse_events_debug;
#endif

/* entry point of the bison-generated event grammar parser */
int parse_events_parse(void *data, void *scanner);
|
/* Table of event symbols discovered from the available PMUs. */
static struct perf_pmu_event_symbol *perf_pmu_events_list;
/*
 * State of perf_pmu_events_list:
 *   0 - not initialized, ready to init
 *  -1 - initialization failed, don't try again
 *  >0 - number of supported pmu event symbols
 */
static int perf_pmu_events_list_num;
|
2012-07-04 05:00:44 +07:00
|
|
|
static struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
|
|
|
|
[PERF_COUNT_HW_CPU_CYCLES] = {
|
|
|
|
.symbol = "cpu-cycles",
|
|
|
|
.alias = "cycles",
|
|
|
|
},
|
|
|
|
[PERF_COUNT_HW_INSTRUCTIONS] = {
|
|
|
|
.symbol = "instructions",
|
|
|
|
.alias = "",
|
|
|
|
},
|
|
|
|
[PERF_COUNT_HW_CACHE_REFERENCES] = {
|
|
|
|
.symbol = "cache-references",
|
|
|
|
.alias = "",
|
|
|
|
},
|
|
|
|
[PERF_COUNT_HW_CACHE_MISSES] = {
|
|
|
|
.symbol = "cache-misses",
|
|
|
|
.alias = "",
|
|
|
|
},
|
|
|
|
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {
|
|
|
|
.symbol = "branch-instructions",
|
|
|
|
.alias = "branches",
|
|
|
|
},
|
|
|
|
[PERF_COUNT_HW_BRANCH_MISSES] = {
|
|
|
|
.symbol = "branch-misses",
|
|
|
|
.alias = "",
|
|
|
|
},
|
|
|
|
[PERF_COUNT_HW_BUS_CYCLES] = {
|
|
|
|
.symbol = "bus-cycles",
|
|
|
|
.alias = "",
|
|
|
|
},
|
|
|
|
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = {
|
|
|
|
.symbol = "stalled-cycles-frontend",
|
|
|
|
.alias = "idle-cycles-frontend",
|
|
|
|
},
|
|
|
|
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = {
|
|
|
|
.symbol = "stalled-cycles-backend",
|
|
|
|
.alias = "idle-cycles-backend",
|
|
|
|
},
|
|
|
|
[PERF_COUNT_HW_REF_CPU_CYCLES] = {
|
|
|
|
.symbol = "ref-cycles",
|
|
|
|
.alias = "",
|
|
|
|
},
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
|
|
|
|
[PERF_COUNT_SW_CPU_CLOCK] = {
|
|
|
|
.symbol = "cpu-clock",
|
|
|
|
.alias = "",
|
|
|
|
},
|
|
|
|
[PERF_COUNT_SW_TASK_CLOCK] = {
|
|
|
|
.symbol = "task-clock",
|
|
|
|
.alias = "",
|
|
|
|
},
|
|
|
|
[PERF_COUNT_SW_PAGE_FAULTS] = {
|
|
|
|
.symbol = "page-faults",
|
|
|
|
.alias = "faults",
|
|
|
|
},
|
|
|
|
[PERF_COUNT_SW_CONTEXT_SWITCHES] = {
|
|
|
|
.symbol = "context-switches",
|
|
|
|
.alias = "cs",
|
|
|
|
},
|
|
|
|
[PERF_COUNT_SW_CPU_MIGRATIONS] = {
|
|
|
|
.symbol = "cpu-migrations",
|
|
|
|
.alias = "migrations",
|
|
|
|
},
|
|
|
|
[PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
|
|
|
|
.symbol = "minor-faults",
|
|
|
|
.alias = "",
|
|
|
|
},
|
|
|
|
[PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
|
|
|
|
.symbol = "major-faults",
|
|
|
|
.alias = "",
|
|
|
|
},
|
|
|
|
[PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
|
|
|
|
.symbol = "alignment-faults",
|
|
|
|
.alias = "",
|
|
|
|
},
|
|
|
|
[PERF_COUNT_SW_EMULATION_FAULTS] = {
|
|
|
|
.symbol = "emulation-faults",
|
|
|
|
.alias = "",
|
|
|
|
},
|
2013-09-01 01:50:52 +07:00
|
|
|
[PERF_COUNT_SW_DUMMY] = {
|
|
|
|
.symbol = "dummy",
|
|
|
|
.alias = "",
|
|
|
|
},
|
2009-05-26 16:10:09 +07:00
|
|
|
};
|
|
|
|
/*
 * Helpers to pick apart the legacy raw event config encoding: each
 * field is isolated with its PERF_EVENT_<name>_MASK and shifted down
 * by its PERF_EVENT_<name>_SHIFT.
 */
#define __PERF_EVENT_FIELD(config, name) \
	((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)

#define PERF_EVENT_RAW(config)		__PERF_EVENT_FIELD(config, RAW)
#define PERF_EVENT_CONFIG(config)	__PERF_EVENT_FIELD(config, CONFIG)
#define PERF_EVENT_TYPE(config)		__PERF_EVENT_FIELD(config, TYPE)
#define PERF_EVENT_ID(config)		__PERF_EVENT_FIELD(config, EVENT)
|
/*
 * Iterate over every subsystem directory below sys_dir, skipping the
 * "." and ".." entries.  NOTE(review): readdir_r() is deprecated in
 * modern glibc; kept here to match the rest of the file.
 */
#define for_each_subsystem(sys_dir, sys_dirent, sys_next)	       \
	while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next)	       \
	if (sys_dirent.d_type == DT_DIR &&				       \
	    (strcmp(sys_dirent.d_name, ".")) &&				       \
	    (strcmp(sys_dirent.d_name, "..")))
|
2009-08-06 21:48:54 +07:00
|
|
|
static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
|
|
|
|
{
|
|
|
|
char evt_path[MAXPATHLEN];
|
|
|
|
int fd;
|
|
|
|
|
2011-11-16 23:03:07 +07:00
|
|
|
snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path,
|
2009-08-06 21:48:54 +07:00
|
|
|
sys_dir->d_name, evt_dir->d_name);
|
|
|
|
fd = open(evt_path, O_RDONLY);
|
|
|
|
if (fd < 0)
|
|
|
|
return -EINVAL;
|
|
|
|
close(fd);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
/*
 * Iterate over every event directory below evt_dir, skipping "." and
 * ".." and any event that does not expose an "id" file.
 */
#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next)	       \
	while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next)	       \
	if (evt_dirent.d_type == DT_DIR &&				       \
	    (strcmp(evt_dirent.d_name, ".")) &&				       \
	    (strcmp(evt_dirent.d_name, "..")) &&			       \
	    (!tp_event_has_id(&sys_dirent, &evt_dirent)))
|
/* Upper bound for a tracepoint system or event name copy. */
#define MAX_EVENT_LENGTH 512
2009-08-28 08:09:58 +07:00
|
|
|
struct tracepoint_path *tracepoint_id_to_path(u64 config)
|
2009-07-21 23:20:22 +07:00
|
|
|
{
|
2009-08-28 08:09:58 +07:00
|
|
|
struct tracepoint_path *path = NULL;
|
2009-07-21 23:20:22 +07:00
|
|
|
DIR *sys_dir, *evt_dir;
|
|
|
|
struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
|
2012-03-13 22:51:02 +07:00
|
|
|
char id_buf[24];
|
2009-09-24 20:39:09 +07:00
|
|
|
int fd;
|
2009-07-21 23:20:22 +07:00
|
|
|
u64 id;
|
|
|
|
char evt_path[MAXPATHLEN];
|
2009-09-24 20:39:09 +07:00
|
|
|
char dir_path[MAXPATHLEN];
|
2009-07-21 23:20:22 +07:00
|
|
|
|
2011-11-16 23:03:07 +07:00
|
|
|
sys_dir = opendir(tracing_events_path);
|
2009-07-21 23:20:22 +07:00
|
|
|
if (!sys_dir)
|
2009-09-24 20:39:09 +07:00
|
|
|
return NULL;
|
2009-09-05 02:39:51 +07:00
|
|
|
|
|
|
|
for_each_subsystem(sys_dir, sys_dirent, sys_next) {
|
2009-09-24 20:39:09 +07:00
|
|
|
|
2011-11-16 23:03:07 +07:00
|
|
|
snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
|
2009-09-24 20:39:09 +07:00
|
|
|
sys_dirent.d_name);
|
|
|
|
evt_dir = opendir(dir_path);
|
|
|
|
if (!evt_dir)
|
2009-09-05 02:39:51 +07:00
|
|
|
continue;
|
2009-09-24 20:39:09 +07:00
|
|
|
|
2009-09-05 02:39:51 +07:00
|
|
|
for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
|
2009-09-24 20:39:09 +07:00
|
|
|
|
|
|
|
snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
|
2009-07-21 23:20:22 +07:00
|
|
|
evt_dirent.d_name);
|
2009-09-24 20:39:09 +07:00
|
|
|
fd = open(evt_path, O_RDONLY);
|
2009-07-21 23:20:22 +07:00
|
|
|
if (fd < 0)
|
|
|
|
continue;
|
|
|
|
if (read(fd, id_buf, sizeof(id_buf)) < 0) {
|
|
|
|
close(fd);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
close(fd);
|
|
|
|
id = atoll(id_buf);
|
|
|
|
if (id == config) {
|
|
|
|
closedir(evt_dir);
|
|
|
|
closedir(sys_dir);
|
2009-12-06 16:16:30 +07:00
|
|
|
path = zalloc(sizeof(*path));
|
2009-08-28 08:09:58 +07:00
|
|
|
path->system = malloc(MAX_EVENT_LENGTH);
|
|
|
|
if (!path->system) {
|
|
|
|
free(path);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
path->name = malloc(MAX_EVENT_LENGTH);
|
|
|
|
if (!path->name) {
|
2013-12-28 02:55:14 +07:00
|
|
|
zfree(&path->system);
|
2009-08-28 08:09:58 +07:00
|
|
|
free(path);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
strncpy(path->system, sys_dirent.d_name,
|
|
|
|
MAX_EVENT_LENGTH);
|
|
|
|
strncpy(path->name, evt_dirent.d_name,
|
|
|
|
MAX_EVENT_LENGTH);
|
|
|
|
return path;
|
2009-07-21 23:20:22 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
closedir(evt_dir);
|
|
|
|
}
|
|
|
|
|
|
|
|
closedir(sys_dir);
|
2009-08-28 08:09:58 +07:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2013-06-26 14:14:05 +07:00
|
|
|
struct tracepoint_path *tracepoint_name_to_path(const char *name)
|
|
|
|
{
|
|
|
|
struct tracepoint_path *path = zalloc(sizeof(*path));
|
|
|
|
char *str = strchr(name, ':');
|
|
|
|
|
|
|
|
if (path == NULL || str == NULL) {
|
|
|
|
free(path);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
path->system = strndup(name, str - name);
|
|
|
|
path->name = strdup(str+1);
|
|
|
|
|
|
|
|
if (path->system == NULL || path->name == NULL) {
|
2013-12-28 02:55:14 +07:00
|
|
|
zfree(&path->system);
|
|
|
|
zfree(&path->name);
|
2013-06-26 14:14:05 +07:00
|
|
|
free(path);
|
|
|
|
path = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
return path;
|
|
|
|
}
|
|
|
|
|
/*
 * Map a PERF_TYPE_* value to a human readable name.  Unrecognized
 * types yield "unknown".
 */
const char *event_type(int type)
{
	switch (type) {
	case PERF_TYPE_HARDWARE:
		return "hardware";
	case PERF_TYPE_SOFTWARE:
		return "software";
	case PERF_TYPE_TRACEPOINT:
		return "tracepoint";
	case PERF_TYPE_HW_CACHE:
		return "hardware-cache";
	default:
		return "unknown";
	}
}
|
2012-09-10 14:53:50 +07:00
|
|
|
|
|
|
|
|
2013-11-12 23:58:49 +07:00
|
|
|
static struct perf_evsel *
|
|
|
|
__add_event(struct list_head *list, int *idx,
|
|
|
|
struct perf_event_attr *attr,
|
|
|
|
char *name, struct cpu_map *cpus)
|
2012-03-16 02:09:15 +07:00
|
|
|
{
|
|
|
|
struct perf_evsel *evsel;
|
|
|
|
|
|
|
|
event_attr_init(attr);
|
|
|
|
|
2013-11-08 02:41:19 +07:00
|
|
|
evsel = perf_evsel__new_idx(attr, (*idx)++);
|
2013-07-03 02:27:25 +07:00
|
|
|
if (!evsel)
|
2013-11-12 23:58:49 +07:00
|
|
|
return NULL;
|
2012-03-16 02:09:15 +07:00
|
|
|
|
2012-09-10 14:53:50 +07:00
|
|
|
evsel->cpus = cpus;
|
2012-06-12 23:45:00 +07:00
|
|
|
if (name)
|
|
|
|
evsel->name = strdup(name);
|
2012-05-21 14:12:51 +07:00
|
|
|
list_add_tail(&evsel->node, list);
|
2013-11-12 23:58:49 +07:00
|
|
|
return evsel;
|
2012-03-16 02:09:15 +07:00
|
|
|
}
|
|
|
|
|
2013-07-03 02:27:25 +07:00
|
|
|
static int add_event(struct list_head *list, int *idx,
|
2012-09-10 14:53:50 +07:00
|
|
|
struct perf_event_attr *attr, char *name)
|
|
|
|
{
|
2013-11-12 23:58:49 +07:00
|
|
|
return __add_event(list, idx, attr, name, NULL) ? 0 : -ENOMEM;
|
2012-09-10 14:53:50 +07:00
|
|
|
}
|
|
|
|
|
2012-06-12 00:08:07 +07:00
|
|
|
static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size)
|
perf_counter: Implement generalized cache event types
Extend generic event enumeration with the PERF_TYPE_HW_CACHE
method.
This is a 3-dimensional space:
{ L1-D, L1-I, L2, ITLB, DTLB, BPU } x
{ load, store, prefetch } x
{ accesses, misses }
User-space passes in the 3 coordinates and the kernel provides
a counter. (if the hardware supports that type and if the
combination makes sense.)
Combinations that make no sense produce a -EINVAL.
Combinations that are not supported by the hardware produce -ENOTSUP.
Extend the tools to deal with this, and rewrite the event symbol
parsing code with various popular aliases for the units and
access methods above. So 'l1-cache-miss' and 'l1d-read-ops' are
both valid aliases.
( x86 is supported for now, with the Nehalem event table filled in,
and with Core2 and Atom having placeholder tables. )
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-06-06 01:22:46 +07:00
|
|
|
{
|
|
|
|
int i, j;
|
2009-07-01 10:04:34 +07:00
|
|
|
int n, longest = -1;
|
perf_counter: Implement generalized cache event types
Extend generic event enumeration with the PERF_TYPE_HW_CACHE
method.
This is a 3-dimensional space:
{ L1-D, L1-I, L2, ITLB, DTLB, BPU } x
{ load, store, prefetch } x
{ accesses, misses }
User-space passes in the 3 coordinates and the kernel provides
a counter. (if the hardware supports that type and if the
combination makes sense.)
Combinations that make no sense produce a -EINVAL.
Combinations that are not supported by the hardware produce -ENOTSUP.
Extend the tools to deal with this, and rewrite the event symbol
parsing code with various popular aliases for the units and
access methods above. So 'l1-cache-miss' and 'l1d-read-ops' are
both valid aliases.
( x86 is supported for now, with the Nehalem event table filled in,
and with Core2 and Atom having placeholder tables. )
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-06-06 01:22:46 +07:00
|
|
|
|
|
|
|
for (i = 0; i < size; i++) {
|
2012-06-12 00:08:07 +07:00
|
|
|
for (j = 0; j < PERF_EVSEL__MAX_ALIASES && names[i][j]; j++) {
|
2009-07-01 10:04:34 +07:00
|
|
|
n = strlen(names[i][j]);
|
2012-03-16 02:09:15 +07:00
|
|
|
if (n > longest && !strncasecmp(str, names[i][j], n))
|
2009-07-01 10:04:34 +07:00
|
|
|
longest = n;
|
|
|
|
}
|
2012-03-16 02:09:15 +07:00
|
|
|
if (longest > 0)
|
2009-07-01 10:04:34 +07:00
|
|
|
return i;
|
perf_counter: Implement generalized cache event types
Extend generic event enumeration with the PERF_TYPE_HW_CACHE
method.
This is a 3-dimensional space:
{ L1-D, L1-I, L2, ITLB, DTLB, BPU } x
{ load, store, prefetch } x
{ accesses, misses }
User-space passes in the 3 coordinates and the kernel provides
a counter. (if the hardware supports that type and if the
combination makes sense.)
Combinations that make no sense produce a -EINVAL.
Combinations that are not supported by the hardware produce -ENOTSUP.
Extend the tools to deal with this, and rewrite the event symbol
parsing code with various popular aliases for the units and
access methods above. So 'l1-cache-miss' and 'l1d-read-ops' are
both valid aliases.
( x86 is supported for now, with the Nehalem event table filled in,
and with Core2 and Atom having placeholder tables. )
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-06-06 01:22:46 +07:00
|
|
|
}
|
|
|
|
|
2009-06-07 02:04:17 +07:00
|
|
|
return -1;
|
perf_counter: Implement generalized cache event types
Extend generic event enumeration with the PERF_TYPE_HW_CACHE
method.
This is a 3-dimensional space:
{ L1-D, L1-I, L2, ITLB, DTLB, BPU } x
{ load, store, prefetch } x
{ accesses, misses }
User-space passes in the 3 coordinates and the kernel provides
a counter. (if the hardware supports that type and if the
combination makes sense.)
Combinations that make no sense produce a -EINVAL.
Combinations that are not supported by the hardware produce -ENOTSUP.
Extend the tools to deal with this, and rewrite the event symbol
parsing code with various popular aliases for the units and
access methods above. So 'l1-cache-miss' and 'l1d-read-ops' are
both valid aliases.
( x86 is supported for now, with the Nehalem event table filled in,
and with Core2 and Atom having placeholder tables. )
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-06-06 01:22:46 +07:00
|
|
|
}
|
|
|
|
|
2013-07-03 02:27:25 +07:00
|
|
|
int parse_events_add_cache(struct list_head *list, int *idx,
|
2012-03-16 02:09:15 +07:00
|
|
|
char *type, char *op_result1, char *op_result2)
|
perf_counter: Implement generalized cache event types
Extend generic event enumeration with the PERF_TYPE_HW_CACHE
method.
This is a 3-dimensional space:
{ L1-D, L1-I, L2, ITLB, DTLB, BPU } x
{ load, store, prefetch } x
{ accesses, misses }
User-space passes in the 3 coordinates and the kernel provides
a counter. (if the hardware supports that type and if the
combination makes sense.)
Combinations that make no sense produce a -EINVAL.
Combinations that are not supported by the hardware produce -ENOTSUP.
Extend the tools to deal with this, and rewrite the event symbol
parsing code with various popular aliases for the units and
access methods above. So 'l1-cache-miss' and 'l1d-read-ops' are
both valid aliases.
( x86 is supported for now, with the Nehalem event table filled in,
and with Core2 and Atom having placeholder tables. )
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-06-06 01:22:46 +07:00
|
|
|
{
|
2012-03-16 02:09:15 +07:00
|
|
|
struct perf_event_attr attr;
|
|
|
|
char name[MAX_NAME_LEN];
|
2009-07-01 10:04:34 +07:00
|
|
|
int cache_type = -1, cache_op = -1, cache_result = -1;
|
2012-03-16 02:09:15 +07:00
|
|
|
char *op_result[2] = { op_result1, op_result2 };
|
|
|
|
int i, n;
|
perf_counter: Implement generalized cache event types
Extend generic event enumeration with the PERF_TYPE_HW_CACHE
method.
This is a 3-dimensional space:
{ L1-D, L1-I, L2, ITLB, DTLB, BPU } x
{ load, store, prefetch } x
{ accesses, misses }
User-space passes in the 3 coordinates and the kernel provides
a counter. (if the hardware supports that type and if the
combination makes sense.)
Combinations that make no sense produce a -EINVAL.
Combinations that are not supported by the hardware produce -ENOTSUP.
Extend the tools to deal with this, and rewrite the event symbol
parsing code with various popular aliases for the units and
access methods above. So 'l1-cache-miss' and 'l1d-read-ops' are
both valid aliases.
( x86 is supported for now, with the Nehalem event table filled in,
and with Core2 and Atom having placeholder tables. )
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-06-06 01:22:46 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* No fallback - if we cannot get a clear cache type
|
|
|
|
* then bail out:
|
|
|
|
*/
|
2012-06-12 00:08:07 +07:00
|
|
|
cache_type = parse_aliases(type, perf_evsel__hw_cache,
|
2012-03-16 02:09:15 +07:00
|
|
|
PERF_COUNT_HW_CACHE_MAX);
|
perf_counter: Implement generalized cache event types
Extend generic event enumeration with the PERF_TYPE_HW_CACHE
method.
This is a 3-dimensional space:
{ L1-D, L1-I, L2, ITLB, DTLB, BPU } x
{ load, store, prefetch } x
{ accesses, misses }
User-space passes in the 3 coordinates and the kernel provides
a counter. (if the hardware supports that type and if the
combination makes sense.)
Combinations that make no sense produce a -EINVAL.
Combinations that are not supported by the hardware produce -ENOTSUP.
Extend the tools to deal with this, and rewrite the event symbol
parsing code with various popular aliases for the units and
access methods above. So 'l1-cache-miss' and 'l1d-read-ops' are
both valid aliases.
( x86 is supported for now, with the Nehalem event table filled in,
and with Core2 and Atom having placeholder tables. )
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-06-06 01:22:46 +07:00
|
|
|
if (cache_type == -1)
|
2012-03-16 02:09:15 +07:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
n = snprintf(name, MAX_NAME_LEN, "%s", type);
|
2009-07-01 10:04:34 +07:00
|
|
|
|
2012-03-16 02:09:15 +07:00
|
|
|
for (i = 0; (i < 2) && (op_result[i]); i++) {
|
|
|
|
char *str = op_result[i];
|
|
|
|
|
2012-09-06 00:51:33 +07:00
|
|
|
n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str);
|
2009-07-01 10:04:34 +07:00
|
|
|
|
|
|
|
if (cache_op == -1) {
|
2012-06-12 00:08:07 +07:00
|
|
|
cache_op = parse_aliases(str, perf_evsel__hw_cache_op,
|
2012-03-16 02:09:15 +07:00
|
|
|
PERF_COUNT_HW_CACHE_OP_MAX);
|
2009-07-01 10:04:34 +07:00
|
|
|
if (cache_op >= 0) {
|
2012-06-12 00:08:07 +07:00
|
|
|
if (!perf_evsel__is_cache_op_valid(cache_type, cache_op))
|
2012-03-16 02:09:15 +07:00
|
|
|
return -EINVAL;
|
2009-07-01 10:04:34 +07:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cache_result == -1) {
|
2012-06-12 00:08:07 +07:00
|
|
|
cache_result = parse_aliases(str, perf_evsel__hw_cache_result,
|
|
|
|
PERF_COUNT_HW_CACHE_RESULT_MAX);
|
2009-07-01 10:04:34 +07:00
|
|
|
if (cache_result >= 0)
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
perf_counter: Implement generalized cache event types
Extend generic event enumeration with the PERF_TYPE_HW_CACHE
method.
This is a 3-dimensional space:
{ L1-D, L1-I, L2, ITLB, DTLB, BPU } x
{ load, store, prefetch } x
{ accesses, misses }
User-space passes in the 3 coordinates and the kernel provides
a counter. (if the hardware supports that type and if the
combination makes sense.)
Combinations that make no sense produce a -EINVAL.
Combinations that are not supported by the hardware produce -ENOTSUP.
Extend the tools to deal with this, and rewrite the event symbol
parsing code with various popular aliases for the units and
access methods above. So 'l1-cache-miss' and 'l1d-read-ops' are
both valid aliases.
( x86 is supported for now, with the Nehalem event table filled in,
and with Core2 and Atom having placeholder tables. )
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-06-06 01:22:46 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Fall back to reads:
|
|
|
|
*/
|
2009-06-07 02:04:17 +07:00
|
|
|
if (cache_op == -1)
|
|
|
|
cache_op = PERF_COUNT_HW_CACHE_OP_READ;
|
perf_counter: Implement generalized cache event types
Extend generic event enumeration with the PERF_TYPE_HW_CACHE
method.
This is a 3-dimensional space:
{ L1-D, L1-I, L2, ITLB, DTLB, BPU } x
{ load, store, prefetch } x
{ accesses, misses }
User-space passes in the 3 coordinates and the kernel provides
a counter. (if the hardware supports that type and if the
combination makes sense.)
Combinations that make no sense produce a -EINVAL.
Combinations that are not supported by the hardware produce -ENOTSUP.
Extend the tools to deal with this, and rewrite the event symbol
parsing code with various popular aliases for the units and
access methods above. So 'l1-cache-miss' and 'l1d-read-ops' are
both valid aliases.
( x86 is supported for now, with the Nehalem event table filled in,
and with Core2 and Atom having placeholder tables. )
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-06-06 01:22:46 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Fall back to accesses:
|
|
|
|
*/
|
|
|
|
if (cache_result == -1)
|
|
|
|
cache_result = PERF_COUNT_HW_CACHE_RESULT_ACCESS;
|
|
|
|
|
2012-03-16 02:09:15 +07:00
|
|
|
memset(&attr, 0, sizeof(attr));
|
|
|
|
attr.config = cache_type | (cache_op << 8) | (cache_result << 16);
|
|
|
|
attr.type = PERF_TYPE_HW_CACHE;
|
|
|
|
return add_event(list, idx, &attr, name);
|
2009-09-12 04:19:45 +07:00
|
|
|
}
|
|
|
|
|
2013-07-03 02:27:25 +07:00
|
|
|
static int add_tracepoint(struct list_head *list, int *idx,
|
2012-03-16 02:09:15 +07:00
|
|
|
char *sys_name, char *evt_name)
|
2009-09-12 04:19:45 +07:00
|
|
|
{
|
2012-09-27 03:13:07 +07:00
|
|
|
struct perf_evsel *evsel;
|
2009-09-12 04:19:45 +07:00
|
|
|
|
2013-11-08 02:41:19 +07:00
|
|
|
evsel = perf_evsel__newtp_idx(sys_name, evt_name, (*idx)++);
|
2013-07-03 02:27:25 +07:00
|
|
|
if (!evsel)
|
2012-09-27 03:13:07 +07:00
|
|
|
return -ENOMEM;
|
2009-09-12 04:19:45 +07:00
|
|
|
|
2012-09-27 03:13:07 +07:00
|
|
|
list_add_tail(&evsel->node, list);
|
2013-07-03 02:27:25 +07:00
|
|
|
|
2012-09-27 03:13:07 +07:00
|
|
|
return 0;
|
perf_counter: Implement generalized cache event types
Extend generic event enumeration with the PERF_TYPE_HW_CACHE
method.
This is a 3-dimensional space:
{ L1-D, L1-I, L2, ITLB, DTLB, BPU } x
{ load, store, prefetch } x
{ accesses, misses }
User-space passes in the 3 coordinates and the kernel provides
a counter. (if the hardware supports that type and if the
combination makes sense.)
Combinations that make no sense produce a -EINVAL.
Combinations that are not supported by the hardware produce -ENOTSUP.
Extend the tools to deal with this, and rewrite the event symbol
parsing code with various popular aliases for the units and
access methods above. So 'l1-cache-miss' and 'l1d-read-ops' are
both valid aliases.
( x86 is supported for now, with the Nehalem event table filled in,
and with Core2 and Atom having placeholder tables. )
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-06-06 01:22:46 +07:00
|
|
|
}
|
|
|
|
|
2013-07-03 02:27:25 +07:00
|
|
|
static int add_tracepoint_multi_event(struct list_head *list, int *idx,
|
2012-12-17 20:08:37 +07:00
|
|
|
char *sys_name, char *evt_name)
|
2009-09-12 04:19:45 +07:00
|
|
|
{
|
|
|
|
char evt_path[MAXPATHLEN];
|
|
|
|
struct dirent *evt_ent;
|
|
|
|
DIR *evt_dir;
|
2012-03-16 02:09:15 +07:00
|
|
|
int ret = 0;
|
2009-09-12 04:19:45 +07:00
|
|
|
|
2011-11-16 23:03:07 +07:00
|
|
|
snprintf(evt_path, MAXPATHLEN, "%s/%s", tracing_events_path, sys_name);
|
2009-09-12 04:19:45 +07:00
|
|
|
evt_dir = opendir(evt_path);
|
|
|
|
if (!evt_dir) {
|
|
|
|
perror("Can't open event dir");
|
2012-03-16 02:09:15 +07:00
|
|
|
return -1;
|
2009-09-12 04:19:45 +07:00
|
|
|
}
|
|
|
|
|
2012-03-16 02:09:15 +07:00
|
|
|
while (!ret && (evt_ent = readdir(evt_dir))) {
|
2009-09-12 04:19:45 +07:00
|
|
|
if (!strcmp(evt_ent->d_name, ".")
|
|
|
|
|| !strcmp(evt_ent->d_name, "..")
|
|
|
|
|| !strcmp(evt_ent->d_name, "enable")
|
|
|
|
|| !strcmp(evt_ent->d_name, "filter"))
|
|
|
|
continue;
|
|
|
|
|
2012-03-16 02:09:15 +07:00
|
|
|
if (!strglobmatch(evt_ent->d_name, evt_name))
|
2010-01-06 05:47:17 +07:00
|
|
|
continue;
|
|
|
|
|
2012-03-16 02:09:15 +07:00
|
|
|
ret = add_tracepoint(list, idx, sys_name, evt_ent->d_name);
|
2009-09-12 04:19:45 +07:00
|
|
|
}
|
|
|
|
|
2012-12-17 20:08:36 +07:00
|
|
|
closedir(evt_dir);
|
2012-03-16 02:09:15 +07:00
|
|
|
return ret;
|
2009-09-12 04:19:45 +07:00
|
|
|
}
|
|
|
|
|
2013-07-03 02:27:25 +07:00
|
|
|
static int add_tracepoint_event(struct list_head *list, int *idx,
				char *sys_name, char *evt_name)
{
	/* A glob in the event name means "all matching events". */
	if (strpbrk(evt_name, "*?"))
		return add_tracepoint_multi_event(list, idx, sys_name, evt_name);

	return add_tracepoint(list, idx, sys_name, evt_name);
}
|
|
|
|
|
2013-07-03 02:27:25 +07:00
|
|
|
static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
|
2012-12-17 20:08:37 +07:00
|
|
|
char *sys_name, char *evt_name)
|
|
|
|
{
|
|
|
|
struct dirent *events_ent;
|
|
|
|
DIR *events_dir;
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
events_dir = opendir(tracing_events_path);
|
|
|
|
if (!events_dir) {
|
|
|
|
perror("Can't open event dir");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
while (!ret && (events_ent = readdir(events_dir))) {
|
|
|
|
if (!strcmp(events_ent->d_name, ".")
|
|
|
|
|| !strcmp(events_ent->d_name, "..")
|
|
|
|
|| !strcmp(events_ent->d_name, "enable")
|
|
|
|
|| !strcmp(events_ent->d_name, "header_event")
|
|
|
|
|| !strcmp(events_ent->d_name, "header_page"))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (!strglobmatch(events_ent->d_name, sys_name))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
ret = add_tracepoint_event(list, idx, events_ent->d_name,
|
|
|
|
evt_name);
|
|
|
|
}
|
|
|
|
|
|
|
|
closedir(events_dir);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2013-07-03 02:27:25 +07:00
|
|
|
int parse_events_add_tracepoint(struct list_head *list, int *idx,
				char *sys, char *event)
{
	/* A glob in the subsystem name fans out over all matching systems. */
	return strpbrk(sys, "*?") ?
	       add_tracepoint_multi_sys(list, idx, sys, event) :
	       add_tracepoint_event(list, idx, sys, event);
}
|
|
|
|
|
2012-03-16 02:09:15 +07:00
|
|
|
static int
|
|
|
|
parse_breakpoint_type(const char *type, struct perf_event_attr *attr)
|
2009-11-23 21:42:35 +07:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < 3; i++) {
|
2012-03-16 02:09:15 +07:00
|
|
|
if (!type || !type[i])
|
2009-11-23 21:42:35 +07:00
|
|
|
break;
|
|
|
|
|
2012-06-29 14:22:54 +07:00
|
|
|
#define CHECK_SET_TYPE(bit) \
|
|
|
|
do { \
|
|
|
|
if (attr->bp_type & bit) \
|
|
|
|
return -EINVAL; \
|
|
|
|
else \
|
|
|
|
attr->bp_type |= bit; \
|
|
|
|
} while (0)
|
|
|
|
|
2009-11-23 21:42:35 +07:00
|
|
|
switch (type[i]) {
|
|
|
|
case 'r':
|
2012-06-29 14:22:54 +07:00
|
|
|
CHECK_SET_TYPE(HW_BREAKPOINT_R);
|
2009-11-23 21:42:35 +07:00
|
|
|
break;
|
|
|
|
case 'w':
|
2012-06-29 14:22:54 +07:00
|
|
|
CHECK_SET_TYPE(HW_BREAKPOINT_W);
|
2009-11-23 21:42:35 +07:00
|
|
|
break;
|
|
|
|
case 'x':
|
2012-06-29 14:22:54 +07:00
|
|
|
CHECK_SET_TYPE(HW_BREAKPOINT_X);
|
2009-11-23 21:42:35 +07:00
|
|
|
break;
|
|
|
|
default:
|
2012-03-16 02:09:15 +07:00
|
|
|
return -EINVAL;
|
2009-11-23 21:42:35 +07:00
|
|
|
}
|
|
|
|
}
|
2012-03-16 02:09:15 +07:00
|
|
|
|
2012-06-29 14:22:54 +07:00
|
|
|
#undef CHECK_SET_TYPE
|
|
|
|
|
2009-11-23 21:42:35 +07:00
|
|
|
if (!attr->bp_type) /* Default */
|
|
|
|
attr->bp_type = HW_BREAKPOINT_R | HW_BREAKPOINT_W;
|
|
|
|
|
2012-03-16 02:09:15 +07:00
|
|
|
return 0;
|
2009-11-23 21:42:35 +07:00
|
|
|
}
|
|
|
|
|
2013-07-03 02:27:25 +07:00
|
|
|
int parse_events_add_breakpoint(struct list_head *list, int *idx,
				void *ptr, char *type, u64 len)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.bp_addr = (unsigned long) ptr;

	if (parse_breakpoint_type(type, &attr))
		return -EINVAL;

	/* Provide some defaults if len is not specified */
	if (!len)
		len = (attr.bp_type == HW_BREAKPOINT_X) ?
		      sizeof(long) : HW_BREAKPOINT_LEN_4;

	attr.bp_len = len;

	attr.type = PERF_TYPE_BREAKPOINT;
	attr.sample_period = 1;

	return add_event(list, idx, &attr, NULL);
}
|
|
|
|
|
2012-03-16 02:09:16 +07:00
|
|
|
static int config_term(struct perf_event_attr *attr,
|
2013-01-19 02:29:49 +07:00
|
|
|
struct parse_events_term *term)
|
2012-03-16 02:09:16 +07:00
|
|
|
{
|
2012-04-25 23:24:57 +07:00
|
|
|
#define CHECK_TYPE_VAL(type) \
|
|
|
|
do { \
|
|
|
|
if (PARSE_EVENTS__TERM_TYPE_ ## type != term->type_val) \
|
|
|
|
return -EINVAL; \
|
|
|
|
} while (0)
|
|
|
|
|
|
|
|
switch (term->type_term) {
|
2012-03-16 02:09:16 +07:00
|
|
|
case PARSE_EVENTS__TERM_TYPE_CONFIG:
|
2012-04-25 23:24:57 +07:00
|
|
|
CHECK_TYPE_VAL(NUM);
|
2012-03-16 02:09:16 +07:00
|
|
|
attr->config = term->val.num;
|
|
|
|
break;
|
|
|
|
case PARSE_EVENTS__TERM_TYPE_CONFIG1:
|
2012-04-25 23:24:57 +07:00
|
|
|
CHECK_TYPE_VAL(NUM);
|
2012-03-16 02:09:16 +07:00
|
|
|
attr->config1 = term->val.num;
|
|
|
|
break;
|
|
|
|
case PARSE_EVENTS__TERM_TYPE_CONFIG2:
|
2012-04-25 23:24:57 +07:00
|
|
|
CHECK_TYPE_VAL(NUM);
|
2012-03-16 02:09:16 +07:00
|
|
|
attr->config2 = term->val.num;
|
|
|
|
break;
|
|
|
|
case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
|
2012-04-25 23:24:57 +07:00
|
|
|
CHECK_TYPE_VAL(NUM);
|
2012-03-16 02:09:16 +07:00
|
|
|
attr->sample_period = term->val.num;
|
|
|
|
break;
|
|
|
|
case PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE:
|
|
|
|
/*
|
|
|
|
* TODO uncomment when the field is available
|
|
|
|
* attr->branch_sample_type = term->val.num;
|
|
|
|
*/
|
|
|
|
break;
|
2012-05-21 14:12:53 +07:00
|
|
|
case PARSE_EVENTS__TERM_TYPE_NAME:
|
|
|
|
CHECK_TYPE_VAL(STR);
|
|
|
|
break;
|
2012-03-16 02:09:16 +07:00
|
|
|
default:
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
2012-04-25 23:24:57 +07:00
|
|
|
|
2012-03-16 02:09:16 +07:00
|
|
|
return 0;
|
2012-04-25 23:24:57 +07:00
|
|
|
#undef CHECK_TYPE_VAL
|
2012-03-16 02:09:16 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static int config_attr(struct perf_event_attr *attr,
|
|
|
|
struct list_head *head, int fail)
|
|
|
|
{
|
2013-01-19 02:29:49 +07:00
|
|
|
struct parse_events_term *term;
|
2012-03-16 02:09:16 +07:00
|
|
|
|
|
|
|
list_for_each_entry(term, head, list)
|
|
|
|
if (config_term(attr, term) && fail)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-07-03 02:27:25 +07:00
|
|
|
int parse_events_add_numeric(struct list_head *list, int *idx,
|
2012-08-08 00:43:13 +07:00
|
|
|
u32 type, u64 config,
|
2012-03-16 02:09:16 +07:00
|
|
|
struct list_head *head_config)
|
2009-05-26 16:10:09 +07:00
|
|
|
{
|
2012-03-16 02:09:15 +07:00
|
|
|
struct perf_event_attr attr;
|
2009-07-01 10:04:34 +07:00
|
|
|
|
2012-03-16 02:09:15 +07:00
|
|
|
memset(&attr, 0, sizeof(attr));
|
|
|
|
attr.type = type;
|
|
|
|
attr.config = config;
|
2012-03-16 02:09:16 +07:00
|
|
|
|
|
|
|
if (head_config &&
|
|
|
|
config_attr(&attr, head_config, 1))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2012-06-12 23:45:00 +07:00
|
|
|
return add_event(list, idx, &attr, NULL);
|
2009-07-01 10:04:34 +07:00
|
|
|
}
|
2009-05-26 16:10:09 +07:00
|
|
|
|
2013-01-19 02:29:49 +07:00
|
|
|
static int parse_events__is_name_term(struct parse_events_term *term)
|
2012-05-21 14:12:53 +07:00
|
|
|
{
|
|
|
|
return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME;
|
|
|
|
}
|
|
|
|
|
2012-06-12 23:45:00 +07:00
|
|
|
static char *pmu_event_name(struct list_head *head_terms)
|
2012-05-21 14:12:53 +07:00
|
|
|
{
|
2013-01-19 02:29:49 +07:00
|
|
|
struct parse_events_term *term;
|
2012-05-21 14:12:53 +07:00
|
|
|
|
|
|
|
list_for_each_entry(term, head_terms, list)
|
|
|
|
if (parse_events__is_name_term(term))
|
|
|
|
return term->val.str;
|
|
|
|
|
2012-06-12 23:45:00 +07:00
|
|
|
return NULL;
|
2012-05-21 14:12:53 +07:00
|
|
|
}
|
|
|
|
|
2013-07-03 02:27:25 +07:00
|
|
|
int parse_events_add_pmu(struct list_head *list, int *idx,
|
2012-03-16 02:09:18 +07:00
|
|
|
char *name, struct list_head *head_config)
|
|
|
|
{
|
|
|
|
struct perf_event_attr attr;
|
2014-09-24 21:04:06 +07:00
|
|
|
struct perf_pmu_info info;
|
2012-03-16 02:09:18 +07:00
|
|
|
struct perf_pmu *pmu;
|
2013-11-12 23:58:49 +07:00
|
|
|
struct perf_evsel *evsel;
|
2012-03-16 02:09:18 +07:00
|
|
|
|
|
|
|
pmu = perf_pmu__find(name);
|
|
|
|
if (!pmu)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2014-07-31 13:00:49 +07:00
|
|
|
if (pmu->default_config) {
|
|
|
|
memcpy(&attr, pmu->default_config,
|
|
|
|
sizeof(struct perf_event_attr));
|
|
|
|
} else {
|
|
|
|
memset(&attr, 0, sizeof(attr));
|
|
|
|
}
|
2012-03-16 02:09:18 +07:00
|
|
|
|
2014-08-16 02:08:40 +07:00
|
|
|
if (!head_config) {
|
|
|
|
attr.type = pmu->type;
|
|
|
|
evsel = __add_event(list, idx, &attr, NULL, pmu->cpus);
|
|
|
|
return evsel ? 0 : -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2014-09-24 21:04:06 +07:00
|
|
|
if (perf_pmu__check_alias(pmu, head_config, &info))
|
2012-06-15 13:31:41 +07:00
|
|
|
return -EINVAL;
|
|
|
|
|
2012-03-16 02:09:18 +07:00
|
|
|
/*
|
|
|
|
* Configure hardcoded terms first, no need to check
|
|
|
|
* return value when called with fail == 0 ;)
|
|
|
|
*/
|
|
|
|
config_attr(&attr, head_config, 0);
|
|
|
|
|
|
|
|
if (perf_pmu__config(pmu, &attr, head_config))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2013-11-12 23:58:49 +07:00
|
|
|
evsel = __add_event(list, idx, &attr, pmu_event_name(head_config),
|
|
|
|
pmu->cpus);
|
|
|
|
if (evsel) {
|
2014-09-24 21:04:06 +07:00
|
|
|
evsel->unit = info.unit;
|
|
|
|
evsel->scale = info.scale;
|
2014-11-21 16:31:12 +07:00
|
|
|
evsel->per_pkg = info.per_pkg;
|
2014-11-21 16:31:13 +07:00
|
|
|
evsel->snapshot = info.snapshot;
|
2013-11-12 23:58:49 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
return evsel ? 0 : -ENOMEM;
|
2012-03-16 02:09:18 +07:00
|
|
|
}
|
|
|
|
|
perf tools: Enable grouping logic for parsed events
This patch adds a functionality that allows to create event groups
based on the way they are specified on the command line. Adding
functionality to the '{}' group syntax introduced in earlier patch.
The current '--group/-g' option behaviour remains intact. If you
specify it for record/stat/top command, all the specified events
become members of a single group with the first event as a group
leader.
With the new '{}' group syntax you can create group like:
# perf record -e '{cycles,faults}' ls
resulting in single event group containing 'cycles' and 'faults'
events, with cycles event as group leader.
All groups are created with regards to threads and cpus. Thus
recording an event group within a 2 threads on server with
4 CPUs will create 8 separate groups.
Examples (first event in brackets is group leader):
# 1 group (cpu-clock,task-clock)
perf record --group -e cpu-clock,task-clock ls
perf record -e '{cpu-clock,task-clock}' ls
# 2 groups (cpu-clock,task-clock) (minor-faults,major-faults)
perf record -e '{cpu-clock,task-clock},{minor-faults,major-faults}' ls
# 1 group (cpu-clock,task-clock,minor-faults,major-faults)
perf record --group -e cpu-clock,task-clock -e minor-faults,major-faults ls
perf record -e '{cpu-clock,task-clock,minor-faults,major-faults}' ls
# 2 groups (cpu-clock,task-clock) (minor-faults,major-faults)
perf record -e '{cpu-clock,task-clock} -e '{minor-faults,major-faults}' \
-e instructions ls
# 1 group
# (cpu-clock,task-clock,minor-faults,major-faults,instructions)
perf record --group -e cpu-clock,task-clock \
-e minor-faults,major-faults -e instructions ls perf record -e
'{cpu-clock,task-clock,minor-faults,major-faults,instructions}' ls
It's possible to use standard event modifier for a group, which spans
over all events in the group and updates each event modifier settings,
for example:
# perf record -r '{faults:k,cache-references}:p'
resulting in ':kp' modifier being used for 'faults' and ':p' modifier
being used for 'cache-references' event.
Reviewed-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ulrich Drepper <drepper@gmail.com>
Link: http://lkml.kernel.org/n/tip-ho42u0wcr8mn1otkalqi13qp@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2012-08-08 17:22:36 +07:00
|
|
|
int parse_events__modifier_group(struct list_head *list,
|
|
|
|
char *event_mod)
|
2012-08-08 17:14:14 +07:00
|
|
|
{
|
perf tools: Enable grouping logic for parsed events
This patch adds a functionality that allows to create event groups
based on the way they are specified on the command line. Adding
functionality to the '{}' group syntax introduced in earlier patch.
The current '--group/-g' option behaviour remains intact. If you
specify it for record/stat/top command, all the specified events
become members of a single group with the first event as a group
leader.
With the new '{}' group syntax you can create group like:
# perf record -e '{cycles,faults}' ls
resulting in single event group containing 'cycles' and 'faults'
events, with cycles event as group leader.
All groups are created with regards to threads and cpus. Thus
recording an event group within a 2 threads on server with
4 CPUs will create 8 separate groups.
Examples (first event in brackets is group leader):
# 1 group (cpu-clock,task-clock)
perf record --group -e cpu-clock,task-clock ls
perf record -e '{cpu-clock,task-clock}' ls
# 2 groups (cpu-clock,task-clock) (minor-faults,major-faults)
perf record -e '{cpu-clock,task-clock},{minor-faults,major-faults}' ls
# 1 group (cpu-clock,task-clock,minor-faults,major-faults)
perf record --group -e cpu-clock,task-clock -e minor-faults,major-faults ls
perf record -e '{cpu-clock,task-clock,minor-faults,major-faults}' ls
# 2 groups (cpu-clock,task-clock) (minor-faults,major-faults)
perf record -e '{cpu-clock,task-clock} -e '{minor-faults,major-faults}' \
-e instructions ls
# 1 group
# (cpu-clock,task-clock,minor-faults,major-faults,instructions)
perf record --group -e cpu-clock,task-clock \
-e minor-faults,major-faults -e instructions ls perf record -e
'{cpu-clock,task-clock,minor-faults,major-faults,instructions}' ls
It's possible to use standard event modifier for a group, which spans
over all events in the group and updates each event modifier settings,
for example:
# perf record -r '{faults:k,cache-references}:p'
resulting in ':kp' modifier being used for 'faults' and ':p' modifier
being used for 'cache-references' event.
Reviewed-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ulrich Drepper <drepper@gmail.com>
Link: http://lkml.kernel.org/n/tip-ho42u0wcr8mn1otkalqi13qp@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2012-08-08 17:22:36 +07:00
|
|
|
return parse_events__modifier_event(list, event_mod, true);
|
|
|
|
}
|
|
|
|
|
2012-08-15 02:35:48 +07:00
|
|
|
void parse_events__set_leader(char *name, struct list_head *list)
|
perf tools: Enable grouping logic for parsed events
This patch adds a functionality that allows to create event groups
based on the way they are specified on the command line. Adding
functionality to the '{}' group syntax introduced in earlier patch.
The current '--group/-g' option behaviour remains intact. If you
specify it for record/stat/top command, all the specified events
become members of a single group with the first event as a group
leader.
With the new '{}' group syntax you can create group like:
# perf record -e '{cycles,faults}' ls
resulting in single event group containing 'cycles' and 'faults'
events, with cycles event as group leader.
All groups are created with regards to threads and cpus. Thus
recording an event group within a 2 threads on server with
4 CPUs will create 8 separate groups.
Examples (first event in brackets is group leader):
# 1 group (cpu-clock,task-clock)
perf record --group -e cpu-clock,task-clock ls
perf record -e '{cpu-clock,task-clock}' ls
# 2 groups (cpu-clock,task-clock) (minor-faults,major-faults)
perf record -e '{cpu-clock,task-clock},{minor-faults,major-faults}' ls
# 1 group (cpu-clock,task-clock,minor-faults,major-faults)
perf record --group -e cpu-clock,task-clock -e minor-faults,major-faults ls
perf record -e '{cpu-clock,task-clock,minor-faults,major-faults}' ls
# 2 groups (cpu-clock,task-clock) (minor-faults,major-faults)
perf record -e '{cpu-clock,task-clock} -e '{minor-faults,major-faults}' \
-e instructions ls
# 1 group
# (cpu-clock,task-clock,minor-faults,major-faults,instructions)
perf record --group -e cpu-clock,task-clock \
-e minor-faults,major-faults -e instructions ls perf record -e
'{cpu-clock,task-clock,minor-faults,major-faults,instructions}' ls
It's possible to use standard event modifier for a group, which spans
over all events in the group and updates each event modifier settings,
for example:
# perf record -r '{faults:k,cache-references}:p'
resulting in ':kp' modifier being used for 'faults' and ':p' modifier
being used for 'cache-references' event.
Reviewed-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ulrich Drepper <drepper@gmail.com>
Link: http://lkml.kernel.org/n/tip-ho42u0wcr8mn1otkalqi13qp@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2012-08-08 17:22:36 +07:00
|
|
|
{
|
|
|
|
struct perf_evsel *leader;
|
|
|
|
|
2012-08-15 02:35:48 +07:00
|
|
|
__perf_evlist__set_leader(list);
|
|
|
|
leader = list_entry(list->next, struct perf_evsel, node);
|
perf tools: Enable grouping logic for parsed events
This patch adds a functionality that allows to create event groups
based on the way they are specified on the command line. Adding
functionality to the '{}' group syntax introduced in earlier patch.
The current '--group/-g' option behaviour remains intact. If you
specify it for record/stat/top command, all the specified events
become members of a single group with the first event as a group
leader.
With the new '{}' group syntax you can create group like:
# perf record -e '{cycles,faults}' ls
resulting in single event group containing 'cycles' and 'faults'
events, with cycles event as group leader.
All groups are created with regards to threads and cpus. Thus
recording an event group within a 2 threads on server with
4 CPUs will create 8 separate groups.
Examples (first event in brackets is group leader):
# 1 group (cpu-clock,task-clock)
perf record --group -e cpu-clock,task-clock ls
perf record -e '{cpu-clock,task-clock}' ls
# 2 groups (cpu-clock,task-clock) (minor-faults,major-faults)
perf record -e '{cpu-clock,task-clock},{minor-faults,major-faults}' ls
# 1 group (cpu-clock,task-clock,minor-faults,major-faults)
perf record --group -e cpu-clock,task-clock -e minor-faults,major-faults ls
perf record -e '{cpu-clock,task-clock,minor-faults,major-faults}' ls
# 2 groups (cpu-clock,task-clock) (minor-faults,major-faults)
perf record -e '{cpu-clock,task-clock} -e '{minor-faults,major-faults}' \
-e instructions ls
# 1 group
# (cpu-clock,task-clock,minor-faults,major-faults,instructions)
perf record --group -e cpu-clock,task-clock \
-e minor-faults,major-faults -e instructions ls perf record -e
'{cpu-clock,task-clock,minor-faults,major-faults,instructions}' ls
It's possible to use standard event modifier for a group, which spans
over all events in the group and updates each event modifier settings,
for example:
# perf record -r '{faults:k,cache-references}:p'
resulting in ':kp' modifier being used for 'faults' and ':p' modifier
being used for 'cache-references' event.
Reviewed-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Jiri Olsa <jolsa@redhat.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ulrich Drepper <drepper@gmail.com>
Link: http://lkml.kernel.org/n/tip-ho42u0wcr8mn1otkalqi13qp@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2012-08-08 17:22:36 +07:00
|
|
|
leader->group_name = name ? strdup(name) : NULL;
|
2012-08-08 17:14:14 +07:00
|
|
|
}
|
|
|
|
|
2013-07-03 02:27:25 +07:00
|
|
|
/* list_event is assumed to point to malloc'ed memory */
void parse_events_update_lists(struct list_head *list_event,
			       struct list_head *list_all)
{
	/*
	 * Called once per event definition: move this event's
	 * entries onto the 'all events' list and release the
	 * now-empty per-event list head for the next definition.
	 */
	list_splice_tail(list_event, list_all);
	free(list_event);
}
|
|
|
|
|
2012-08-08 17:21:54 +07:00
|
|
|
/*
 * Decoded form of an event modifier string (the ":ukhGHpSD" suffix of
 * an event spec). The e* fields map onto perf_event_attr exclude_* bits.
 */
struct event_modifier {
	int eu;		/* exclude_user */
	int ek;		/* exclude_kernel */
	int eh;		/* exclude_hv */
	int eH;		/* exclude_host */
	int eG;		/* exclude_guest */
	int precise;	/* precise_ip level, 0..3 */
	int exclude_GH;	/* set when an explicit 'G'/'H' modifier was given */
	int sample_read;	/* 'S' modifier seen */
	int pinned;	/* 'D' modifier seen: pin group leader to the PMU */
};
|
|
|
|
|
|
|
|
static int get_event_modifier(struct event_modifier *mod, char *str,
|
|
|
|
struct perf_evsel *evsel)
|
2009-07-01 10:04:34 +07:00
|
|
|
{
|
2012-08-08 17:21:54 +07:00
|
|
|
int eu = evsel ? evsel->attr.exclude_user : 0;
|
|
|
|
int ek = evsel ? evsel->attr.exclude_kernel : 0;
|
|
|
|
int eh = evsel ? evsel->attr.exclude_hv : 0;
|
|
|
|
int eH = evsel ? evsel->attr.exclude_host : 0;
|
|
|
|
int eG = evsel ? evsel->attr.exclude_guest : 0;
|
|
|
|
int precise = evsel ? evsel->attr.precise_ip : 0;
|
2012-10-10 22:39:03 +07:00
|
|
|
int sample_read = 0;
|
perf tools: Add support for pinned modifier
This commit adds support for a new modifier "D", which requests that the
event, or group of events, be pinned to the PMU.
The "p" modifier is already taken for precise, and "P" may be used in
future to mean "fully precise".
So we use "D", which stands for pinneD - and looks like a padlock, or if
you're using the ":D" syntax perf smiles at you.
This is an oft-requested feature from our HW folks, who want to be able
to run a large number of events, but also want 100% accurate results for
instructions per cycle.
Comparison of results with and without pinning:
$ perf stat -e '{cycles,instructions}:D' -e cycles,instructions,...
79,590,480,683 cycles # 0.000 GHz
166,123,716,524 instructions # 2.09 insns per cycle
# 0.11 stalled cycles per insn
79,352,134,463 cycles # 0.000 GHz [11.11%]
165,178,301,818 instructions # 2.08 insns per cycle
# 0.11 stalled cycles per insn [11.13%]
As you can see although perf does a very good job of scaling the values
in the non-pinned case, there is some small discrepancy.
The patch is fairly straight forward, the one detail is that we need to
make sure we only request pinning for the group leader when we have a
group.
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Tested-by: Jiri Olsa <jolsa@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1375795686-4226-1-git-send-email-michael@ellerman.id.au
[ Use perf_evsel__is_group_leader instead of open coded equivalent, as
suggested by Jiri Olsa ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2013-08-06 20:28:05 +07:00
|
|
|
int pinned = evsel ? evsel->attr.pinned : 0;
|
2009-06-06 14:58:57 +07:00
|
|
|
|
2012-08-08 17:21:54 +07:00
|
|
|
int exclude = eu | ek | eh;
|
|
|
|
int exclude_GH = evsel ? evsel->exclude_GH : 0;
|
|
|
|
|
|
|
|
memset(mod, 0, sizeof(*mod));
|
2011-04-27 09:06:33 +07:00
|
|
|
|
2009-07-01 10:04:34 +07:00
|
|
|
while (*str) {
|
2010-04-09 04:03:20 +07:00
|
|
|
if (*str == 'u') {
|
|
|
|
if (!exclude)
|
|
|
|
exclude = eu = ek = eh = 1;
|
2009-07-01 10:04:34 +07:00
|
|
|
eu = 0;
|
2010-04-09 04:03:20 +07:00
|
|
|
} else if (*str == 'k') {
|
|
|
|
if (!exclude)
|
|
|
|
exclude = eu = ek = eh = 1;
|
2009-07-01 10:04:34 +07:00
|
|
|
ek = 0;
|
2010-04-09 04:03:20 +07:00
|
|
|
} else if (*str == 'h') {
|
|
|
|
if (!exclude)
|
|
|
|
exclude = eu = ek = eh = 1;
|
2009-07-01 10:04:34 +07:00
|
|
|
eh = 0;
|
2012-01-04 23:54:19 +07:00
|
|
|
} else if (*str == 'G') {
|
|
|
|
if (!exclude_GH)
|
|
|
|
exclude_GH = eG = eH = 1;
|
|
|
|
eG = 0;
|
|
|
|
} else if (*str == 'H') {
|
|
|
|
if (!exclude_GH)
|
|
|
|
exclude_GH = eG = eH = 1;
|
|
|
|
eH = 0;
|
2010-04-09 04:03:20 +07:00
|
|
|
} else if (*str == 'p') {
|
|
|
|
precise++;
|
2012-09-14 03:59:13 +07:00
|
|
|
/* use of precise requires exclude_guest */
|
|
|
|
if (!exclude_GH)
|
|
|
|
eG = 1;
|
2012-10-10 22:39:03 +07:00
|
|
|
} else if (*str == 'S') {
|
|
|
|
sample_read = 1;
|
perf tools: Add support for pinned modifier
This commit adds support for a new modifier "D", which requests that the
event, or group of events, be pinned to the PMU.
The "p" modifier is already taken for precise, and "P" may be used in
future to mean "fully precise".
So we use "D", which stands for pinneD - and looks like a padlock, or if
you're using the ":D" syntax perf smiles at you.
This is an oft-requested feature from our HW folks, who want to be able
to run a large number of events, but also want 100% accurate results for
instructions per cycle.
Comparison of results with and without pinning:
$ perf stat -e '{cycles,instructions}:D' -e cycles,instructions,...
79,590,480,683 cycles # 0.000 GHz
166,123,716,524 instructions # 2.09 insns per cycle
# 0.11 stalled cycles per insn
79,352,134,463 cycles # 0.000 GHz [11.11%]
165,178,301,818 instructions # 2.08 insns per cycle
# 0.11 stalled cycles per insn [11.13%]
As you can see although perf does a very good job of scaling the values
in the non-pinned case, there is some small discrepancy.
The patch is fairly straight forward, the one detail is that we need to
make sure we only request pinning for the group leader when we have a
group.
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Tested-by: Jiri Olsa <jolsa@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1375795686-4226-1-git-send-email-michael@ellerman.id.au
[ Use perf_evsel__is_group_leader instead of open coded equivalent, as
suggested by Jiri Olsa ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2013-08-06 20:28:05 +07:00
|
|
|
} else if (*str == 'D') {
|
|
|
|
pinned = 1;
|
2010-04-09 04:03:20 +07:00
|
|
|
} else
|
2009-07-01 10:04:34 +07:00
|
|
|
break;
|
2010-04-09 04:03:20 +07:00
|
|
|
|
2009-07-01 10:04:34 +07:00
|
|
|
++str;
|
2009-05-26 14:17:18 +07:00
|
|
|
}
|
2011-04-27 09:06:33 +07:00
|
|
|
|
2012-03-16 02:09:15 +07:00
|
|
|
/*
|
|
|
|
* precise ip:
|
|
|
|
*
|
|
|
|
* 0 - SAMPLE_IP can have arbitrary skid
|
|
|
|
* 1 - SAMPLE_IP must have constant skid
|
|
|
|
* 2 - SAMPLE_IP requested to have 0 skid
|
|
|
|
* 3 - SAMPLE_IP must have 0 skid
|
|
|
|
*
|
|
|
|
* See also PERF_RECORD_MISC_EXACT_IP
|
|
|
|
*/
|
|
|
|
if (precise > 3)
|
|
|
|
return -EINVAL;
|
2011-04-27 09:06:33 +07:00
|
|
|
|
2012-08-08 17:21:54 +07:00
|
|
|
mod->eu = eu;
|
|
|
|
mod->ek = ek;
|
|
|
|
mod->eh = eh;
|
|
|
|
mod->eH = eH;
|
|
|
|
mod->eG = eG;
|
|
|
|
mod->precise = precise;
|
|
|
|
mod->exclude_GH = exclude_GH;
|
2012-10-10 22:39:03 +07:00
|
|
|
mod->sample_read = sample_read;
|
perf tools: Add support for pinned modifier
This commit adds support for a new modifier "D", which requests that the
event, or group of events, be pinned to the PMU.
The "p" modifier is already taken for precise, and "P" may be used in
future to mean "fully precise".
So we use "D", which stands for pinneD - and looks like a padlock, or if
you're using the ":D" syntax perf smiles at you.
This is an oft-requested feature from our HW folks, who want to be able
to run a large number of events, but also want 100% accurate results for
instructions per cycle.
Comparison of results with and without pinning:
$ perf stat -e '{cycles,instructions}:D' -e cycles,instructions,...
79,590,480,683 cycles # 0.000 GHz
166,123,716,524 instructions # 2.09 insns per cycle
# 0.11 stalled cycles per insn
79,352,134,463 cycles # 0.000 GHz [11.11%]
165,178,301,818 instructions # 2.08 insns per cycle
# 0.11 stalled cycles per insn [11.13%]
As you can see although perf does a very good job of scaling the values
in the non-pinned case, there is some small discrepancy.
The patch is fairly straight forward, the one detail is that we need to
make sure we only request pinning for the group leader when we have a
group.
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Tested-by: Jiri Olsa <jolsa@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1375795686-4226-1-git-send-email-michael@ellerman.id.au
[ Use perf_evsel__is_group_leader instead of open coded equivalent, as
suggested by Jiri Olsa ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2013-08-06 20:28:05 +07:00
|
|
|
mod->pinned = pinned;
|
|
|
|
|
2012-08-08 17:21:54 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-11-13 21:32:58 +07:00
|
|
|
/*
 * Basic modifier sanity check to validate it contains only one
 * instance of any modifier (apart from 'p') present.
 */
static int check_modifier(char *str)
{
	size_t i, len = strlen(str);

	/* Longest valid string is "ukhGHpppSD"; the sizeof includes the NUL. */
	if (len > sizeof("ukhGHpppSD") - 1)
		return -1;

	for (i = 0; i < len; i++) {
		if (str[i] == 'p')
			continue;	/* 'p' may legitimately repeat */
		if (strchr(str + i + 1, str[i]) != NULL)
			return -1;	/* same modifier appears again later */
	}

	return 0;
}
|
|
|
|
|
2012-08-08 17:21:54 +07:00
|
|
|
int parse_events__modifier_event(struct list_head *list, char *str, bool add)
|
|
|
|
{
|
|
|
|
struct perf_evsel *evsel;
|
|
|
|
struct event_modifier mod;
|
|
|
|
|
|
|
|
if (str == NULL)
|
|
|
|
return 0;
|
|
|
|
|
2012-11-13 21:32:58 +07:00
|
|
|
if (check_modifier(str))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2012-08-08 17:21:54 +07:00
|
|
|
if (!add && get_event_modifier(&mod, str, NULL))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2014-01-10 20:37:27 +07:00
|
|
|
__evlist__for_each(list, evsel) {
|
2012-08-08 17:21:54 +07:00
|
|
|
if (add && get_event_modifier(&mod, str, evsel))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
evsel->attr.exclude_user = mod.eu;
|
|
|
|
evsel->attr.exclude_kernel = mod.ek;
|
|
|
|
evsel->attr.exclude_hv = mod.eh;
|
|
|
|
evsel->attr.precise_ip = mod.precise;
|
|
|
|
evsel->attr.exclude_host = mod.eH;
|
|
|
|
evsel->attr.exclude_guest = mod.eG;
|
|
|
|
evsel->exclude_GH = mod.exclude_GH;
|
2012-10-10 22:39:03 +07:00
|
|
|
evsel->sample_read = mod.sample_read;
|
perf tools: Add support for pinned modifier
This commit adds support for a new modifier "D", which requests that the
event, or group of events, be pinned to the PMU.
The "p" modifier is already taken for precise, and "P" may be used in
future to mean "fully precise".
So we use "D", which stands for pinneD - and looks like a padlock, or if
you're using the ":D" syntax perf smiles at you.
This is an oft-requested feature from our HW folks, who want to be able
to run a large number of events, but also want 100% accurate results for
instructions per cycle.
Comparison of results with and without pinning:
$ perf stat -e '{cycles,instructions}:D' -e cycles,instructions,...
79,590,480,683 cycles # 0.000 GHz
166,123,716,524 instructions # 2.09 insns per cycle
# 0.11 stalled cycles per insn
79,352,134,463 cycles # 0.000 GHz [11.11%]
165,178,301,818 instructions # 2.08 insns per cycle
# 0.11 stalled cycles per insn [11.13%]
As you can see although perf does a very good job of scaling the values
in the non-pinned case, there is some small discrepancy.
The patch is fairly straight forward, the one detail is that we need to
make sure we only request pinning for the group leader when we have a
group.
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Tested-by: Jiri Olsa <jolsa@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1375795686-4226-1-git-send-email-michael@ellerman.id.au
[ Use perf_evsel__is_group_leader instead of open coded equivalent, as
suggested by Jiri Olsa ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2013-08-06 20:28:05 +07:00
|
|
|
|
|
|
|
if (perf_evsel__is_group_leader(evsel))
|
|
|
|
evsel->attr.pinned = mod.pinned;
|
2012-03-16 02:09:15 +07:00
|
|
|
}
|
2011-04-27 09:06:33 +07:00
|
|
|
|
2009-07-01 10:04:34 +07:00
|
|
|
return 0;
|
|
|
|
}
|
2009-05-26 16:10:09 +07:00
|
|
|
|
2012-08-17 02:10:21 +07:00
|
|
|
int parse_events_name(struct list_head *list, char *name)
|
|
|
|
{
|
|
|
|
struct perf_evsel *evsel;
|
|
|
|
|
2014-01-10 20:37:27 +07:00
|
|
|
__evlist__for_each(list, evsel) {
|
2012-08-17 02:10:21 +07:00
|
|
|
if (!evsel->name)
|
|
|
|
evsel->name = strdup(name);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-10-07 22:08:50 +07:00
|
|
|
static int
|
|
|
|
comp_pmu(const void *p1, const void *p2)
|
|
|
|
{
|
|
|
|
struct perf_pmu_event_symbol *pmu1 = (struct perf_pmu_event_symbol *) p1;
|
|
|
|
struct perf_pmu_event_symbol *pmu2 = (struct perf_pmu_event_symbol *) p2;
|
|
|
|
|
|
|
|
return strcmp(pmu1->symbol, pmu2->symbol);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void perf_pmu__parse_cleanup(void)
|
|
|
|
{
|
|
|
|
if (perf_pmu_events_list_num > 0) {
|
|
|
|
struct perf_pmu_event_symbol *p;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < perf_pmu_events_list_num; i++) {
|
|
|
|
p = perf_pmu_events_list + i;
|
|
|
|
free(p->symbol);
|
|
|
|
}
|
|
|
|
free(perf_pmu_events_list);
|
|
|
|
perf_pmu_events_list = NULL;
|
|
|
|
perf_pmu_events_list_num = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Store the freshly allocated string 'str' into the current slot 'p'
 * and tag it with 'stype'. 'str' is a strdup/strndup result, so NULL
 * means allocation failure: jump to the enclosing function's 'err'
 * label for cleanup.
 */
#define SET_SYMBOL(str, stype)		\
do {					\
	p->symbol = str;		\
	if (!p->symbol)			\
		goto err;		\
	p->type = stype;		\
} while (0)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Read the pmu events list from sysfs
|
|
|
|
* Save it into perf_pmu_events_list
|
|
|
|
*/
|
|
|
|
static void perf_pmu__parse_init(void)
|
|
|
|
{
|
|
|
|
|
|
|
|
struct perf_pmu *pmu = NULL;
|
|
|
|
struct perf_pmu_alias *alias;
|
|
|
|
int len = 0;
|
|
|
|
|
|
|
|
pmu = perf_pmu__find("cpu");
|
|
|
|
if ((pmu == NULL) || list_empty(&pmu->aliases)) {
|
|
|
|
perf_pmu_events_list_num = -1;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
list_for_each_entry(alias, &pmu->aliases, list) {
|
|
|
|
if (strchr(alias->name, '-'))
|
|
|
|
len++;
|
|
|
|
len++;
|
|
|
|
}
|
|
|
|
perf_pmu_events_list = malloc(sizeof(struct perf_pmu_event_symbol) * len);
|
|
|
|
if (!perf_pmu_events_list)
|
|
|
|
return;
|
|
|
|
perf_pmu_events_list_num = len;
|
|
|
|
|
|
|
|
len = 0;
|
|
|
|
list_for_each_entry(alias, &pmu->aliases, list) {
|
|
|
|
struct perf_pmu_event_symbol *p = perf_pmu_events_list + len;
|
|
|
|
char *tmp = strchr(alias->name, '-');
|
|
|
|
|
|
|
|
if (tmp != NULL) {
|
|
|
|
SET_SYMBOL(strndup(alias->name, tmp - alias->name),
|
|
|
|
PMU_EVENT_SYMBOL_PREFIX);
|
|
|
|
p++;
|
|
|
|
SET_SYMBOL(strdup(++tmp), PMU_EVENT_SYMBOL_SUFFIX);
|
|
|
|
len += 2;
|
|
|
|
} else {
|
|
|
|
SET_SYMBOL(strdup(alias->name), PMU_EVENT_SYMBOL);
|
|
|
|
len++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
qsort(perf_pmu_events_list, len,
|
|
|
|
sizeof(struct perf_pmu_event_symbol), comp_pmu);
|
|
|
|
|
|
|
|
return;
|
|
|
|
err:
|
|
|
|
perf_pmu__parse_cleanup();
|
|
|
|
}
|
|
|
|
|
|
|
|
enum perf_pmu_event_symbol_type
|
|
|
|
perf_pmu__parse_check(const char *name)
|
|
|
|
{
|
|
|
|
struct perf_pmu_event_symbol p, *r;
|
|
|
|
|
|
|
|
/* scan kernel pmu events from sysfs if needed */
|
|
|
|
if (perf_pmu_events_list_num == 0)
|
|
|
|
perf_pmu__parse_init();
|
|
|
|
/*
|
|
|
|
* name "cpu" could be prefix of cpu-cycles or cpu// events.
|
|
|
|
* cpu-cycles has been handled by hardcode.
|
|
|
|
* So it must be cpu// events, not kernel pmu event.
|
|
|
|
*/
|
|
|
|
if ((perf_pmu_events_list_num <= 0) || !strcmp(name, "cpu"))
|
|
|
|
return PMU_EVENT_SYMBOL_ERR;
|
|
|
|
|
|
|
|
p.symbol = strdup(name);
|
|
|
|
r = bsearch(&p, perf_pmu_events_list,
|
|
|
|
(size_t) perf_pmu_events_list_num,
|
|
|
|
sizeof(struct perf_pmu_event_symbol), comp_pmu);
|
|
|
|
free(p.symbol);
|
|
|
|
return r ? r->type : PMU_EVENT_SYMBOL_ERR;
|
|
|
|
}
|
|
|
|
|
2012-06-15 13:31:40 +07:00
|
|
|
static int parse_events__scanner(const char *str, void *data, int start_token)
|
2009-07-01 10:04:34 +07:00
|
|
|
{
|
2012-03-16 02:09:15 +07:00
|
|
|
YY_BUFFER_STATE buffer;
|
2012-06-15 13:31:39 +07:00
|
|
|
void *scanner;
|
2012-06-15 13:31:38 +07:00
|
|
|
int ret;
|
2009-09-12 04:19:45 +07:00
|
|
|
|
2012-06-15 13:31:40 +07:00
|
|
|
ret = parse_events_lex_init_extra(start_token, &scanner);
|
2012-06-15 13:31:39 +07:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
buffer = parse_events__scan_string(str, scanner);
|
2009-06-06 14:58:57 +07:00
|
|
|
|
2012-05-21 14:12:50 +07:00
|
|
|
#ifdef PARSER_DEBUG
|
|
|
|
parse_events_debug = 1;
|
|
|
|
#endif
|
2012-06-15 13:31:39 +07:00
|
|
|
ret = parse_events_parse(data, scanner);
|
|
|
|
|
|
|
|
parse_events__flush_buffer(buffer, scanner);
|
|
|
|
parse_events__delete_buffer(buffer, scanner);
|
|
|
|
parse_events_lex_destroy(scanner);
|
|
|
|
return ret;
|
|
|
|
}
|
2009-09-12 04:19:45 +07:00
|
|
|
|
2012-06-15 13:31:40 +07:00
|
|
|
/*
|
|
|
|
* parse event config string, return a list of event terms.
|
|
|
|
*/
|
|
|
|
int parse_events_terms(struct list_head *terms, const char *str)
|
|
|
|
{
|
2013-01-19 02:56:57 +07:00
|
|
|
struct parse_events_terms data = {
|
2012-06-15 13:31:40 +07:00
|
|
|
.terms = NULL,
|
|
|
|
};
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = parse_events__scanner(str, &data, PE_START_TERMS);
|
|
|
|
if (!ret) {
|
|
|
|
list_splice(data.terms, terms);
|
2013-12-28 02:55:14 +07:00
|
|
|
zfree(&data.terms);
|
2012-06-15 13:31:40 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-07-04 20:20:23 +07:00
|
|
|
if (data.terms)
|
|
|
|
parse_events__free_terms(data.terms);
|
2012-06-15 13:31:40 +07:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2013-01-15 20:39:51 +07:00
|
|
|
int parse_events(struct perf_evlist *evlist, const char *str)
|
2012-06-15 13:31:39 +07:00
|
|
|
{
|
2013-01-19 02:56:57 +07:00
|
|
|
struct parse_events_evlist data = {
|
2012-06-15 13:31:39 +07:00
|
|
|
.list = LIST_HEAD_INIT(data.list),
|
|
|
|
.idx = evlist->nr_entries,
|
|
|
|
};
|
|
|
|
int ret;
|
2009-09-12 04:19:45 +07:00
|
|
|
|
2012-06-15 13:31:40 +07:00
|
|
|
ret = parse_events__scanner(str, &data, PE_START_EVENTS);
|
2014-10-07 22:08:50 +07:00
|
|
|
perf_pmu__parse_cleanup();
|
2012-03-16 02:09:15 +07:00
|
|
|
if (!ret) {
|
2012-06-15 13:31:38 +07:00
|
|
|
int entries = data.idx - evlist->nr_entries;
|
|
|
|
perf_evlist__splice_list_tail(evlist, &data.list, entries);
|
2013-01-22 16:09:29 +07:00
|
|
|
evlist->nr_groups += data.nr_groups;
|
2012-03-16 02:09:15 +07:00
|
|
|
return 0;
|
|
|
|
}
|
2009-09-12 04:19:45 +07:00
|
|
|
|
2012-03-21 01:15:40 +07:00
|
|
|
/*
|
|
|
|
* There are 2 users - builtin-record and builtin-test objects.
|
|
|
|
* Both call perf_evlist__delete in case of error, so we dont
|
|
|
|
* need to bother.
|
|
|
|
*/
|
2009-09-12 04:19:45 +07:00
|
|
|
return ret;
|
2009-05-26 16:10:09 +07:00
|
|
|
}
|
|
|
|
|
2011-07-14 16:25:32 +07:00
|
|
|
int parse_events_option(const struct option *opt, const char *str,
|
2012-09-11 05:15:03 +07:00
|
|
|
int unset __maybe_unused)
|
2011-07-14 16:25:32 +07:00
|
|
|
{
|
|
|
|
struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
|
2013-01-15 20:39:51 +07:00
|
|
|
int ret = parse_events(evlist, str);
|
2012-10-27 03:30:06 +07:00
|
|
|
|
|
|
|
if (ret) {
|
|
|
|
fprintf(stderr, "invalid or unsupported event: '%s'\n", str);
|
|
|
|
fprintf(stderr, "Run 'perf list' for a list of valid events\n");
|
|
|
|
}
|
|
|
|
return ret;
|
2011-07-14 16:25:32 +07:00
|
|
|
}
|
|
|
|
|
2011-01-12 05:56:53 +07:00
|
|
|
int parse_filter(const struct option *opt, const char *str,
|
2012-09-11 05:15:03 +07:00
|
|
|
int unset __maybe_unused)
|
2009-10-15 10:22:07 +07:00
|
|
|
{
|
2011-01-12 05:56:53 +07:00
|
|
|
struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
|
2011-01-04 01:39:04 +07:00
|
|
|
struct perf_evsel *last = NULL;
|
2009-10-15 10:22:07 +07:00
|
|
|
|
2011-01-12 05:56:53 +07:00
|
|
|
if (evlist->nr_entries > 0)
|
2012-08-15 02:42:15 +07:00
|
|
|
last = perf_evlist__last(evlist);
|
2011-01-04 01:39:04 +07:00
|
|
|
|
|
|
|
if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) {
|
2009-10-15 10:22:07 +07:00
|
|
|
fprintf(stderr,
|
2014-10-02 01:05:32 +07:00
|
|
|
"--filter option should follow a -e tracepoint option\n");
|
2009-10-15 10:22:07 +07:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2011-01-04 01:39:04 +07:00
|
|
|
last->filter = strdup(str);
|
|
|
|
if (last->filter == NULL) {
|
2009-10-15 10:22:07 +07:00
|
|
|
fprintf(stderr, "not enough memory to hold filter string\n");
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-06-06 17:24:17 +07:00
|
|
|
/* Human-readable labels indexed by the PERF_TYPE_* attr.type values. */
static const char * const event_type_descriptors[] = {
	"Hardware event",
	"Software event",
	"Tracepoint event",
	"Hardware cache event",
	"Raw hardware event descriptor",
	"Hardware breakpoint",
};
|
|
|
|
|
2009-07-21 23:20:22 +07:00
|
|
|
/*
 * Print the events from <debugfs_mount_point>/tracing/events
 */

void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
			     bool name_only)
{
	DIR *sys_dir, *evt_dir;
	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
	char evt_path[MAXPATHLEN];
	char dir_path[MAXPATHLEN];

	sys_dir = opendir(tracing_events_path);
	if (!sys_dir)
		return;

	for_each_subsystem(sys_dir, sys_dirent, sys_next) {
		/* NULL glob matches everything */
		if (subsys_glob != NULL &&
		    !strglobmatch(sys_dirent.d_name, subsys_glob))
			continue;

		snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
			 sys_dirent.d_name);
		evt_dir = opendir(dir_path);
		if (!evt_dir)
			continue;

		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
			if (event_glob != NULL &&
			    !strglobmatch(evt_dirent.d_name, event_glob))
				continue;

			/* name_only: bare space-separated "sys:event" names */
			if (name_only) {
				printf("%s:%s ", sys_dirent.d_name, evt_dirent.d_name);
				continue;
			}

			snprintf(evt_path, MAXPATHLEN, "%s:%s",
				 sys_dirent.d_name, evt_dirent.d_name);
			printf(" %-50s [%s]\n", evt_path,
				event_type_descriptors[PERF_TYPE_TRACEPOINT]);
		}
		closedir(evt_dir);
	}
	closedir(sys_dir);
}
|
|
|
|
|
2011-01-03 23:50:45 +07:00
|
|
|
/*
 * Check whether event is in <debugfs_mount_point>/tracing/events
 */

/*
 * Returns 1 when 'event_string' ("sys:event") names an existing
 * tracepoint under the tracing events directory, 0 otherwise.
 */
int is_valid_tracepoint(const char *event_string)
{
	DIR *sys_dir, *evt_dir;
	struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
	char evt_path[MAXPATHLEN];
	char dir_path[MAXPATHLEN];

	sys_dir = opendir(tracing_events_path);
	if (!sys_dir)
		return 0;

	for_each_subsystem(sys_dir, sys_dirent, sys_next) {

		snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
			 sys_dirent.d_name);
		evt_dir = opendir(dir_path);
		if (!evt_dir)
			continue;

		for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
			snprintf(evt_path, MAXPATHLEN, "%s:%s",
				 sys_dirent.d_name, evt_dirent.d_name);
			if (!strcmp(evt_path, event_string)) {
				/* found: close both dirs before returning */
				closedir(evt_dir);
				closedir(sys_dir);
				return 1;
			}
		}
		closedir(evt_dir);
	}
	closedir(sys_dir);
	return 0;
}
|
|
|
|
|
2013-08-27 09:41:53 +07:00
|
|
|
/*
 * Probe whether the kernel/hardware accepts an event of the given
 * type/config by actually trying to open it (disabled) and closing it
 * again.
 */
static bool is_event_supported(u8 type, unsigned config)
{
	bool ret = true;
	int open_return;
	struct perf_evsel *evsel;
	struct perf_event_attr attr = {
		.type = type,
		.config = config,
		.disabled = 1,	/* just probing; never actually count */
	};
	/* stack-allocated single-entry thread map (thread 0) */
	struct {
		struct thread_map map;
		int threads[1];
	} tmap = {
		.map.nr	 = 1,
		.threads = { 0 },
	};

	evsel = perf_evsel__new(&attr);
	if (evsel) {
		open_return = perf_evsel__open(evsel, NULL, &tmap.map);
		ret = open_return >= 0;

		if (open_return == -EACCES) {
			/*
			 * This happens if the paranoid value
			 * /proc/sys/kernel/perf_event_paranoid is set to 2
			 * Re-run with exclude_kernel set; we don't do that
			 * by default as some ARM machines do not support it.
			 *
			 */
			evsel->attr.exclude_kernel = 1;
			ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0;
		}
		perf_evsel__delete(evsel);
	}

	return ret;
}
|
|
|
|
|
2012-07-04 05:00:44 +07:00
|
|
|
/* Print every supported symbol of 'type' from the syms[0..max) table. */
static void __print_events_type(u8 type, struct event_symbol *syms,
				unsigned max)
{
	char name[64];
	unsigned idx;

	for (idx = 0; idx < max; idx++) {
		struct event_symbol *sym = &syms[idx];

		/* config value == table index for HW/SW symbol tables */
		if (!is_event_supported(type, idx))
			continue;

		if (strlen(sym->alias))
			snprintf(name, sizeof(name), "%s OR %s",
				 sym->symbol, sym->alias);
		else
			snprintf(name, sizeof(name), "%s", sym->symbol);

		printf(" %-50s [%s]\n", name, event_type_descriptors[type]);
	}
}
|
|
|
|
|
2012-07-04 05:00:44 +07:00
|
|
|
/* Print all events of one attr.type (software vs. hardware table). */
void print_events_type(u8 type)
{
	if (type == PERF_TYPE_SOFTWARE)
		__print_events_type(type, event_symbols_sw, PERF_COUNT_SW_MAX);
	else
		__print_events_type(type, event_symbols_hw, PERF_COUNT_HW_MAX);
}
|
|
|
|
|
2012-08-09 21:31:52 +07:00
|
|
|
/*
 * Print every valid and supported hardware-cache event (type:op:result
 * combination), optionally filtered by 'event_glob'. Returns the number
 * of events printed.
 */
int print_hwcache_events(const char *event_glob, bool name_only)
{
	unsigned int type, op, i, printed = 0;
	char name[64];

	for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) {
		for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) {
			/* skip invalid cache type */
			if (!perf_evsel__is_cache_op_valid(type, op))
				continue;

			for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
				__perf_evsel__hw_cache_type_op_res_name(type, op, i,
									name, sizeof(name));
				if (event_glob != NULL && !strglobmatch(name, event_glob))
					continue;

				/* config encoding: type | op<<8 | result<<16 */
				if (!is_event_supported(PERF_TYPE_HW_CACHE,
							type | (op << 8) | (i << 16)))
					continue;

				if (name_only)
					printf("%s ", name);
				else
					printf(" %-50s [%s]\n", name,
					       event_type_descriptors[PERF_TYPE_HW_CACHE]);
				++printed;
			}
		}
	}

	if (printed)
		printf("\n");
	return printed;
}
|
|
|
|
|
2012-07-04 05:00:44 +07:00
|
|
|
static void print_symbol_events(const char *event_glob, unsigned type,
|
2012-08-09 21:31:52 +07:00
|
|
|
struct event_symbol *syms, unsigned max,
|
|
|
|
bool name_only)
|
2009-05-26 16:10:09 +07:00
|
|
|
{
|
2012-07-04 05:00:44 +07:00
|
|
|
unsigned i, printed = 0;
|
2011-04-30 03:52:42 +07:00
|
|
|
char name[MAX_NAME_LEN];
|
2009-05-26 16:10:09 +07:00
|
|
|
|
2012-07-04 05:00:44 +07:00
|
|
|
for (i = 0; i < max; i++, syms++) {
|
2011-02-18 00:38:58 +07:00
|
|
|
|
2014-12-18 03:24:45 +07:00
|
|
|
if (event_glob != NULL &&
|
2011-02-18 00:38:58 +07:00
|
|
|
!(strglobmatch(syms->symbol, event_glob) ||
|
|
|
|
(syms->alias && strglobmatch(syms->alias, event_glob))))
|
|
|
|
continue;
|
2009-05-26 16:10:09 +07:00
|
|
|
|
2013-08-27 09:41:53 +07:00
|
|
|
if (!is_event_supported(type, i))
|
|
|
|
continue;
|
|
|
|
|
2012-08-09 21:31:52 +07:00
|
|
|
if (name_only) {
|
|
|
|
printf("%s ", syms->symbol);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2009-06-22 18:14:28 +07:00
|
|
|
if (strlen(syms->alias))
|
2011-04-30 03:52:42 +07:00
|
|
|
snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias);
|
2009-06-22 18:14:28 +07:00
|
|
|
else
|
2011-04-30 03:52:42 +07:00
|
|
|
strncpy(name, syms->symbol, MAX_NAME_LEN);
|
2009-05-26 16:10:09 +07:00
|
|
|
|
2012-07-04 05:00:44 +07:00
|
|
|
printf(" %-50s [%s]\n", name, event_type_descriptors[type]);
|
|
|
|
|
|
|
|
printed++;
|
2009-05-26 16:10:09 +07:00
|
|
|
}
|
|
|
|
|
2012-07-04 05:00:44 +07:00
|
|
|
if (printed)
|
2011-02-18 00:38:58 +07:00
|
|
|
printf("\n");
|
2012-07-04 05:00:44 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Print the help text for the event symbols: hardware, software,
 * hw-cache, PMU, raw, breakpoint and tracepoint events, in that order.
 *
 * event_glob: optional glob filter; only matching events are listed and
 *             the fixed raw/breakpoint usage lines are suppressed.
 * name_only:  print bare event names (for shell completion) instead of
 *             the formatted table.
 */
void print_events(const char *event_glob, bool name_only)
{
	if (!name_only) {
		printf("\n");
		printf("List of pre-defined events (to be used in -e):\n");
	}

	print_symbol_events(event_glob, PERF_TYPE_HARDWARE,
			    event_symbols_hw, PERF_COUNT_HW_MAX, name_only);

	print_symbol_events(event_glob, PERF_TYPE_SOFTWARE,
			    event_symbols_sw, PERF_COUNT_SW_MAX, name_only);

	print_hwcache_events(event_glob, name_only);

	print_pmu_events(event_glob, name_only);

	/* When filtering, the static usage lines below never match. */
	if (event_glob != NULL)
		return;

	if (!name_only) {
		printf("  %-50s [%s]\n",
		       "rNNN",
		       event_type_descriptors[PERF_TYPE_RAW]);
		printf("  %-50s [%s]\n",
		       "cpu/t1=v1[,t2=v2,t3 ...]/modifier",
		       event_type_descriptors[PERF_TYPE_RAW]);
		printf("   (see 'man perf-list' on how to encode it)\n");
		printf("\n");

		printf("  %-50s [%s]\n",
		       "mem:<addr>[/len][:access]",
		       event_type_descriptors[PERF_TYPE_BREAKPOINT]);
		printf("\n");
	}

	print_tracepoint_events(NULL, NULL, name_only);
}
|
2012-03-16 02:09:16 +07:00
|
|
|
|
2013-01-19 02:29:49 +07:00
|
|
|
int parse_events__is_hardcoded_term(struct parse_events_term *term)
|
2012-03-16 02:09:16 +07:00
|
|
|
{
|
2012-04-25 23:24:57 +07:00
|
|
|
return term->type_term != PARSE_EVENTS__TERM_TYPE_USER;
|
2012-03-16 02:09:16 +07:00
|
|
|
}
|
|
|
|
|
2013-01-19 02:29:49 +07:00
|
|
|
/*
 * Allocate and initialize a parse_events_term.  Exactly one of
 * str/num is stored, selected by type_val.  The config/str pointers
 * are kept as-is (not duplicated).
 *
 * Returns 0 and stores the term in *_term, -ENOMEM on allocation
 * failure, or -EINVAL for an unknown type_val.
 */
static int new_term(struct parse_events_term **_term, int type_val,
		    int type_term, char *config,
		    char *str, u64 num)
{
	struct parse_events_term *term = zalloc(sizeof(*term));

	if (term == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&term->list);
	term->type_val = type_val;
	term->type_term = type_term;
	term->config = config;

	if (type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
		term->val.num = num;
	} else if (type_val == PARSE_EVENTS__TERM_TYPE_STR) {
		term->val.str = str;
	} else {
		/* unknown value kind: undo the allocation */
		free(term);
		return -EINVAL;
	}

	*_term = term;
	return 0;
}
|
|
|
|
|
2013-01-19 02:29:49 +07:00
|
|
|
/*
 * Build a numeric term (e.g. "config=0x10") of the given term type.
 */
int parse_events_term__num(struct parse_events_term **term,
			   int type_term, char *config, u64 num)
{
	return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM,
			type_term, config, NULL, num);
}
|
|
|
|
|
2013-01-19 02:29:49 +07:00
|
|
|
int parse_events_term__str(struct parse_events_term **term,
|
2012-04-25 23:24:57 +07:00
|
|
|
int type_term, char *config, char *str)
|
|
|
|
{
|
|
|
|
return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term,
|
|
|
|
config, str, 0);
|
|
|
|
}
|
|
|
|
|
2013-01-19 02:29:49 +07:00
|
|
|
int parse_events_term__sym_hw(struct parse_events_term **term,
|
2012-10-10 19:53:17 +07:00
|
|
|
char *config, unsigned idx)
|
|
|
|
{
|
|
|
|
struct event_symbol *sym;
|
|
|
|
|
|
|
|
BUG_ON(idx >= PERF_COUNT_HW_MAX);
|
|
|
|
sym = &event_symbols_hw[idx];
|
|
|
|
|
|
|
|
if (config)
|
|
|
|
return new_term(term, PARSE_EVENTS__TERM_TYPE_STR,
|
|
|
|
PARSE_EVENTS__TERM_TYPE_USER, config,
|
|
|
|
(char *) sym->symbol, 0);
|
|
|
|
else
|
|
|
|
return new_term(term, PARSE_EVENTS__TERM_TYPE_STR,
|
|
|
|
PARSE_EVENTS__TERM_TYPE_USER,
|
|
|
|
(char *) "event", (char *) sym->symbol, 0);
|
|
|
|
}
|
|
|
|
|
2013-01-19 02:29:49 +07:00
|
|
|
int parse_events_term__clone(struct parse_events_term **new,
|
|
|
|
struct parse_events_term *term)
|
2012-06-15 13:31:41 +07:00
|
|
|
{
|
|
|
|
return new_term(new, term->type_val, term->type_term, term->config,
|
|
|
|
term->val.str, term->val.num);
|
|
|
|
}
|
|
|
|
|
2012-03-16 02:09:16 +07:00
|
|
|
void parse_events__free_terms(struct list_head *terms)
|
|
|
|
{
|
2013-01-19 02:29:49 +07:00
|
|
|
struct parse_events_term *term, *h;
|
2012-03-16 02:09:16 +07:00
|
|
|
|
|
|
|
list_for_each_entry_safe(term, h, terms, list)
|
|
|
|
free(term);
|
|
|
|
}
|