// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017, Intel Corporation.
 */

/* Manage metrics and groups of metrics from JSON files */

#include "metricgroup.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "strbuf.h"
#include "pmu.h"
#include "expr.h"
#include "rblist.h"
#include <string.h>
#include <errno.h>
#include "pmu-events/pmu-events.h"
#include "strlist.h"
#include <assert.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <subcmd/parse-options.h>
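
/*
 * Look up the metric_event for @evsel in @metric_events; if @create is
 * true and no node exists yet, allocate and insert one.
 */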
struct metric_event *metricgroup__lookup(struct rblist *metric_events,
					 struct evsel *evsel,
					 bool create)
{
	struct rb_node *nd;
	struct metric_event me = {
		.evsel = evsel
	};

	if (!metric_events)
		return NULL;

	nd = rblist__find(metric_events, &me);
	if (nd)
		return container_of(nd, struct metric_event, nd);
	if (create) {
		rblist__add_node(metric_events, &me);
		nd = rblist__find(metric_events, &me);
		if (nd)
			return container_of(nd, struct metric_event, nd);
	}
	return NULL;
}
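
/*
 * rblist callbacks for the metric_events tree: nodes are ordered by
 * their evsel pointer and new nodes are heap-allocated copies of the
 * lookup template.
 */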
static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
{
	struct metric_event *a = container_of(rb_node,
					      struct metric_event,
					      nd);
	const struct metric_event *b = entry;

	if (a->evsel == b->evsel)
		return 0;
	if ((char *)a->evsel < (char *)b->evsel)
		return -1;
	return +1;
}

static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
					const void *entry)
{
	struct metric_event *me = malloc(sizeof(struct metric_event));

	if (!me)
		return NULL;
	memcpy(me, entry, sizeof(struct metric_event));
	me->evsel = ((struct metric_event *)entry)->evsel;
	INIT_LIST_HEAD(&me->head);
	return &me->nd;
}

static void metricgroup__rblist_init(struct rblist *metric_events)
{
	rblist__init(metric_events);
	metric_events->node_cmp = metric_event_cmp;
	metric_events->node_new = metric_event_new;
}
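
/*
 * One parsed metric: the event identifiers referenced by its expression
 * plus the name, expression and unit used when printing the result.
 */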
struct egroup {
	struct list_head nd;
	int idnum;
	const char **ids;
	const char *metric_name;
	const char *metric_expr;
	const char *metric_unit;
};
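
/*
 * Record one matching event for the candidate group. Returns true once
 * all @idnum events have been collected, leaving @metric_events
 * NULL-terminated and @start pointing at the group's first event.
 */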
static bool record_evsel(int *ind, struct evsel **start,
			 int idnum,
			 struct evsel **metric_events,
			 struct evsel *ev)
{
	metric_events[*ind] = ev;
	if (*ind == 0)
		*start = ev;
	if (++*ind == idnum) {
		metric_events[*ind] = NULL;
		return true;
	}
	return false;
}
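
/*
 * Find the group of events we added earlier in the evlist. Partially
 * overlapping groups are handled by restarting the match when an
 * unexpected event is seen and comparing the first element again.
 */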
static struct evsel *find_evsel_group(struct evlist *perf_evlist,
				      const char **ids,
				      int idnum,
				      struct evsel **metric_events)
{
	struct evsel *ev, *start = NULL;
	int ind = 0;

	evlist__for_each_entry (perf_evlist, ev) {
		if (ev->collect_stat)
			continue;
		if (!strcmp(ev->name, ids[ind])) {
			if (record_evsel(&ind, &start, idnum,
					 metric_events, ev))
				return start;
		} else {
			/*
			 * We saw some other event that is not
			 * in our list of events. Discard
			 * the whole match and start again.
			 */
			ind = 0;
			start = NULL;
			if (!strcmp(ev->name, ids[ind])) {
				if (record_evsel(&ind, &start, idnum,
						 metric_events, ev))
					return start;
			}
		}
	}
	/*
	 * This can happen when an alias expands to multiple
	 * events, like for uncore events.
	 * We don't support this case for now.
	 */
	return NULL;
}
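
/*
 * For each parsed group, locate its events in the evlist, mark them for
 * stat collection and attach a metric_expr to the metric_event of the
 * group's first event.
 */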
static int metricgroup__setup_events(struct list_head *groups,
				     struct evlist *perf_evlist,
				     struct rblist *metric_events_list)
{
	struct metric_event *me;
	struct metric_expr *expr;
	int i = 0;
	int ret = 0;
	struct egroup *eg;
	struct evsel *evsel;

	list_for_each_entry (eg, groups, nd) {
		struct evsel **metric_events;

		/* One extra slot for the NULL terminator. */
		metric_events = calloc(eg->idnum + 1, sizeof(void *));
		if (!metric_events) {
			ret = -ENOMEM;
			break;
		}
		evsel = find_evsel_group(perf_evlist, eg->ids, eg->idnum,
					 metric_events);
		if (!evsel) {
			pr_debug("Cannot resolve %s: %s\n",
					eg->metric_name, eg->metric_expr);
			/* Don't leak the array when the group can't be resolved. */
			free(metric_events);
			continue;
		}
		for (i = 0; i < eg->idnum; i++)
			metric_events[i]->collect_stat = true;
		me = metricgroup__lookup(metric_events_list, evsel, true);
		if (!me) {
			ret = -ENOMEM;
			free(metric_events);
			break;
		}
		expr = malloc(sizeof(struct metric_expr));
		if (!expr) {
			ret = -ENOMEM;
			free(metric_events);
			break;
		}
		expr->metric_expr = eg->metric_expr;
		expr->metric_name = eg->metric_name;
		expr->metric_unit = eg->metric_unit;
		expr->metric_events = metric_events;
		list_add(&expr->nd, &me->head);
	}
	return ret;
}
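
/*
 * Check whether the user-supplied name @list matches metric or group
 * name @n: "all" matches everything, a NULL @n matches only "No_group",
 * and otherwise @list must occur in @n as a complete ';'- or
 * space-delimited component (case-insensitive).
 */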
static bool match_metric(const char *n, const char *list)
{
	int len;
	char *m;

	if (!list)
		return false;
	if (!strcmp(list, "all"))
		return true;
	if (!n)
		return !strcasecmp(list, "No_group");
	len = strlen(list);
	m = strcasestr(n, list);
	if (!m)
		return false;
	if ((m == n || m[-1] == ';' || m[-1] == ' ') &&
	    (m[len] == 0 || m[len] == ';'))
		return true;
	return false;
}
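
/*
 * Per-group printing state: maps a metric group name to the metrics it
 * contains. The helpers below are the rblist callbacks used by
 * metricgroup__print().
 */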
struct mep {
	struct rb_node nd;
	const char *name;
	struct strlist *metrics;
};

static int mep_cmp(struct rb_node *rb_node, const void *entry)
{
	struct mep *a = container_of(rb_node, struct mep, nd);
	struct mep *b = (struct mep *)entry;

	return strcmp(a->name, b->name);
}

static struct rb_node *mep_new(struct rblist *rl __maybe_unused,
			       const void *entry)
{
	struct mep *me = malloc(sizeof(struct mep));

	if (!me)
		return NULL;
	memcpy(me, entry, sizeof(struct mep));
	me->name = strdup(me->name);
	if (!me->name)
		goto out_me;
	me->metrics = strlist__new(NULL, NULL);
	if (!me->metrics)
		goto out_name;
	return &me->nd;
out_name:
	zfree(&me->name);
out_me:
	free(me);
	return NULL;
}

static struct mep *mep_lookup(struct rblist *groups, const char *name)
{
	struct rb_node *nd;
	struct mep me = {
		.name = name
	};
	nd = rblist__find(groups, &me);
	if (nd)
		return container_of(nd, struct mep, nd);
	rblist__add_node(groups, &me);
	nd = rblist__find(groups, &me);
	if (nd)
		return container_of(nd, struct mep, nd);
	return NULL;
}

static void mep_delete(struct rblist *rl __maybe_unused,
		       struct rb_node *nd)
{
	struct mep *me = container_of(nd, struct mep, nd);

	strlist__delete(me->metrics);
	zfree(&me->name);
	free(me);
}

static void metricgroup__print_strlist(struct strlist *metrics, bool raw)
{
	struct str_node *sn;
	int n = 0;

	strlist__for_each_entry (sn, metrics) {
		if (raw)
			printf("%s%s", n > 0 ? " " : "", sn->s);
		else
			printf("  %s\n", sn->s);
		n++;
	}
	if (raw)
		putchar('\n');
}
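
/*
 * List the available metrics and/or metric groups for this CPU,
 * optionally restricted to groups whose name contains @filter, in raw
 * or human-readable form.
 */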
void metricgroup__print(bool metrics, bool metricgroups, char *filter,
			bool raw, bool details)
{
	struct pmu_events_map *map = perf_pmu__find_map(NULL);
	struct pmu_event *pe;
	int i;
	struct rblist groups;
	struct rb_node *node, *next;
	struct strlist *metriclist = NULL;

	if (!map)
		return;

	if (!metricgroups) {
		metriclist = strlist__new(NULL, NULL);
		if (!metriclist)
			return;
	}

	rblist__init(&groups);
	groups.node_new = mep_new;
	groups.node_cmp = mep_cmp;
	groups.node_delete = mep_delete;
	for (i = 0; ; i++) {
		const char *g;
		pe = &map->table[i];

		if (!pe->name && !pe->metric_group && !pe->metric_name)
			break;
		if (!pe->metric_expr)
			continue;
		g = pe->metric_group;
		if (!g && pe->metric_name) {
			if (pe->name)
				continue;
			g = "No_group";
		}
		if (g) {
			char *omg;
			char *mg = strdup(g);

			if (!mg)
				return;
			omg = mg;
			while ((g = strsep(&mg, ";")) != NULL) {
				struct mep *me;
				char *s;

				g = skip_spaces(g);
				if (*g == 0)
					g = "No_group";
				if (filter && !strstr(g, filter))
					continue;
				if (raw)
					s = (char *)pe->metric_name;
				else {
					if (asprintf(&s, "%s\n%*s%s]",
						     pe->metric_name, 8, "[", pe->desc) < 0)
						return;

					if (details) {
						if (asprintf(&s, "%s\n%*s%s]",
							     s, 8, "[", pe->metric_expr) < 0)
							return;
					}
				}

				if (!s)
					continue;

				if (!metricgroups) {
					strlist__add(metriclist, s);
				} else {
					me = mep_lookup(&groups, g);
					if (!me)
						continue;
					strlist__add(me->metrics, s);
				}
			}
			free(omg);
		}
	}

	if (metricgroups && !raw)
		printf("\nMetric Groups:\n\n");
	else if (metrics && !raw)
		printf("\nMetrics:\n\n");

	for (node = rb_first_cached(&groups.entries); node; node = next) {
		struct mep *me = container_of(node, struct mep, nd);

		if (metricgroups)
			printf("%s%s%s", me->name, metrics && !raw ? ":" : "", raw ? " " : "\n");
		if (metrics)
			metricgroup__print_strlist(me->metrics, raw);
		next = rb_next(node);
		rblist__remove_node(&groups, node);
	}
	if (!metricgroups)
		metricgroup__print_strlist(metriclist, raw);
	strlist__delete(metriclist);
}
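
/*
 * Resolve @metric against the JSON metric table: for every matching
 * metric, append the events its expression references to @events as a
 * weak group and queue an egroup on @group_list.
 */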
static int metricgroup__add_metric(const char *metric, struct strbuf *events,
				   struct list_head *group_list)
{
	struct pmu_events_map *map = perf_pmu__find_map(NULL);
	struct pmu_event *pe;
	int ret = -EINVAL;
	int i, j;

	if (!map)
		return 0;

	for (i = 0; ; i++) {
		pe = &map->table[i];

		if (!pe->name && !pe->metric_group && !pe->metric_name)
			break;
		if (!pe->metric_expr)
			continue;
		if (match_metric(pe->metric_group, metric) ||
		    match_metric(pe->metric_name, metric)) {
			const char **ids;
			int idnum;
			struct egroup *eg;
			bool no_group = false;

			pr_debug("metric expr %s for %s\n", pe->metric_expr, pe->metric_name);

			if (expr__find_other(pe->metric_expr,
					     NULL, &ids, &idnum) < 0)
				continue;
			if (events->len > 0)
				strbuf_addf(events, ",");
			for (j = 0; j < idnum; j++) {
				pr_debug("found event %s\n", ids[j]);
				/*
				 * Duration time maps to a software event and can make
				 * groups not count. Always use it outside a
				 * group.
				 */
				if (!strcmp(ids[j], "duration_time")) {
					if (j > 0)
						strbuf_addf(events, "}:W,");
					strbuf_addf(events, "duration_time");
					no_group = true;
					continue;
				}
				strbuf_addf(events, "%s%s",
					j == 0 || no_group ? "{" : ",",
					ids[j]);
				no_group = false;
			}
			if (!no_group)
				strbuf_addf(events, "}:W");

			eg = malloc(sizeof(struct egroup));
			if (!eg) {
				ret = -ENOMEM;
				break;
			}
			eg->ids = ids;
			eg->idnum = idnum;
			eg->metric_name = pe->metric_name;
			eg->metric_expr = pe->metric_expr;
			eg->metric_unit = pe->unit;
			list_add_tail(&eg->nd, group_list);
			ret = 0;
		}
	}
	return ret;
}
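
/*
 * Split a comma-separated list of metric and group names and add each
 * entry via metricgroup__add_metric().
 */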
static int metricgroup__add_metric_list(const char *list, struct strbuf *events,
					struct list_head *group_list)
{
	char *llist, *nlist, *p;
	int ret = -EINVAL;

	nlist = strdup(list);
	if (!nlist)
		return -ENOMEM;
	llist = nlist;

	strbuf_init(events, 100);
	strbuf_addf(events, "%s", "");

	while ((p = strsep(&llist, ",")) != NULL) {
		ret = metricgroup__add_metric(p, events, group_list);
		if (ret == -EINVAL) {
			fprintf(stderr, "Cannot find metric or group `%s'\n",
					p);
			break;
		}
	}
	free(nlist);
	return ret;
}

static void metricgroup__free_egroups(struct list_head *group_list)
{
	struct egroup *eg, *egtmp;
	int i;

	list_for_each_entry_safe (eg, egtmp, group_list, nd) {
		for (i = 0; i < eg->idnum; i++)
			zfree(&eg->ids[i]);
		zfree(&eg->ids);
		list_del_init(&eg->nd);
		free(eg);
	}
}
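
/*
 * Entry point for 'perf stat -M': build an event string for the
 * requested metrics, parse it into the evlist and wire the resulting
 * events up to their metric expressions in @metric_events.
 */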
int metricgroup__parse_groups(const struct option *opt,
			      const char *str,
			      struct rblist *metric_events)
{
	struct parse_events_error parse_error;
	struct evlist *perf_evlist = *(struct evlist **)opt->value;
	struct strbuf extra_events;
	LIST_HEAD(group_list);
	int ret;

	if (metric_events->nr_entries == 0)
		metricgroup__rblist_init(metric_events);
	ret = metricgroup__add_metric_list(str, &extra_events, &group_list);
	if (ret)
		return ret;
	pr_debug("adding %s\n", extra_events.buf);
	memset(&parse_error, 0, sizeof(struct parse_events_error));
	ret = parse_events(perf_evlist, extra_events.buf, &parse_error);
	if (ret) {
		parse_events_print_error(&parse_error, extra_events.buf);
		goto out;
	}
	strbuf_release(&extra_events);
	ret = metricgroup__setup_events(&group_list, perf_evlist,
					metric_events);
out:
	metricgroup__free_egroups(&group_list);
	return ret;
}
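
/*
 * Return true if @metric names a known JSON metric for the current CPU.
 */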
bool metricgroup__has_metric(const char *metric)
{
	struct pmu_events_map *map = perf_pmu__find_map(NULL);
	struct pmu_event *pe;
	int i;

	if (!map)
		return false;

	for (i = 0; ; i++) {
		pe = &map->table[i];

		if (!pe->name && !pe->metric_group && !pe->metric_name)
			break;
		if (!pe->metric_expr)
			continue;
		if (match_metric(pe->metric_name, metric))
			return true;
	}
	return false;
}