mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-19 08:46:49 +07:00
86895b480a
Add a new --per-node option to aggregate counts per NUMA node for
system-wide mode measurements.

You can specify --per-node in live mode:

  # perf stat -a -I 1000 -e cycles --per-node
  #           time  node  cpus             counts unit events
       1.000542550  N0      20          6,202,097      cycles
       1.000542550  N1      20            639,559      cycles
       2.002040063  N0      20          7,412,495      cycles
       2.002040063  N1      20          2,185,577      cycles
       3.003451699  N0      20          6,508,917      cycles
       3.003451699  N1      20            765,607      cycles
  ...

Or in the record/report stat session:

  # perf stat record -a -I 1000 -e cycles
  #           time             counts unit events
       1.000536937         10,008,468      cycles
       2.002090152          9,578,539      cycles
       3.003625233          7,647,869      cycles
       4.005135036          7,032,086      cycles
  ^C   4.340902364          3,923,893      cycles

  # perf stat report --per-node
  #           time  node  cpus             counts unit events
       1.000536937  N0      20          9,355,086      cycles
       1.000536937  N1      20            653,382      cycles
       2.002090152  N0      20          7,712,838      cycles
       2.002090152  N1      20          1,865,701      cycles
       3.003625233  N0      20          6,604,441      cycles
       3.003625233  N1      20          1,043,428      cycles
       4.005135036  N0      20          6,350,522      cycles
       4.005135036  N1      20            681,564      cycles
       4.340902364  N0      20          3,403,188      cycles
       4.340902364  N1      20            520,705      cycles

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Joe Mario <jmario@redhat.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20190904073415.723-4-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
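The per-node rows above require knowing which NUMA node each measured CPU belongs to. As a rough, self-contained illustration of that lookup (not perf's implementation; the cpu_to_node() helper below is invented for this sketch, and the only assumption is the standard /sys/devices/system/node layout), the following program resolves each CPU's node from sysfs:

/*
 * Illustrative sketch only: map a CPU to its NUMA node by checking which
 * /sys/devices/system/node/node<N>/ directory contains a cpu<M> entry.
 */
#include <dirent.h>
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

static int cpu_to_node(int cpu)
{
        char path[PATH_MAX];
        struct dirent *de;
        DIR *dir = opendir("/sys/devices/system/node");
        int node, found = -1;

        if (!dir)
                return -1;

        while (found < 0 && (de = readdir(dir)) != NULL) {
                /* Only the node<N> directories are interesting. */
                if (sscanf(de->d_name, "node%d", &node) != 1)
                        continue;

                /* A node directory links each CPU it owns as cpu<M>. */
                snprintf(path, sizeof(path),
                         "/sys/devices/system/node/%s/cpu%d", de->d_name, cpu);
                if (access(path, F_OK) == 0)
                        found = node;
        }
        closedir(dir);
        return found;
}

int main(void)
{
        long cpu, nr_cpus = sysconf(_SC_NPROCESSORS_CONF);

        for (cpu = 0; cpu < nr_cpus; cpu++)
                printf("cpu %ld -> node %d\n", cpu, cpu_to_node((int)cpu));
        return 0;
}

perf itself resolves the topology through its internal cpu map helpers; the stat.h header below only declares the aggregation plumbing (AGGR_NODE, aggr_get_id_t) that the new mode hooks into.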
225 lines | 5.4 KiB | C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_STATS_H
#define __PERF_STATS_H

#include <linux/types.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/resource.h>
#include "rblist.h"

struct perf_cpu_map;
struct perf_stat_config;
struct timespec;

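/*
 * Running statistics maintained by update_stats(): sample count (n), mean
 * and sum of squared deltas from the mean (M2, Welford's online algorithm),
 * plus the observed min/max values.
 */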
struct stats {
        double n, mean, M2;
        u64 max, min;
};

enum perf_stat_evsel_id {
        PERF_STAT_EVSEL_ID__NONE = 0,
        PERF_STAT_EVSEL_ID__CYCLES_IN_TX,
        PERF_STAT_EVSEL_ID__TRANSACTION_START,
        PERF_STAT_EVSEL_ID__ELISION_START,
        PERF_STAT_EVSEL_ID__CYCLES_IN_TX_CP,
        PERF_STAT_EVSEL_ID__TOPDOWN_TOTAL_SLOTS,
        PERF_STAT_EVSEL_ID__TOPDOWN_SLOTS_ISSUED,
        PERF_STAT_EVSEL_ID__TOPDOWN_SLOTS_RETIRED,
        PERF_STAT_EVSEL_ID__TOPDOWN_FETCH_BUBBLES,
        PERF_STAT_EVSEL_ID__TOPDOWN_RECOVERY_BUBBLES,
        PERF_STAT_EVSEL_ID__SMI_NUM,
        PERF_STAT_EVSEL_ID__APERF,
        PERF_STAT_EVSEL_ID__MAX,
};

struct perf_stat_evsel {
        struct stats res_stats[3];
        enum perf_stat_evsel_id id;
        u64 *group_data;
};

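/*
 * How counts are aggregated for display; AGGR_NODE (added with the
 * --per-node option) groups system-wide counts per NUMA node.
 */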
enum aggr_mode {
        AGGR_NONE,
        AGGR_GLOBAL,
        AGGR_SOCKET,
        AGGR_DIE,
        AGGR_CORE,
        AGGR_THREAD,
        AGGR_UNSET,
        AGGR_NODE,
};

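/*
 * Event exclusion context (user/kernel/hv/host/idle), derived from the
 * event's exclude_* attributes and used to key per-context shadow values.
 */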
enum {
        CTX_BIT_USER   = 1 << 0,
        CTX_BIT_KERNEL = 1 << 1,
        CTX_BIT_HV     = 1 << 2,
        CTX_BIT_HOST   = 1 << 3,
        CTX_BIT_IDLE   = 1 << 4,
        CTX_BIT_MAX    = 1 << 5,
};

#define NUM_CTX CTX_BIT_MAX

enum stat_type {
        STAT_NONE = 0,
        STAT_NSECS,
        STAT_CYCLES,
        STAT_STALLED_CYCLES_FRONT,
        STAT_STALLED_CYCLES_BACK,
        STAT_BRANCHES,
        STAT_CACHEREFS,
        STAT_L1_DCACHE,
        STAT_L1_ICACHE,
        STAT_LL_CACHE,
        STAT_ITLB_CACHE,
        STAT_DTLB_CACHE,
        STAT_CYCLES_IN_TX,
        STAT_TRANSACTION,
        STAT_ELISION,
        STAT_TOPDOWN_TOTAL_SLOTS,
        STAT_TOPDOWN_SLOTS_ISSUED,
        STAT_TOPDOWN_SLOTS_RETIRED,
        STAT_TOPDOWN_FETCH_BUBBLES,
        STAT_TOPDOWN_RECOVERY_BUBBLES,
        STAT_SMI_NUM,
        STAT_APERF,
        STAT_MAX
};

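/* Shadow-metric runtime values, kept in an rblist. */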
struct runtime_stat {
        struct rblist value_list;
};

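/* Resolves the aggregation id (socket, die, core, NUMA node, ...) for a CPU. */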
typedef int (*aggr_get_id_t)(struct perf_stat_config *config,
                             struct perf_cpu_map *m, int cpu);

struct perf_stat_config {
        enum aggr_mode aggr_mode;
        bool scale;
        bool no_inherit;
        bool identifier;
        bool csv_output;
        bool interval_clear;
        bool metric_only;
        bool null_run;
        bool ru_display;
        bool big_num;
        bool no_merge;
        bool walltime_run_table;
        bool all_kernel;
        bool all_user;
        FILE *output;
        unsigned int interval;
        unsigned int timeout;
        unsigned int initial_delay;
        unsigned int unit_width;
        unsigned int metric_only_len;
        int times;
        int run_count;
        int print_free_counters_hint;
        int print_mixed_hw_group_error;
        struct runtime_stat *stats;
        int stats_num;
        const char *csv_sep;
        struct stats *walltime_nsecs_stats;
        struct rusage ru_data;
        struct perf_cpu_map *aggr_map;
        aggr_get_id_t aggr_get_id;
        struct perf_cpu_map *cpus_aggr_map;
        u64 *walltime_run;
        struct rblist metric_events;
};

void update_stats(struct stats *stats, u64 val);
double avg_stats(struct stats *stats);
double stddev_stats(struct stats *stats);
double rel_stddev_stats(double stddev, double avg);

static inline void init_stats(struct stats *stats)
{
        stats->n = 0.0;
        stats->mean = 0.0;
        stats->M2 = 0.0;
        stats->min = (u64) -1;
        stats->max = 0;
}

struct evsel;
struct evlist;

struct perf_aggr_thread_value {
        struct evsel *counter;
        int id;
        double uval;
        u64 val;
        u64 run;
        u64 ena;
};

bool __perf_evsel_stat__is(struct evsel *evsel,
                           enum perf_stat_evsel_id id);

#define perf_stat_evsel__is(evsel, id) \
        __perf_evsel_stat__is(evsel, PERF_STAT_EVSEL_ID__ ## id)

extern struct runtime_stat rt_stat;
extern struct stats walltime_nsecs_stats;

typedef void (*print_metric_t)(struct perf_stat_config *config,
                               void *ctx, const char *color, const char *unit,
                               const char *fmt, double val);
typedef void (*new_line_t)(struct perf_stat_config *config, void *ctx);

void runtime_stat__init(struct runtime_stat *st);
void runtime_stat__exit(struct runtime_stat *st);
void perf_stat__init_shadow_stats(void);
void perf_stat__reset_shadow_stats(void);
void perf_stat__reset_shadow_per_stat(struct runtime_stat *st);
void perf_stat__update_shadow_stats(struct evsel *counter, u64 count,
                                    int cpu, struct runtime_stat *st);
struct perf_stat_output_ctx {
        void *ctx;
        print_metric_t print_metric;
        new_line_t new_line;
        bool force_header;
};

void perf_stat__print_shadow_stats(struct perf_stat_config *config,
                                   struct evsel *evsel,
                                   double avg, int cpu,
                                   struct perf_stat_output_ctx *out,
                                   struct rblist *metric_events,
                                   struct runtime_stat *st);
void perf_stat__collect_metric_expr(struct evlist *);

int perf_evlist__alloc_stats(struct evlist *evlist, bool alloc_raw);
void perf_evlist__free_stats(struct evlist *evlist);
void perf_evlist__reset_stats(struct evlist *evlist);
void perf_evlist__reset_prev_raw_counts(struct evlist *evlist);

int perf_stat_process_counter(struct perf_stat_config *config,
                              struct evsel *counter);
struct perf_tool;
union perf_event;
struct perf_session;
struct target;

int perf_event__process_stat_event(struct perf_session *session,
                                   union perf_event *event);

size_t perf_event__fprintf_stat(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_stat_round(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_stat_config(union perf_event *event, FILE *fp);

int create_perf_stat_counter(struct evsel *evsel,
                             struct perf_stat_config *config,
                             struct target *target);
void
perf_evlist__print_counters(struct evlist *evlist,
                            struct perf_stat_config *config,
                            struct target *_target,
                            struct timespec *ts,
                            int argc, const char **argv);
#endif