linux_dsm_epyc7002/arch/powerpc/perf/power8-pmu.c
Athira Rajeev 9d4fc86dcd powerpc/perf: Update Power PMU cache_events to u64 type
Events of type PERF_TYPE_HW_CACHE were described for the Power PMU
as: int (*cache_events)[type][op][result];

where the type, op and result values unpacked from the event attribute
config value are used to generate the raw event code at runtime.

So far the event code values used to create these cache-related events
fit within 32 bits, so the `int` type worked. On Power10, some of the
event codes are 64-bit values, hence update the Power PMU cache_events
to the `u64` type in the `power_pmu` struct. Also propagate this change
to all existing PMU driver code paths that use ppmu->cache_events.
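
For illustration, the type change amounts to widening the cache_events
member of struct power_pmu; a rough before/after sketch (not the literal
diff; the array dimension names are the generic perf cache-event enums
also used by the table in this file):

	/* before: 32-bit event codes fit in an int */
	int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
			   [PERF_COUNT_HW_CACHE_OP_MAX]
			   [PERF_COUNT_HW_CACHE_RESULT_MAX];

	/* after: widened so 64-bit Power10 event codes fit */
	u64 (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
			   [PERF_COUNT_HW_CACHE_OP_MAX]
			   [PERF_COUNT_HW_CACHE_RESULT_MAX];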

Signed-off-by: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/1594996707-3727-4-git-send-email-atrajeev@linux.vnet.ibm.com
2020-07-22 21:56:40 +10:00

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance counter support for POWER8 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 */
#define pr_fmt(fmt) "power8-pmu: " fmt
#include "isa207-common.h"
/*
 * Some power8 event codes.
 */
#define EVENT(_name, _code) _name = _code,
enum {
#include "power8-events-list.h"
};
#undef EVENT
/* MMCRA IFM bits - POWER8 */
#define POWER8_MMCRA_IFM1 0x0000000040000000UL
#define POWER8_MMCRA_IFM2 0x0000000080000000UL
#define POWER8_MMCRA_IFM3 0x00000000C0000000UL
#define POWER8_MMCRA_BHRB_MASK 0x00000000C0000000UL
/*
 * Raw event encoding for PowerISA v2.07 (Power8):
 *
 *        60        56        52        48        44        40        36        32
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   | | [ ]                       [      thresh_cmp     ]   [  thresh_ctl   ]
 *   | |  |                                                          |
 *   | |  *- IFM (Linux)                 thresh start/stop OR FAB match -*
 *   | *- BHRB (Linux)
 *   *- EBB (Linux)
 *
 *        28        24        20        16        12         8         4         0
 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
 *   [   ] [  sample ]   [cache]   [ pmc ]   [unit ]   c     m   [    pmcxsel    ]
 *     |        |           |                            |     |
 *     |        |           |                            |     *- mark
 *     |        |           *- L1/L2/L3 cache_sel        |
 *     |        |                                        |
 *     |        *- sampling mode for marked events       *- combine
 *     |
 *     *- thresh_sel
 *
 * Below uses IBM bit numbering.
 *
 * MMCR1[x:y] = unit        (PMCxUNIT)
 * MMCR1[x]   = combine     (PMCxCOMB)
 *
 * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
 *	# PM_MRK_FAB_RSP_MATCH
 *	MMCR1[20:27] = thresh_ctl   (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
 * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
 *	# PM_MRK_FAB_RSP_MATCH_CYC
 *	MMCR1[20:27] = thresh_ctl   (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
 * else
 *	MMCRA[48:55] = thresh_ctl   (THRESH START/END)
 *
 * if thresh_sel:
 *	MMCRA[45:47] = thresh_sel
 *
 * if thresh_cmp:
 *	MMCRA[22:24] = thresh_cmp[0:2]
 *	MMCRA[25:31] = thresh_cmp[3:9]
 *
 * if unit == 6 or unit == 7
 *	MMCRC[53:55] = cache_sel[1:3]	(L2EVENT_SEL)
 * else if unit == 8 or unit == 9:
 *	if cache_sel[0] == 0: # L3 bank
 *		MMCRC[47:49] = cache_sel[1:3]	(L3EVENT_SEL0)
 *	else if cache_sel[0] == 1:
 *		MMCRC[50:51] = cache_sel[2:3]	(L3EVENT_SEL1)
 * else if cache_sel[1]: # L1 event
 *	MMCR1[16] = cache_sel[2]
 *	MMCR1[17] = cache_sel[3]
 *
 * if mark:
 *	MMCRA[63]    = 1		(SAMPLE_ENABLE)
 *	MMCRA[57:59] = sample[0:2]	(RAND_SAMP_ELIG)
 *	MMCRA[61:62] = sample[3:4]	(RAND_SAMP_MODE)
 *
 * if EBB and BHRB:
 *	MMCRA[32:33] = IFM
 *
 */
/* PowerISA v2.07 format attribute structure */
extern struct attribute_group isa207_pmu_format_group;
/* Table of alternatives, sorted by column 0 */
static const unsigned int event_alternatives[][MAX_ALT] = {
	{ PM_MRK_ST_CMPL,		PM_MRK_ST_CMPL_ALT },
	{ PM_BR_MRK_2PATH,		PM_BR_MRK_2PATH_ALT },
	{ PM_L3_CO_MEPF,		PM_L3_CO_MEPF_ALT },
	{ PM_MRK_DATA_FROM_L2MISS,	PM_MRK_DATA_FROM_L2MISS_ALT },
	{ PM_CMPLU_STALL_ALT,		PM_CMPLU_STALL },
	{ PM_BR_2PATH,			PM_BR_2PATH_ALT },
	{ PM_INST_DISP,			PM_INST_DISP_ALT },
	{ PM_RUN_CYC_ALT,		PM_RUN_CYC },
	{ PM_MRK_FILT_MATCH,		PM_MRK_FILT_MATCH_ALT },
	{ PM_LD_MISS_L1,		PM_LD_MISS_L1_ALT },
	{ PM_RUN_INST_CMPL_ALT,		PM_RUN_INST_CMPL },
};
static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
	int num_alt = 0;

	num_alt = isa207_get_alternatives(event, alt,
					  ARRAY_SIZE(event_alternatives), flags,
					  event_alternatives);

	return num_alt;
}
GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_GCT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL);
GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
GENERIC_EVENT_ATTR(branch-instructions, PM_BRU_FIN);
GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1);
GENERIC_EVENT_ATTR(mem_access, MEM_ACCESS);
CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1);
CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_L1_PREF);
CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_WRITE);
CACHE_EVENT_ATTR(LLC-load-misses, PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3);
CACHE_EVENT_ATTR(LLC-prefetches, PM_L3_PREF_ALL);
CACHE_EVENT_ATTR(LLC-store-misses, PM_L2_ST_MISS);
CACHE_EVENT_ATTR(LLC-stores, PM_L2_ST);
CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(branch-loads, PM_BRU_FIN);
CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS);
static struct attribute *power8_events_attr[] = {
	GENERIC_EVENT_PTR(PM_CYC),
	GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC),
	GENERIC_EVENT_PTR(PM_CMPLU_STALL),
	GENERIC_EVENT_PTR(PM_INST_CMPL),
	GENERIC_EVENT_PTR(PM_BRU_FIN),
	GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
	GENERIC_EVENT_PTR(PM_LD_REF_L1),
	GENERIC_EVENT_PTR(PM_LD_MISS_L1),
	GENERIC_EVENT_PTR(MEM_ACCESS),

	CACHE_EVENT_PTR(PM_LD_MISS_L1),
	CACHE_EVENT_PTR(PM_LD_REF_L1),
	CACHE_EVENT_PTR(PM_L1_PREF),
	CACHE_EVENT_PTR(PM_ST_MISS_L1),
	CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
	CACHE_EVENT_PTR(PM_INST_FROM_L1),
	CACHE_EVENT_PTR(PM_IC_PREF_WRITE),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
	CACHE_EVENT_PTR(PM_DATA_FROM_L3),
	CACHE_EVENT_PTR(PM_L3_PREF_ALL),
	CACHE_EVENT_PTR(PM_L2_ST_MISS),
	CACHE_EVENT_PTR(PM_L2_ST),
	CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
	CACHE_EVENT_PTR(PM_BRU_FIN),
	CACHE_EVENT_PTR(PM_DTLB_MISS),
	CACHE_EVENT_PTR(PM_ITLB_MISS),
	NULL
};

static struct attribute_group power8_pmu_events_group = {
	.name = "events",
	.attrs = power8_events_attr,
};

static const struct attribute_group *power8_pmu_attr_groups[] = {
	&isa207_pmu_format_group,
	&power8_pmu_events_group,
	NULL,
};

static int power8_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES] =			PM_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =	PM_GCT_NOSLOT_CYC,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =	PM_CMPLU_STALL,
	[PERF_COUNT_HW_INSTRUCTIONS] =			PM_INST_CMPL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =		PM_BRU_FIN,
	[PERF_COUNT_HW_BRANCH_MISSES] =			PM_BR_MPRED_CMPL,
	[PERF_COUNT_HW_CACHE_REFERENCES] =		PM_LD_REF_L1,
	[PERF_COUNT_HW_CACHE_MISSES] =			PM_LD_MISS_L1,
};
static u64 power8_bhrb_filter_map(u64 branch_sample_type)
{
	u64 pmu_bhrb_filter = 0;

	/* BHRB and regular PMU events share the same privilege state
	 * filter configuration. BHRB is always recorded along with a
	 * regular PMU event. As the privilege state filter is handled
	 * in the basic PMC configuration of the accompanying regular
	 * PMU event, we ignore any separate BHRB specific request.
	 */

	/* No branch filter requested */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
		return pmu_bhrb_filter;

	/* Invalid branch filter options - HW does not support */
	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
		return -1;

	if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
		pmu_bhrb_filter |= POWER8_MMCRA_IFM1;
		return pmu_bhrb_filter;
	}

	/* Everything else is unsupported */
	return -1;
}

static void power8_config_bhrb(u64 pmu_bhrb_filter)
{
	pmu_bhrb_filter &= POWER8_MMCRA_BHRB_MASK;

	/* Enable BHRB filter in PMU */
	mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}
#define C(x) PERF_COUNT_HW_CACHE_##x
/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static u64 power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
	[ C(L1D) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
			[ C(RESULT_MISS) ] = PM_LD_MISS_L1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_ST_MISS_L1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_PREF,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(L1I) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
			[ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(LL) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
			[ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = PM_L2_ST,
			[ C(RESULT_MISS) ] = PM_L2_ST_MISS,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
			[ C(RESULT_MISS) ] = 0,
		},
	},
	[ C(DTLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_DTLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(ITLB) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = 0,
			[ C(RESULT_MISS) ] = PM_ITLB_MISS,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(BPU) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = PM_BRU_FIN,
			[ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
	[ C(NODE) ] = {
		[ C(OP_READ) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_WRITE) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
		[ C(OP_PREFETCH) ] = {
			[ C(RESULT_ACCESS) ] = -1,
			[ C(RESULT_MISS) ] = -1,
		},
	},
};
#undef C
static struct power_pmu power8_pmu = {
	.name			= "POWER8",
	.n_counter		= MAX_PMU_COUNTERS,
	.max_alternatives	= MAX_ALT + 1,
	.add_fields		= ISA207_ADD_FIELDS,
	.test_adder		= ISA207_TEST_ADDER,
	.compute_mmcr		= isa207_compute_mmcr,
	.config_bhrb		= power8_config_bhrb,
	.bhrb_filter_map	= power8_bhrb_filter_map,
	.get_constraint		= isa207_get_constraint,
	.get_alternatives	= power8_get_alternatives,
	.get_mem_data_src	= isa207_get_mem_data_src,
	.get_mem_weight		= isa207_get_mem_weight,
	.disable_pmc		= isa207_disable_pmc,
	.flags			= PPMU_HAS_SIER | PPMU_ARCH_207S,
	.n_generic		= ARRAY_SIZE(power8_generic_events),
	.generic_events		= power8_generic_events,
	.cache_events		= &power8_cache_events,
	.attr_groups		= power8_pmu_attr_groups,
	.bhrb_nr		= 32,
};
int init_power8_pmu(void)
{
	int rc;

	if (!cur_cpu_spec->oprofile_cpu_type ||
	    strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
		return -ENODEV;

	rc = register_power_pmu(&power8_pmu);
	if (rc)
		return rc;

	/* Tell userspace that EBB is supported */
	cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;

	if (cpu_has_feature(CPU_FTR_PMAO_BUG))
		pr_info("PMAO restore workaround active.\n");

	return 0;
}