perf/x86: Only show format attributes when supported

Only show the Intel format attributes in sysfs when the feature is actually
supported on the current CPU model. This allows programs to probe which
format attributes are available, and to give users a sensible error message
when they are not.

This handles nearly all cases for Intel attributes since Nehalem, except
the (obscure) case where the model number is known, but PEBS
is disabled in PERF_CAPABILITIES.
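
For instance, a tool can now simply test for the sysfs file before
requesting the feature. A minimal userspace sketch (the path is the
standard perf event_source sysfs location; "ldlat" is just one example
attribute):

	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* The file exists only when the kernel registered the
		 * format attribute for this CPU model. */
		if (access("/sys/bus/event_source/devices/cpu/format/ldlat",
			   R_OK) == 0)
			printf("ldlat format attribute supported\n");
		else
			fprintf(stderr, "ldlat not supported on this CPU\n");
		return 0;
	}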

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20170822185201.9261-2-andi@firstfloor.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>

--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3415,12 +3415,26 @@ static struct attribute *intel_arch3_formats_attr[] = {
 	&format_attr_any.attr,
 	&format_attr_inv.attr,
 	&format_attr_cmask.attr,
+	NULL,
+};
+
+static struct attribute *hsw_format_attr[] = {
 	&format_attr_in_tx.attr,
 	&format_attr_in_tx_cp.attr,
+	&format_attr_offcore_rsp.attr,
+	&format_attr_ldlat.attr,
+	NULL
+};
 
-	&format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */
-	&format_attr_ldlat.attr, /* PEBS load latency */
-	NULL,
+static struct attribute *nhm_format_attr[] = {
+	&format_attr_offcore_rsp.attr,
+	&format_attr_ldlat.attr,
+	NULL
+};
+
+static struct attribute *slm_format_attr[] = {
+	&format_attr_offcore_rsp.attr,
+	NULL
 };
 
 static struct attribute *skl_format_attr[] = {
@@ -3795,6 +3809,7 @@ __init int intel_pmu_init(void)
 	unsigned int unused;
 	struct extra_reg *er;
 	int version, i;
+	struct attribute **extra_attr = NULL;
 
 	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
 		switch (boot_cpu_data.x86) {
@@ -3906,6 +3921,7 @@ __init int intel_pmu_init(void)
 		intel_pmu_pebs_data_source_nhm();
 		x86_add_quirk(intel_nehalem_quirk);
 		x86_pmu.pebs_no_tlb = 1;
+		extra_attr = nhm_format_attr;
 
 		pr_cont("Nehalem events, ");
 		break;
@@ -3941,6 +3957,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.extra_regs = intel_slm_extra_regs;
 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
 		x86_pmu.cpu_events = slm_events_attrs;
+		extra_attr = slm_format_attr;
 		pr_cont("Silvermont events, ");
 		break;
 
@@ -3966,6 +3983,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.lbr_pt_coexist = true;
 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
 		x86_pmu.cpu_events = glm_events_attrs;
+		extra_attr = slm_format_attr;
 		pr_cont("Goldmont events, ");
 		break;
 
@@ -3992,6 +4010,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.cpu_events = glm_events_attrs;
 		/* Goldmont Plus has 4-wide pipeline */
 		event_attr_td_total_slots_scale_glm.event_str = "4";
+		extra_attr = slm_format_attr;
 		pr_cont("Goldmont plus events, ");
 		break;
 
@@ -4021,6 +4040,7 @@ __init int intel_pmu_init(void)
 			X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1);
 
 		intel_pmu_pebs_data_source_nhm();
+		extra_attr = nhm_format_attr;
 		pr_cont("Westmere events, ");
 		break;
 
@@ -4057,6 +4077,8 @@ __init int intel_pmu_init(void)
 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =
 			X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1);
 
+		extra_attr = nhm_format_attr;
+
 		pr_cont("SandyBridge events, ");
 		break;
 
@@ -4091,6 +4113,8 @@ __init int intel_pmu_init(void)
 		intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =
 			X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1);
 
+		extra_attr = nhm_format_attr;
+
 		pr_cont("IvyBridge events, ");
 		break;
 
@@ -4119,6 +4143,8 @@ __init int intel_pmu_init(void)
 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
 		x86_pmu.cpu_events = hsw_events_attrs;
 		x86_pmu.lbr_double_abort = true;
+		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+			hsw_format_attr : nhm_format_attr;
 		pr_cont("Haswell events, ");
 		break;
 
@@ -4155,6 +4181,8 @@ __init int intel_pmu_init(void)
 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
 		x86_pmu.cpu_events = hsw_events_attrs;
 		x86_pmu.limit_period = bdw_limit_period;
+		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+			hsw_format_attr : nhm_format_attr;
 		pr_cont("Broadwell events, ");
 		break;
 
@@ -4173,7 +4201,7 @@ __init int intel_pmu_init(void)
 		/* all extra regs are per-cpu when HT is on */
 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
 		x86_pmu.flags |= PMU_FL_NO_HT_SHARING;
-
+		extra_attr = slm_format_attr;
 		pr_cont("Knights Landing/Mill events, ");
 		break;
 
@@ -4204,9 +4232,9 @@ __init int intel_pmu_init(void)
 
 		x86_pmu.hw_config = hsw_hw_config;
 		x86_pmu.get_event_constraints = hsw_get_event_constraints;
-		x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
-						  skl_format_attr);
-		WARN_ON(!x86_pmu.format_attrs);
+		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
+			hsw_format_attr : nhm_format_attr;
+		extra_attr = merge_attr(extra_attr, skl_format_attr);
 		x86_pmu.cpu_events = hsw_events_attrs;
 		intel_pmu_pebs_data_source_skl(
 			boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
@@ -4229,6 +4257,12 @@ __init int intel_pmu_init(void)
 		}
 	}
 
+	if (version >= 2 && extra_attr) {
+		x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr,
+						  extra_attr);
+		WARN_ON(!x86_pmu.format_attrs);
+	}
+
 	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
 		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
 		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
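
For reference, merge_attr() concatenates two NULL-terminated attribute
arrays into a newly allocated, NULL-terminated array, which is why the
result is checked with WARN_ON() above (the allocation can fail). A
sketch of its logic, modeled on the helper in arch/x86/events/core.c
(kmalloc_array() substituted here for the open-coded allocation):

	#include <linux/slab.h>
	#include <linux/sysfs.h>

	static struct attribute **merge_attr(struct attribute **a,
					     struct attribute **b)
	{
		struct attribute **new;
		int i, j;

		/* Count both arrays, plus one slot for the terminator. */
		for (j = 0; a[j]; j++)
			;
		for (i = 0; b[i]; i++)
			j++;
		j++;

		new = kmalloc_array(j, sizeof(struct attribute *), GFP_KERNEL);
		if (!new)
			return NULL;

		/* Copy a first, then b, then terminate. */
		j = 0;
		for (i = 0; a[i]; i++)
			new[j++] = a[i];
		for (i = 0; b[i]; i++)
			new[j++] = b[i];
		new[j] = NULL;

		return new;
	}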