25462f7f52
This patch defines a new function pointer struct (kvm_pmu_ops) to support vPMU for both Intel and AMD. The function pointers defined in this new struct will be linked to the Intel and AMD implementations later. Meanwhile, the struct that maps from event_sel bits to PERF_TYPE_HARDWARE events is renamed and moved from Intel-specific code to kvm_host.h as a common struct.

Reviewed-by: Joerg Roedel <jroedel@suse.de>
Tested-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Wei Huang <wei@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
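For reference, below is a minimal sketch of the two structures the commit message describes, reconstructed from how this file uses them (the intel_arch_events table and the intel_pmu_ops initializer at the bottom). The authoritative declarations live in the headers named by the message (kvm_host.h for the mapping struct, and the KVM pmu header for kvm_pmu_ops); exact field order and spelling there may differ.

/*
 * Sketch only: struct layouts inferred from this file's usage, not copied
 * from the headers that actually declare them.
 */
struct kvm_event_hw_type_mapping {
	u8 eventsel;		/* architectural event select code */
	u8 unit_mask;		/* unit mask qualifier */
	unsigned event_type;	/* corresponding PERF_TYPE_HARDWARE event */
};

struct kvm_pmu_ops {
	unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
				    u8 unit_mask);
	unsigned (*find_fixed_event)(int idx);
	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx);
	int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
};

Presumably the AMD side fills the same hooks with SVM-specific helpers, and the common PMU code dispatches through whichever ops table the vendor module provides; that wiring is added by later patches in the series, outside this file.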
359 lines
9.1 KiB
C
/*
 * KVM PMU support for Intel CPUs
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

static struct kvm_event_hw_type_mapping intel_arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* mapping between fixed pmc index and intel_arch_events array */
static int fixed_pmc_events[] = {1, 0, 7};

static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 new_ctrl = fixed_ctrl_field(data, i);
		u8 old_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, i);
		struct kvm_pmc *pmc;

		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);

		if (old_ctrl == new_ctrl)
			continue;

		reprogram_fixed_counter(pmc, new_ctrl, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

/* function is called when global control register has been updated. */
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}

static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
				      u8 event_select,
				      u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
		if (intel_arch_events[i].eventsel == event_select
		    && intel_arch_events[i].unit_mask == unit_mask
		    && (pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(intel_arch_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[i].event_type;
}

static unsigned intel_find_fixed_event(int idx)
{
	if (idx >= ARRAY_SIZE(fixed_pmc_events))
		return PERF_COUNT_HW_MAX;

	return intel_arch_events[fixed_pmc_events[idx]].event_type;
}

/* check if a PMC is enabled by comparing it with global_ctrl bits. */
static bool intel_pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	if (pmc_idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
				  MSR_P6_EVNTSEL0);
	else {
		u32 idx = pmc_idx - INTEL_PMC_IDX_FIXED;

		return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
	}
}

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int intel_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);

	idx &= ~(3u << 30);

	return (!fixed && idx >= pmu->nr_arch_gp_counters) ||
		(fixed && idx >= pmu->nr_arch_fixed_counters);
}

static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
					    unsigned idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	bool fixed = idx & (1u << 30);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (!fixed && idx >= pmu->nr_arch_gp_counters)
		return NULL;
	if (fixed && idx >= pmu->nr_arch_fixed_counters)
		return NULL;
	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;

	return &counters[idx];
}

static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
			get_fixed_pmc(pmu, msr);
		break;
	}

	return ret;
}

static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		*data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		*data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		*data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		*data = pmu->global_ovf_ctrl;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_fixed_pmc(pmu, msr))) {
			*data = pmc_read_counter(pmc);
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			*data = pmc->eventsel;
			return 0;
		}
	}

	return 1;
}

static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (!(data & pmu->global_ctrl_mask)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			pmu->global_ovf_ctrl = data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_fixed_pmc(pmu, msr))) {
			if (!msr_info->host_initiated)
				data = (s64)(s32)data;
			pmc->counter += data - pmc_read_counter(pmc);
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		}
	}

	return 1;
}

static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 INTEL_PMC_MAX_GENERIC);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min_t(int, edx.split.num_counters_fixed,
			      INTEL_PMC_MAX_FIXED);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
	}

	pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;

	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
}

static void intel_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
	}
}

static void intel_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
		pmc_stop_counter(&pmu->fixed_counters[i]);

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;
}

struct kvm_pmu_ops intel_pmu_ops = {
	.find_arch_event = intel_find_arch_event,
	.find_fixed_event = intel_find_fixed_event,
	.pmc_is_enabled = intel_pmc_is_enabled,
	.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
	.is_valid_msr_idx = intel_is_valid_msr_idx,
	.is_valid_msr = intel_is_valid_msr,
	.get_msr = intel_pmu_get_msr,
	.set_msr = intel_pmu_set_msr,
	.refresh = intel_pmu_refresh,
	.init = intel_pmu_init,
	.reset = intel_pmu_reset,
};