// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on pmu_intel.c file
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};

enum index {
	INDEX_ZERO = 0,
	INDEX_ONE,
	INDEX_TWO,
	INDEX_THREE,
	INDEX_FOUR,
	INDEX_FIVE,
	INDEX_ERROR,
};

/* duplicated from amd_perfmon_event_map, K7 and above should work. */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

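	/*
	 * PERFCTR_CORE exposes the six MSR_F15H_PERF_CTLx/CTRx pairs;
	 * without it only the four legacy K7 counters are available.
	 */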
	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		if (type == PMU_TYPE_COUNTER)
			return MSR_F15H_PERF_CTR;
		else
			return MSR_F15H_PERF_CTL;
	} else {
		if (type == PMU_TYPE_COUNTER)
			return MSR_K7_PERFCTR0;
		else
			return MSR_K7_EVNTSEL0;
	}
}

static enum index msr_to_index(u32 msr)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTR0:
	case MSR_K7_EVNTSEL0:
	case MSR_K7_PERFCTR0:
		return INDEX_ZERO;
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTR1:
	case MSR_K7_EVNTSEL1:
	case MSR_K7_PERFCTR1:
		return INDEX_ONE;
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTR2:
	case MSR_K7_EVNTSEL2:
	case MSR_K7_PERFCTR2:
		return INDEX_TWO;
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTR3:
	case MSR_K7_EVNTSEL3:
	case MSR_K7_PERFCTR3:
		return INDEX_THREE;
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTR4:
		return INDEX_FOUR;
	case MSR_F15H_PERF_CTL5:
	case MSR_F15H_PERF_CTR5:
		return INDEX_FIVE;
	default:
		return INDEX_ERROR;
	}
}

static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	switch (msr) {
	case MSR_F15H_PERF_CTL0:
	case MSR_F15H_PERF_CTL1:
	case MSR_F15H_PERF_CTL2:
	case MSR_F15H_PERF_CTL3:
	case MSR_F15H_PERF_CTL4:
	case MSR_F15H_PERF_CTL5:
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		break;
	case MSR_F15H_PERF_CTR0:
	case MSR_F15H_PERF_CTR1:
	case MSR_F15H_PERF_CTR2:
	case MSR_F15H_PERF_CTR3:
	case MSR_F15H_PERF_CTR4:
	case MSR_F15H_PERF_CTR5:
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		break;
	default:
		return NULL;
	}

	return &pmu->gp_counters[msr_to_index(msr)];
}

static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
				    u8 event_select,
				    u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
		if (amd_event_mapping[i].eventsel == event_select
		    && amd_event_mapping[i].unit_mask == unit_mask)
			break;

	if (i == ARRAY_SIZE(amd_event_mapping))
		return PERF_COUNT_HW_MAX;

	return amd_event_mapping[i].event_type;
}

/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
static unsigned amd_find_fixed_event(int idx)
{
	return PERF_COUNT_HW_MAX;
}

/* check if a PMC is enabled by comparing it against global_ctrl bits. Because
 * AMD CPUs don't have a global_ctrl MSR, all PMCs are enabled (return TRUE).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}

static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
		/*
		 * The idx is contiguous. The MSRs are not. The counter MSRs
		 * are interleaved with the event select MSRs.
		 */
		pmc_idx *= 2;
	}

	return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

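	/* Strip the flag bits (30-31) of the RDPMC index; they have no meaning on AMD. */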
	idx &= ~(3u << 30);

	return (idx >= pmu->nr_arch_gp_counters);
}

/* idx is the ECX register of RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
					    unsigned int idx, u64 *mask)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *counters;

	idx &= ~(3u << 30);
	if (idx >= pmu->nr_arch_gp_counters)
		return NULL;
	counters = pmu->gp_counters;

	return &counters[idx];
}

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	/* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
	return false;
}

static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

	return pmc;
}

static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		msr_info->data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		msr_info->data = pmc->eventsel;
		return 0;
	}

	return 1;
}

static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
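		/*
		 * Fold the written value into the cached count so that the
		 * next read returns 'data' without restarting the perf event.
		 */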
		pmc->counter += data - pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		if (data == pmc->eventsel)
			return 0;
		if (!(data & pmu->reserved_bits)) {
			reprogram_gp_counter(pmc, data);
			return 0;
		}
	}

	return 1;
}

static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	else
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

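	/* AMD general-purpose counters are 48 bits wide. */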
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
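	/* Bit 21 and the upper 32 eventsel bits are treated as reserved. */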
	pmu->reserved_bits = 0xffffffff00200000ull;
	pmu->version = 1;
	/* not applicable to AMD; but clear them to prevent any fallout */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->global_status = 0;
	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}

static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

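	/* The per-vCPU gp_counters[] array is sized by INTEL_PMC_MAX_GENERIC. */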
	BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
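		/*
		 * current_config caches the last programmed event so an
		 * already-created perf_event can be reused where possible.
		 */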
		pmu->gp_counters[i].current_config = 0;
	}
}

static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}
}

struct kvm_pmu_ops amd_pmu_ops = {
	.find_arch_event = amd_find_arch_event,
	.find_fixed_event = amd_find_fixed_event,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};