KVM: arm/arm64: Remove pmc->bitmask
We currently use pmc->bitmask to determine the width of the pmc; however, it is superfluous, as the pmc index already tells us whether the pmc is a cycle counter or an event counter, and the architecture clearly defines the widths of these counters. Let's remove the bitmask to simplify the code.

Signed-off-by: Andrew Murray <andrew.murray@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
commit 218907cbc2
parent 30d97754b2
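The reasoning is easy to check outside the kernel. Below is a minimal standalone sketch (not part of the patch) of how the counter width now falls out of the counter index alone, mirroring the kvm_pmu_idx_is_64bit() helper the patch introduces; it assumes the architectural values ARMV8_PMU_CYCLE_IDX == 31 and PMCR_EL0.LC at bit 6:

/* Standalone sketch, not kernel code: counter width derived from the
 * counter index. Assumes ARMV8_PMU_CYCLE_IDX == 31 and PMCR_EL0.LC at
 * bit 6, per the ARMv8 PMU architecture.
 */
#include <inttypes.h>
#include <stdio.h>

#define ARMV8_PMU_CYCLE_IDX	31
#define ARMV8_PMU_PMCR_LC	(1U << 6)	/* 64-bit cycle counter enable */

static int counter_is_64bit(uint64_t select_idx, uint64_t pmcr)
{
	/* Only the cycle counter, and only with PMCR_EL0.LC set, is 64 bits. */
	return select_idx == ARMV8_PMU_CYCLE_IDX && (pmcr & ARMV8_PMU_PMCR_LC);
}

int main(void)
{
	uint64_t counter = 0x1ffffffffULL;	/* value wider than 32 bits */

	/* Event counters (idx 0..30) always truncate to 32 bits. */
	if (!counter_is_64bit(5, ARMV8_PMU_PMCR_LC))
		printf("event counter: 0x%" PRIx64 "\n", counter & 0xffffffffULL);

	/* The cycle counter keeps all 64 bits when LC is set. */
	if (counter_is_64bit(ARMV8_PMU_CYCLE_IDX, ARMV8_PMU_PMCR_LC))
		printf("cycle counter: 0x%" PRIx64 "\n", counter);

	return 0;
}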
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -17,7 +17,6 @@
 struct kvm_pmc {
 	u8 idx;	/* index into the pmu->pmc array */
 	struct perf_event *perf_event;
-	u64 bitmask;
 };
 
 struct kvm_pmu {
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -14,6 +14,18 @@
 #include <kvm/arm_vgic.h>
 
 static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
+
+/**
+ * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
+ * @vcpu: The vcpu pointer
+ * @select_idx: The counter index
+ */
+static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
+{
+	return (select_idx == ARMV8_PMU_CYCLE_IDX &&
+		__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
+}
+
 /**
  * kvm_pmu_get_counter_value - get PMU counter value
  * @vcpu: The vcpu pointer
@@ -36,7 +48,10 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 		counter += perf_event_read_value(pmc->perf_event, &enabled,
 						 &running);
 
-	return counter & pmc->bitmask;
+	if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
+		counter = lower_32_bits(counter);
+
+	return counter;
 }
 
 /**
@@ -102,7 +117,6 @@ void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
 		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
 		pmu->pmc[i].idx = i;
-		pmu->pmc[i].bitmask = 0xffffffffUL;
 	}
 }
 
@@ -337,8 +351,6 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
  */
 void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 {
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
-	struct kvm_pmc *pmc;
 	u64 mask;
 	int i;
 
@@ -357,11 +369,6 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
 		for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
 			kvm_pmu_set_counter_value(vcpu, i, 0);
 	}
 
-	if (val & ARMV8_PMU_PMCR_LC) {
-		pmc = &pmu->pmc[ARMV8_PMU_CYCLE_IDX];
-		pmc->bitmask = 0xffffffffffffffffUL;
-	}
 }
 
 static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
@@ -409,7 +416,10 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 
 	counter = kvm_pmu_get_counter_value(vcpu, select_idx);
 	/* The initial sample period (overflow count) of an event. */
-	attr.sample_period = (-counter) & pmc->bitmask;
+	if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
+		attr.sample_period = (-counter) & GENMASK(63, 0);
+	else
+		attr.sample_period = (-counter) & GENMASK(31, 0);
 
 	event = perf_event_create_kernel_counter(&attr, -1, current,
 						 kvm_pmu_perf_overflow, pmc);
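A note on the last hunk, since the masking there is the subtle part: the perf sample period is the number of increments left until the emulated counter overflows, i.e. the two's-complement negation of the current value truncated to the counter's width. A tiny standalone example (same assumptions as the sketch above, not kernel code):

/* Standalone sketch, not kernel code: the initial sample period is the
 * distance to overflow at the counter's width.
 */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t counter = 0xfffffff0ULL;		/* 32-bit counter, nearly full */
	uint64_t period = (-counter) & 0xffffffffULL;	/* GENMASK(31, 0) */

	/* 0x10 increments remain before a 32-bit overflow. */
	printf("sample_period = 0x%" PRIx64 "\n", period);
	return 0;
}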