mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-24 01:04:07 +07:00
a25e91028a
Ensure we're actually accounting run_delay before we claim that we'll
expose it to the guest. If we're not, then we just pretend like steal
time isn't supported in order to avoid any confusion.

Signed-off-by: Andrew Jones <drjones@redhat.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200622142710.18677-1-drjones@redhat.com
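In practice this surfaces to userspace through the vCPU device attribute
that enables stolen time. A minimal sketch of the VMM side (vcpu_fd and
pvtime_ipa are hypothetical; the group/attribute names are from the KVM
UAPI, and pvtime_ipa must be a 64-byte-aligned IPA inside a memslot):

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: vcpu_fd is an open KVM vCPU file descriptor. */
static int enable_pvtime(int vcpu_fd, uint64_t pvtime_ipa)
{
        struct kvm_device_attr attr = {
                .group = KVM_ARM_VCPU_PVTIME_CTRL,
                .attr  = KVM_ARM_VCPU_PVTIME_IPA,
                .addr  = (uint64_t)(uintptr_t)&pvtime_ipa,
        };

        /* With this patch, a host that isn't accounting run_delay
         * fails the probe with ENXIO instead of exposing steal time
         * that would never move. */
        if (ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr))
                return -errno;

        return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr) ? -errno : 0;
}

On ENXIO the VMM can simply fall back to not advertising steal time to
the guest.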
141 lines
3.0 KiB
C
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Arm Ltd.

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/sched/stat.h>

#include <asm/kvm_mmu.h>
#include <asm/pvclock-abi.h>

#include <kvm/arm_hypercalls.h>

void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        u64 steal;
        __le64 steal_le;
        u64 offset;
        int idx;
        u64 base = vcpu->arch.steal.base;

        if (base == GPA_INVALID)
                return;

        /* Let's do the local bookkeeping */
        steal = vcpu->arch.steal.steal;
        steal += current->sched_info.run_delay - vcpu->arch.steal.last_steal;
        vcpu->arch.steal.last_steal = current->sched_info.run_delay;
        vcpu->arch.steal.steal = steal;

        steal_le = cpu_to_le64(steal);
        idx = srcu_read_lock(&kvm->srcu);
        offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
        kvm_put_guest(kvm, base + offset, steal_le, u64);
        srcu_read_unlock(&kvm->srcu, idx);
}

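/*
 * For reference, a sketch of the shared record targeted by the
 * offsetof() above; the authoritative definition lives in
 * asm/pvclock-abi.h and follows the 64-byte layout of the Arm
 * DEN0057A "Paravirtualized Time" spec:
 *
 *      struct pvclock_vcpu_stolen_time {
 *              __le32 revision;
 *              __le32 attributes;
 *              __le64 stolen_time;     // nanoseconds, written above
 *              u8 padding[48];         // pads the record to 64 bytes
 *      };
 *
 * The 64-byte IS_ALIGNED() check in kvm_arm_pvtime_set_attr() below
 * matches this record size.
 */
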
long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
{
        u32 feature = smccc_get_arg1(vcpu);
        long val = SMCCC_RET_NOT_SUPPORTED;

        switch (feature) {
        case ARM_SMCCC_HV_PV_TIME_FEATURES:
        case ARM_SMCCC_HV_PV_TIME_ST:
                val = SMCCC_RET_SUCCESS;
                break;
        }

        return val;
}

gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
{
        struct pvclock_vcpu_stolen_time init_values = {};
        struct kvm *kvm = vcpu->kvm;
        u64 base = vcpu->arch.steal.base;
        int idx;

        if (base == GPA_INVALID)
                return base;

        /*
         * Start counting stolen time from the time the guest requests
         * the feature enabled.
         */
        vcpu->arch.steal.steal = 0;
        vcpu->arch.steal.last_steal = current->sched_info.run_delay;

        idx = srcu_read_lock(&kvm->srcu);
        kvm_write_guest(kvm, base, &init_values, sizeof(init_values));
        srcu_read_unlock(&kvm->srcu, idx);

        return base;
}

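/*
 * Stolen time is only meaningful when the scheduler actually accounts
 * run_delay: sched_info_on() is true when schedstats or task delay
 * accounting is enabled, and run_delay stays zero otherwise.
 */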
static bool kvm_arm_pvtime_supported(void)
{
        return !!sched_info_on();
}

int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr)
{
        u64 __user *user = (u64 __user *)attr->addr;
        struct kvm *kvm = vcpu->kvm;
        u64 ipa;
        int ret = 0;
        int idx;

        if (!kvm_arm_pvtime_supported() ||
            attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
                return -ENXIO;

        if (get_user(ipa, user))
                return -EFAULT;
        if (!IS_ALIGNED(ipa, 64))
                return -EINVAL;
        if (vcpu->arch.steal.base != GPA_INVALID)
                return -EEXIST;

        /* Check the address is in a valid memslot */
        idx = srcu_read_lock(&kvm->srcu);
        if (kvm_is_error_hva(gfn_to_hva(kvm, ipa >> PAGE_SHIFT)))
                ret = -EINVAL;
        srcu_read_unlock(&kvm->srcu, idx);

        if (!ret)
                vcpu->arch.steal.base = ipa;

        return ret;
}

int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr)
{
        u64 __user *user = (u64 __user *)attr->addr;
        u64 ipa;

        if (!kvm_arm_pvtime_supported() ||
            attr->attr != KVM_ARM_VCPU_PVTIME_IPA)
                return -ENXIO;

        ipa = vcpu->arch.steal.base;

        if (put_user(ipa, user))
                return -EFAULT;
        return 0;
}

int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PVTIME_IPA:
                if (kvm_arm_pvtime_supported())
                        return 0;
        }
        return -ENXIO;
}
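
On the guest side, discovery goes through two SMCCC calls, the second of
which lands in kvm_hypercall_pv_features() above. A sketch modeled on the
Linux guest's paravirt code (exact invocation details may differ by
kernel version):

#include <linux/arm-smccc.h>

/* Sketch: probe the hypervisor for the stolen-time service. */
static bool has_pv_steal_clock(void)
{
        struct arm_smccc_res res;

        /* Does the hypervisor implement PV_TIME_FEATURES at all? */
        arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                             ARM_SMCCC_HV_PV_TIME_FEATURES, &res);
        if (res.a0 != SMCCC_RET_SUCCESS)
                return false;

        /* Is the stolen-time function itself supported? */
        arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_TIME_FEATURES,
                             ARM_SMCCC_HV_PV_TIME_ST, &res);
        return res.a0 == SMCCC_RET_SUCCESS;
}

A successful ARM_SMCCC_HV_PV_TIME_ST call then returns the IPA of the
per-vCPU record that kvm_update_stolen_time() keeps up to date.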