mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-28 11:18:45 +07:00
c447e76b4c
The MPX feature requires eager KVM FPU restore support. We have verified that MPX cannot work correctly with the current lazy KVM FPU restore mechanism. Eager KVM FPU restore should be enabled if the MPX feature is exposed to the VM.

Signed-off-by: Yang Zhang <yang.z.zhang@intel.com>
Signed-off-by: Liang Li <liang.z.li@intel.com>
[Also activate the FPU on AMD processors. - Paolo]
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
129 lines
3.4 KiB
C
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"

int kvm_update_cpuid(struct kvm_vcpu *vcpu);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries);
void kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.maxphyaddr;
}

static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	if (!static_cpu_has(X86_FEATURE_XSAVE))
		return false;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}

static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
}

static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMEP));
}

static inline bool guest_cpuid_has_smap(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_SMAP));
}

static inline bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_FSGSBASE));
}

static inline bool guest_cpuid_has_osvw(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->ecx & bit(X86_FEATURE_OSVW));
}

static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_PCID));
}

static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	return best && (best->ecx & bit(X86_FEATURE_X2APIC));
}

static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0, 0);
	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
}

static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
	return best && (best->edx & bit(X86_FEATURE_GBPAGES));
}

static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_RTM));
}

static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	return best && (best->ebx & bit(X86_FEATURE_MPX));
}

#endif
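The commit message above says that eager FPU restore must be enabled whenever MPX is exposed to the guest, with Paolo's note adding that the FPU is also activated on AMD processors. Below is a minimal sketch of how a caller of the guest_cpuid_has_mpx() helper from this header could do that; the eager_fpu field and the kvm_x86_ops->fpu_activate() hook are assumptions about the surrounding x86 code of this kernel version, not declarations from this file.

/*
 * Sketch only: ties the MPX CPUID check into eager FPU restore, as
 * described in the commit message. vcpu->arch.eager_fpu and
 * kvm_x86_ops->fpu_activate() are assumed to exist in the surrounding
 * KVM x86 code; they are not declared in cpuid.h.
 */
static void kvm_vcpu_update_eager_fpu(struct kvm_vcpu *vcpu)
{
	/*
	 * MPX bound registers live in the XSAVE area, so lazy FPU
	 * restore can leave the guest running with stale BND state.
	 * Force eager restore whenever MPX is exposed to the guest.
	 */
	vcpu->arch.eager_fpu = guest_cpuid_has_mpx(vcpu);

	if (vcpu->arch.eager_fpu)
		kvm_x86_ops->fpu_activate(vcpu);	/* also on AMD, per Paolo's note */
}

Such a check would run from the guest CPUID update path (e.g. kvm_update_cpuid()), so the FPU is loaded eagerly from the moment MPX is advertised to the guest rather than on the first #NM fault.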