KVM: x86: Refactor and rename bit() to feature_bit() macro
Rename bit() to __feature_bit() to give it a more descriptive name, and add a macro, feature_bit(), to stuff the X86_FEATURE_ prefix to keep line lengths manageable for code that hardcodes the bit to be retrieved.

No functional change intended.

Cc: Jim Mattson <jmattson@google.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
commit 87382003e3
parent a7c48c3f56
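Illustrative sketch of the rename at a typical call site (not part of the diff; simplified, the in-tree helper also calls reverse_cpuid_check(), as the hunk introducing __feature_bit() below shows):

/* before: the caller spells out the full constant */
entry->ecx |= bit(X86_FEATURE_VMX);

/* after: __feature_bit() takes the full constant, feature_bit() pastes the prefix on */
#define feature_bit(name) __feature_bit(X86_FEATURE_##name)
entry->ecx |= feature_bit(VMX);	/* expands to __feature_bit(X86_FEATURE_VMX) */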
@@ -62,7 +62,7 @@ u64 kvm_supported_xcr0(void)
 	return xcr0;
 }
 
-#define F(x) bit(X86_FEATURE_##x)
+#define F feature_bit
 
 int kvm_update_cpuid(struct kvm_vcpu *vcpu)
 {
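Why the shorter define above is not a functional change: F(x) still resolves to the same shift, only via feature_bit() instead of bit() (expansion sketch; XSAVE is just an arbitrary example feature):

/* old */ F(XSAVE) -> bit(X86_FEATURE_XSAVE)           -> 1 << (X86_FEATURE_XSAVE & 31)
/* new */ F(XSAVE) -> feature_bit(XSAVE)
                   -> __feature_bit(X86_FEATURE_XSAVE) -> 1 << (X86_FEATURE_XSAVE & 31)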
@@ -80,12 +80,14 @@ static __always_inline void reverse_cpuid_check(unsigned x86_leaf)
  * "word" (stored in bits 31:5). The word is used to index into arrays of
  * bit masks that hold the per-cpu feature capabilities, e.g. this_cpu_has().
  */
-static __always_inline u32 bit(int x86_feature)
+static __always_inline u32 __feature_bit(int x86_feature)
 {
 	reverse_cpuid_check(x86_feature / 32);
 	return 1 << (x86_feature & 31);
 }
 
+#define feature_bit(name) __feature_bit(X86_FEATURE_##name)
+
 static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
 {
 	unsigned x86_leaf = x86_feature / 32;
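A concrete example of the word/bit split described in the comment above, assuming the usual cpufeatures.h layout where X86_FEATURE_VMX is (4*32 + 5), i.e. word 4 (CPUID.1:ECX), bit 5:

reverse_cpuid_check(X86_FEATURE_VMX / 32);	/* word 4 must be a valid reverse-CPUID word */
feature_bit(VMX) == __feature_bit(X86_FEATURE_VMX)
                 == 1 << ((4*32 + 5) & 31)
                 == 1 << 5	/* bit 5 of the 32-bit feature word */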
@@ -126,7 +128,7 @@ static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_feature)
 	if (!reg)
 		return false;
 
-	return *reg & bit(x86_feature);
+	return *reg & __feature_bit(x86_feature);
 }
 
 static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu, unsigned x86_feature)
@@ -135,7 +137,7 @@ static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu, unsigned x86_feature)
 
 	reg = guest_cpuid_get_register(vcpu, x86_feature);
 	if (reg)
-		*reg &= ~bit(x86_feature);
+		*reg &= ~__feature_bit(x86_feature);
 }
 
 static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
@@ -5929,14 +5929,14 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 		guest_cpuid_clear(vcpu, X86_FEATURE_X2APIC);
 }
 
-#define F(x) bit(X86_FEATURE_##x)
+#define F feature_bit
 
 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
 {
 	switch (func) {
 	case 0x1:
 		if (avic)
-			entry->ecx &= ~bit(X86_FEATURE_X2APIC);
+			entry->ecx &= ~F(X2APIC);
 		break;
 	case 0x80000001:
 		if (nested)
@@ -6989,28 +6989,28 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
 } while (0)
 
 	entry = kvm_find_cpuid_entry(vcpu, 0x1, 0);
-	cr4_fixed1_update(X86_CR4_VME, edx, bit(X86_FEATURE_VME));
-	cr4_fixed1_update(X86_CR4_PVI, edx, bit(X86_FEATURE_VME));
-	cr4_fixed1_update(X86_CR4_TSD, edx, bit(X86_FEATURE_TSC));
-	cr4_fixed1_update(X86_CR4_DE, edx, bit(X86_FEATURE_DE));
-	cr4_fixed1_update(X86_CR4_PSE, edx, bit(X86_FEATURE_PSE));
-	cr4_fixed1_update(X86_CR4_PAE, edx, bit(X86_FEATURE_PAE));
-	cr4_fixed1_update(X86_CR4_MCE, edx, bit(X86_FEATURE_MCE));
-	cr4_fixed1_update(X86_CR4_PGE, edx, bit(X86_FEATURE_PGE));
-	cr4_fixed1_update(X86_CR4_OSFXSR, edx, bit(X86_FEATURE_FXSR));
-	cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, bit(X86_FEATURE_XMM));
-	cr4_fixed1_update(X86_CR4_VMXE, ecx, bit(X86_FEATURE_VMX));
-	cr4_fixed1_update(X86_CR4_SMXE, ecx, bit(X86_FEATURE_SMX));
-	cr4_fixed1_update(X86_CR4_PCIDE, ecx, bit(X86_FEATURE_PCID));
-	cr4_fixed1_update(X86_CR4_OSXSAVE, ecx, bit(X86_FEATURE_XSAVE));
+	cr4_fixed1_update(X86_CR4_VME, edx, feature_bit(VME));
+	cr4_fixed1_update(X86_CR4_PVI, edx, feature_bit(VME));
+	cr4_fixed1_update(X86_CR4_TSD, edx, feature_bit(TSC));
+	cr4_fixed1_update(X86_CR4_DE, edx, feature_bit(DE));
+	cr4_fixed1_update(X86_CR4_PSE, edx, feature_bit(PSE));
+	cr4_fixed1_update(X86_CR4_PAE, edx, feature_bit(PAE));
+	cr4_fixed1_update(X86_CR4_MCE, edx, feature_bit(MCE));
+	cr4_fixed1_update(X86_CR4_PGE, edx, feature_bit(PGE));
+	cr4_fixed1_update(X86_CR4_OSFXSR, edx, feature_bit(FXSR));
+	cr4_fixed1_update(X86_CR4_OSXMMEXCPT, edx, feature_bit(XMM));
+	cr4_fixed1_update(X86_CR4_VMXE, ecx, feature_bit(VMX));
+	cr4_fixed1_update(X86_CR4_SMXE, ecx, feature_bit(SMX));
+	cr4_fixed1_update(X86_CR4_PCIDE, ecx, feature_bit(PCID));
+	cr4_fixed1_update(X86_CR4_OSXSAVE, ecx, feature_bit(XSAVE));
 
 	entry = kvm_find_cpuid_entry(vcpu, 0x7, 0);
-	cr4_fixed1_update(X86_CR4_FSGSBASE, ebx, bit(X86_FEATURE_FSGSBASE));
-	cr4_fixed1_update(X86_CR4_SMEP, ebx, bit(X86_FEATURE_SMEP));
-	cr4_fixed1_update(X86_CR4_SMAP, ebx, bit(X86_FEATURE_SMAP));
-	cr4_fixed1_update(X86_CR4_PKE, ecx, bit(X86_FEATURE_PKU));
-	cr4_fixed1_update(X86_CR4_UMIP, ecx, bit(X86_FEATURE_UMIP));
-	cr4_fixed1_update(X86_CR4_LA57, ecx, bit(X86_FEATURE_LA57));
+	cr4_fixed1_update(X86_CR4_FSGSBASE, ebx, feature_bit(FSGSBASE));
+	cr4_fixed1_update(X86_CR4_SMEP, ebx, feature_bit(SMEP));
+	cr4_fixed1_update(X86_CR4_SMAP, ebx, feature_bit(SMAP));
+	cr4_fixed1_update(X86_CR4_PKE, ecx, feature_bit(PKU));
+	cr4_fixed1_update(X86_CR4_UMIP, ecx, feature_bit(UMIP));
+	cr4_fixed1_update(X86_CR4_LA57, ecx, feature_bit(LA57));
 
 #undef cr4_fixed1_update
 }
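The } while (0) context line at the top of this hunk closes the local cr4_fixed1_update() macro defined just before it inside nested_vmx_cr_fixed1_bits_update(); roughly (a sketch reconstructed from memory, not part of this diff, and the exact field names are an assumption), it marks a CR4 bit as allowed-to-be-1 for the nested guest whenever the guest's CPUID entry advertises the matching feature:

/* sketch only; the real macro lives immediately above the hunk shown here */
#define cr4_fixed1_update(_cr4_mask, _reg, _cpuid_mask)		\
do {								\
	if (entry && (entry->_reg & (_cpuid_mask)))		\
		vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask);	\
} while (0)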
@@ -7144,7 +7144,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
 static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
 {
 	if (func == 1 && nested)
-		entry->ecx |= bit(X86_FEATURE_VMX);
+		entry->ecx |= feature_bit(VMX);
 }
 
 static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
@@ -904,7 +904,7 @@ static u64 kvm_host_cr4_reserved_bits(struct cpuinfo_x86 *c)
 {
 	u64 reserved_bits = __cr4_reserved_bits(cpu_has, c);
 
-	if (cpuid_ecx(0x7) & bit(X86_FEATURE_LA57))
+	if (cpuid_ecx(0x7) & feature_bit(LA57))
 		reserved_bits &= ~X86_CR4_LA57;
 
 	if (kvm_x86_ops->umip_emulated())
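The same mask also works directly on raw CPUID output here: assuming the usual definition X86_FEATURE_LA57 == (16*32 + 16), the expansion is

feature_bit(LA57) == 1 << ((16*32 + 16) & 31) == 1 << 16	/* LA57 bit in CPUID.(EAX=7,ECX=0):ECX */

which is exactly the bit position returned by cpuid_ecx(0x7), so the check above clears X86_CR4_LA57 from the reserved set only when the host CPU supports 5-level paging.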