KVM: x86: replace static const variables with macros

Even though the compiler is able to replace static const variables with
their value, it will warn about them being unused when Linux is built with W=1.
Use good old macros instead, this is not C++.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
Paolo Bonzini 2020-10-30 13:39:55 -04:00
parent 699116c45e
commit 8a967d655e
3 changed files with 21 additions and 21 deletions

View File

@@ -225,7 +225,7 @@ static gfn_t get_mmio_spte_gfn(u64 spte)
 {
 	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
-	gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
+	gpa |= (spte >> SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)
	       & shadow_nonpresent_or_rsvd_mask;
 	return gpa >> PAGE_SHIFT;
@@ -591,15 +591,15 @@ static u64 mmu_spte_get_lockless(u64 *sptep)
 static u64 restore_acc_track_spte(u64 spte)
 {
 	u64 new_spte = spte;
-	u64 saved_bits = (spte >> shadow_acc_track_saved_bits_shift)
-			 & shadow_acc_track_saved_bits_mask;
+	u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
+			 & SHADOW_ACC_TRACK_SAVED_BITS_MASK;

 	WARN_ON_ONCE(spte_ad_enabled(spte));
 	WARN_ON_ONCE(!is_access_track_spte(spte));

 	new_spte &= ~shadow_acc_track_mask;
-	new_spte &= ~(shadow_acc_track_saved_bits_mask <<
-		      shadow_acc_track_saved_bits_shift);
+	new_spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
+		      SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
 	new_spte |= saved_bits;

 	return new_spte;

View File

@@ -55,7 +55,7 @@ u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
 	mask |= shadow_mmio_value | access;
 	mask |= gpa | shadow_nonpresent_or_rsvd_mask;
 	mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
-		<< shadow_nonpresent_or_rsvd_mask_len;
+		<< SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;

 	return mask;
 }
@@ -231,12 +231,12 @@ u64 mark_spte_for_access_track(u64 spte)
 		  !spte_can_locklessly_be_made_writable(spte),
 		  "kvm: Writable SPTE is not locklessly dirty-trackable\n");

-	WARN_ONCE(spte & (shadow_acc_track_saved_bits_mask <<
-			  shadow_acc_track_saved_bits_shift),
+	WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
+			  SHADOW_ACC_TRACK_SAVED_BITS_SHIFT),
 		  "kvm: Access Tracking saved bit locations are not zero\n");

-	spte |= (spte & shadow_acc_track_saved_bits_mask) <<
-		shadow_acc_track_saved_bits_shift;
+	spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
+		SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
 	spte &= ~shadow_acc_track_mask;

 	return spte;
@@ -245,7 +245,7 @@ u64 mark_spte_for_access_track(u64 spte)
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask)
 {
 	BUG_ON((u64)(unsigned)access_mask != access_mask);
-	WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << shadow_nonpresent_or_rsvd_mask_len));
+	WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << SHADOW_NONPRESENT_OR_RSVD_MASK_LEN));
 	WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);
 	shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
 	shadow_mmio_access_mask = access_mask;
@@ -306,9 +306,9 @@ void kvm_mmu_reset_all_pte_masks(void)
 	low_phys_bits = boot_cpu_data.x86_phys_bits;
 	if (boot_cpu_has_bug(X86_BUG_L1TF) &&
 	    !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
-			  52 - shadow_nonpresent_or_rsvd_mask_len)) {
+			  52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
 		low_phys_bits = boot_cpu_data.x86_cache_bits
-			- shadow_nonpresent_or_rsvd_mask_len;
+			- SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
 		shadow_nonpresent_or_rsvd_mask =
 			rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
 	}

View File

@@ -104,20 +104,20 @@ extern u64 __read_mostly shadow_acc_track_mask;
  */
 extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask;

+/*
+ * The number of high-order 1 bits to use in the mask above.
+ */
+#define SHADOW_NONPRESENT_OR_RSVD_MASK_LEN 5
+
 /*
  * The mask/shift to use for saving the original R/X bits when marking the PTE
  * as not-present for access tracking purposes. We do not save the W bit as the
  * PTEs being access tracked also need to be dirty tracked, so the W bit will be
  * restored only when a write is attempted to the page.
  */
-static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
-						    PT64_EPT_EXECUTABLE_MASK;
-static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;
-
-/*
- * The number of high-order 1 bits to use in the mask above.
- */
-static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
+#define SHADOW_ACC_TRACK_SAVED_BITS_MASK (PT64_EPT_READABLE_MASK | \
+					  PT64_EPT_EXECUTABLE_MASK)
+#define SHADOW_ACC_TRACK_SAVED_BITS_SHIFT PT64_SECOND_AVAIL_BITS_SHIFT

 /*
  * In some cases, we need to preserve the GFN of a non-present or reserved