KVM: MMU: Fix race when instantiating a shadow pte
For improved concurrency, the guest walk is performed concurrently with other
vcpus. This means that we need to revalidate the guest ptes once we have
write-protected the guest page tables, at which point they can no longer be
modified.

The current code attempts to avoid this check if the shadow page table is not
new, on the assumption that if it has existed before, the guest could not have
modified the pte without the shadow lock. However the assumption is incorrect,
as the racing vcpu could have modified the pte, then instantiated the shadow
page, before our vcpu regains control:

    vcpu0                                    vcpu1

    fault
    walk pte

                                             modify pte
                                             fault in same pagetable
                                             instantiate shadow page

    lookup shadow page
    conclude it is old
    instantiate spte based on stale guest pte

We could do something clever with generation counters, but a test run by
Marcelo suggests this is unnecessary and we can just do the revalidation
unconditionally. The pte will be in the processor cache and the check can be
quite fast.

Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 8c35f237fb
commit f7d9c7b7b9
@@ -681,8 +681,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                              unsigned level,
                                              int metaphysical,
                                              unsigned access,
-                                             u64 *parent_pte,
-                                             bool *new_page)
+                                             u64 *parent_pte)
 {
         union kvm_mmu_page_role role;
         unsigned index;
@@ -722,8 +721,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
         vcpu->arch.mmu.prefetch_page(vcpu, sp);
         if (!metaphysical)
                 rmap_write_protect(vcpu->kvm, gfn);
-        if (new_page)
-                *new_page = 1;
         return sp;
 }
 
@@ -1006,8 +1003,7 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
                                 >> PAGE_SHIFT;
                         new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
                                                      v, level - 1,
-                                                     1, ACC_ALL, &table[index],
-                                                     NULL);
+                                                     1, ACC_ALL, &table[index]);
                         if (!new_table) {
                                 pgprintk("nonpaging_map: ENOMEM\n");
                                 kvm_release_page_clean(page);
@@ -1100,7 +1096,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 
                 ASSERT(!VALID_PAGE(root));
                 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
-                                      PT64_ROOT_LEVEL, 0, ACC_ALL, NULL, NULL);
+                                      PT64_ROOT_LEVEL, 0, ACC_ALL, NULL);
                 root = __pa(sp->spt);
                 ++sp->root_count;
                 vcpu->arch.mmu.root_hpa = root;
@@ -1121,7 +1117,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                         root_gfn = 0;
                 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                       PT32_ROOT_LEVEL, !is_paging(vcpu),
-                                      ACC_ALL, NULL, NULL);
+                                      ACC_ALL, NULL);
                 root = __pa(sp->spt);
                 ++sp->root_count;
                 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;

@@ -300,7 +300,6 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                 u64 shadow_pte;
                 int metaphysical;
                 gfn_t table_gfn;
-                bool new_page = 0;
 
                 shadow_ent = ((u64 *)__va(shadow_addr)) + index;
                 if (level == PT_PAGE_TABLE_LEVEL)
@@ -322,8 +321,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                 }
                 shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
                                                metaphysical, access,
-                                               shadow_ent, &new_page);
-                if (new_page && !metaphysical) {
+                                               shadow_ent);
+                if (!metaphysical) {
                         int r;
                         pt_element_t curr_pte;
                         r = kvm_read_guest_atomic(vcpu->kvm,
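The last hunk above is truncated before the comparison that uses curr_pte, so
the following is a minimal, self-contained user-space sketch (not the kernel
code) of the pattern the patch makes unconditional: cache the guest pte during
the walk, re-read it once the guest page table has been write-protected, and
refuse to instantiate the shadow pte if it changed in the meantime. The names
guest_walker, walked_pte and read_guest_pte_atomic are illustrative stand-ins
for the walker state and kvm_read_guest_atomic() seen in the hunks, not the
actual KVM interfaces.

/*
 * Sketch only: models the post-write-protect revalidation of a guest pte.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pt_element_t;

/* Guest pte as it sits in guest memory; atomic to model concurrent vcpus. */
static _Atomic pt_element_t guest_pte = 0x1007;	/* present, some frame */

/* Value the walker cached before the page table was write-protected. */
struct guest_walker {
	pt_element_t walked_pte;
};

/* Stand-in for kvm_read_guest_atomic(): re-read the pte from guest memory. */
static pt_element_t read_guest_pte_atomic(void)
{
	return atomic_load(&guest_pte);
}

/*
 * True if the cached pte still matches guest memory, i.e. it is safe to
 * instantiate the shadow pte from the cached value.
 */
static bool revalidate_gpte(const struct guest_walker *walker)
{
	return read_guest_pte_atomic() == walker->walked_pte;
}

int main(void)
{
	struct guest_walker walker = { .walked_pte = atomic_load(&guest_pte) };

	/* A racing vcpu modifies the pte before we write-protect and recheck. */
	atomic_store(&guest_pte, 0x2007);

	if (!revalidate_gpte(&walker))
		printf("gpte changed under us: do not map, redo the walk\n");
	else
		printf("gpte unchanged: safe to instantiate the shadow pte\n");

	return 0;
}

Before this patch the recheck was skipped whenever the shadow page already
existed (new_page == 0), which is exactly the window the race diagram in the
commit message exploits; the patch drops the new_page plumbing and performs
the recheck on every non-metaphysical shadow page.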