KVM: MMU: abstract spte write-protect
Introduce a common function to abstract spte write-protect, to clean up the code.

Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
commit d13bc5b5a1
parent 2f84569f97
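What the new helper buys is its return convention: spte_write_protect() returns true when it has dropped the spte (large sptes are dropped rather than write-protected), so the rmap walker in the diff below knows its iterator is stale and restarts from the head of the chain; otherwise it only clears the writable bit and the walker simply advances. As a rough illustration of that contract, here is a self-contained userspace C model; the list type, flag bits, and names (struct entry, wp_entry, wp_all) are invented for this sketch and are not KVM code.

/* Toy model of the "return true if the spte is dropped" contract; not KVM code. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define F_WRITABLE 0x1u
#define F_LARGE    0x2u

struct entry {
        unsigned int flags;
        struct entry *next;
};

/* Return true if the entry is dropped (unlinked and freed). */
static bool wp_entry(struct entry **head, struct entry *e, bool *flush)
{
        if (!(e->flags & F_WRITABLE))
                return false;

        *flush |= true;
        if (e->flags & F_LARGE) {
                /* "large" entries are dropped instead of write-protected */
                struct entry **pp = head;

                while (*pp != e)
                        pp = &(*pp)->next;
                *pp = e->next;
                free(e);
                return true;
        }

        e->flags &= ~F_WRITABLE;        /* ordinary entries just lose the bit */
        return false;
}

static bool wp_all(struct entry **head)
{
        struct entry *e = *head;
        bool flush = false;

        while (e) {
                if (wp_entry(head, e, &flush)) {
                        e = *head;      /* the cursor is stale: restart from the head */
                        continue;
                }
                e = e->next;
        }
        return flush;   /* caller decides whether a TLB-flush analogue is needed */
}

int main(void)
{
        struct entry *head = NULL;
        unsigned int flags[] = { F_WRITABLE, F_WRITABLE | F_LARGE, 0 };

        for (int i = 0; i < 3; i++) {
                struct entry *e = malloc(sizeof(*e));

                e->flags = flags[i];
                e->next = head;
                head = e;
        }

        printf("flush needed: %d\n", wp_all(&head));

        while (head) {  /* tear down what is left of the list */
                struct entry *next = head->next;

                free(head);
                head = next;
        }
        return 0;
}

In the patch itself the same bool *flush out-parameter also lets kvm_mmu_slot_remove_write_access() reuse the helper, although that path still calls kvm_flush_remote_tlbs() unconditionally at this point.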
@@ -1050,36 +1050,48 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
                 rmap_remove(kvm, sptep);
 }
 
+/* Return true if the spte is dropped. */
+static bool spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush)
+{
+        u64 spte = *sptep;
+
+        if (!is_writable_pte(spte))
+                return false;
+
+        rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
+
+        *flush |= true;
+        if (is_large_pte(spte)) {
+                WARN_ON(page_header(__pa(sptep))->role.level ==
+                        PT_PAGE_TABLE_LEVEL);
+                drop_spte(kvm, sptep);
+                --kvm->stat.lpages;
+                return true;
+        }
+
+        spte = spte & ~PT_WRITABLE_MASK;
+        mmu_spte_update(sptep, spte);
+        return false;
+}
+
 static bool
 __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level)
 {
         u64 *sptep;
         struct rmap_iterator iter;
-        bool write_protected = false;
+        bool flush = false;
 
         for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
                 BUG_ON(!(*sptep & PT_PRESENT_MASK));
-                rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
-
-                if (!is_writable_pte(*sptep)) {
-                        sptep = rmap_get_next(&iter);
+                if (spte_write_protect(kvm, sptep, &flush)) {
+                        sptep = rmap_get_first(*rmapp, &iter);
                         continue;
                 }
 
-                if (level == PT_PAGE_TABLE_LEVEL) {
-                        mmu_spte_update(sptep, *sptep & ~PT_WRITABLE_MASK);
-                        sptep = rmap_get_next(&iter);
-                } else {
-                        BUG_ON(!is_large_pte(*sptep));
-                        drop_spte(kvm, sptep);
-                        --kvm->stat.lpages;
-                        sptep = rmap_get_first(*rmapp, &iter);
-                }
-
-                write_protected = true;
+                sptep = rmap_get_next(&iter);
         }
 
-        return write_protected;
+        return flush;
 }
 
 /**
@@ -3886,6 +3898,7 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu)
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 {
         struct kvm_mmu_page *sp;
+        bool flush = false;
 
         list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
                 int i;
@@ -3900,16 +3913,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
                             !is_last_spte(pt[i], sp->role.level))
                                 continue;
 
-                        if (is_large_pte(pt[i])) {
-                                drop_spte(kvm, &pt[i]);
-                                --kvm->stat.lpages;
-                                continue;
-                        }
-
-                        /* avoid RMW */
-                        if (is_writable_pte(pt[i]))
-                                mmu_spte_update(&pt[i],
-                                                pt[i] & ~PT_WRITABLE_MASK);
+                        spte_write_protect(kvm, &pt[i], &flush);
                 }
         }
         kvm_flush_remote_tlbs(kvm);