KVM: x86: Consolidate VM allocation and free for VMX and SVM

Move the VM allocation and free code to common x86 as the logic is
more or less identical across SVM and VMX.

Note, although hyperv.hv_pa_pg is part of the common kvm->arch, it's
(currently) only allocated by VMX VMs.  But, since kfree() plays nice
when passed a NULL pointer, the superfluous call for SVM is harmless
and avoids future churn if SVM gains support for HyperV's direct TLB
flush.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
[Make vm_size a field instead of a function. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Author: Sean Christopherson <sean.j.christopherson@intel.com>
Date:   2020-01-26 16:41:13 -08:00
Commit: Paolo Bonzini <pbonzini@redhat.com>
parent 1a625056cc
commit 562b6b089d

4 changed files with 13 additions and 39 deletions
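
For orientation before the hunks: this is the post-patch shape of the
allocation/free pair, shown here with explanatory comments that are not
part of the commit itself.

/* arch/x86/include/asm/kvm_host.h: common allocation, sized by the
 * vendor module through the new kvm_x86_ops.vm_size field. */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        /* vm_size is sizeof(struct kvm_svm) or sizeof(struct kvm_vmx) */
        return __vmalloc(kvm_x86_ops->vm_size,
                         GFP_KERNEL_ACCOUNT | __GFP_ZERO, PAGE_KERNEL);
}

/* arch/x86/kvm/x86.c: common free, now shared by SVM and VMX. */
void kvm_arch_free_vm(struct kvm *kvm)
{
        kfree(kvm->arch.hyperv.hv_pa_pg);  /* always NULL for SVM: a no-op */
        vfree(kvm);                        /* pairs with the __vmalloc() above */
}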

--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1059,8 +1059,7 @@ struct kvm_x86_ops {
 	bool (*has_emulated_msr)(int index);
 	void (*cpuid_update)(struct kvm_vcpu *vcpu);
 
-	struct kvm *(*vm_alloc)(void);
-	void (*vm_free)(struct kvm *);
+	unsigned int vm_size;
 	int (*vm_init)(struct kvm *kvm);
 	void (*vm_destroy)(struct kvm *kvm);
@@ -1278,13 +1277,10 @@ extern struct kmem_cache *x86_fpu_cache;
 #define __KVM_HAVE_ARCH_VM_ALLOC
 static inline struct kvm *kvm_arch_alloc_vm(void)
 {
-	return kvm_x86_ops->vm_alloc();
-}
-
-static inline void kvm_arch_free_vm(struct kvm *kvm)
-{
-	return kvm_x86_ops->vm_free(kvm);
+	return __vmalloc(kvm_x86_ops->vm_size,
+			 GFP_KERNEL_ACCOUNT | __GFP_ZERO, PAGE_KERNEL);
 }
+void kvm_arch_free_vm(struct kvm *kvm);
 
 #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
 static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
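
A note on why a single size-based allocator is safe: the removed
svm_vm_alloc()/vmx_vm_alloc() carried a BUILD_BUG_ON asserting that the
embedded struct kvm sits at offset 0 of the vendor container, so the
struct kvm * returned by __vmalloc() doubles as a pointer to the
container. A minimal sketch of that layout contract, with the
vendor-specific fields abbreviated (to_kvm_svm() is as defined in svm.c
of this era):

struct kvm_svm {
        struct kvm kvm;         /* must remain the first member (offset 0) */
        /* ... SVM-specific state (AVIC tables, SEV info, ...) ... */
};

/* With kvm at offset 0, the struct kvm * handed out by the common
 * allocator is also a valid struct kvm_svm *; container_of() makes
 * the conversion explicit rather than relying on a cast. */
static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm)
{
        return container_of(kvm, struct kvm_svm, kvm);
}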

--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1944,19 +1944,6 @@ static void __unregister_enc_region_locked(struct kvm *kvm,
 	kfree(region);
 }
 
-static struct kvm *svm_vm_alloc(void)
-{
-	BUILD_BUG_ON(offsetof(struct kvm_svm, kvm) != 0);
-
-	return __vmalloc(sizeof(struct kvm_svm),
-			 GFP_KERNEL_ACCOUNT | __GFP_ZERO, PAGE_KERNEL);
-}
-
-static void svm_vm_free(struct kvm *kvm)
-{
-	vfree(kvm);
-}
-
 static void sev_vm_destroy(struct kvm *kvm)
 {
 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
@@ -7395,8 +7382,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.vcpu_free = svm_free_vcpu,
 	.vcpu_reset = svm_vcpu_reset,
 
-	.vm_alloc = svm_vm_alloc,
-	.vm_free = svm_vm_free,
+	.vm_size = sizeof(struct kvm_svm),
 	.vm_init = svm_vm_init,
 	.vm_destroy = svm_vm_destroy,

--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6679,20 +6679,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	vmx_complete_interrupts(vmx);
 }
 
-static struct kvm *vmx_vm_alloc(void)
-{
-	BUILD_BUG_ON(offsetof(struct kvm_vmx, kvm) != 0);
-
-	return __vmalloc(sizeof(struct kvm_vmx),
-			 GFP_KERNEL_ACCOUNT | __GFP_ZERO, PAGE_KERNEL);
-}
-
-static void vmx_vm_free(struct kvm *kvm)
-{
-	kfree(kvm->arch.hyperv.hv_pa_pg);
-	vfree(kvm);
-}
-
 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -7835,9 +7821,8 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.cpu_has_accelerated_tpr = report_flexpriority,
 	.has_emulated_msr = vmx_has_emulated_msr,
 
+	.vm_size = sizeof(struct kvm_vmx),
 	.vm_init = vmx_vm_init,
-	.vm_alloc = vmx_vm_alloc,
-	.vm_free = vmx_vm_free,
 
 	.vcpu_create = vmx_create_vcpu,
 	.vcpu_free = vmx_free_vcpu,

--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9622,6 +9622,13 @@ void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
 	kvm_x86_ops->sched_in(vcpu, cpu);
 }
 
+void kvm_arch_free_vm(struct kvm *kvm)
+{
+	kfree(kvm->arch.hyperv.hv_pa_pg);
+	vfree(kvm);
+}
+
 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 {
 	if (type)
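
For context, x86 defines __KVM_HAVE_ARCH_VM_ALLOC, which compiles out
the generic fallbacks in include/linux/kvm_host.h; the out-of-line
kvm_arch_free_vm() added above is what the new declaration in the x86
header resolves to. The generic versions of the era looked roughly like
this (an approximate sketch of the generic header, not part of this
commit; the exact GFP flags may differ):

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/* Generic fallback: a plain kzalloc()/kfree() of struct kvm, for
 * architectures that do not embed struct kvm in a larger container. */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
        return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
        kfree(kvm);
}
#endif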