kvm: nVMX: VMCLEAR an active shadow VMCS after last use

After a successful VM-entry with the "VMCS shadowing" VM-execution
control set, the shadow VMCS referenced by the VMCS link pointer field
in the current VMCS becomes active on the logical processor.

A VMCS that is made active on more than one logical processor may become
corrupted. Therefore, before an active VMCS can be migrated to another
logical processor, the first logical processor must execute a VMCLEAR
for the active VMCS. VMCLEAR both ensures that all VMCS data are written
to memory and makes the VMCS inactive.
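
The mechanics hang off loaded_vmcs_init(), which free_loaded_vmcs() reaches
through loaded_vmcs_clear(), an IPI to the CPU that last ran the VMCS. Below
is a minimal sketch of that cross-CPU VMCLEAR pattern, not the literal vmx.c
code: the helper names are hypothetical, while vmcs_clear() and
smp_call_function_single() are the real primitives.

	/* Runs via IPI on the CPU where the VMCS is (possibly) active. */
	static void __clear_vmcs_on_cpu(void *arg)
	{
		struct loaded_vmcs *v = arg;

		vmcs_clear(v->vmcs);	/* flush VMCS data to memory, deactivate */
		if (v->shadow_vmcs && v->launched)
			/* a launched shadow VMCS became active at VM-entry */
			vmcs_clear(v->shadow_vmcs);
		v->cpu = -1;
		v->launched = 0;
	}

	static void clear_vmcs_before_migration(struct loaded_vmcs *v)
	{
		/* VMCLEAR must execute on the CPU that owns the VMCS */
		if (v->cpu != -1)
			smp_call_function_single(v->cpu, __clear_vmcs_on_cpu, v, 1);
	}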

Signed-off-by: Jim Mattson <jmattson@google.com>
Reviewed-by: David Matlack <dmatlack@google.com>
Message-Id: <1477668579-22555-1-git-send-email-jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -187,6 +187,7 @@ struct vmcs {
  */
 struct loaded_vmcs {
 	struct vmcs *vmcs;
+	struct vmcs *shadow_vmcs;
 	int cpu;
 	int launched;
 	struct list_head loaded_vmcss_on_cpu_link;
@@ -411,7 +412,6 @@ struct nested_vmx {
 	 * memory during VMXOFF, VMCLEAR, VMPTRLD.
 	 */
 	struct vmcs12 *cached_vmcs12;
-	struct vmcs *current_shadow_vmcs;
 	/*
 	 * Indicates if the shadow vmcs must be updated with the
 	 * data hold by vmcs12
@@ -1418,6 +1418,8 @@ static void vmcs_clear(struct vmcs *vmcs)
 static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
 {
 	vmcs_clear(loaded_vmcs->vmcs);
+	if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
+		vmcs_clear(loaded_vmcs->shadow_vmcs);
 	loaded_vmcs->cpu = -1;
 	loaded_vmcs->launched = 0;
 }
@@ -3533,6 +3535,7 @@ static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
 	loaded_vmcs_clear(loaded_vmcs);
 	free_vmcs(loaded_vmcs->vmcs);
 	loaded_vmcs->vmcs = NULL;
+	WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
 }
 
 static void free_kvm_area(void)
@@ -6667,6 +6670,7 @@ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
 	if (!item)
 		return NULL;
 	item->vmcs02.vmcs = alloc_vmcs();
+	item->vmcs02.shadow_vmcs = NULL;
 	if (!item->vmcs02.vmcs) {
 		kfree(item);
 		return NULL;
@@ -7043,7 +7047,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 		shadow_vmcs->revision_id |= (1u << 31);
 		/* init shadow vmcs */
 		vmcs_clear(shadow_vmcs);
-		vmx->nested.current_shadow_vmcs = shadow_vmcs;
+		vmx->vmcs01.shadow_vmcs = shadow_vmcs;
 	}
 
 	INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
@@ -7145,8 +7149,11 @@ static void free_nested(struct vcpu_vmx *vmx)
 		free_page((unsigned long)vmx->nested.msr_bitmap);
 		vmx->nested.msr_bitmap = NULL;
 	}
-	if (enable_shadow_vmcs)
-		free_vmcs(vmx->nested.current_shadow_vmcs);
+	if (enable_shadow_vmcs) {
+		vmcs_clear(vmx->vmcs01.shadow_vmcs);
+		free_vmcs(vmx->vmcs01.shadow_vmcs);
+		vmx->vmcs01.shadow_vmcs = NULL;
+	}
 	kfree(vmx->nested.cached_vmcs12);
 	/* Unpin physical memory we referred to in current vmcs02 */
 	if (vmx->nested.apic_access_page) {
@@ -7323,7 +7330,7 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
 	int i;
 	unsigned long field;
 	u64 field_value;
-	struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
+	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
 	const unsigned long *fields = shadow_read_write_fields;
 	const int num_fields = max_shadow_read_write_fields;
 
@@ -7372,7 +7379,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
 	int i, q;
 	unsigned long field;
 	u64 field_value = 0;
-	struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
+	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
 
 	vmcs_load(shadow_vmcs);
 
@@ -7562,7 +7569,7 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 			vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
 				      SECONDARY_EXEC_SHADOW_VMCS);
 			vmcs_write64(VMCS_LINK_POINTER,
-				     __pa(vmx->nested.current_shadow_vmcs));
+				     __pa(vmx->vmcs01.shadow_vmcs));
 			vmx->nested.sync_shadow_vmcs = true;
 		}
 	}
@@ -9127,6 +9134,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	vmx->loaded_vmcs = &vmx->vmcs01;
 	vmx->loaded_vmcs->vmcs = alloc_vmcs();
+	vmx->loaded_vmcs->shadow_vmcs = NULL;
 	if (!vmx->loaded_vmcs->vmcs)
 		goto free_msrs;
 	if (!vmm_exclusive)