// SPDX-License-Identifier: GPL-2.0

#include <linux/frame.h>
#include <linux/percpu.h>

#include <asm/debugreg.h>
#include <asm/mmu_context.h>

#include "cpuid.h"
#include "hyperv.h"
#include "mmu.h"
#include "nested.h"
#include "trace.h"
#include "x86.h"

static bool __read_mostly enable_shadow_vmcs = 1;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);

static bool __read_mostly nested_early_check = 0;
module_param(nested_early_check, bool, S_IRUGO);

/*
 * Hyper-V requires all of these, so mark them as supported even though
 * they are just treated the same as all-context.
 */
#define VMX_VPID_EXTENT_SUPPORTED_MASK		\
	(VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |	\
	VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)

#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5

enum {
	VMX_VMREAD_BITMAP,
	VMX_VMWRITE_BITMAP,
	VMX_BITMAP_NR
};
static unsigned long *vmx_bitmap[VMX_BITMAP_NR];

#define vmx_vmread_bitmap	(vmx_bitmap[VMX_VMREAD_BITMAP])
#define vmx_vmwrite_bitmap	(vmx_bitmap[VMX_VMWRITE_BITMAP])

static u16 shadow_read_only_fields[] = {
#define SHADOW_FIELD_RO(x) x,
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_only_fields =
	ARRAY_SIZE(shadow_read_only_fields);

static u16 shadow_read_write_fields[] = {
#define SHADOW_FIELD_RW(x) x,
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_write_fields =
	ARRAY_SIZE(shadow_read_write_fields);

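/*
 * Initialize the VMREAD/VMWRITE bitmaps (a clear bit means the access is
 * satisfied by the shadow VMCS without a VM exit) and compact the shadow
 * field arrays: fields the hardware cannot shadow here (e.g. the PML index
 * without PML support) stay intercepted and are dropped, and on x86_64 the
 * high halves of 64-bit fields are dropped from the arrays because they are
 * synced through the full 64-bit field.
 */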
static void init_vmcs_shadow_fields(void)
{
	int i, j;

	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);

	for (i = j = 0; i < max_shadow_read_only_fields; i++) {
		u16 field = shadow_read_only_fields[i];

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_only_fields ||
		     shadow_read_only_fields[i + 1] != field + 1))
			pr_err("Missing field from shadow_read_only_field %x\n",
			       field + 1);

		clear_bit(field, vmx_vmread_bitmap);
#ifdef CONFIG_X86_64
		if (field & 1)
			continue;
#endif
		if (j < i)
			shadow_read_only_fields[j] = field;
		j++;
	}
	max_shadow_read_only_fields = j;

	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
		u16 field = shadow_read_write_fields[i];

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_write_fields ||
		     shadow_read_write_fields[i + 1] != field + 1))
			pr_err("Missing field from shadow_read_write_field %x\n",
			       field + 1);

		/*
		 * PML and the preemption timer can be emulated, but the
		 * processor cannot vmwrite to fields that don't exist
		 * on bare metal.
		 */
		switch (field) {
		case GUEST_PML_INDEX:
			if (!cpu_has_vmx_pml())
				continue;
			break;
		case VMX_PREEMPTION_TIMER_VALUE:
			if (!cpu_has_vmx_preemption_timer())
				continue;
			break;
		case GUEST_INTR_STATUS:
			if (!cpu_has_vmx_apicv())
				continue;
			break;
		default:
			break;
		}

		clear_bit(field, vmx_vmwrite_bitmap);
		clear_bit(field, vmx_vmread_bitmap);
#ifdef CONFIG_X86_64
		if (field & 1)
			continue;
#endif
		if (j < i)
			shadow_read_write_fields[j] = field;
		j++;
	}
	max_shadow_read_write_fields = j;
}

/*
 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
 * set the success or error code of an emulated VMX instruction (as specified
 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
 * instruction.
 */
static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_CF);
	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
				u32 vm_instruction_error)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * failValid writes the error number to the current VMCS, which
	 * can't be done if there isn't a current VMCS.
	 */
	if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
		return nested_vmx_failInvalid(vcpu);

	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_ZF);
	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
	/*
	 * We don't need to force a shadow sync because
	 * VM_INSTRUCTION_ERROR is not shadowed
	 */
	return kvm_skip_emulated_instruction(vcpu);
}

static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{
	/* TODO: not to reset guest simply here. */
	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
}

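/*
 * Stop using the shadow VMCS in the current VMCS: clear the execution
 * control and point VMCS_LINK_POINTER back at -1ull (no shadow).
 */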
static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
{
	vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
	vmcs_write64(VMCS_LINK_POINTER, -1ull);
}

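/* Unmap and release the page backing L1's enlightened (Hyper-V) VMCS. */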
static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx->nested.hv_evmcs)
		return;

	kunmap(vmx->nested.hv_evmcs_page);
	kvm_release_page_dirty(vmx->nested.hv_evmcs_page);
	vmx->nested.hv_evmcs_vmptr = -1ull;
	vmx->nested.hv_evmcs_page = NULL;
	vmx->nested.hv_evmcs = NULL;
}

/*
 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
 * just stops using VMX.
 */
static void free_nested(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
		return;

	vmx->nested.vmxon = false;
	vmx->nested.smm.vmxon = false;
	free_vpid(vmx->nested.vpid02);
	vmx->nested.posted_intr_nv = -1;
	vmx->nested.current_vmptr = -1ull;
	if (enable_shadow_vmcs) {
		vmx_disable_shadow_vmcs(vmx);
		vmcs_clear(vmx->vmcs01.shadow_vmcs);
		free_vmcs(vmx->vmcs01.shadow_vmcs);
		vmx->vmcs01.shadow_vmcs = NULL;
	}
	kfree(vmx->nested.cached_vmcs12);
	kfree(vmx->nested.cached_shadow_vmcs12);
	/* Unpin physical memory we referred to in the vmcs02 */
	if (vmx->nested.apic_access_page) {
		kvm_release_page_dirty(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = NULL;
	}
	if (vmx->nested.virtual_apic_page) {
		kvm_release_page_dirty(vmx->nested.virtual_apic_page);
		vmx->nested.virtual_apic_page = NULL;
	}
	if (vmx->nested.pi_desc_page) {
		kunmap(vmx->nested.pi_desc_page);
		kvm_release_page_dirty(vmx->nested.pi_desc_page);
		vmx->nested.pi_desc_page = NULL;
		vmx->nested.pi_desc = NULL;
	}

	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);

	nested_release_evmcs(vcpu);

	free_loaded_vmcs(&vmx->nested.vmcs02);
}

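/*
 * Switch the currently loaded VMCS (vmcs01 <-> vmcs02), reloading the vCPU
 * on this CPU so that host state and the per-CPU VMCS pointer stay in sync.
 */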
static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	int cpu;

	if (vmx->loaded_vmcs == vmcs)
		return;

	cpu = get_cpu();
	vmx_vcpu_put(vcpu);
	vmx->loaded_vmcs = vmcs;
	vmx_vcpu_load(vcpu, cpu);
	put_cpu();

	vm_entry_controls_reset_shadow(vmx);
	vm_exit_controls_reset_shadow(vmx);
	vmx_segment_cache_clear(vmx);
}

/*
 * Ensure that the current vmcs of the logical processor is the
 * vmcs01 of the vcpu before calling free_nested().
 */
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	vmx_leave_nested(vcpu);
	vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
	free_nested(vcpu);
	vcpu_put(vcpu);
}

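/*
 * Reflect an EPT violation/misconfig (or a PML-full event) hit while
 * running L2 as a nested VM-exit to L1, recording the faulting GPA.
 */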
static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 exit_reason;
	unsigned long exit_qualification = vcpu->arch.exit_qualification;

	if (vmx->nested.pml_full) {
		exit_reason = EXIT_REASON_PML_FULL;
		vmx->nested.pml_full = false;
		exit_qualification &= INTR_INFO_UNBLOCK_NMI;
	} else if (fault->error_code & PFERR_RSVD_MASK)
		exit_reason = EXIT_REASON_EPT_MISCONFIG;
	else
		exit_reason = EXIT_REASON_EPT_VIOLATION;

	nested_vmx_vmexit(vcpu, exit_reason, 0, exit_qualification);
	vmcs12->guest_physical_address = fault->address;
}

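/*
 * Point the vCPU at the shadow EPT MMU used while L2 runs with EPT:
 * vcpu->arch.mmu shadows L1's EPT tables, while walk_mmu (nested_mmu)
 * handles L2 virtual-address walks.
 */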
static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_ept_mmu(vcpu,
			to_vmx(vcpu)->nested.msrs.ept_caps &
			VMX_EPT_EXECUTE_ONLY_BIT,
			nested_ept_ad_enabled(vcpu),
			nested_ept_get_cr3(vcpu));
	vcpu->arch.mmu->set_cr3 = vmx_set_cr3;
	vcpu->arch.mmu->get_cr3 = nested_ept_get_cr3;
	vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
	vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;

	vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}

static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

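/*
 * Per the exception bitmap and the PFEC_MASK/PFEC_MATCH fields of vmcs12,
 * decide whether a page fault with this error code should cause a VM-exit
 * to L1.
 */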
static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
					    u16 error_code)
{
	bool inequality, bit;

	bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
	inequality =
		(error_code & vmcs12->page_fault_error_code_mask) !=
		 vmcs12->page_fault_error_code_match;
	return inequality ^ bit;
}

/*
 * KVM wants to inject page-faults which it got to the guest. This function
 * checks whether in a nested guest, we need to inject them to L1 or L2.
 */
static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	unsigned int nr = vcpu->arch.exception.nr;
	bool has_payload = vcpu->arch.exception.has_payload;
	unsigned long payload = vcpu->arch.exception.payload;

	if (nr == PF_VECTOR) {
		if (vcpu->arch.exception.nested_apf) {
			*exit_qual = vcpu->arch.apf.nested_apf_token;
			return 1;
		}
		if (nested_vmx_is_page_fault_vmexit(vmcs12,
						    vcpu->arch.exception.error_code)) {
			*exit_qual = has_payload ? payload : vcpu->arch.cr2;
			return 1;
		}
	} else if (vmcs12->exception_bitmap & (1u << nr)) {
		if (nr == DB_VECTOR) {
			if (!has_payload) {
				payload = vcpu->arch.dr6;
				payload &= ~(DR6_FIXED_1 | DR6_BT);
				payload ^= DR6_RTM;
			}
			*exit_qual = payload;
		} else
			*exit_qual = 0;
		return 1;
	}

	return 0;
}

static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
					 struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	WARN_ON(!is_guest_mode(vcpu));

	if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
	    !to_vmx(vcpu)->nested.nested_run_pending) {
		vmcs12->vm_exit_intr_error_code = fault->error_code;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
				  PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
				  INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
				  fault->address);
	} else {
		kvm_inject_page_fault(vcpu, fault);
	}
}

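/* A page-aligned GPA that does not exceed the guest's MAXPHYADDR. */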
static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
}

static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
					       struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->io_bitmap_a) ||
	    !page_address_valid(vcpu, vmcs12->io_bitmap_b))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->msr_bitmap))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr))
		return -EINVAL;

	return 0;
}

/*
 * Check if MSR is intercepted for L01 MSR bitmap.
 */
static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
{
	unsigned long *msr_bitmap;
	int f = sizeof(unsigned long);

	if (!cpu_has_vmx_msr_bitmap())
		return true;

	msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;

	if (msr <= 0x1fff) {
		return !!test_bit(msr, msr_bitmap + 0x800 / f);
	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		return !!test_bit(msr, msr_bitmap + 0xc00 / f);
	}

	return true;
}

/*
 * If a msr is allowed by L0, we should check whether it is allowed by L1.
 * The corresponding bit will be cleared unless both of L0 and L1 allow it.
 */
static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
						 unsigned long *msr_bitmap_nested,
						 u32 msr, int type)
{
	int f = sizeof(unsigned long);

	/*
	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
	 * have the write-low and read-high bitmap offsets the wrong way round.
	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
	 */
	if (msr <= 0x1fff) {
		if (type & MSR_TYPE_R &&
		   !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
			/* read-low */
			__clear_bit(msr, msr_bitmap_nested + 0x000 / f);

		if (type & MSR_TYPE_W &&
		   !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
			/* write-low */
			__clear_bit(msr, msr_bitmap_nested + 0x800 / f);

	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		if (type & MSR_TYPE_R &&
		   !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
			/* read-high */
			__clear_bit(msr, msr_bitmap_nested + 0x400 / f);

		if (type & MSR_TYPE_W &&
		   !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
			/* write-high */
			__clear_bit(msr, msr_bitmap_nested + 0xc00 / f);

	}
}

/*
 * Merge L0's and L1's MSR bitmap, return false to indicate that
 * we do not use the hardware.
 */
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	int msr;
	struct page *page;
	unsigned long *msr_bitmap_l1;
	unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
	/*
	 * pred_cmd & spec_ctrl are trying to verify two things:
	 *
	 * 1. L0 gave a permission to L1 to actually passthrough the MSR. This
	 *    ensures that we do not accidentally generate an L02 MSR bitmap
	 *    from the L12 MSR bitmap that is too permissive.
	 * 2. That L1 or L2s have actually used the MSR. This avoids
	 *    unnecessarily merging of the bitmap if the MSR is unused. This
	 *    works properly because we only update the L01 MSR bitmap lazily.
	 *    So even if L0 should pass L1 these MSRs, the L01 bitmap is only
	 *    updated to reflect this when L1 (or its L2s) actually write to
	 *    the MSR.
	 */
	bool pred_cmd = !msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD);
	bool spec_ctrl = !msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL);

	/* Nothing to do if the MSR bitmap is not in use. */
	if (!cpu_has_vmx_msr_bitmap() ||
	    !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return false;

	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    !pred_cmd && !spec_ctrl)
		return false;

	page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->msr_bitmap);
	if (is_error_page(page))
		return false;

	msr_bitmap_l1 = (unsigned long *)kmap(page);
	if (nested_cpu_has_apic_reg_virt(vmcs12)) {
		/*
		 * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it
		 * just lets the processor take the value from the virtual-APIC page;
		 * take those 256 bits directly from the L1 bitmap.
		 */
		for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
			unsigned word = msr / BITS_PER_LONG;
			msr_bitmap_l0[word] = msr_bitmap_l1[word];
			msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
		}
	} else {
		for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
			unsigned word = msr / BITS_PER_LONG;
			msr_bitmap_l0[word] = ~0;
			msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
		}
	}

	nested_vmx_disable_intercept_for_msr(
		msr_bitmap_l1, msr_bitmap_l0,
		X2APIC_MSR(APIC_TASKPRI),
		MSR_TYPE_W);

	if (nested_cpu_has_vid(vmcs12)) {
		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			X2APIC_MSR(APIC_EOI),
			MSR_TYPE_W);
		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			X2APIC_MSR(APIC_SELF_IPI),
			MSR_TYPE_W);
	}

	if (spec_ctrl)
		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			MSR_IA32_SPEC_CTRL,
			MSR_TYPE_R | MSR_TYPE_W);

	if (pred_cmd)
		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			MSR_IA32_PRED_CMD,
			MSR_TYPE_W);

	kunmap(page);
	kvm_release_page_clean(page);

	return true;
}

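/*
 * Copy the shadow vmcs12 referenced by vmcs12->vmcs_link_pointer from
 * guest memory into KVM's cache (cached_shadow_vmcs12).
 */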
static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	struct vmcs12 *shadow;
	struct page *page;

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == -1ull)
		return;

	shadow = get_shadow_vmcs12(vcpu);
	page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);

	memcpy(shadow, kmap(page), VMCS12_SIZE);

	kunmap(page);
	kvm_release_page_clean(page);
}

static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
					      struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == -1ull)
		return;

	kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
			get_shadow_vmcs12(vcpu), VMCS12_SIZE);
}

/*
 * In nested virtualization, check if L1 has set
 * VM_EXIT_ACK_INTR_ON_EXIT
 */
static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->vm_exit_controls &
		VM_EXIT_ACK_INTR_ON_EXIT;
}

static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
{
	return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
}

static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
	    !page_address_valid(vcpu, vmcs12->apic_access_addr))
		return -EINVAL;
	else
		return 0;
}

static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
					   struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    !nested_cpu_has_apic_reg_virt(vmcs12) &&
	    !nested_cpu_has_vid(vmcs12) &&
	    !nested_cpu_has_posted_intr(vmcs12))
		return 0;

	/*
	 * If virtualize x2apic mode is enabled,
	 * virtualize apic access must be disabled.
	 */
	if (nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
		return -EINVAL;

	/*
	 * If virtual interrupt delivery is enabled,
	 * we must exit on external interrupts.
	 */
	if (nested_cpu_has_vid(vmcs12) &&
	    !nested_exit_on_intr(vcpu))
		return -EINVAL;

	/*
	 * bits 15:8 should be zero in posted_intr_nv,
	 * the descriptor address has been already checked
	 * in nested_get_vmcs12_pages.
	 *
	 * bits 5:0 of posted_intr_desc_addr should be zero.
	 */
	if (nested_cpu_has_posted_intr(vmcs12) &&
	   (!nested_cpu_has_vid(vmcs12) ||
	    !nested_exit_intr_ack_set(vcpu) ||
	    (vmcs12->posted_intr_nv & 0xff00) ||
	    (vmcs12->posted_intr_desc_addr & 0x3f) ||
	    (vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu))))
		return -EINVAL;

	/* tpr shadow is needed by all apicv features. */
	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return -EINVAL;

	return 0;
}

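/*
 * Validate a VM-entry/VM-exit MSR switch area: the address must be 16-byte
 * aligned and the whole array of vmx_msr_entry elements must lie below the
 * guest's MAXPHYADDR.
 */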
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
				       u32 count, u64 addr)
{
	int maxphyaddr;

	if (count == 0)
		return 0;
	maxphyaddr = cpuid_maxphyaddr(vcpu);
	if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
	    (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
						     struct vmcs12 *vmcs12)
{
	if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_load_count,
					vmcs12->vm_exit_msr_load_addr) ||
	    nested_vmx_check_msr_switch(vcpu, vmcs12->vm_exit_msr_store_count,
					vmcs12->vm_exit_msr_store_addr))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
						       struct vmcs12 *vmcs12)
{
	if (nested_vmx_check_msr_switch(vcpu, vmcs12->vm_entry_msr_load_count,
					vmcs12->vm_entry_msr_load_addr))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
					 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_pml(vmcs12))
		return 0;

	if (!nested_cpu_has_ept(vmcs12) ||
	    !page_address_valid(vcpu, vmcs12->pml_address))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
							 struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
	    !nested_cpu_has_ept(vmcs12))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
							  struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
	    !nested_cpu_has_ept(vmcs12))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_shadow_vmcs(vmcs12))
		return 0;

	if (!page_address_valid(vcpu, vmcs12->vmread_bitmap) ||
	    !page_address_valid(vcpu, vmcs12->vmwrite_bitmap))
		return -EINVAL;

	return 0;
}

static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
				       struct vmx_msr_entry *e)
{
	/* x2APIC MSR accesses are not allowed */
	if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)
		return -EINVAL;
	if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */
	    e->index == MSR_IA32_UCODE_REV)
		return -EINVAL;
	if (e->reserved != 0)
		return -EINVAL;
	return 0;
}

static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
				     struct vmx_msr_entry *e)
{
	if (e->index == MSR_FS_BASE ||
	    e->index == MSR_GS_BASE ||
	    e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
				      struct vmx_msr_entry *e)
{
	if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

/*
 * Load guest's/host's msr at nested entry/exit.
 * return 0 for success, entry index for failure.
 */
static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;
	struct msr_data msr;

	msr.host_initiated = false;
	for (i = 0; i < count; i++) {
		if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
					&e, sizeof(e))) {
			pr_debug_ratelimited(
				"%s cannot read MSR entry (%u, 0x%08llx)\n",
				__func__, i, gpa + i * sizeof(e));
			goto fail;
		}
		if (nested_vmx_load_msr_check(vcpu, &e)) {
			pr_debug_ratelimited(
				"%s check failed (%u, 0x%x, 0x%x)\n",
				__func__, i, e.index, e.reserved);
			goto fail;
		}
		msr.index = e.index;
		msr.data = e.value;
		if (kvm_set_msr(vcpu, &msr)) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, e.value);
			goto fail;
		}
	}
	return 0;
fail:
	return i + 1;
}

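/*
 * Read the MSRs named in the VM-exit MSR-store area and write their
 * current values back into that area in guest memory.
 */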
static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;

	for (i = 0; i < count; i++) {
		struct msr_data msr_info;
		if (kvm_vcpu_read_guest(vcpu,
					gpa + i * sizeof(e),
					&e, 2 * sizeof(u32))) {
			pr_debug_ratelimited(
				"%s cannot read MSR entry (%u, 0x%08llx)\n",
				__func__, i, gpa + i * sizeof(e));
			return -EINVAL;
		}
		if (nested_vmx_store_msr_check(vcpu, &e)) {
			pr_debug_ratelimited(
				"%s check failed (%u, 0x%x, 0x%x)\n",
				__func__, i, e.index, e.reserved);
			return -EINVAL;
		}
		msr_info.host_initiated = false;
		msr_info.index = e.index;
		if (kvm_get_msr(vcpu, &msr_info)) {
			pr_debug_ratelimited(
				"%s cannot read MSR (%u, 0x%x)\n",
				__func__, i, e.index);
			return -EINVAL;
		}
		if (kvm_vcpu_write_guest(vcpu,
					 gpa + i * sizeof(e) +
						offsetof(struct vmx_msr_entry, value),
					 &msr_info.data, sizeof(msr_info.data))) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, msr_info.data);
			return -EINVAL;
		}
	}
	return 0;
}

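/* CR3 may not set any bits above the guest's MAXPHYADDR. */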
static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
	unsigned long invalid_mask;

	invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
	return (val & invalid_mask) == 0;
}

/*
 * Load guest's/host's cr3 at nested entry/exit. nested_ept is true if we are
 * emulating VM entry into a guest with EPT enabled.
 * Returns 0 on success, 1 on failure. Invalid state exit qualification code
 * is assigned to entry_failure_code on failure.
 */
static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
			       u32 *entry_failure_code)
{
	if (cr3 != kvm_read_cr3(vcpu) || (!nested_ept && pdptrs_changed(vcpu))) {
		if (!nested_cr3_valid(vcpu, cr3)) {
			*entry_failure_code = ENTRY_FAIL_DEFAULT;
			return 1;
		}

		/*
		 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
		 * must not be dereferenced.
		 */
		if (!is_long_mode(vcpu) && is_pae(vcpu) && is_paging(vcpu) &&
		    !nested_ept) {
			if (!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) {
				*entry_failure_code = ENTRY_FAIL_PDPTE;
				return 1;
			}
		}
	}

	if (!nested_ept)
		kvm_mmu_new_cr3(vcpu, cr3, false);

	vcpu->arch.cr3 = cr3;
	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);

	kvm_init_mmu(vcpu, false);

	return 0;
}

/*
 * Returns if KVM is able to config CPU to tag TLB entries
 * populated by L2 differently than TLB entries populated
 * by L1.
 *
 * If L1 uses EPT, then TLB entries are tagged with different EPTP.
 *
 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
 * with different VPID (L1 entries are tagged with vmx->vpid
 * while L2 entries are tagged with vmx->nested.vpid02).
 */
static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	return nested_cpu_has_ept(vmcs12) ||
	       (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
}

static u16 nested_get_vpid02(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	return vmx->nested.vpid02 ? vmx->nested.vpid02 : vmx->vpid;
}

static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
	return fixed_bits_valid(control, low, high);
}

static inline u64 vmx_control_msr(u32 low, u32 high)
{
	return low | ((u64)high << 32);
}

static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
{
	superset &= mask;
	subset &= mask;

	return (superset | subset) == superset;
}

static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved =
		/* feature (except bit 48; see below) */
		BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
		/* reserved */
		BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
	u64 vmx_basic = vmx->nested.msrs.basic;

	if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
		return -EINVAL;

	/*
	 * KVM does not emulate a version of VMX that constrains physical
	 * addresses of VMX structures (e.g. VMCS) to 32-bits.
	 */
	if (data & BIT_ULL(48))
		return -EINVAL;

	if (vmx_basic_vmcs_revision_id(vmx_basic) !=
	    vmx_basic_vmcs_revision_id(data))
		return -EINVAL;

	if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
		return -EINVAL;

	vmx->nested.msrs.basic = data;
	return 0;
}

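/*
 * Restore one of the "true" VMX control capability MSRs from userspace.
 * The new value may not weaken the constraints KVM reports: must-be-1
 * bits (low half) must stay set and must-be-0 bits (high half) must
 * stay clear.
 */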
static int
vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 supported;
	u32 *lowp, *highp;

	switch (msr_index) {
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
		lowp = &vmx->nested.msrs.pinbased_ctls_low;
		highp = &vmx->nested.msrs.pinbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
		lowp = &vmx->nested.msrs.procbased_ctls_low;
		highp = &vmx->nested.msrs.procbased_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
		lowp = &vmx->nested.msrs.exit_ctls_low;
		highp = &vmx->nested.msrs.exit_ctls_high;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
		lowp = &vmx->nested.msrs.entry_ctls_low;
		highp = &vmx->nested.msrs.entry_ctls_high;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		lowp = &vmx->nested.msrs.secondary_ctls_low;
		highp = &vmx->nested.msrs.secondary_ctls_high;
		break;
	default:
		BUG();
	}

	supported = vmx_control_msr(*lowp, *highp);

	/* Check must-be-1 bits are still 1. */
	if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
		return -EINVAL;

	/* Check must-be-0 bits are still 0. */
	if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
		return -EINVAL;

	*lowp = data;
	*highp = data >> 32;
	return 0;
}

static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
{
	const u64 feature_and_reserved_bits =
		/* feature */
		BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
		BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
		/* reserved */
		GENMASK_ULL(13, 9) | BIT_ULL(31);
	u64 vmx_misc;

	vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
				   vmx->nested.msrs.misc_high);

	if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
		return -EINVAL;

	if ((vmx->nested.msrs.pinbased_ctls_high &
	     PIN_BASED_VMX_PREEMPTION_TIMER) &&
	    vmx_misc_preemption_timer_rate(data) !=
	    vmx_misc_preemption_timer_rate(vmx_misc))
		return -EINVAL;

	if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
		return -EINVAL;

	if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
		return -EINVAL;

	if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
		return -EINVAL;

	vmx->nested.msrs.misc_low = data;
	vmx->nested.msrs.misc_high = data >> 32;

	/*
	 * If L1 has read-only VM-exit information fields, use the
	 * less permissive vmx_vmwrite_bitmap to specify write
	 * permissions for the shadow VMCS.
	 */
	if (enable_shadow_vmcs && !nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
		vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));

	return 0;
}

static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
{
	u64 vmx_ept_vpid_cap;

	vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps,
					   vmx->nested.msrs.vpid_caps);

	/* Every bit is either reserved or a feature bit. */
	if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
		return -EINVAL;

	vmx->nested.msrs.ept_caps = data;
	vmx->nested.msrs.vpid_caps = data >> 32;
	return 0;
}

static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
{
	u64 *msr;

	switch (msr_index) {
	case MSR_IA32_VMX_CR0_FIXED0:
		msr = &vmx->nested.msrs.cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		msr = &vmx->nested.msrs.cr4_fixed0;
		break;
	default:
		BUG();
	}

	/*
	 * 1 bits (which indicates bits which "must-be-1" during VMX operation)
	 * must be 1 in the restored value.
	 */
	if (!is_bitwise_subset(data, *msr, -1ULL))
		return -EINVAL;

	*msr = data;
	return 0;
}

/*
 * Called when userspace is restoring VMX MSRs.
 *
 * Returns 0 on success, non-0 otherwise.
 */
int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * Don't allow changes to the VMX capability MSRs while the vCPU
	 * is in VMX operation.
	 */
	if (vmx->nested.vmxon)
		return -EBUSY;

	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		return vmx_restore_vmx_basic(vmx, data);
	case MSR_IA32_VMX_PINBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		/*
		 * The "non-true" VMX capability MSRs are generated from the
		 * "true" MSRs, so we do not support restoring them directly.
		 *
		 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
		 * should restore the "true" MSRs with the must-be-1 bits
		 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
		 * DEFAULT SETTINGS".
		 */
		return -EINVAL;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		return vmx_restore_control_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_MISC:
		return vmx_restore_vmx_misc(vmx, data);
	case MSR_IA32_VMX_CR0_FIXED0:
	case MSR_IA32_VMX_CR4_FIXED0:
		return vmx_restore_fixed0_msr(vmx, msr_index, data);
	case MSR_IA32_VMX_CR0_FIXED1:
	case MSR_IA32_VMX_CR4_FIXED1:
		/*
		 * These MSRs are generated based on the vCPU's CPUID, so we
		 * do not support restoring them directly.
		 */
		return -EINVAL;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		return vmx_restore_vmx_ept_vpid_cap(vmx, data);
	case MSR_IA32_VMX_VMCS_ENUM:
		vmx->nested.msrs.vmcs_enum = data;
		return 0;
	default:
		/*
		 * The rest of the VMX capability MSRs do not support restore.
		 */
		return -EINVAL;
	}
}

/* Returns 0 on success, non-0 otherwise. */
int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
{
	switch (msr_index) {
	case MSR_IA32_VMX_BASIC:
		*pdata = msrs->basic;
		break;
	case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
	case MSR_IA32_VMX_PINBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->pinbased_ctls_low,
			msrs->pinbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
			*pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
	case MSR_IA32_VMX_PROCBASED_CTLS:
		*pdata = vmx_control_msr(
			msrs->procbased_ctls_low,
			msrs->procbased_ctls_high);
		if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
			*pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_EXIT_CTLS:
	case MSR_IA32_VMX_EXIT_CTLS:
		*pdata = vmx_control_msr(
			msrs->exit_ctls_low,
			msrs->exit_ctls_high);
		if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
			*pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
	case MSR_IA32_VMX_ENTRY_CTLS:
		*pdata = vmx_control_msr(
			msrs->entry_ctls_low,
			msrs->entry_ctls_high);
		if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
			*pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
		break;
	case MSR_IA32_VMX_MISC:
		*pdata = vmx_control_msr(
			msrs->misc_low,
			msrs->misc_high);
		break;
	case MSR_IA32_VMX_CR0_FIXED0:
		*pdata = msrs->cr0_fixed0;
		break;
	case MSR_IA32_VMX_CR0_FIXED1:
		*pdata = msrs->cr0_fixed1;
		break;
	case MSR_IA32_VMX_CR4_FIXED0:
		*pdata = msrs->cr4_fixed0;
		break;
	case MSR_IA32_VMX_CR4_FIXED1:
		*pdata = msrs->cr4_fixed1;
		break;
	case MSR_IA32_VMX_VMCS_ENUM:
		*pdata = msrs->vmcs_enum;
		break;
	case MSR_IA32_VMX_PROCBASED_CTLS2:
		*pdata = vmx_control_msr(
			msrs->secondary_ctls_low,
			msrs->secondary_ctls_high);
		break;
	case MSR_IA32_VMX_EPT_VPID_CAP:
		*pdata = msrs->ept_caps |
			((u64)msrs->vpid_caps << 32);
		break;
	case MSR_IA32_VMX_VMFUNC:
		*pdata = msrs->vmfunc_controls;
		break;
	default:
		return 1;
	}

	return 0;
}

/*
 * Copy the writable VMCS shadow fields back to the VMCS12, in case
 * they have been modified by the L1 guest. Note that the "read-only"
 * VM-exit information fields are actually writable if the vCPU is
 * configured to support "VMWRITE to any supported field in the VMCS."
 */
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
{
	const u16 *fields[] = {
		shadow_read_write_fields,
		shadow_read_only_fields
	};
	const int max_fields[] = {
		max_shadow_read_write_fields,
		max_shadow_read_only_fields
	};
	int i, q;
	unsigned long field;
	u64 field_value;
	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;

	preempt_disable();

	vmcs_load(shadow_vmcs);

	for (q = 0; q < ARRAY_SIZE(fields); q++) {
		for (i = 0; i < max_fields[q]; i++) {
			field = fields[q][i];
			field_value = __vmcs_readl(field);
			vmcs12_write_any(get_vmcs12(&vmx->vcpu), field, field_value);
		}
		/*
		 * Skip the VM-exit information fields if they are read-only.
		 */
		if (!nested_cpu_has_vmwrite_any_field(&vmx->vcpu))
			break;
	}

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);

	preempt_enable();
}

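/*
 * Copy the shadowed fields from the cached vmcs12 into the shadow VMCS so
 * that L1's VMREAD/VMWRITE of those fields can be satisfied by the CPU
 * without a VM exit.
 */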
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
{
	const u16 *fields[] = {
		shadow_read_write_fields,
		shadow_read_only_fields
	};
	const int max_fields[] = {
		max_shadow_read_write_fields,
		max_shadow_read_only_fields
	};
	int i, q;
	unsigned long field;
	u64 field_value = 0;
	struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;

	vmcs_load(shadow_vmcs);

	for (q = 0; q < ARRAY_SIZE(fields); q++) {
		for (i = 0; i < max_fields[q]; i++) {
			field = fields[q][i];
			vmcs12_read_any(get_vmcs12(&vmx->vcpu), field, &field_value);
			__vmcs_writel(field, field_value);
		}
	}

	vmcs_clear(shadow_vmcs);
	vmcs_load(vmx->loaded_vmcs->vmcs);
}

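/*
 * Synchronize fields from L1's enlightened (Hyper-V) VMCS into the cached
 * vmcs12, skipping groups that the clean-fields mask marks as unchanged.
 */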
static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
{
	struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

	/* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
	vmcs12->tpr_threshold = evmcs->tpr_threshold;
	vmcs12->guest_rip = evmcs->guest_rip;

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
		vmcs12->guest_rsp = evmcs->guest_rsp;
		vmcs12->guest_rflags = evmcs->guest_rflags;
		vmcs12->guest_interruptibility_info =
			evmcs->guest_interruptibility_info;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
		vmcs12->cpu_based_vm_exec_control =
			evmcs->cpu_based_vm_exec_control;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
		vmcs12->exception_bitmap = evmcs->exception_bitmap;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
		vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
		vmcs12->vm_entry_intr_info_field =
			evmcs->vm_entry_intr_info_field;
		vmcs12->vm_entry_exception_error_code =
			evmcs->vm_entry_exception_error_code;
		vmcs12->vm_entry_instruction_len =
			evmcs->vm_entry_instruction_len;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
		vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
		vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
		vmcs12->host_cr0 = evmcs->host_cr0;
		vmcs12->host_cr3 = evmcs->host_cr3;
		vmcs12->host_cr4 = evmcs->host_cr4;
		vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
		vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
		vmcs12->host_rip = evmcs->host_rip;
		vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
		vmcs12->host_es_selector = evmcs->host_es_selector;
		vmcs12->host_cs_selector = evmcs->host_cs_selector;
		vmcs12->host_ss_selector = evmcs->host_ss_selector;
		vmcs12->host_ds_selector = evmcs->host_ds_selector;
		vmcs12->host_fs_selector = evmcs->host_fs_selector;
		vmcs12->host_gs_selector = evmcs->host_gs_selector;
		vmcs12->host_tr_selector = evmcs->host_tr_selector;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
		vmcs12->pin_based_vm_exec_control =
			evmcs->pin_based_vm_exec_control;
		vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
		vmcs12->secondary_vm_exec_control =
			evmcs->secondary_vm_exec_control;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
		vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
		vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
		vmcs12->msr_bitmap = evmcs->msr_bitmap;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
		vmcs12->guest_es_base = evmcs->guest_es_base;
		vmcs12->guest_cs_base = evmcs->guest_cs_base;
		vmcs12->guest_ss_base = evmcs->guest_ss_base;
		vmcs12->guest_ds_base = evmcs->guest_ds_base;
		vmcs12->guest_fs_base = evmcs->guest_fs_base;
		vmcs12->guest_gs_base = evmcs->guest_gs_base;
		vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
		vmcs12->guest_tr_base = evmcs->guest_tr_base;
		vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
		vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
		vmcs12->guest_es_limit = evmcs->guest_es_limit;
		vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
		vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
		vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
		vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
		vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
		vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
		vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
		vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
		vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
		vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
		vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
		vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
		vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
		vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
		vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
		vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
		vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
		vmcs12->guest_es_selector = evmcs->guest_es_selector;
		vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
		vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
		vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
		vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
		vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
		vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
		vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
		vmcs12->tsc_offset = evmcs->tsc_offset;
		vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
		vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
		vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
		vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
		vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
		vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
		vmcs12->guest_cr0 = evmcs->guest_cr0;
		vmcs12->guest_cr3 = evmcs->guest_cr3;
		vmcs12->guest_cr4 = evmcs->guest_cr4;
		vmcs12->guest_dr7 = evmcs->guest_dr7;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
		vmcs12->host_fs_base = evmcs->host_fs_base;
		vmcs12->host_gs_base = evmcs->host_gs_base;
		vmcs12->host_tr_base = evmcs->host_tr_base;
		vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
		vmcs12->host_idtr_base = evmcs->host_idtr_base;
		vmcs12->host_rsp = evmcs->host_rsp;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
		vmcs12->ept_pointer = evmcs->ept_pointer;
		vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
	}

	if (unlikely(!(evmcs->hv_clean_fields &
		       HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
		vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
		vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
		vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
		vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
		vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
		vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
		vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
		vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
		vmcs12->guest_pending_dbg_exceptions =
			evmcs->guest_pending_dbg_exceptions;
		vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
		vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
		vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
		vmcs12->guest_activity_state = evmcs->guest_activity_state;
		vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
	}

	/*
	 * Not used?
	 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
	 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
	 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
	 * vmcs12->cr3_target_value0 = evmcs->cr3_target_value0;
	 * vmcs12->cr3_target_value1 = evmcs->cr3_target_value1;
	 * vmcs12->cr3_target_value2 = evmcs->cr3_target_value2;
	 * vmcs12->cr3_target_value3 = evmcs->cr3_target_value3;
	 * vmcs12->page_fault_error_code_mask =
	 *		evmcs->page_fault_error_code_mask;
	 * vmcs12->page_fault_error_code_match =
	 *		evmcs->page_fault_error_code_match;
	 * vmcs12->cr3_target_count = evmcs->cr3_target_count;
	 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
	 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
	 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
	 */

	/*
	 * Read only fields:
	 * vmcs12->guest_physical_address = evmcs->guest_physical_address;
	 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
	 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
	 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
	 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
	 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
	 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
	 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
	 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
	 * vmcs12->exit_qualification = evmcs->exit_qualification;
	 * vmcs12->guest_linear_address = evmcs->guest_linear_address;
	 *
	 * Not present in struct vmcs12:
	 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
	 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
	 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
	 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
	 */

	return 0;
}

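/*
 * Propagate the fields that KVM may have modified from the cached vmcs12
 * back into L1's enlightened (Hyper-V) VMCS.
 */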
static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
{
	struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
	struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;

	/*
	 * Should not be changed by KVM:
	 *
	 * evmcs->host_es_selector = vmcs12->host_es_selector;
	 * evmcs->host_cs_selector = vmcs12->host_cs_selector;
	 * evmcs->host_ss_selector = vmcs12->host_ss_selector;
	 * evmcs->host_ds_selector = vmcs12->host_ds_selector;
	 * evmcs->host_fs_selector = vmcs12->host_fs_selector;
	 * evmcs->host_gs_selector = vmcs12->host_gs_selector;
	 * evmcs->host_tr_selector = vmcs12->host_tr_selector;
	 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
	 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
	 * evmcs->host_cr0 = vmcs12->host_cr0;
	 * evmcs->host_cr3 = vmcs12->host_cr3;
	 * evmcs->host_cr4 = vmcs12->host_cr4;
	 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
	 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
	 * evmcs->host_rip = vmcs12->host_rip;
	 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
	 * evmcs->host_fs_base = vmcs12->host_fs_base;
	 * evmcs->host_gs_base = vmcs12->host_gs_base;
	 * evmcs->host_tr_base = vmcs12->host_tr_base;
	 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
	 * evmcs->host_idtr_base = vmcs12->host_idtr_base;
	 * evmcs->host_rsp = vmcs12->host_rsp;
	 * sync_vmcs12() doesn't read these:
	 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
	 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
	 * evmcs->msr_bitmap = vmcs12->msr_bitmap;
	 * evmcs->ept_pointer = vmcs12->ept_pointer;
	 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
	 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
	 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
	 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
	 * evmcs->cr3_target_value0 = vmcs12->cr3_target_value0;
	 * evmcs->cr3_target_value1 = vmcs12->cr3_target_value1;
	 * evmcs->cr3_target_value2 = vmcs12->cr3_target_value2;
	 * evmcs->cr3_target_value3 = vmcs12->cr3_target_value3;
	 * evmcs->tpr_threshold = vmcs12->tpr_threshold;
	 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
	 * evmcs->exception_bitmap = vmcs12->exception_bitmap;
	 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
	 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
	 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
	 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
	 * evmcs->page_fault_error_code_mask =
	 *		vmcs12->page_fault_error_code_mask;
	 * evmcs->page_fault_error_code_match =
	 *		vmcs12->page_fault_error_code_match;
	 * evmcs->cr3_target_count = vmcs12->cr3_target_count;
	 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
	 * evmcs->tsc_offset = vmcs12->tsc_offset;
	 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
	 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
	 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
	 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
	 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
	 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
	 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
	 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
	 *
	 * Not present in struct vmcs12:
	 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
	 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
	 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
	 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
	 */

	evmcs->guest_es_selector = vmcs12->guest_es_selector;
	evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
	evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
	evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
	evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
	evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
	evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
|
|
|
|
evmcs->guest_tr_selector = vmcs12->guest_tr_selector;
|
|
|
|
|
|
|
|
evmcs->guest_es_limit = vmcs12->guest_es_limit;
|
|
|
|
evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
|
|
|
|
evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
|
|
|
|
evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
|
|
|
|
evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
|
|
|
|
evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
|
|
|
|
evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
|
|
|
|
evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
|
|
|
|
evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
|
|
|
|
evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;
|
|
|
|
|
|
|
|
evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
|
|
|
|
evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
|
|
|
|
evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
|
|
|
|
evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
|
|
|
|
evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
|
|
|
|
evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
|
|
|
|
evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
|
|
|
|
evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;
|
|
|
|
|
|
|
|
evmcs->guest_es_base = vmcs12->guest_es_base;
|
|
|
|
evmcs->guest_cs_base = vmcs12->guest_cs_base;
|
|
|
|
evmcs->guest_ss_base = vmcs12->guest_ss_base;
|
|
|
|
evmcs->guest_ds_base = vmcs12->guest_ds_base;
|
|
|
|
evmcs->guest_fs_base = vmcs12->guest_fs_base;
|
|
|
|
evmcs->guest_gs_base = vmcs12->guest_gs_base;
|
|
|
|
evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
|
|
|
|
evmcs->guest_tr_base = vmcs12->guest_tr_base;
|
|
|
|
evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
|
|
|
|
evmcs->guest_idtr_base = vmcs12->guest_idtr_base;
|
|
|
|
|
|
|
|
evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
|
|
|
|
evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;
|
|
|
|
|
|
|
|
evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
|
|
|
|
evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
|
|
|
|
evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
|
|
|
|
evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;
|
|
|
|
|
|
|
|
evmcs->guest_pending_dbg_exceptions =
|
|
|
|
vmcs12->guest_pending_dbg_exceptions;
|
|
|
|
evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
|
|
|
|
evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;
|
|
|
|
|
|
|
|
evmcs->guest_activity_state = vmcs12->guest_activity_state;
|
|
|
|
evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;
|
|
|
|
|
|
|
|
evmcs->guest_cr0 = vmcs12->guest_cr0;
|
|
|
|
evmcs->guest_cr3 = vmcs12->guest_cr3;
|
|
|
|
evmcs->guest_cr4 = vmcs12->guest_cr4;
|
|
|
|
evmcs->guest_dr7 = vmcs12->guest_dr7;
|
|
|
|
|
|
|
|
evmcs->guest_physical_address = vmcs12->guest_physical_address;
|
|
|
|
|
|
|
|
evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
|
|
|
|
evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
|
|
|
|
evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
|
|
|
|
evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
|
|
|
|
evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
|
|
|
|
evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
|
|
|
|
evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
|
|
|
|
evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;
|
|
|
|
|
|
|
|
evmcs->exit_qualification = vmcs12->exit_qualification;
|
|
|
|
|
|
|
|
evmcs->guest_linear_address = vmcs12->guest_linear_address;
|
|
|
|
evmcs->guest_rsp = vmcs12->guest_rsp;
|
|
|
|
evmcs->guest_rflags = vmcs12->guest_rflags;
|
|
|
|
|
|
|
|
evmcs->guest_interruptibility_info =
|
|
|
|
vmcs12->guest_interruptibility_info;
|
|
|
|
evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
|
|
|
|
evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
|
|
|
|
evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
|
|
|
|
evmcs->vm_entry_exception_error_code =
|
|
|
|
vmcs12->vm_entry_exception_error_code;
|
|
|
|
evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;
|
|
|
|
|
|
|
|
evmcs->guest_rip = vmcs12->guest_rip;
|
|
|
|
|
|
|
|
evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
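/*
 * Informational sketch of the eVMCS sync direction: copy_enlightened_to_vmcs12()
 * pulls the fields L1 may have written into the cached vmcs12 before a nested
 * VM-entry is emulated, while copy_vmcs12_to_enlightened() above pushes the
 * results, including the read-only exit information fields, back into the
 * guest-visible eVMCS after a nested VM-exit, so an eVMCS-aware L1 such as
 * Hyper-V can read them without issuing VMREADs.
 */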
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is an equivalent of the nested hypervisor executing the vmptrld
|
|
|
|
* instruction.
|
|
|
|
*/
|
|
|
|
static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
|
|
|
|
bool from_launch)
|
|
|
|
{
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
struct hv_vp_assist_page assist_page;
|
|
|
|
|
|
|
|
if (likely(!vmx->nested.enlightened_vmcs_enabled))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (unlikely(!kvm_hv_get_assist_page(vcpu, &assist_page)))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (unlikely(!assist_page.enlighten_vmentry))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (unlikely(assist_page.current_nested_vmcs !=
|
|
|
|
vmx->nested.hv_evmcs_vmptr)) {
|
|
|
|
|
|
|
|
if (!vmx->nested.hv_evmcs)
|
|
|
|
vmx->nested.current_vmptr = -1ull;
|
|
|
|
|
|
|
|
nested_release_evmcs(vcpu);
|
|
|
|
|
|
|
|
vmx->nested.hv_evmcs_page = kvm_vcpu_gpa_to_page(
|
|
|
|
vcpu, assist_page.current_nested_vmcs);
|
|
|
|
|
|
|
|
if (unlikely(is_error_page(vmx->nested.hv_evmcs_page)))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page);
|
|
|
|
|
|
|
|
		/*
		 * Currently, KVM only supports eVMCS version 1
		 * (== KVM_EVMCS_VERSION) and thus we expect the guest to set
		 * this value in the first u32 field of the eVMCS, which
		 * should specify the eVMCS VersionNumber.
		 *
		 * The guest should learn which eVMCS versions the host
		 * supports by examining CPUID.0x4000000A.EAX[0:15]. The host
		 * userspace VMM is expected to set this CPUID leaf according
		 * to the value returned in vmcs_version from
		 * nested_enable_evmcs().
		 *
		 * However, it turns out that Microsoft Hyper-V fails to comply
		 * with its own invented interface: when Hyper-V uses eVMCS, it
		 * just sets the first u32 field of the eVMCS to the
		 * revision_id specified in MSR_IA32_VMX_BASIC, instead of
		 * using an eVMCS version number, i.e. one of the supported
		 * versions specified in CPUID.0x4000000A.EAX[0:15].
		 *
		 * To work around this Hyper-V bug, we accept here either a
		 * supported eVMCS version or the VMCS12 revision_id as valid
		 * values for the first u32 field of the eVMCS.
		 */
|
|
|
|
if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
|
|
|
|
(vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
|
|
|
|
nested_release_evmcs(vcpu);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
vmx->nested.dirty_vmcs12 = true;
|
|
|
|
		/*
		 * As we keep L2 state for one guest only, the
		 * 'hv_clean_fields' mask can't be used when we switch between
		 * them. Reset it here for simplicity.
		 */
|
|
|
|
vmx->nested.hv_evmcs->hv_clean_fields &=
|
|
|
|
~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
|
|
|
|
vmx->nested.hv_evmcs_vmptr = assist_page.current_nested_vmcs;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Unlike normal vmcs12, enlightened vmcs12 is not fully
|
|
|
|
* reloaded from guest's memory (read only fields, fields not
|
|
|
|
* present in struct hv_enlightened_vmcs, ...). Make sure there
|
|
|
|
* are no leftovers.
|
|
|
|
*/
|
|
|
|
if (from_launch) {
|
|
|
|
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
|
|
|
|
memset(vmcs12, 0, sizeof(*vmcs12));
|
|
|
|
vmcs12->hdr.revision_id = VMCS12_REVISION;
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
void nested_sync_from_vmcs12(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* hv_evmcs may end up being not mapped after migration (when
|
|
|
|
* L2 was running), map it here to make sure vmcs12 changes are
|
|
|
|
* properly reflected.
|
|
|
|
*/
|
|
|
|
if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs)
|
|
|
|
nested_vmx_handle_enlightened_vmptrld(vcpu, false);
|
|
|
|
|
|
|
|
if (vmx->nested.hv_evmcs) {
|
|
|
|
copy_vmcs12_to_enlightened(vmx);
|
|
|
|
/* All fields are clean */
|
|
|
|
vmx->nested.hv_evmcs->hv_clean_fields |=
|
|
|
|
HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
|
|
|
|
} else {
|
|
|
|
copy_vmcs12_to_shadow(vmx);
|
|
|
|
}
|
|
|
|
|
|
|
|
vmx->nested.need_vmcs12_sync = false;
|
|
|
|
}
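/*
 * Informational note: after the sync above, hv_clean_fields is set to
 * HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL, telling an eVMCS-aware L1 that no field
 * has changed behind its back. L1 is then expected to clear the clean bit of
 * any field group it modifies before the next VMLAUNCH/VMRESUME, which is
 * what allows prepare_vmcs02_full() and friends to skip unchanged groups.
 */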
|
|
|
|
|
|
|
|
static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
|
|
|
|
{
|
|
|
|
struct vcpu_vmx *vmx =
|
|
|
|
container_of(timer, struct vcpu_vmx, nested.preemption_timer);
|
|
|
|
|
|
|
|
vmx->nested.preemption_timer_expired = true;
|
|
|
|
kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
|
|
|
|
kvm_vcpu_kick(&vmx->vcpu);
|
|
|
|
|
|
|
|
return HRTIMER_NORESTART;
|
|
|
|
}
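/*
 * The hrtimer callback above only records that the emulated VMX preemption
 * timer has fired and kicks the vCPU; the actual nested VM-exit with
 * EXIT_REASON_PREEMPTION_TIMER is injected later, from the nested event
 * handling path, once KVM_REQ_EVENT is processed.
 */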
|
|
|
|
|
|
|
|
static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* A timer value of zero is architecturally guaranteed to cause
|
|
|
|
* a VMExit prior to executing any instructions in the guest.
|
|
|
|
*/
|
|
|
|
if (preemption_timeout == 0) {
|
|
|
|
vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (vcpu->arch.virtual_tsc_khz == 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
|
|
|
|
preemption_timeout *= 1000000;
|
|
|
|
do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
|
|
|
|
hrtimer_start(&vmx->nested.preemption_timer,
|
|
|
|
ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
|
|
|
|
}
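/*
 * Worked example for the conversion above (illustrative numbers only): the
 * vmcs12 preemption timer value is expressed in units of
 * 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE (i.e. 32) TSC cycles. Assuming
 * virtual_tsc_khz == 2000000 (a 2 GHz guest TSC) and a vmcs12 value of 1000:
 *
 *   cycles = 1000 << 5                  = 32000 TSC cycles
 *   ns     = 32000 * 1000000 / 2000000  = 16000 ns = 16 us
 *
 * which is the relative hrtimer timeout programmed above.
 */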
|
|
|
|
|
|
|
|
static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
|
|
|
|
{
|
|
|
|
if (vmx->nested.nested_run_pending &&
|
|
|
|
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
|
|
|
|
return vmcs12->guest_ia32_efer;
|
|
|
|
else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
|
|
|
|
return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
|
|
|
|
else
|
|
|
|
return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
|
|
|
|
}
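/*
 * Summary of the EFER selection above: on a real nested VM-entry where vmcs12
 * asks to load IA32_EFER, L2 runs with vmcs12->guest_ia32_efer verbatim;
 * otherwise L2 inherits L1's EFER, with LMA/LME forced on or off to match
 * vmcs12's "IA-32e mode guest" VM-entry control.
 */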
|
|
|
|
|
|
|
|
static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* If vmcs02 hasn't been initialized, set the constant vmcs02 state
|
|
|
|
* according to L0's settings (vmcs12 is irrelevant here). Host
|
|
|
|
* fields that come from L0 and are not constant, e.g. HOST_CR3,
|
|
|
|
* will be set as needed prior to VMLAUNCH/VMRESUME.
|
|
|
|
*/
|
|
|
|
if (vmx->nested.vmcs02_initialized)
|
|
|
|
return;
|
|
|
|
vmx->nested.vmcs02_initialized = true;
|
|
|
|
|
|
|
|
	/*
	 * We don't care what the EPTP value is; we just need to guarantee
	 * it's valid so we don't get a false positive when doing early
	 * consistency checks.
	 */
|
|
|
|
if (enable_ept && nested_early_check)
|
|
|
|
vmcs_write64(EPT_POINTER, construct_eptp(&vmx->vcpu, 0));
|
|
|
|
|
|
|
|
/* All VMFUNCs are currently emulated through L0 vmexits. */
|
|
|
|
if (cpu_has_vmx_vmfunc())
|
|
|
|
vmcs_write64(VM_FUNCTION_CONTROL, 0);
|
|
|
|
|
|
|
|
if (cpu_has_vmx_posted_intr())
|
|
|
|
vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
|
|
|
|
|
|
|
|
if (cpu_has_vmx_msr_bitmap())
|
|
|
|
vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
|
|
|
|
|
|
|
|
if (enable_pml)
|
|
|
|
vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set the MSR load/store lists to match L0's settings. Only the
|
|
|
|
* addresses are constant (for vmcs02), the counts can change based
|
|
|
|
* on L2's behavior, e.g. switching to/from long mode.
|
|
|
|
*/
|
|
|
|
vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
|
|
|
|
vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
|
|
|
|
vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
|
|
|
|
|
|
|
|
vmx_set_constant_host_state(vmx);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void prepare_vmcs02_early_full(struct vcpu_vmx *vmx,
|
|
|
|
struct vmcs12 *vmcs12)
|
|
|
|
{
|
|
|
|
prepare_vmcs02_constant_state(vmx);
|
|
|
|
|
|
|
|
vmcs_write64(VMCS_LINK_POINTER, -1ull);
|
|
|
|
|
|
|
|
if (enable_vpid) {
|
|
|
|
if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
|
|
|
|
vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
|
|
|
|
else
|
|
|
|
vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
|
|
|
|
{
|
|
|
|
u32 exec_control, vmcs12_exec_ctrl;
|
|
|
|
u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
|
|
|
|
|
|
|
|
if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
|
|
|
|
prepare_vmcs02_early_full(vmx, vmcs12);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* PIN CONTROLS
|
|
|
|
*/
|
|
|
|
exec_control = vmcs12->pin_based_vm_exec_control;
|
|
|
|
|
|
|
|
/* Preemption timer setting is computed directly in vmx_vcpu_run. */
|
|
|
|
exec_control |= vmcs_config.pin_based_exec_ctrl;
|
|
|
|
exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
|
|
|
|
vmx->loaded_vmcs->hv_timer_armed = false;
|
|
|
|
|
|
|
|
/* Posted interrupts setting is only taken from vmcs12. */
|
|
|
|
if (nested_cpu_has_posted_intr(vmcs12)) {
|
|
|
|
vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
|
|
|
|
vmx->nested.pi_pending = false;
|
|
|
|
} else {
|
|
|
|
exec_control &= ~PIN_BASED_POSTED_INTR;
|
|
|
|
}
|
|
|
|
vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* EXEC CONTROLS
|
|
|
|
*/
|
|
|
|
exec_control = vmx_exec_control(vmx); /* L0's desires */
|
|
|
|
exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
|
|
|
|
exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
|
|
|
|
exec_control &= ~CPU_BASED_TPR_SHADOW;
|
|
|
|
exec_control |= vmcs12->cpu_based_vm_exec_control;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Write an illegal value to VIRTUAL_APIC_PAGE_ADDR. Later, if
|
|
|
|
* nested_get_vmcs12_pages can't fix it up, the illegal value
|
|
|
|
* will result in a VM entry failure.
|
|
|
|
*/
|
|
|
|
if (exec_control & CPU_BASED_TPR_SHADOW) {
|
|
|
|
vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
|
|
|
|
vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
|
|
|
|
} else {
|
|
|
|
#ifdef CONFIG_X86_64
|
|
|
|
exec_control |= CPU_BASED_CR8_LOAD_EXITING |
|
|
|
|
CPU_BASED_CR8_STORE_EXITING;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* A vmexit (to either L1 hypervisor or L0 userspace) is always needed
|
|
|
|
* for I/O port accesses.
|
|
|
|
*/
|
|
|
|
exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
|
|
|
|
exec_control |= CPU_BASED_UNCOND_IO_EXITING;
|
|
|
|
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SECONDARY EXEC CONTROLS
|
|
|
|
*/
|
|
|
|
if (cpu_has_secondary_exec_ctrls()) {
|
|
|
|
exec_control = vmx->secondary_exec_control;
|
|
|
|
|
|
|
|
/* Take the following fields only from vmcs12 */
|
|
|
|
exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
|
|
|
|
SECONDARY_EXEC_ENABLE_INVPCID |
|
|
|
|
SECONDARY_EXEC_RDTSCP |
|
|
|
|
SECONDARY_EXEC_XSAVES |
|
|
|
|
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
|
|
|
|
SECONDARY_EXEC_APIC_REGISTER_VIRT |
|
|
|
|
SECONDARY_EXEC_ENABLE_VMFUNC);
|
|
|
|
if (nested_cpu_has(vmcs12,
|
|
|
|
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
|
|
|
|
vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
|
|
|
|
~SECONDARY_EXEC_ENABLE_PML;
|
|
|
|
exec_control |= vmcs12_exec_ctrl;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* VMCS shadowing for L2 is emulated for now */
|
|
|
|
exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
|
|
|
|
|
|
|
|
if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
|
|
|
|
vmcs_write16(GUEST_INTR_STATUS,
|
|
|
|
vmcs12->guest_intr_status);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Write an illegal value to APIC_ACCESS_ADDR. Later,
|
|
|
|
* nested_get_vmcs12_pages will either fix it up or
|
|
|
|
* remove the VM execution control.
|
|
|
|
*/
|
|
|
|
if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
|
|
|
|
vmcs_write64(APIC_ACCESS_ADDR, -1ull);
|
|
|
|
|
|
|
|
if (exec_control & SECONDARY_EXEC_ENCLS_EXITING)
|
|
|
|
vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
|
|
|
|
|
|
|
|
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ENTRY CONTROLS
|
|
|
|
*
|
|
|
|
* vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
|
|
|
|
* are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
|
|
|
|
* on the related bits (if supported by the CPU) in the hope that
|
|
|
|
* we can avoid VMWrites during vmx_set_efer().
|
|
|
|
*/
|
|
|
|
exec_control = (vmcs12->vm_entry_controls | vmx_vmentry_ctrl()) &
|
|
|
|
~VM_ENTRY_IA32E_MODE & ~VM_ENTRY_LOAD_IA32_EFER;
|
|
|
|
if (cpu_has_load_ia32_efer()) {
|
|
|
|
if (guest_efer & EFER_LMA)
|
|
|
|
exec_control |= VM_ENTRY_IA32E_MODE;
|
|
|
|
if (guest_efer != host_efer)
|
|
|
|
exec_control |= VM_ENTRY_LOAD_IA32_EFER;
|
|
|
|
}
|
|
|
|
vm_entry_controls_init(vmx, exec_control);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* EXIT CONTROLS
|
|
|
|
*
|
|
|
|
* L2->L1 exit controls are emulated - the hardware exit is to L0 so
|
|
|
|
* we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
|
|
|
|
* bits may be modified by vmx_set_efer() in prepare_vmcs02().
|
|
|
|
*/
|
|
|
|
exec_control = vmx_vmexit_ctrl();
|
|
|
|
if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
|
|
|
|
exec_control |= VM_EXIT_LOAD_IA32_EFER;
|
|
|
|
vm_exit_controls_init(vmx, exec_control);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Conceptually we want to copy the PML address and index from
|
|
|
|
* vmcs01 here, and then back to vmcs01 on nested vmexit. But,
|
|
|
|
* since we always flush the log on each vmexit and never change
|
|
|
|
* the PML address (once set), this happens to be equivalent to
|
|
|
|
* simply resetting the index in vmcs02.
|
|
|
|
*/
|
|
|
|
if (enable_pml)
|
|
|
|
vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Interrupt/Exception Fields
|
|
|
|
*/
|
|
|
|
if (vmx->nested.nested_run_pending) {
|
|
|
|
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
|
|
|
|
vmcs12->vm_entry_intr_info_field);
|
|
|
|
vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
|
|
|
|
vmcs12->vm_entry_exception_error_code);
|
|
|
|
vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
|
|
|
|
vmcs12->vm_entry_instruction_len);
|
|
|
|
vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
|
|
|
|
vmcs12->guest_interruptibility_info);
|
|
|
|
vmx->loaded_vmcs->nmi_known_unmasked =
|
|
|
|
!(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
|
|
|
|
} else {
|
|
|
|
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
|
|
|
|
}
|
|
|
|
}
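/*
 * Informational note on the merging above: the vmcs02 pin-based, primary and
 * secondary execution controls are built by combining L0's own requirements
 * with what L1 requested in vmcs12, while L1-requested features that KVM
 * emulates on L1's behalf (the VMX preemption timer, VMCS shadowing, PML)
 * are stripped from L1's contribution so that hardware never runs L2 with
 * controls that L0 must intercept and emulate itself.
 */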
|
|
|
|
|
|
|
|
static void prepare_vmcs02_full(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
|
|
|
|
{
|
|
|
|
struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
|
|
|
|
|
|
|
|
if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
|
|
|
|
HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
|
|
|
|
vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
|
|
|
|
vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
|
|
|
|
vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
|
|
|
|
vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
|
|
|
|
vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
|
|
|
|
vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
|
|
|
|
vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
|
|
|
|
vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
|
|
|
|
vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
|
|
|
|
vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
|
|
|
|
vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
|
|
|
|
vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
|
|
|
|
vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
|
|
|
|
vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
|
|
|
|
vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
|
|
|
|
vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
|
|
|
|
vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
|
|
|
|
vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
|
|
|
|
vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
|
|
|
|
vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
|
|
|
|
vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
|
|
|
|
vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
|
|
|
|
vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
|
|
|
|
vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
|
|
|
|
vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
|
|
|
|
vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
|
|
|
|
vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
|
|
|
|
vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
|
|
|
|
vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
|
|
|
|
vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
|
|
|
|
vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
|
|
|
|
vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
|
|
|
|
vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
|
|
|
|
vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
|
|
|
|
HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
|
|
|
|
vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
|
|
|
|
vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
|
|
|
|
vmcs12->guest_pending_dbg_exceptions);
|
|
|
|
vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
|
|
|
|
vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
|
|
|
|
|
|
|
|
		/*
		 * L1 may access L2's PDPTRs, so save them to construct
		 * vmcs12.
		 */
|
|
|
|
if (enable_ept) {
|
|
|
|
vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
|
|
|
|
vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
|
|
|
|
vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
|
|
|
|
vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (nested_cpu_has_xsaves(vmcs12))
|
|
|
|
vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Whether page-faults are trapped is determined by a combination of
|
|
|
|
* 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
|
|
|
|
* If enable_ept, L0 doesn't care about page faults and we should
|
|
|
|
* set all of these to L1's desires. However, if !enable_ept, L0 does
|
|
|
|
* care about (at least some) page faults, and because it is not easy
|
|
|
|
* (if at all possible?) to merge L0 and L1's desires, we simply ask
|
|
|
|
* to exit on each and every L2 page fault. This is done by setting
|
|
|
|
* MASK=MATCH=0 and (see below) EB.PF=1.
|
|
|
|
* Note that below we don't need special code to set EB.PF beyond the
|
|
|
|
* "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
|
|
|
|
* vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
|
|
|
|
* !enable_ept, EB.PF is 1, so the "or" will always be 1.
|
|
|
|
*/
|
|
|
|
vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
|
|
|
|
enable_ept ? vmcs12->page_fault_error_code_mask : 0);
|
|
|
|
vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
|
|
|
|
enable_ept ? vmcs12->page_fault_error_code_match : 0);
|
|
|
|
|
|
|
|
if (cpu_has_vmx_apicv()) {
|
|
|
|
vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
|
|
|
|
vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
|
|
|
|
vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
|
|
|
|
vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
|
|
|
|
}
|
|
|
|
|
|
|
|
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
|
|
|
|
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
|
|
|
|
|
|
|
|
set_cr4_guest_host_mask(vmx);
|
|
|
|
|
|
|
|
if (kvm_mpx_supported()) {
|
|
|
|
if (vmx->nested.nested_run_pending &&
|
|
|
|
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
|
|
|
|
vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
|
|
|
|
else
|
|
|
|
vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
 * guest in a way that is appropriate both to L1's requests and to our needs.
 * In addition to modifying the active vmcs (which is vmcs02), this function
 * also has necessary side effects, like setting various vcpu->arch fields.
 * Returns 0 on success, 1 on failure. The invalid-state exit qualification
 * code is assigned to *entry_failure_code on failure.
 */
|
|
|
|
static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
|
|
|
|
u32 *entry_failure_code)
|
|
|
|
{
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
|
|
|
|
|
|
|
|
if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) {
|
|
|
|
prepare_vmcs02_full(vmx, vmcs12);
|
|
|
|
vmx->nested.dirty_vmcs12 = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* First, the fields that are shadowed. This must be kept in sync
|
|
|
|
* with vmcs_shadow_fields.h.
|
|
|
|
*/
|
|
|
|
if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
|
|
|
|
HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
|
|
|
|
vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
|
|
|
|
vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (vmx->nested.nested_run_pending &&
|
|
|
|
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
|
|
|
|
kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
|
|
|
|
vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
|
|
|
|
} else {
|
|
|
|
kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
|
|
|
|
vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
|
|
|
|
}
|
|
|
|
vmx_set_rflags(vcpu, vmcs12->guest_rflags);
|
|
|
|
|
|
|
|
/* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
|
|
|
|
* bitwise-or of what L1 wants to trap for L2, and what we want to
|
|
|
|
* trap. Note that CR0.TS also needs updating - we do this later.
|
|
|
|
*/
|
|
|
|
update_exception_bitmap(vcpu);
|
|
|
|
vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
|
|
|
|
vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
|
|
|
|
|
|
|
|
if (vmx->nested.nested_run_pending &&
|
|
|
|
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
|
|
|
|
vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
|
|
|
|
vcpu->arch.pat = vmcs12->guest_ia32_pat;
|
|
|
|
} else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
|
|
|
|
vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
|
|
|
|
}
|
|
|
|
|
|
|
|
vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
|
|
|
|
|
|
|
|
if (kvm_has_tsc_control)
|
|
|
|
decache_tsc_multiplier(vmx);
|
|
|
|
|
|
|
|
if (enable_vpid) {
|
|
|
|
		/*
		 * There is no direct mapping between vpid02 and vpid12:
		 * vpid02 is per-vCPU for L0 and reused, while vpid12 is
		 * flushed with one INVVPID whenever its value changes during
		 * nested vmentry. vpid12 is allocated by L1 for L2, so it
		 * does not influence the global bitmap (used for vpid01 and
		 * vpid02 allocation) even if L1 spawns a lot of nested vCPUs.
		 */
|
|
|
|
if (nested_cpu_has_vpid(vmcs12) && nested_has_guest_tlb_tag(vcpu)) {
|
|
|
|
if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
|
|
|
|
vmx->nested.last_vpid = vmcs12->virtual_processor_id;
|
|
|
|
__vmx_flush_tlb(vcpu, nested_get_vpid02(vcpu), false);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
			/*
			 * If L1 uses EPT, then L0 needs to execute INVEPT on
			 * EPTP02 instead of EPTP01. Therefore, delay the TLB
			 * flush until vmcs02->eptp is fully updated by
			 * KVM_REQ_LOAD_CR3. Note that this assumes
			 * KVM_REQ_TLB_FLUSH is evaluated after
			 * KVM_REQ_LOAD_CR3 in vcpu_enter_guest().
			 */
|
|
|
|
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (nested_cpu_has_ept(vmcs12))
|
|
|
|
nested_ept_init_mmu_context(vcpu);
|
|
|
|
else if (nested_cpu_has2(vmcs12,
|
|
|
|
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
|
|
|
|
vmx_flush_tlb(vcpu, true);
|
|
|
|
|
|
|
|
	/*
	 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
	 * bits which we require to be enabled.
	 * The CR0_READ_SHADOW is what L2 should have expected to read given
	 * what L1 specified; it's not enough to take
	 * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may
	 * include more bits than L1 expected.
	 */
|
|
|
|
vmx_set_cr0(vcpu, vmcs12->guest_cr0);
|
|
|
|
vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
|
|
|
|
|
|
|
|
vmx_set_cr4(vcpu, vmcs12->guest_cr4);
|
|
|
|
vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
|
|
|
|
|
|
|
|
vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
|
|
|
|
/* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
|
|
|
|
vmx_set_efer(vcpu, vcpu->arch.efer);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Guest state is invalid and unrestricted guest is disabled,
|
|
|
|
* which means L1 attempted VMEntry to L2 with invalid state.
|
|
|
|
* Fail the VMEntry.
|
|
|
|
*/
|
|
|
|
if (vmx->emulation_required) {
|
|
|
|
*entry_failure_code = ENTRY_FAIL_DEFAULT;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
	/* Load guest CR3; paging for L2 uses either EPT or shadow page tables. */
|
|
|
|
if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
|
|
|
|
entry_failure_code))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (!enable_ept)
|
|
|
|
vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
|
|
|
|
|
|
|
|
kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
|
|
|
|
kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
|
|
|
|
{
|
|
|
|
if (!nested_cpu_has_nmi_exiting(vmcs12) &&
|
|
|
|
nested_cpu_has_virtual_nmis(vmcs12))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!nested_cpu_has_virtual_nmis(vmcs12) &&
|
|
|
|
nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
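/*
 * The two checks above encode the SDM consistency rules for NMI controls:
 * "virtual NMIs" may only be set if "NMI exiting" is set, and "NMI-window
 * exiting" (CPU_BASED_VIRTUAL_NMI_PENDING) may only be set if "virtual NMIs"
 * is set.
 */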
|
|
|
|
|
|
|
|
static bool valid_ept_address(struct kvm_vcpu *vcpu, u64 address)
|
|
|
|
{
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
int maxphyaddr = cpuid_maxphyaddr(vcpu);
|
|
|
|
|
|
|
|
/* Check for memory type validity */
|
|
|
|
switch (address & VMX_EPTP_MT_MASK) {
|
|
|
|
case VMX_EPTP_MT_UC:
|
|
|
|
if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))
|
|
|
|
return false;
|
|
|
|
break;
|
|
|
|
case VMX_EPTP_MT_WB:
|
|
|
|
if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))
|
|
|
|
return false;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
	/* Only a 4-level page-walk length is valid. */
|
|
|
|
if ((address & VMX_EPTP_PWL_MASK) != VMX_EPTP_PWL_4)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* Reserved bits should not be set */
|
|
|
|
if (address >> maxphyaddr || ((address >> 7) & 0x1f))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* AD, if set, should be supported */
|
|
|
|
if (address & VMX_EPTP_AD_ENABLE_BIT) {
|
|
|
|
if (!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
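/*
 * Example of an EPTP value that passes the checks above (assuming the
 * corresponding VMX_EPTP_*_BIT capabilities are exposed to L1): memory type
 * WB (6) in bits 2:0, a 4-level page walk encoded as 3 in bits 5:3, and the
 * AD-enable bit 6 set, i.e. a low byte of 0x5e, combined with a 4KB-aligned
 * EPT PML4 table address in the upper bits.
 */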
|
|
|
|
|
2018-12-13 01:30:07 +07:00
|
|
|
/*
|
|
|
|
* Checks related to VM-Execution Control Fields
|
|
|
|
*/
|
|
|
|
static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
|
|
|
|
struct vmcs12 *vmcs12)
|
2018-12-04 04:53:18 +07:00
|
|
|
{
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
|
2018-12-13 01:30:07 +07:00
|
|
|
if (!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
|
2018-12-04 04:53:18 +07:00
|
|
|
vmx->nested.msrs.pinbased_ctls_low,
|
|
|
|
vmx->nested.msrs.pinbased_ctls_high) ||
|
2018-12-13 01:30:07 +07:00
|
|
|
!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
|
|
|
|
vmx->nested.msrs.procbased_ctls_low,
|
|
|
|
vmx->nested.msrs.procbased_ctls_high))
|
|
|
|
return -EINVAL;
|
2018-12-04 04:53:18 +07:00
|
|
|
|
2018-12-13 01:30:07 +07:00
|
|
|
if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
|
|
|
|
!vmx_control_verify(vmcs12->secondary_vm_exec_control,
|
|
|
|
vmx->nested.msrs.secondary_ctls_low,
|
|
|
|
vmx->nested.msrs.secondary_ctls_high))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu) ||
|
|
|
|
nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
|
|
|
|
nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
|
|
|
|
nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
|
|
|
|
nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
|
|
|
|
nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
|
|
|
|
nested_vmx_check_nmi_controls(vmcs12) ||
|
|
|
|
nested_vmx_check_pml_controls(vcpu, vmcs12) ||
|
|
|
|
nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
|
|
|
|
nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
|
|
|
|
nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
|
|
|
|
(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2019-02-13 07:42:23 +07:00
|
|
|
if (!nested_cpu_has_preemption_timer(vmcs12) &&
|
|
|
|
nested_cpu_has_save_preemption_timer(vmcs12))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2018-12-13 01:30:07 +07:00
|
|
|
if (nested_cpu_has_ept(vmcs12) &&
|
|
|
|
!valid_ept_address(vcpu, vmcs12->ept_pointer))
|
|
|
|
return -EINVAL;
|
2018-12-04 04:53:18 +07:00
|
|
|
|
|
|
|
if (nested_cpu_has_vmfunc(vmcs12)) {
|
|
|
|
if (vmcs12->vm_function_control &
|
|
|
|
~vmx->nested.msrs.vmfunc_controls)
|
2018-12-13 01:30:07 +07:00
|
|
|
return -EINVAL;
|
2018-12-04 04:53:18 +07:00
|
|
|
|
|
|
|
if (nested_cpu_has_eptp_switching(vmcs12)) {
|
|
|
|
if (!nested_cpu_has_ept(vmcs12) ||
|
|
|
|
!page_address_valid(vcpu, vmcs12->eptp_list_address))
|
2018-12-13 01:30:07 +07:00
|
|
|
return -EINVAL;
|
2018-12-04 04:53:18 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-12-13 01:30:07 +07:00
|
|
|
return 0;
|
|
|
|
}
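/*
 * Informational note: the vmx_control_verify() calls above follow the usual
 * VMX capability-MSR semantics: the "low" value holds the allowed-0 settings
 * (bits that must be 1) and the "high" value holds the allowed-1 settings
 * (bits that may be 1), as advertised to L1 via the IA32_VMX_*_CTLS MSRs, so
 * any control word L1 supplies outside that envelope fails the check.
 */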
|
|
|
|
|
2018-12-13 01:30:09 +07:00
|
|
|
/*
|
|
|
|
* Checks related to VM-Exit Control Fields
|
|
|
|
*/
|
|
|
|
static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
|
|
|
|
struct vmcs12 *vmcs12)
|
|
|
|
{
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
|
|
|
|
if (!vmx_control_verify(vmcs12->vm_exit_controls,
|
|
|
|
vmx->nested.msrs.exit_ctls_low,
|
|
|
|
vmx->nested.msrs.exit_ctls_high) ||
|
|
|
|
nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-12-13 01:30:10 +07:00
|
|
|
/*
|
|
|
|
* Checks related to VM-Entry Control Fields
|
|
|
|
*/
|
|
|
|
static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
|
|
|
|
struct vmcs12 *vmcs12)
|
2018-12-13 01:30:07 +07:00
|
|
|
{
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
2018-12-04 04:53:18 +07:00
|
|
|
|
2018-12-13 01:30:09 +07:00
|
|
|
if (!vmx_control_verify(vmcs12->vm_entry_controls,
|
2018-12-13 01:30:07 +07:00
|
|
|
vmx->nested.msrs.entry_ctls_low,
|
|
|
|
vmx->nested.msrs.entry_ctls_high))
|
2018-12-13 01:30:10 +07:00
|
|
|
return -EINVAL;
|
2018-12-04 04:53:18 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* From the Intel SDM, volume 3:
|
|
|
|
* Fields relevant to VM-entry event injection must be set properly.
|
|
|
|
* These fields are the VM-entry interruption-information field, the
|
|
|
|
* VM-entry exception error code, and the VM-entry instruction length.
|
|
|
|
*/
|
|
|
|
if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
|
|
|
|
u32 intr_info = vmcs12->vm_entry_intr_info_field;
|
|
|
|
u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
|
|
|
|
u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
|
|
|
|
bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
|
|
|
|
bool should_have_error_code;
|
|
|
|
bool urg = nested_cpu_has2(vmcs12,
|
|
|
|
SECONDARY_EXEC_UNRESTRICTED_GUEST);
|
|
|
|
bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
|
|
|
|
|
|
|
|
/* VM-entry interruption-info field: interruption type */
|
|
|
|
if (intr_type == INTR_TYPE_RESERVED ||
|
|
|
|
(intr_type == INTR_TYPE_OTHER_EVENT &&
|
|
|
|
!nested_cpu_supports_monitor_trap_flag(vcpu)))
|
2018-12-13 01:30:10 +07:00
|
|
|
return -EINVAL;
|
2018-12-04 04:53:18 +07:00
|
|
|
|
|
|
|
/* VM-entry interruption-info field: vector */
|
|
|
|
if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
|
|
|
|
(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
|
|
|
|
(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
|
2018-12-13 01:30:10 +07:00
|
|
|
return -EINVAL;
|
2018-12-04 04:53:18 +07:00
|
|
|
|
|
|
|
/* VM-entry interruption-info field: deliver error code */
|
|
|
|
should_have_error_code =
|
|
|
|
intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
|
|
|
|
x86_exception_has_error_code(vector);
|
|
|
|
if (has_error_code != should_have_error_code)
|
2018-12-13 01:30:10 +07:00
|
|
|
return -EINVAL;
|
2018-12-04 04:53:18 +07:00
|
|
|
|
|
|
|
/* VM-entry exception error code */
|
|
|
|
if (has_error_code &&
|
|
|
|
vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
|
2018-12-13 01:30:10 +07:00
|
|
|
return -EINVAL;
|
2018-12-04 04:53:18 +07:00
|
|
|
|
|
|
|
/* VM-entry interruption-info field: reserved bits */
|
|
|
|
if (intr_info & INTR_INFO_RESVD_BITS_MASK)
|
2018-12-13 01:30:10 +07:00
|
|
|
return -EINVAL;
|
2018-12-04 04:53:18 +07:00
|
|
|
|
|
|
|
/* VM-entry instruction length */
|
|
|
|
switch (intr_type) {
|
|
|
|
case INTR_TYPE_SOFT_EXCEPTION:
|
|
|
|
case INTR_TYPE_SOFT_INTR:
|
|
|
|
case INTR_TYPE_PRIV_SW_EXCEPTION:
|
|
|
|
if ((vmcs12->vm_entry_instruction_len > 15) ||
|
|
|
|
(vmcs12->vm_entry_instruction_len == 0 &&
|
|
|
|
!nested_cpu_has_zero_length_injection(vcpu)))
|
2018-12-13 01:30:10 +07:00
|
|
|
return -EINVAL;
|
2018-12-04 04:53:18 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-12-13 01:30:10 +07:00
|
|
|
if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
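/*
 * Example layout of a VM-entry interruption-information field that satisfies
 * the event injection checks above (illustrative): vector in bits 7:0, type
 * in bits 10:8, deliver-error-code in bit 11, valid in bit 31. Injecting a
 * #GP (vector 13, hardware exception type 3, with an error code) therefore
 * encodes as 0x80000b0d, with the error code itself placed in
 * vm_entry_exception_error_code.
 */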
|
|
|
|
|
2018-12-13 01:30:11 +07:00
|
|
|
/*
|
|
|
|
* Checks related to Host Control Registers and MSRs
|
|
|
|
*/
|
|
|
|
static int nested_check_host_control_regs(struct kvm_vcpu *vcpu,
|
|
|
|
struct vmcs12 *vmcs12)
|
2018-12-13 01:30:10 +07:00
|
|
|
{
|
|
|
|
bool ia32e;
|
|
|
|
|
|
|
|
if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
|
|
|
|
!nested_host_cr4_valid(vcpu, vmcs12->host_cr4) ||
|
|
|
|
!nested_cr3_valid(vcpu, vmcs12->host_cr3))
|
2018-12-13 01:30:11 +07:00
|
|
|
return -EINVAL;
|
2018-12-13 01:30:10 +07:00
|
|
|
/*
|
|
|
|
* If the load IA32_EFER VM-exit control is 1, bits reserved in the
|
|
|
|
* IA32_EFER MSR must be 0 in the field for that register. In addition,
|
|
|
|
* the values of the LMA and LME bits in the field must each be that of
|
|
|
|
* the host address-space size VM-exit control.
|
|
|
|
*/
|
|
|
|
if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
|
|
|
|
ia32e = (vmcs12->vm_exit_controls &
|
|
|
|
VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
|
|
|
|
if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
|
|
|
|
ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
|
|
|
|
ia32e != !!(vmcs12->host_ia32_efer & EFER_LME))
|
2018-12-13 01:30:11 +07:00
|
|
|
return -EINVAL;
|
2018-12-13 01:30:10 +07:00
|
|
|
}
|
|
|
|
|
2018-12-04 04:53:18 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-12-13 01:30:12 +07:00
|
|
|
/*
|
|
|
|
* Checks related to Guest Non-register State
|
|
|
|
*/
|
|
|
|
static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
|
2018-12-13 01:30:11 +07:00
|
|
|
{
|
|
|
|
if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
|
|
|
|
vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT)
|
2018-12-13 01:30:12 +07:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2018-12-13 01:30:11 +07:00
|
|
|
|
2018-12-13 01:30:12 +07:00
|
|
|
static int nested_vmx_check_vmentry_prereqs(struct kvm_vcpu *vcpu,
|
|
|
|
struct vmcs12 *vmcs12)
|
|
|
|
{
|
2018-12-13 01:30:11 +07:00
|
|
|
if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
|
|
|
|
nested_check_vm_exit_controls(vcpu, vmcs12) ||
|
|
|
|
nested_check_vm_entry_controls(vcpu, vmcs12))
|
|
|
|
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
|
|
|
|
|
|
|
|
if (nested_check_host_control_regs(vcpu, vmcs12))
|
|
|
|
return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
|
|
|
|
|
2018-12-13 01:30:12 +07:00
|
|
|
if (nested_check_guest_non_reg_state(vmcs12))
|
|
|
|
return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
|
|
|
|
|
2018-12-13 01:30:11 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-12-04 04:53:18 +07:00
|
|
|
static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
|
|
|
|
struct vmcs12 *vmcs12)
|
|
|
|
{
|
|
|
|
int r;
|
|
|
|
struct page *page;
|
|
|
|
struct vmcs12 *shadow;
|
|
|
|
|
|
|
|
if (vmcs12->vmcs_link_pointer == -1ull)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!page_address_valid(vcpu, vmcs12->vmcs_link_pointer))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->vmcs_link_pointer);
|
|
|
|
if (is_error_page(page))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
r = 0;
|
|
|
|
shadow = kmap(page);
|
|
|
|
if (shadow->hdr.revision_id != VMCS12_REVISION ||
|
|
|
|
shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12))
|
|
|
|
r = -EINVAL;
|
|
|
|
kunmap(page);
|
|
|
|
kvm_release_page_clean(page);
|
|
|
|
return r;
|
|
|
|
}
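/*
 * Informational note: when the VMCS link pointer is not -1ull, the check
 * above requires the referenced page to look exactly like a shadow VMCS L1
 * could use on bare metal: its revision_id must be VMCS12_REVISION and its
 * shadow-VMCS indicator must match whether vmcs12 actually enables VMCS
 * shadowing.
 */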
|
|
|
|
|
2018-12-13 01:30:06 +07:00
|
|
|
static int nested_vmx_check_vmentry_postreqs(struct kvm_vcpu *vcpu,
|
2018-12-13 01:30:07 +07:00
|
|
|
struct vmcs12 *vmcs12,
|
|
|
|
u32 *exit_qual)
|
2018-12-04 04:53:18 +07:00
|
|
|
{
|
|
|
|
bool ia32e;
|
|
|
|
|
|
|
|
*exit_qual = ENTRY_FAIL_DEFAULT;
|
|
|
|
|
|
|
|
if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
|
|
|
|
!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
|
|
|
|
*exit_qual = ENTRY_FAIL_VMCS_LINK_PTR;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the load IA32_EFER VM-entry control is 1, the following checks
|
|
|
|
* are performed on the field for the IA32_EFER MSR:
|
|
|
|
* - Bits reserved in the IA32_EFER MSR must be 0.
|
|
|
|
* - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
|
|
|
|
* the IA-32e mode guest VM-exit control. It must also be identical
|
|
|
|
* to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
|
|
|
|
* CR0.PG) is 1.
|
|
|
|
*/
|
|
|
|
if (to_vmx(vcpu)->nested.nested_run_pending &&
|
|
|
|
(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
|
|
|
|
ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
|
|
|
|
if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
|
|
|
|
ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
|
|
|
|
((vmcs12->guest_cr0 & X86_CR0_PG) &&
|
|
|
|
ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME)))
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
|
|
|
|
(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
|
|
|
|
(vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
|
2018-12-04 04:53:18 +07:00
|
|
|
{
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
unsigned long cr3, cr4;
|
2019-01-25 22:40:58 +07:00
|
|
|
bool vm_fail;
|
2018-12-04 04:53:18 +07:00
|
|
|
|
|
|
|
if (!nested_early_check)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (vmx->msr_autoload.host.nr)
|
|
|
|
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
|
|
|
|
if (vmx->msr_autoload.guest.nr)
|
|
|
|
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
|
|
|
|
|
|
|
|
preempt_disable();
|
|
|
|
|
|
|
|
vmx_prepare_switch_to_guest(vcpu);
|
|
|
|
|
|
|
|
	/*
	 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
	 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to
	 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
	 * there is no need to preserve other bits or save/restore the field.
	 */
|
|
|
|
vmcs_writel(GUEST_RFLAGS, 0);
|
|
|
|
|
|
|
|
cr3 = __get_current_cr3_fast();
|
|
|
|
if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
|
|
|
|
vmcs_writel(HOST_CR3, cr3);
|
|
|
|
vmx->loaded_vmcs->host_state.cr3 = cr3;
|
|
|
|
}
|
|
|
|
|
|
|
|
cr4 = cr4_read_shadow();
|
|
|
|
if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
|
|
|
|
vmcs_writel(HOST_CR4, cr4);
|
|
|
|
vmx->loaded_vmcs->host_state.cr4 = cr4;
|
|
|
|
}
|
|
|
|
|
|
|
|
asm(
|
KVM: VMX: Move VM-Enter + VM-Exit handling to non-inline sub-routines
Transitioning to/from a VMX guest requires KVM to manually save/load
the bulk of CPU state that the guest is allowed to direclty access,
e.g. XSAVE state, CR2, GPRs, etc... For obvious reasons, loading the
guest's GPR snapshot prior to VM-Enter and saving the snapshot after
VM-Exit is done via handcoded assembly. The assembly blob is written
as inline asm so that it can easily access KVM-defined structs that
are used to hold guest state, e.g. moving the blob to a standalone
assembly file would require generating defines for struct offsets.
The other relevant aspect of VMX transitions in KVM is the handling of
VM-Exits. KVM doesn't employ a separate VM-Exit handler per se, but
rather treats the VMX transition as a mega instruction (with many side
effects), i.e. sets the VMCS.HOST_RIP to a label immediately following
VMLAUNCH/VMRESUME. The label is then exposed to C code via a global
variable definition in the inline assembly.
Because of the global variable, KVM takes steps to (attempt to) ensure
only a single instance of the owning C function, e.g. vmx_vcpu_run, is
generated by the compiler. The earliest approach placed the inline
assembly in a separate noinline function[1]. Later, the assembly was
folded back into vmx_vcpu_run() and tagged with __noclone[2][3], which
is still used today.
After moving to __noclone, an edge case was encountered where GCC's
-ftracer optimization resulted in the inline assembly blob being
duplicated. This was "fixed" by explicitly disabling -ftracer in the
__noclone definition[4].
Recently, it was found that disabling -ftracer causes build warnings
for unsuspecting users of __noclone[5], and more importantly for KVM,
prevents the compiler for properly optimizing vmx_vcpu_run()[6]. And
perhaps most importantly of all, it was pointed out that there is no
way to prevent duplication of a function with 100% reliability[7],
i.e. more edge cases may be encountered in the future.
So to summarize, the only way to prevent the compiler from duplicating
the global variable definition is to move the variable out of inline
assembly, which has been suggested several times over[1][7][8].
Resolve the aforementioned issues by moving the VMLAUNCH+VRESUME and
VM-Exit "handler" to standalone assembly sub-routines. Moving only
the core VMX transition codes allows the struct indexing to remain as
inline assembly and also allows the sub-routines to be used by
nested_vmx_check_vmentry_hw(). Reusing the sub-routines has a happy
side-effect of eliminating two VMWRITEs in the nested_early_check path
as there is no longer a need to dynamically change VMCS.HOST_RIP.
Note that callers to vmx_vmenter() must account for the CALL modifying
RSP, e.g. must subtract op-size from RSP when synchronizing RSP with
VMCS.HOST_RSP and "restore" RSP prior to the CALL. There are no great
alternatives to fudging RSP. Saving RSP in vmx_enter() is difficult
because doing so requires a second register (VMWRITE does not provide
an immediate encoding for the VMCS field and KVM supports Hyper-V's
memory-based eVMCS ABI). The other more drastic alternative would be
to use eschew VMCS.HOST_RSP and manually save/load RSP using a per-cpu
variable (which can be encoded as e.g. gs:[imm]). But because a valid
stack is needed at the time of VM-Exit (NMIs aren't blocked and a user
could theoretically insert INT3/INT1ICEBRK at the VM-Exit handler), a
dedicated per-cpu VM-Exit stack would be required. A dedicated stack
isn't difficult to implement, but it would require at least one page
per CPU and knowledge of the stack in the dumpstack routines. And in
most cases there is essentially zero overhead in dynamically updating
VMCS.HOST_RSP, e.g. the VMWRITE can be avoided for all but the first
VMLAUNCH unless nested_early_check=1, which is not a fast path. In
other words, avoiding the VMCS.HOST_RSP update by using a dedicated stack
would only make the code marginally less ugly while requiring at least
one page per CPU and forcing the kernel to be aware (and approve) of
the VM-Exit stack shenanigans.
[1] cea15c24ca39 ("KVM: Move KVM context switch into own function")
[2] a3b5ba49a8c5 ("KVM: VMX: add the __noclone attribute to vmx_vcpu_run")
[3] 104f226bfd0a ("KVM: VMX: Fold __vmx_vcpu_run() into vmx_vcpu_run()")
[4] 95272c29378e ("compiler-gcc: disable -ftracer for __noclone functions")
[5] https://lkml.kernel.org/r/20181218140105.ajuiglkpvstt3qxs@treble
[6] https://patchwork.kernel.org/patch/8707981/#21817015
[7] https://lkml.kernel.org/r/ri6y38lo23g.fsf@suse.cz
[8] https://lkml.kernel.org/r/20181218212042.GE25620@tassilo.jf.intel.com
Suggested-by: Andi Kleen <ak@linux.intel.com>
Suggested-by: Martin Jambor <mjambor@suse.cz>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Nadav Amit <namit@vmware.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Martin Jambor <mjambor@suse.cz>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Miroslav Benes <mbenes@suse.cz>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2018-12-21 03:25:17 +07:00
|
|
|
"sub $%c[wordsize], %%" _ASM_SP "\n\t" /* temporarily adjust RSP for CALL */
|
2019-01-25 22:41:02 +07:00
|
|
|
"cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
|
|
|
|
"je 1f \n\t"
|
2019-01-25 22:41:01 +07:00
|
|
|
__ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t"
|
2019-01-25 22:41:02 +07:00
|
|
|
"mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
|
|
|
|
"1: \n\t"
|
2018-12-21 03:25:17 +07:00
|
|
|
"add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */
|
2018-12-04 04:53:18 +07:00
|
|
|
|
|
|
|
/* Check if vmlaunch or vmresume is needed */
|
KVM: nVMX: Reference vmx->loaded_vmcs->launched directly
Temporarily propagating vmx->loaded_vmcs->launched to vmx->__launched
is not functionally necessary, but rather was done historically to
avoid passing both 'vmx' and 'loaded_vmcs' to the vCPU-run asm blob.
Nested early checks inherited this behavior by virtue of copy+paste.
A future patch will move HOST_RSP caching to be per-VMCS, i.e. store
'host_rsp' in loaded VMCS. Now that the reference to 'vmx->fail' is
also gone from nested early checks, referencing 'loaded_vmcs' directly
means we can drop the 'vmx' reference when introducing per-VMCS RSP
caching. And it means __launched can be dropped from struct vcpu_vmx
if/when vCPU-run receives similar treatment.
Note the use of a named register constraint for 'loaded_vmcs'. Using
RCX to hold 'vmx' was inherited from vCPU-run. In the vCPU-run case,
the scratch register needs to be explicitly defined as it is crushed
when loading guest state, i.e. deferring to the compiler would corrupt
the pointer. Since nested early checks never load guest state, it's
a-ok to let the compiler pick any register. Naming the constraint
avoids the fragility of referencing constraints via %1, %2, etc., which
breaks horribly when modifying constraints, and generally makes the asm
blob more readable.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
2019-01-25 22:41:00 +07:00
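As a self-contained illustration of the named-constraint style described above, the toy below compiles as ordinary userspace C with GCC or Clang on x86-64. It is not KVM code; the function and operand names are invented for the example. The point is that the template refers to operands as %[in]/%[out] rather than positional %0/%1, so adding or reordering constraints later cannot silently rebind them.

#include <stdio.h>

/* Toy example of named asm operands, analogous to %[loaded_vmcs] and
 * %[launched] in the KVM blob: the names, not the operand positions,
 * bind the template to the constraints. */
static long add_one(long x)
{
	long ret;

	asm("lea 1(%[in]), %[out]"
	    : [out] "=r" (ret)
	    : [in]  "r" (x));
	return ret;
}

int main(void)
{
	printf("%ld\n", add_one(41));	/* prints 42 */
	return 0;
}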
|
|
|
"cmpb $0, %c[launched](%[loaded_vmcs])\n\t"
|
2018-12-21 03:25:17 +07:00
|
|
|
|
2019-01-25 22:40:58 +07:00
|
|
|
/*
|
|
|
|
* VMLAUNCH and VMRESUME clear RFLAGS.{CF,ZF} on VM-Exit, set
|
|
|
|
* RFLAGS.CF on VM-Fail Invalid and set RFLAGS.ZF on VM-Fail
|
|
|
|
* Valid. vmx_vmenter() directly "returns" RFLAGS, and so the
|
2019-01-25 22:40:59 +07:00
|
|
|
* result of VM-Enter is captured via CC_{SET,OUT} to vm_fail.
|
2019-01-25 22:40:58 +07:00
|
|
|
*/
|
2018-12-21 03:25:17 +07:00
|
|
|
"call vmx_vmenter\n\t"
|
|
|
|
|
2019-01-25 22:40:59 +07:00
|
|
|
CC_SET(be)
|
|
|
|
: ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail)
|
2019-01-25 22:41:02 +07:00
|
|
|
: [HOST_RSP]"r"((unsigned long)HOST_RSP),
|
2019-01-25 22:41:00 +07:00
|
|
|
[loaded_vmcs]"r"(vmx->loaded_vmcs),
|
|
|
|
[launched]"i"(offsetof(struct loaded_vmcs, launched)),
|
2019-01-25 22:41:02 +07:00
|
|
|
[host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)),
|
2018-12-21 03:25:17 +07:00
|
|
|
[wordsize]"i"(sizeof(ulong))
|
2019-01-25 22:40:55 +07:00
|
|
|
: "cc", "memory"
|
2018-12-04 04:53:18 +07:00
|
|
|
);
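For readability, the fragments of the nested_early_check asm blob shown above reassemble into roughly the following single statement. This is a sketch: the opening asm( and the declaration of the local vm_fail flag sit earlier in nested_vmx_check_vmentry_hw() and are assumed here, whitespace is approximate, and per-line attribution is given by the blame entries above.

	asm(
		/* Temporarily adjust RSP so the value written to HOST_RSP
		 * matches what RSP will be after the CALL to vmx_vmenter()
		 * pushes its return address. */
		"sub $%c[wordsize], %%" _ASM_SP "\n\t"
		"cmp %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
		"je 1f \n\t"
		__ex("vmwrite %%" _ASM_SP ", %[HOST_RSP]") "\n\t"
		"mov %%" _ASM_SP ", %c[host_state_rsp](%[loaded_vmcs]) \n\t"
		"1: \n\t"
		"add $%c[wordsize], %%" _ASM_SP "\n\t" /* un-adjust RSP */

		/* Check if vmlaunch or vmresume is needed; vmx_vmenter()
		 * consumes the resulting ZF. */
		"cmpb $0, %c[launched](%[loaded_vmcs])\n\t"

		"call vmx_vmenter\n\t"

		/* VM-Fail sets RFLAGS.CF or RFLAGS.ZF, captured here as
		 * "below or equal" into vm_fail. */
		CC_SET(be)
	      : ASM_CALL_CONSTRAINT, CC_OUT(be) (vm_fail)
	      : [HOST_RSP]"r"((unsigned long)HOST_RSP),
		[loaded_vmcs]"r"(vmx->loaded_vmcs),
		[launched]"i"(offsetof(struct loaded_vmcs, launched)),
		[host_state_rsp]"i"(offsetof(struct loaded_vmcs, host_state.rsp)),
		[wordsize]"i"(sizeof(ulong))
	      : "cc", "memory");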
|
|
|
|
|
|
|
|
preempt_enable();
|
|
|
|
|
|
|
|
if (vmx->msr_autoload.host.nr)
|
|
|
|
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
|
|
|
|
if (vmx->msr_autoload.guest.nr)
|
|
|
|
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
|
|
|
|
|
2019-01-25 22:40:58 +07:00
|
|
|
if (vm_fail) {
|
2018-12-04 04:53:18 +07:00
|
|
|
WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
|
|
|
|
VMXERR_ENTRY_INVALID_CONTROL_FIELD);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* VMExit clears RFLAGS.IF and DR7, even on a consistency check.
|
|
|
|
*/
|
|
|
|
local_irq_enable();
|
|
|
|
if (hw_breakpoint_active())
|
|
|
|
set_debugreg(__this_cpu_read(cpu_dr7), 7);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* A non-failing VMEntry means we somehow entered guest mode with
|
|
|
|
* an illegal RIP, and that's just the tip of the iceberg. There
|
|
|
|
* is no telling what memory has been modified or what state has
|
|
|
|
* been exposed to unknown code. Hitting this all but guarantees
|
|
|
|
* a (very critical) hardware issue.
|
|
|
|
*/
|
|
|
|
WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
|
|
|
|
VMX_EXIT_REASONS_FAILED_VMENTRY));
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
|
|
|
|
struct vmcs12 *vmcs12);
|
|
|
|
|
|
|
|
static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
struct page *page;
|
|
|
|
u64 hpa;
|
|
|
|
|
|
|
|
if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
|
|
|
|
/*
|
|
|
|
* Translate L1 physical address to host physical
|
|
|
|
* address for vmcs02. Keep the page pinned, so this
|
|
|
|
* physical address remains valid. We keep a reference
|
|
|
|
* to it so we can release it later.
|
|
|
|
*/
|
|
|
|
if (vmx->nested.apic_access_page) { /* shouldn't happen */
|
|
|
|
kvm_release_page_dirty(vmx->nested.apic_access_page);
|
|
|
|
vmx->nested.apic_access_page = NULL;
|
|
|
|
}
|
|
|
|
page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
|
|
|
|
/*
|
|
|
|
* If translation failed, no matter: This feature asks
|
|
|
|
* to exit when accessing the given address, and if it
|
|
|
|
* can never be accessed, this feature won't do
|
|
|
|
* anything anyway.
|
|
|
|
*/
|
|
|
|
if (!is_error_page(page)) {
|
|
|
|
vmx->nested.apic_access_page = page;
|
|
|
|
hpa = page_to_phys(vmx->nested.apic_access_page);
|
|
|
|
vmcs_write64(APIC_ACCESS_ADDR, hpa);
|
|
|
|
} else {
|
|
|
|
vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
|
|
|
|
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
|
|
|
|
if (vmx->nested.virtual_apic_page) { /* shouldn't happen */
|
|
|
|
kvm_release_page_dirty(vmx->nested.virtual_apic_page);
|
|
|
|
vmx->nested.virtual_apic_page = NULL;
|
|
|
|
}
|
|
|
|
page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->virtual_apic_page_addr);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If translation failed, VM entry will fail because
|
|
|
|
* prepare_vmcs02 set VIRTUAL_APIC_PAGE_ADDR to -1ull.
|
|
|
|
* Failing the vm entry is _not_ what the processor
|
|
|
|
* does but it's basically the only possibility we
|
|
|
|
* have. We could still enter the guest if CR8 load
|
|
|
|
* exits are enabled, CR8 store exits are enabled, and
|
|
|
|
* virtualize APIC access is disabled; in this case
|
|
|
|
* the processor would never use the TPR shadow and we
|
|
|
|
* could simply clear the bit from the execution
|
|
|
|
* control. But such a configuration is useless, so
|
|
|
|
* let's keep the code simple.
|
|
|
|
*/
|
|
|
|
if (!is_error_page(page)) {
|
|
|
|
vmx->nested.virtual_apic_page = page;
|
|
|
|
hpa = page_to_phys(vmx->nested.virtual_apic_page);
|
|
|
|
vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, hpa);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (nested_cpu_has_posted_intr(vmcs12)) {
|
|
|
|
if (vmx->nested.pi_desc_page) { /* shouldn't happen */
|
|
|
|
kunmap(vmx->nested.pi_desc_page);
|
|
|
|
kvm_release_page_dirty(vmx->nested.pi_desc_page);
|
|
|
|
vmx->nested.pi_desc_page = NULL;
|
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
"ARM:
- selftests improvements
- large PUD support for HugeTLB
- single-stepping fixes
- improved tracing
- various timer and vGIC fixes
x86:
- Processor Tracing virtualization
- STIBP support
- some correctness fixes
- refactorings and splitting of vmx.c
- use the Hyper-V range TLB flush hypercall
- reduce order of vcpu struct
- WBNOINVD support
- do not use -ftrace for __noclone functions
- nested guest support for PAUSE filtering on AMD
- more Hyper-V enlightenments (direct mode for synthetic timers)
PPC:
- nested VFIO
s390:
- bugfixes only this time"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (171 commits)
KVM: x86: Add CPUID support for new instruction WBNOINVD
kvm: selftests: ucall: fix exit mmio address guessing
Revert "compiler-gcc: disable -ftracer for __noclone functions"
KVM: VMX: Move VM-Enter + VM-Exit handling to non-inline sub-routines
KVM: VMX: Explicitly reference RCX as the vmx_vcpu pointer in asm blobs
KVM: x86: Use jmp to invoke kvm_spurious_fault() from .fixup
MAINTAINERS: Add arch/x86/kvm sub-directories to existing KVM/x86 entry
KVM/x86: Use SVM assembly instruction mnemonics instead of .byte streams
KVM/MMU: Flush tlb directly in the kvm_zap_gfn_range()
KVM/MMU: Flush tlb directly in kvm_set_pte_rmapp()
KVM/MMU: Move tlb flush in kvm_set_pte_rmapp() to kvm_mmu_notifier_change_pte()
KVM: Make kvm_set_spte_hva() return int
KVM: Replace old tlb flush function with new one to flush a specified range.
KVM/MMU: Add tlb flush with range helper function
KVM/VMX: Add hv tlb range flush support
x86/hyper-v: Add HvFlushGuestAddressList hypercall support
KVM: Add tlb_remote_flush_with_range callback in kvm_x86_ops
KVM: x86: Disable Intel PT when VMXON in L1 guest
KVM: x86: Set intercept for Intel PT MSRs read/write
KVM: x86: Implement Intel PT MSRs read/write emulation
...
2018-12-27 02:46:28 +07:00
|
|
|
vmx->nested.pi_desc = NULL;
|
|
|
|
vmcs_write64(POSTED_INTR_DESC_ADDR, -1ull);
|
2018-12-04 04:53:18 +07:00
|
|
|
}
|
|
|
|
page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->posted_intr_desc_addr);
|
|
|
|
if (is_error_page(page))
|
|
|
|
return;
|
|
|
|
vmx->nested.pi_desc_page = page;
|
|
|
|
vmx->nested.pi_desc = kmap(vmx->nested.pi_desc_page);
|
|
|
|
vmx->nested.pi_desc =
|
|
|
|
(struct pi_desc *)((void *)vmx->nested.pi_desc +
|
|
|
|
(unsigned long)(vmcs12->posted_intr_desc_addr &
|
|
|
|
(PAGE_SIZE - 1)));
|
|
|
|
vmcs_write64(POSTED_INTR_DESC_ADDR,
|
|
|
|
page_to_phys(vmx->nested.pi_desc_page) +
|
|
|
|
(unsigned long)(vmcs12->posted_intr_desc_addr &
|
|
|
|
(PAGE_SIZE - 1)));
|
|
|
|
}
|
|
|
|
if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
|
|
|
|
vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL,
|
|
|
|
CPU_BASED_USE_MSR_BITMAPS);
|
|
|
|
else
|
|
|
|
vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
|
|
|
|
CPU_BASED_USE_MSR_BITMAPS);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Intel's VMX Instruction Reference specifies a common set of prerequisites
|
|
|
|
* for running VMX instructions (except VMXON, whose prerequisites are
|
|
|
|
* slightly different). It also specifies what exception to inject otherwise.
|
|
|
|
* Note that many of these exceptions have priority over VM exits, so they
|
|
|
|
* don't have to be checked again here.
|
|
|
|
*/
|
|
|
|
static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
if (!to_vmx(vcpu)->nested.vmxon) {
|
|
|
|
kvm_queue_exception(vcpu, UD_VECTOR);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (vmx_get_cpl(vcpu)) {
|
|
|
|
kvm_inject_gp(vcpu, 0);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
u8 rvi = vmx_get_rvi();
|
|
|
|
u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
|
|
|
|
|
|
|
|
return ((rvi & 0xf0) > (vppr & 0xf0));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
|
|
|
|
struct vmcs12 *vmcs12);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If from_vmentry is false, this is being called from state restore (either RSM
|
|
|
|
* or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
|
|
|
|
*
|
|
|
|
* Returns:
|
|
|
|
* 0 - success, i.e. proceed with actual VMEnter
|
|
|
|
* 1 - consistency check VMExit
|
|
|
|
* -1 - consistency check VMFail
|
|
|
|
*/
|
|
|
|
int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
|
|
|
|
{
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
|
|
|
|
bool evaluate_pending_interrupts;
|
|
|
|
u32 exit_reason = EXIT_REASON_INVALID_STATE;
|
|
|
|
u32 exit_qual;
|
|
|
|
|
|
|
|
evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
|
|
|
|
(CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
|
|
|
|
if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
|
|
|
|
evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
|
|
|
|
|
|
|
|
if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
|
|
|
|
vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
|
|
|
|
if (kvm_mpx_supported() &&
|
|
|
|
!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
|
|
|
|
vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
|
|
|
|
|
|
|
|
vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
|
|
|
|
|
|
|
|
prepare_vmcs02_early(vmx, vmcs12);
|
|
|
|
|
|
|
|
if (from_vmentry) {
|
|
|
|
nested_get_vmcs12_pages(vcpu);
|
|
|
|
|
|
|
|
if (nested_vmx_check_vmentry_hw(vcpu)) {
|
|
|
|
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2018-12-13 01:30:06 +07:00
|
|
|
if (nested_vmx_check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
|
2018-12-04 04:53:18 +07:00
|
|
|
goto vmentry_fail_vmexit;
|
|
|
|
}
|
|
|
|
|
|
|
|
enter_guest_mode(vcpu);
|
|
|
|
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
|
|
|
|
vcpu->arch.tsc_offset += vmcs12->tsc_offset;
|
|
|
|
|
|
|
|
if (prepare_vmcs02(vcpu, vmcs12, &exit_qual))
|
|
|
|
goto vmentry_fail_vmexit_guest_mode;
|
|
|
|
|
|
|
|
if (from_vmentry) {
|
|
|
|
exit_reason = EXIT_REASON_MSR_LOAD_FAIL;
|
|
|
|
exit_qual = nested_vmx_load_msr(vcpu,
|
|
|
|
vmcs12->vm_entry_msr_load_addr,
|
|
|
|
vmcs12->vm_entry_msr_load_count);
|
|
|
|
if (exit_qual)
|
|
|
|
goto vmentry_fail_vmexit_guest_mode;
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* The MMU is not initialized to point at the right entities yet and
|
|
|
|
* "get pages" would need to read data from the guest (i.e. we will
|
|
|
|
* need to perform gpa to hpa translation). Request a call
|
|
|
|
* to nested_get_vmcs12_pages before the next VM-entry. The MSRs
|
|
|
|
* have already been set at vmentry time and should not be reset.
|
|
|
|
*/
|
|
|
|
kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If L1 had a pending IRQ/NMI until it executed
|
|
|
|
* VMLAUNCH/VMRESUME which wasn't delivered because it was
|
|
|
|
* disallowed (e.g. interrupts disabled), L0 needs to
|
|
|
|
* evaluate if this pending event should cause an exit from L2
|
|
|
|
* to L1 or delivered directly to L2 (e.g. in case L1 doesn't
|
|
|
|
* intercept EXTERNAL_INTERRUPT).
|
|
|
|
*
|
|
|
|
* Usually this would be handled by the processor noticing an
|
|
|
|
* IRQ/NMI window request, or checking RVI during evaluation of
|
|
|
|
* pending virtual interrupts. However, this setting was done
|
|
|
|
* on VMCS01 and now VMCS02 is active instead. Thus, we force L0
|
|
|
|
* to perform pending event evaluation by requesting a KVM_REQ_EVENT.
|
|
|
|
*/
|
|
|
|
if (unlikely(evaluate_pending_interrupts))
|
|
|
|
kvm_make_request(KVM_REQ_EVENT, vcpu);
|
|
|
|
|
2019-01-30 01:14:46 +07:00
|
|
|
/*
|
|
|
|
* Do not start the preemption timer hrtimer until after we know
|
|
|
|
* we are successful, so that only nested_vmx_vmexit needs to cancel
|
|
|
|
* the timer.
|
|
|
|
*/
|
|
|
|
vmx->nested.preemption_timer_expired = false;
|
|
|
|
if (nested_cpu_has_preemption_timer(vmcs12))
|
|
|
|
vmx_start_preemption_timer(vcpu);
|
|
|
|
|
2018-12-04 04:53:18 +07:00
|
|
|
/*
|
|
|
|
* Note no nested_vmx_succeed or nested_vmx_fail here. At this point
|
|
|
|
* we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
|
|
|
|
* returned as far as L1 is concerned. It will only return (and set
|
|
|
|
* the success flag) when L2 exits (see nested_vmx_vmexit()).
|
|
|
|
*/
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* A failed consistency check that leads to a VMExit during L1's
|
|
|
|
* VMEnter to L2 is a variation of a normal VMexit, as explained in
|
|
|
|
* 26.7 "VM-entry failures during or after loading guest state".
|
|
|
|
*/
|
|
|
|
vmentry_fail_vmexit_guest_mode:
|
|
|
|
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
|
|
|
|
vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
|
|
|
|
leave_guest_mode(vcpu);
|
|
|
|
|
|
|
|
vmentry_fail_vmexit:
|
|
|
|
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
|
|
|
|
|
|
|
|
if (!from_vmentry)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
load_vmcs12_host_state(vcpu, vmcs12);
|
|
|
|
vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
|
|
|
|
vmcs12->exit_qualification = exit_qual;
|
|
|
|
if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
|
|
|
|
vmx->nested.need_vmcs12_sync = true;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
|
|
|
|
* for running an L2 nested guest.
|
|
|
|
*/
|
|
|
|
static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
|
|
|
|
{
|
|
|
|
struct vmcs12 *vmcs12;
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!nested_vmx_check_permission(vcpu))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (!nested_vmx_handle_enlightened_vmptrld(vcpu, true))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)
|
|
|
|
return nested_vmx_failInvalid(vcpu);
|
|
|
|
|
|
|
|
vmcs12 = get_vmcs12(vcpu);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
|
|
|
|
* that there *is* a valid VMCS pointer, RFLAGS.CF is set
|
|
|
|
* rather than RFLAGS.ZF, and no error number is stored to the
|
|
|
|
* VM-instruction error field.
|
|
|
|
*/
|
|
|
|
if (vmcs12->hdr.shadow_vmcs)
|
|
|
|
return nested_vmx_failInvalid(vcpu);
|
|
|
|
|
|
|
|
if (vmx->nested.hv_evmcs) {
|
|
|
|
copy_enlightened_to_vmcs12(vmx);
|
|
|
|
/* Enlightened VMCS doesn't have launch state */
|
|
|
|
vmcs12->launch_state = !launch;
|
|
|
|
} else if (enable_shadow_vmcs) {
|
|
|
|
copy_shadow_to_vmcs12(vmx);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The nested entry process starts with enforcing various prerequisites
|
|
|
|
* on vmcs12 as required by the Intel SDM, and acting appropriately when
|
|
|
|
* they fail: As the SDM explains, some conditions should cause the
|
|
|
|
* instruction to fail, while others will cause the instruction to seem
|
|
|
|
* to succeed, but return an EXIT_REASON_INVALID_STATE.
|
|
|
|
* To speed up the normal (success) code path, we should avoid checking
|
|
|
|
* for misconfigurations which will anyway be caught by the processor
|
|
|
|
* when using the merged vmcs02.
|
|
|
|
*/
|
|
|
|
if (interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS)
|
|
|
|
return nested_vmx_failValid(vcpu,
|
|
|
|
VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
|
|
|
|
|
|
|
|
if (vmcs12->launch_state == launch)
|
|
|
|
return nested_vmx_failValid(vcpu,
|
|
|
|
launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
|
|
|
|
: VMXERR_VMRESUME_NONLAUNCHED_VMCS);
|
|
|
|
|
2018-12-13 01:30:06 +07:00
|
|
|
ret = nested_vmx_check_vmentry_prereqs(vcpu, vmcs12);
|
2018-12-04 04:53:18 +07:00
|
|
|
if (ret)
|
|
|
|
return nested_vmx_failValid(vcpu, ret);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We're finally done with prerequisite checking, and can start with
|
|
|
|
* the nested entry.
|
|
|
|
*/
|
|
|
|
vmx->nested.nested_run_pending = 1;
|
|
|
|
ret = nested_vmx_enter_non_root_mode(vcpu, true);
|
|
|
|
vmx->nested.nested_run_pending = !ret;
|
|
|
|
if (ret > 0)
|
|
|
|
return 1;
|
|
|
|
else if (ret)
|
|
|
|
return nested_vmx_failValid(vcpu,
|
|
|
|
VMXERR_ENTRY_INVALID_CONTROL_FIELD);
|
|
|
|
|
|
|
|
/* Hide L1D cache contents from the nested guest. */
|
|
|
|
vmx->vcpu.arch.l1tf_flush_l1d = true;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Must happen outside of nested_vmx_enter_non_root_mode() as it will
|
|
|
|
* also be used as part of restoring nVMX state for
|
|
|
|
* snapshot restore (migration).
|
|
|
|
*
|
|
|
|
* In this flow, it is assumed that vmcs12 cache was
|
|
|
|
* transferred as part of the captured nVMX state and should
|
|
|
|
* therefore not be read from guest memory (which may not
|
|
|
|
* exist on destination host yet).
|
|
|
|
*/
|
|
|
|
nested_cache_shadow_vmcs12(vcpu, vmcs12);
|
|
|
|
|
|
|
|
/*
|
2018-11-27 02:22:32 +07:00
|
|
|
* If we're entering a halted L2 vcpu and the L2 vcpu won't be
|
|
|
|
* awakened by event injection or by an NMI-window VM-exit or
|
|
|
|
* by an interrupt-window VM-exit, halt the vcpu.
|
2018-12-04 04:53:18 +07:00
|
|
|
*/
|
|
|
|
if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
|
2018-11-27 02:22:32 +07:00
|
|
|
!(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
|
|
|
|
!(vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_NMI_PENDING) &&
|
|
|
|
!((vmcs12->cpu_based_vm_exec_control & CPU_BASED_VIRTUAL_INTR_PENDING) &&
|
|
|
|
(vmcs12->guest_rflags & X86_EFLAGS_IF))) {
|
2018-12-04 04:53:18 +07:00
|
|
|
vmx->nested.nested_run_pending = 0;
|
|
|
|
return kvm_vcpu_halt(vcpu);
|
|
|
|
}
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
|
|
|
|
* because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
|
|
|
|
* This function returns the new value we should put in vmcs12.guest_cr0.
|
|
|
|
* It's not enough to just return the vmcs02 GUEST_CR0. Rather,
|
|
|
|
* 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
|
|
|
|
* available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
|
|
|
|
* didn't trap the bit, because if L1 did, so would L0).
|
|
|
|
* 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
|
|
|
|
* been modified by L2, and L1 knows it. So just leave the old value of
|
|
|
|
* the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
|
|
|
|
* isn't relevant, because if L0 traps this bit it can set it to anything.
|
|
|
|
* 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
|
|
|
|
* changed these bits, and therefore they need to be updated, but L0
|
|
|
|
* didn't necessarily allow them to be changed in GUEST_CR0 - and rather
|
|
|
|
* put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
|
|
|
|
*/
|
|
|
|
static inline unsigned long
|
|
|
|
vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
|
|
|
|
{
|
|
|
|
return
|
|
|
|
/*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
|
|
|
|
/*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
|
|
|
|
/*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
|
|
|
|
vcpu->arch.cr0_guest_owned_bits));
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline unsigned long
|
|
|
|
vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
|
|
|
|
{
|
|
|
|
return
|
|
|
|
/*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
|
|
|
|
/*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
|
|
|
|
/*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
|
|
|
|
vcpu->arch.cr4_guest_owned_bits));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
|
|
|
|
struct vmcs12 *vmcs12)
|
|
|
|
{
|
|
|
|
u32 idt_vectoring;
|
|
|
|
unsigned int nr;
|
|
|
|
|
|
|
|
if (vcpu->arch.exception.injected) {
|
|
|
|
nr = vcpu->arch.exception.nr;
|
|
|
|
idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
|
|
|
|
|
|
|
|
if (kvm_exception_is_soft(nr)) {
|
|
|
|
vmcs12->vm_exit_instruction_len =
|
|
|
|
vcpu->arch.event_exit_inst_len;
|
|
|
|
idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
|
|
|
|
} else
|
|
|
|
idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
|
|
|
|
|
|
|
|
if (vcpu->arch.exception.has_error_code) {
|
|
|
|
idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
|
|
|
|
vmcs12->idt_vectoring_error_code =
|
|
|
|
vcpu->arch.exception.error_code;
|
|
|
|
}
|
|
|
|
|
|
|
|
vmcs12->idt_vectoring_info_field = idt_vectoring;
|
|
|
|
} else if (vcpu->arch.nmi_injected) {
|
|
|
|
vmcs12->idt_vectoring_info_field =
|
|
|
|
INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
|
|
|
|
} else if (vcpu->arch.interrupt.injected) {
|
|
|
|
nr = vcpu->arch.interrupt.nr;
|
|
|
|
idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
|
|
|
|
|
|
|
|
if (vcpu->arch.interrupt.soft) {
|
|
|
|
idt_vectoring |= INTR_TYPE_SOFT_INTR;
|
|
|
|
vmcs12->vm_entry_instruction_len =
|
|
|
|
vcpu->arch.event_exit_inst_len;
|
|
|
|
} else
|
|
|
|
idt_vectoring |= INTR_TYPE_EXT_INTR;
|
|
|
|
|
|
|
|
vmcs12->idt_vectoring_info_field = idt_vectoring;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
|
|
|
|
gfn_t gfn;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Don't need to mark the APIC access page dirty; it is never
|
|
|
|
* written to by the CPU during APIC virtualization.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
|
|
|
|
gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
|
|
|
|
kvm_vcpu_mark_page_dirty(vcpu, gfn);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (nested_cpu_has_posted_intr(vmcs12)) {
|
|
|
|
gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
|
|
|
|
kvm_vcpu_mark_page_dirty(vcpu, gfn);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
int max_irr;
|
|
|
|
void *vapic_page;
|
|
|
|
u16 status;
|
|
|
|
|
|
|
|
if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
|
|
|
|
return;
|
|
|
|
|
|
|
|
vmx->nested.pi_pending = false;
|
|
|
|
if (!pi_test_and_clear_on(vmx->nested.pi_desc))
|
|
|
|
return;
|
|
|
|
|
|
|
|
max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
|
|
|
|
if (max_irr != 256) {
|
|
|
|
vapic_page = kmap(vmx->nested.virtual_apic_page);
|
|
|
|
__kvm_apic_update_irr(vmx->nested.pi_desc->pir,
|
|
|
|
vapic_page, &max_irr);
|
|
|
|
kunmap(vmx->nested.virtual_apic_page);
|
|
|
|
|
|
|
|
status = vmcs_read16(GUEST_INTR_STATUS);
|
|
|
|
if ((u8)max_irr > ((u8)status & 0xff)) {
|
|
|
|
status &= ~0xff;
|
|
|
|
status |= (u8)max_irr;
|
|
|
|
vmcs_write16(GUEST_INTR_STATUS, status);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
nested_mark_vmcs12_pages_dirty(vcpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
|
|
|
|
unsigned long exit_qual)
|
|
|
|
{
|
|
|
|
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
|
|
|
|
unsigned int nr = vcpu->arch.exception.nr;
|
|
|
|
u32 intr_info = nr | INTR_INFO_VALID_MASK;
|
|
|
|
|
|
|
|
if (vcpu->arch.exception.has_error_code) {
|
|
|
|
vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
|
|
|
|
intr_info |= INTR_INFO_DELIVER_CODE_MASK;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (kvm_exception_is_soft(nr))
|
|
|
|
intr_info |= INTR_TYPE_SOFT_EXCEPTION;
|
|
|
|
else
|
|
|
|
intr_info |= INTR_TYPE_HARD_EXCEPTION;
|
|
|
|
|
|
|
|
if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
|
|
|
|
vmx_get_nmi_mask(vcpu))
|
|
|
|
intr_info |= INTR_INFO_UNBLOCK_NMI;
|
|
|
|
|
|
|
|
nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
|
|
|
|
{
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
unsigned long exit_qual;
|
|
|
|
bool block_nested_events =
|
|
|
|
vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
|
|
|
|
|
|
|
|
if (vcpu->arch.exception.pending &&
|
|
|
|
nested_vmx_check_exception(vcpu, &exit_qual)) {
|
|
|
|
if (block_nested_events)
|
|
|
|
return -EBUSY;
|
|
|
|
nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
|
|
|
|
vmx->nested.preemption_timer_expired) {
|
|
|
|
if (block_nested_events)
|
|
|
|
return -EBUSY;
|
|
|
|
nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
|
|
|
|
if (block_nested_events)
|
|
|
|
return -EBUSY;
|
|
|
|
nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
|
|
|
|
NMI_VECTOR | INTR_TYPE_NMI_INTR |
|
|
|
|
INTR_INFO_VALID_MASK, 0);
|
|
|
|
/*
|
|
|
|
* The NMI-triggered VM exit counts as injection:
|
|
|
|
* clear this one and block further NMIs.
|
|
|
|
*/
|
|
|
|
vcpu->arch.nmi_pending = 0;
|
|
|
|
vmx_set_nmi_mask(vcpu, true);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
|
|
|
|
nested_exit_on_intr(vcpu)) {
|
|
|
|
if (block_nested_events)
|
|
|
|
return -EBUSY;
|
|
|
|
nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
vmx_complete_nested_posted_interrupt(vcpu);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
ktime_t remaining =
|
|
|
|
hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
|
|
|
|
u64 value;
|
|
|
|
|
|
|
|
if (ktime_to_ns(remaining) <= 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
|
|
|
|
do_div(value, 1000000);
|
|
|
|
return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Update the guest state fields of vmcs12 to reflect changes that
|
|
|
|
* occurred while L2 was running. (The "IA-32e mode guest" bit of the
|
|
|
|
* VM-entry controls is also updated, since this is really a guest
|
|
|
|
* state bit.)
|
|
|
|
*/
|
|
|
|
static void sync_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
|
|
|
|
{
|
|
|
|
vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
|
|
|
|
vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
|
|
|
|
|
|
|
|
vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
|
|
|
|
vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
|
|
|
|
vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
|
|
|
|
|
|
|
|
vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
|
|
|
|
vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
|
|
|
|
vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
|
|
|
|
vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
|
|
|
|
vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
|
|
|
|
vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
|
|
|
|
vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
|
|
|
|
vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
|
|
|
|
vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
|
|
|
|
vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
|
|
|
|
vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
|
|
|
|
vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
|
|
|
|
vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
|
|
|
|
vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
|
|
|
|
vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
|
|
|
|
vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
|
|
|
|
vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
|
|
|
|
vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
|
|
|
|
vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
|
|
|
|
vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
|
|
|
|
vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
|
|
|
|
vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
|
|
|
|
vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
|
|
|
|
vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
|
|
|
|
vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
|
|
|
|
vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
|
|
|
|
vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
|
|
|
|
vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
|
|
|
|
vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
|
|
|
|
vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
|
|
|
|
vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
|
|
|
|
vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
|
|
|
|
vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
|
|
|
|
vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
|
|
|
|
vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
|
|
|
|
vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
|
|
|
|
|
|
|
|
vmcs12->guest_interruptibility_info =
|
|
|
|
vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
|
|
|
|
vmcs12->guest_pending_dbg_exceptions =
|
|
|
|
vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
|
|
|
|
if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
|
|
|
|
vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
|
|
|
|
else
|
|
|
|
vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
|
|
|
|
|
|
|
|
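	/*
	 * Save the remaining VMX-preemption timer value only if L1 enabled
	 * the timer and asked for it to be saved on VM-exit.
	 */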
if (nested_cpu_has_preemption_timer(vmcs12) &&
|
|
|
|
vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
|
|
|
|
vmcs12->vmx_preemption_timer_value =
|
|
|
|
vmx_get_preemption_timer_value(vcpu);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* In some cases (usually, nested EPT), L2 is allowed to change its
|
|
|
|
* own CR3 without exiting. If it has changed it, we must keep it.
|
|
|
|
* Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
|
|
|
|
* by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
|
|
|
|
*
|
|
|
|
* Additionally, restore L2's PDPTR to vmcs12.
|
|
|
|
*/
|
|
|
|
if (enable_ept) {
|
|
|
|
vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
|
|
|
|
vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
|
|
|
|
vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
|
|
|
|
vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
|
|
|
|
vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
|
|
|
|
}
|
|
|
|
|
|
|
|
vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
|
|
|
|
|
|
|
|
if (nested_cpu_has_vid(vmcs12))
|
|
|
|
vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
|
|
|
|
|
|
|
|
vmcs12->vm_entry_controls =
|
|
|
|
(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
|
|
|
|
(vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
|
|
|
|
|
|
|
|
if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) {
|
|
|
|
kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
|
|
|
|
vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* TODO: These cannot have changed unless we have MSR bitmaps and
|
|
|
|
* the relevant bit asks not to trap the change */
|
|
|
|
if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
|
|
|
|
vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
|
|
|
|
if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
|
|
|
|
vmcs12->guest_ia32_efer = vcpu->arch.efer;
|
|
|
|
vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
|
|
|
|
vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
|
|
|
|
vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
|
|
|
|
if (kvm_mpx_supported())
|
|
|
|
vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
|
|
|
|
* and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
|
|
|
|
* and this function updates it to reflect the changes to the guest state while
|
|
|
|
* L2 was running (and perhaps made some exits which were handled directly by L0
|
|
|
|
* without going back to L1), and to reflect the exit reason.
|
|
|
|
* Note that we do not have to copy here all VMCS fields, just those that
|
|
|
|
* could have changed by the L2 guest or the exit - i.e., the guest-state and
|
|
|
|
* exit-information fields only. Other fields are modified by L1 with VMWRITE,
|
|
|
|
* which already writes to vmcs12 directly.
|
|
|
|
*/
|
|
|
|
static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
|
|
|
|
u32 exit_reason, u32 exit_intr_info,
|
|
|
|
unsigned long exit_qualification)
|
|
|
|
{
|
|
|
|
/* update guest state fields: */
|
|
|
|
sync_vmcs12(vcpu, vmcs12);
|
|
|
|
|
|
|
|
/* update exit information fields: */
|
|
|
|
|
|
|
|
vmcs12->vm_exit_reason = exit_reason;
|
|
|
|
vmcs12->exit_qualification = exit_qualification;
|
|
|
|
vmcs12->vm_exit_intr_info = exit_intr_info;
|
|
|
|
|
|
|
|
vmcs12->idt_vectoring_info_field = 0;
|
|
|
|
vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
|
|
|
|
vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
|
|
|
|
|
|
|
|
if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
|
|
|
|
vmcs12->launch_state = 1;
|
|
|
|
|
|
|
|
/* vm_entry_intr_info_field is cleared on exit. Emulate this
|
|
|
|
* instead of reading the real value. */
|
|
|
|
vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
|
|
|
|
|
|
|
|
/*
|
|
|
|
	 * Transfer the event that L0 or L1 may have wanted to inject into
|
|
|
|
* L2 to IDT_VECTORING_INFO_FIELD.
|
|
|
|
*/
|
|
|
|
vmcs12_save_pending_event(vcpu, vmcs12);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* According to spec, there's no need to store the guest's
|
|
|
|
* MSRs if the exit is due to a VM-entry failure that occurs
|
|
|
|
* during or after loading the guest state. Since this exit
|
|
|
|
* does not fall in that category, we need to save the MSRs.
|
|
|
|
*/
|
|
|
|
if (nested_vmx_store_msr(vcpu,
|
|
|
|
vmcs12->vm_exit_msr_store_addr,
|
|
|
|
vmcs12->vm_exit_msr_store_count))
|
|
|
|
nested_vmx_abort(vcpu,
|
|
|
|
VMX_ABORT_SAVE_GUEST_MSR_FAIL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Drop what we picked up for L2 via vmx_complete_interrupts. It is
|
|
|
|
* preserved above and would only end up incorrectly in L1.
|
|
|
|
*/
|
|
|
|
vcpu->arch.nmi_injected = false;
|
|
|
|
kvm_clear_exception_queue(vcpu);
|
|
|
|
kvm_clear_interrupt_queue(vcpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
 * A part of what we need to do when the nested L2 guest exits and we want to
|
|
|
|
* run its L1 parent, is to reset L1's guest state to the host state specified
|
|
|
|
* in vmcs12.
|
|
|
|
* This function is to be called not only on normal nested exit, but also on
|
|
|
|
* a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
|
|
|
|
* Failures During or After Loading Guest State").
|
|
|
|
* This function should be called when the active VMCS is L1's (vmcs01).
|
|
|
|
*/
|
|
|
|
static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
|
|
|
|
struct vmcs12 *vmcs12)
|
|
|
|
{
|
|
|
|
struct kvm_segment seg;
|
|
|
|
u32 entry_failure_code;
|
|
|
|
|
|
|
|
if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
|
|
|
|
vcpu->arch.efer = vmcs12->host_ia32_efer;
|
|
|
|
else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
|
|
|
|
vcpu->arch.efer |= (EFER_LMA | EFER_LME);
|
|
|
|
else
|
|
|
|
vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
|
|
|
|
vmx_set_efer(vcpu, vcpu->arch.efer);
|
|
|
|
|
|
|
|
kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
|
|
|
|
kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
|
|
|
|
vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
|
|
|
|
vmx_set_interrupt_shadow(vcpu, 0);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Note that calling vmx_set_cr0 is important, even if cr0 hasn't
|
|
|
|
* actually changed, because vmx_set_cr0 refers to efer set above.
|
|
|
|
*
|
|
|
|
* CR0_GUEST_HOST_MASK is already set in the original vmcs01
|
|
|
|
* (KVM doesn't change it);
|
|
|
|
*/
|
|
|
|
vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
|
|
|
|
vmx_set_cr0(vcpu, vmcs12->host_cr0);
|
|
|
|
|
|
|
|
/* Same as above - no reason to call set_cr4_guest_host_mask(). */
|
|
|
|
vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
|
|
|
|
vmx_set_cr4(vcpu, vmcs12->host_cr4);
|
|
|
|
|
|
|
|
nested_ept_uninit_mmu_context(vcpu);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Only PDPTE load can fail as the value of cr3 was checked on entry and
|
|
|
|
* couldn't have changed.
|
|
|
|
*/
|
|
|
|
if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
|
|
|
|
nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
|
|
|
|
|
|
|
|
if (!enable_ept)
|
|
|
|
vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If vmcs01 doesn't use VPID, CPU flushes TLB on every
|
|
|
|
* VMEntry/VMExit. Thus, no need to flush TLB.
|
|
|
|
*
|
|
|
|
* If vmcs12 doesn't use VPID, L1 expects TLB to be
|
|
|
|
* flushed on every VMEntry/VMExit.
|
|
|
|
*
|
|
|
|
* Otherwise, we can preserve TLB entries as long as we are
|
|
|
|
* able to tag L1 TLB entries differently than L2 TLB entries.
|
|
|
|
*
|
|
|
|
* If vmcs12 uses EPT, we need to execute this flush on EPTP01
|
|
|
|
* and therefore we request the TLB flush to happen only after VMCS EPTP
|
|
|
|
* has been set by KVM_REQ_LOAD_CR3.
|
|
|
|
*/
|
|
|
|
if (enable_vpid &&
|
|
|
|
(!nested_cpu_has_vpid(vmcs12) || !nested_has_guest_tlb_tag(vcpu))) {
|
|
|
|
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
|
|
|
|
vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
|
|
|
|
vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
|
|
|
|
vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
|
|
|
|
vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
|
|
|
|
vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
|
|
|
|
vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
|
|
|
|
|
|
|
|
/* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
|
|
|
|
if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
|
|
|
|
vmcs_write64(GUEST_BNDCFGS, 0);
|
|
|
|
|
|
|
|
if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
|
|
|
|
vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
|
|
|
|
vcpu->arch.pat = vmcs12->host_ia32_pat;
|
|
|
|
}
|
|
|
|
if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
|
|
|
|
vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
|
|
|
|
vmcs12->host_ia32_perf_global_ctrl);
|
|
|
|
|
|
|
|
/* Set L1 segment info according to Intel SDM
|
|
|
|
27.5.2 Loading Host Segment and Descriptor-Table Registers */
|
|
|
|
seg = (struct kvm_segment) {
|
|
|
|
.base = 0,
|
|
|
|
.limit = 0xFFFFFFFF,
|
|
|
|
.selector = vmcs12->host_cs_selector,
|
|
|
|
.type = 11,
|
|
|
|
.present = 1,
|
|
|
|
.s = 1,
|
|
|
|
.g = 1
|
|
|
|
};
|
|
|
|
if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
|
|
|
|
seg.l = 1;
|
|
|
|
else
|
|
|
|
seg.db = 1;
|
|
|
|
vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
|
|
|
|
seg = (struct kvm_segment) {
|
|
|
|
.base = 0,
|
|
|
|
.limit = 0xFFFFFFFF,
|
|
|
|
.type = 3,
|
|
|
|
.present = 1,
|
|
|
|
.s = 1,
|
|
|
|
.db = 1,
|
|
|
|
.g = 1
|
|
|
|
};
|
|
|
|
seg.selector = vmcs12->host_ds_selector;
|
|
|
|
vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
|
|
|
|
seg.selector = vmcs12->host_es_selector;
|
|
|
|
vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
|
|
|
|
seg.selector = vmcs12->host_ss_selector;
|
|
|
|
vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
|
|
|
|
seg.selector = vmcs12->host_fs_selector;
|
|
|
|
seg.base = vmcs12->host_fs_base;
|
|
|
|
vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
|
|
|
|
seg.selector = vmcs12->host_gs_selector;
|
|
|
|
seg.base = vmcs12->host_gs_base;
|
|
|
|
vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
|
|
|
|
seg = (struct kvm_segment) {
|
|
|
|
.base = vmcs12->host_tr_base,
|
|
|
|
.limit = 0x67,
|
|
|
|
.selector = vmcs12->host_tr_selector,
|
|
|
|
.type = 11,
|
|
|
|
.present = 1
|
|
|
|
};
|
|
|
|
vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
|
|
|
|
|
|
|
|
kvm_set_dr(vcpu, 7, 0x400);
|
|
|
|
vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
|
|
|
|
|
|
|
|
if (cpu_has_vmx_msr_bitmap())
|
|
|
|
vmx_update_msr_bitmap(vcpu);
|
|
|
|
|
|
|
|
if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
|
|
|
|
vmcs12->vm_exit_msr_load_count))
|
|
|
|
nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
|
|
|
|
{
|
|
|
|
struct shared_msr_entry *efer_msr;
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
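	/*
	 * A brief note on the lookup order below (assumption: summarizes the
	 * checks that follow): reconstruct the EFER value vmcs01 would load
	 * for L1 by checking the VM-entry controls first and then the MSR
	 * load lists, defaulting to the host's EFER if EFER isn't
	 * explicitly switched.
	 */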
if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
|
|
|
|
return vmcs_read64(GUEST_IA32_EFER);
|
|
|
|
|
|
|
|
if (cpu_has_load_ia32_efer())
|
|
|
|
return host_efer;
|
|
|
|
|
|
|
|
for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
|
|
|
|
if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
|
|
|
|
return vmx->msr_autoload.guest.val[i].value;
|
|
|
|
}
|
|
|
|
|
|
|
|
efer_msr = find_msr_entry(vmx, MSR_EFER);
|
|
|
|
if (efer_msr)
|
|
|
|
return efer_msr->data;
|
|
|
|
|
|
|
|
return host_efer;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
struct vmx_msr_entry g, h;
|
|
|
|
struct msr_data msr;
|
|
|
|
gpa_t gpa;
|
|
|
|
u32 i, j;
|
|
|
|
|
|
|
|
vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
|
|
|
|
|
|
|
|
if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
|
|
|
|
/*
|
|
|
|
* L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
|
|
|
|
* as vmcs01.GUEST_DR7 contains a userspace defined value
|
|
|
|
* and vcpu->arch.dr7 is not squirreled away before the
|
|
|
|
* nested VMENTER (not worth adding a variable in nested_vmx).
|
|
|
|
*/
|
|
|
|
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
|
|
|
|
kvm_set_dr(vcpu, 7, DR7_FIXED_1);
|
|
|
|
else
|
|
|
|
WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Note that calling vmx_set_{efer,cr0,cr4} is important as they
|
|
|
|
* handle a variety of side effects to KVM's software model.
|
|
|
|
*/
|
|
|
|
vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
|
|
|
|
|
|
|
|
vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
|
|
|
|
vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
|
|
|
|
|
|
|
|
vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
|
|
|
|
vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
|
|
|
|
|
|
|
|
nested_ept_uninit_mmu_context(vcpu);
|
|
|
|
vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
|
|
|
|
__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
|
|
|
|
* from vmcs01 (if necessary). The PDPTRs are not loaded on
|
|
|
|
* VMFail, like everything else we just need to ensure our
|
|
|
|
* software model is up-to-date.
|
|
|
|
*/
|
|
|
|
ept_save_pdptrs(vcpu);
|
|
|
|
|
|
|
|
kvm_mmu_reset_context(vcpu);
|
|
|
|
|
|
|
|
if (cpu_has_vmx_msr_bitmap())
|
|
|
|
vmx_update_msr_bitmap(vcpu);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This nasty bit of open coding is a compromise between blindly
|
|
|
|
* loading L1's MSRs using the exit load lists (incorrect emulation
|
|
|
|
* of VMFail), leaving the nested VM's MSRs in the software model
|
|
|
|
* (incorrect behavior) and snapshotting the modified MSRs (too
|
|
|
|
* expensive since the lists are unbound by hardware). For each
|
|
|
|
* MSR that was (prematurely) loaded from the nested VMEntry load
|
|
|
|
* list, reload it from the exit load list if it exists and differs
|
|
|
|
* from the guest value. The intent is to stuff host state as
|
|
|
|
* silently as possible, not to fully process the exit load list.
|
|
|
|
*/
|
|
|
|
msr.host_initiated = false;
|
|
|
|
for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
|
|
|
|
gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
|
|
|
|
if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
|
|
|
|
pr_debug_ratelimited(
|
|
|
|
"%s read MSR index failed (%u, 0x%08llx)\n",
|
|
|
|
__func__, i, gpa);
|
|
|
|
goto vmabort;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
|
|
|
|
gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
|
|
|
|
if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
|
|
|
|
pr_debug_ratelimited(
|
|
|
|
"%s read MSR failed (%u, 0x%08llx)\n",
|
|
|
|
__func__, j, gpa);
|
|
|
|
goto vmabort;
|
|
|
|
}
|
|
|
|
if (h.index != g.index)
|
|
|
|
continue;
|
|
|
|
if (h.value == g.value)
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (nested_vmx_load_msr_check(vcpu, &h)) {
|
|
|
|
pr_debug_ratelimited(
|
|
|
|
"%s check failed (%u, 0x%x, 0x%x)\n",
|
|
|
|
__func__, j, h.index, h.reserved);
|
|
|
|
goto vmabort;
|
|
|
|
}
|
|
|
|
|
|
|
|
msr.index = h.index;
|
|
|
|
msr.data = h.value;
|
|
|
|
if (kvm_set_msr(vcpu, &msr)) {
|
|
|
|
pr_debug_ratelimited(
|
|
|
|
"%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
|
|
|
|
__func__, j, h.index, h.value);
|
|
|
|
goto vmabort;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return;
|
|
|
|
|
|
|
|
vmabort:
|
|
|
|
nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
|
|
|
|
* and modify vmcs12 to make it see what it would expect to see there if
|
|
|
|
* L2 was its real guest. Must only be called when in L2 (is_guest_mode())
|
|
|
|
*/
|
|
|
|
void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
|
|
|
|
u32 exit_intr_info, unsigned long exit_qualification)
|
|
|
|
{
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
|
|
|
|
|
|
|
|
/* trying to cancel vmlaunch/vmresume is a bug */
|
|
|
|
WARN_ON_ONCE(vmx->nested.nested_run_pending);
|
|
|
|
|
|
|
|
leave_guest_mode(vcpu);
|
|
|
|
|
|
|
|
if (nested_cpu_has_preemption_timer(vmcs12))
|
|
|
|
hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
|
|
|
|
|
|
|
|
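	/*
	 * Undo the TSC offset that was applied on behalf of L1 when entering
	 * L2, so that vcpu->arch.tsc_offset reflects L1's view again.
	 */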
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
|
|
|
|
vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
|
|
|
|
|
|
|
|
if (likely(!vmx->fail)) {
|
|
|
|
if (exit_reason == -1)
|
|
|
|
sync_vmcs12(vcpu, vmcs12);
|
|
|
|
else
|
|
|
|
prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
|
|
|
|
exit_qualification);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Must happen outside of sync_vmcs12() as it will
|
|
|
|
* also be used to capture vmcs12 cache as part of
|
|
|
|
* capturing nVMX state for snapshot (migration).
|
|
|
|
*
|
|
|
|
* Otherwise, this flush will dirty guest memory at a
|
|
|
|
* point it is already assumed by user-space to be
|
|
|
|
* immutable.
|
|
|
|
*/
|
|
|
|
nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* The only expected VM-instruction error is "VM entry with
|
|
|
|
* invalid control field(s)." Anything else indicates a
|
|
|
|
* problem with L0. And we should never get here with a
|
|
|
|
* VMFail of any type if early consistency checks are enabled.
|
|
|
|
*/
|
|
|
|
WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
|
|
|
|
VMXERR_ENTRY_INVALID_CONTROL_FIELD);
|
|
|
|
WARN_ON_ONCE(nested_early_check);
|
|
|
|
}
|
|
|
|
|
|
|
|
vmx_switch_vmcs(vcpu, &vmx->vmcs01);
|
|
|
|
|
|
|
|
/* Update any VMCS fields that might have changed while L2 ran */
|
|
|
|
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
|
|
|
|
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
|
|
|
|
vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
|
|
|
|
|
|
|
|
if (kvm_has_tsc_control)
|
|
|
|
decache_tsc_multiplier(vmx);
|
|
|
|
|
|
|
|
if (vmx->nested.change_vmcs01_virtual_apic_mode) {
|
|
|
|
vmx->nested.change_vmcs01_virtual_apic_mode = false;
|
|
|
|
vmx_set_virtual_apic_mode(vcpu);
|
|
|
|
} else if (!nested_cpu_has_ept(vmcs12) &&
|
|
|
|
nested_cpu_has2(vmcs12,
|
|
|
|
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
|
|
|
|
vmx_flush_tlb(vcpu, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Unpin physical memory we referred to in vmcs02 */
|
|
|
|
if (vmx->nested.apic_access_page) {
|
|
|
|
kvm_release_page_dirty(vmx->nested.apic_access_page);
|
|
|
|
vmx->nested.apic_access_page = NULL;
|
|
|
|
}
|
|
|
|
if (vmx->nested.virtual_apic_page) {
|
|
|
|
kvm_release_page_dirty(vmx->nested.virtual_apic_page);
|
|
|
|
vmx->nested.virtual_apic_page = NULL;
|
|
|
|
}
|
|
|
|
if (vmx->nested.pi_desc_page) {
|
|
|
|
kunmap(vmx->nested.pi_desc_page);
|
|
|
|
kvm_release_page_dirty(vmx->nested.pi_desc_page);
|
|
|
|
vmx->nested.pi_desc_page = NULL;
|
|
|
|
vmx->nested.pi_desc = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
	 * While L2 was running, an mmu_notifier may have forced a reload of the
|
|
|
|
	 * page's hpa for the L2 vmcs. Reload it for L1 before entering L1.
|
|
|
|
*/
|
|
|
|
kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
|
|
|
|
|
|
|
|
if ((exit_reason != -1) && (enable_shadow_vmcs || vmx->nested.hv_evmcs))
|
|
|
|
vmx->nested.need_vmcs12_sync = true;
|
|
|
|
|
|
|
|
/* in case we halted in L2 */
|
|
|
|
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
|
|
|
|
|
|
|
|
if (likely(!vmx->fail)) {
|
|
|
|
/*
|
|
|
|
* TODO: SDM says that with acknowledge interrupt on
|
|
|
|
* exit, bit 31 of the VM-exit interrupt information
|
|
|
|
* (valid interrupt) is always set to 1 on
|
|
|
|
* EXIT_REASON_EXTERNAL_INTERRUPT, so we shouldn't
|
|
|
|
* need kvm_cpu_has_interrupt(). See the commit
|
|
|
|
* message for details.
|
|
|
|
*/
|
|
|
|
if (nested_exit_intr_ack_set(vcpu) &&
|
|
|
|
exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
|
|
|
|
kvm_cpu_has_interrupt(vcpu)) {
|
|
|
|
int irq = kvm_cpu_get_interrupt(vcpu);
|
|
|
|
WARN_ON(irq < 0);
|
|
|
|
vmcs12->vm_exit_intr_info = irq |
|
|
|
|
INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (exit_reason != -1)
|
|
|
|
trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
|
|
|
|
vmcs12->exit_qualification,
|
|
|
|
vmcs12->idt_vectoring_info_field,
|
|
|
|
vmcs12->vm_exit_intr_info,
|
|
|
|
vmcs12->vm_exit_intr_error_code,
|
|
|
|
KVM_ISA_VMX);
|
|
|
|
|
|
|
|
load_vmcs12_host_state(vcpu, vmcs12);
|
|
|
|
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* After an early L2 VM-entry failure, we're now back
|
|
|
|
* in L1 which thinks it just finished a VMLAUNCH or
|
|
|
|
* VMRESUME instruction, so we need to set the failure
|
|
|
|
* flag and the VM-instruction error field of the VMCS
|
|
|
|
* accordingly, and skip the emulated instruction.
|
|
|
|
*/
|
|
|
|
(void)nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Restore L1's host state to KVM's software model. We're here
|
|
|
|
* because a consistency check was caught by hardware, which
|
|
|
|
* means some amount of guest state has been propagated to KVM's
|
|
|
|
* model and needs to be unwound to the host's state.
|
|
|
|
*/
|
|
|
|
nested_vmx_restore_host_state(vcpu);
|
|
|
|
|
|
|
|
vmx->fail = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Decode the memory-address operand of a vmx instruction, as recorded on an
|
|
|
|
* exit caused by such an instruction (run by a guest hypervisor).
|
|
|
|
* On success, returns 0. When the operand is invalid, returns 1 and throws
|
|
|
|
* #UD or #GP.
|
|
|
|
*/
|
|
|
|
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
|
|
|
|
u32 vmx_instruction_info, bool wr, gva_t *ret)
|
|
|
|
{
|
|
|
|
gva_t off;
|
|
|
|
bool exn;
|
|
|
|
struct kvm_segment s;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* According to Vol. 3B, "Information for VM Exits Due to Instruction
|
|
|
|
* Execution", on an exit, vmx_instruction_info holds most of the
|
|
|
|
* addressing components of the operand. Only the displacement part
|
|
|
|
* is put in exit_qualification (see 3B, "Basic VM-Exit Information").
|
|
|
|
* For how an actual address is calculated from all these components,
|
|
|
|
* refer to Vol. 1, "Operand Addressing".
|
|
|
|
*/
|
|
|
|
int scaling = vmx_instruction_info & 3;
|
|
|
|
int addr_size = (vmx_instruction_info >> 7) & 7;
|
|
|
|
bool is_reg = vmx_instruction_info & (1u << 10);
|
|
|
|
int seg_reg = (vmx_instruction_info >> 15) & 7;
|
|
|
|
int index_reg = (vmx_instruction_info >> 18) & 0xf;
|
|
|
|
bool index_is_valid = !(vmx_instruction_info & (1u << 22));
|
|
|
|
int base_reg = (vmx_instruction_info >> 23) & 0xf;
|
|
|
|
bool base_is_valid = !(vmx_instruction_info & (1u << 27));
|
|
|
|
|
|
|
|
if (is_reg) {
|
|
|
|
kvm_queue_exception(vcpu, UD_VECTOR);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Addr = segment_base + offset */
|
|
|
|
/* offset = base + [index * scale] + displacement */
|
|
|
|
off = exit_qualification; /* holds the displacement */
|
	/*
	 * The exit qualification reports the displacement as a naturally
	 * sized unsigned value masked to the instruction's address size
	 * (e.g. -0x28(%ebp) is reported as 0xffffffd8 for a 32-bit address
	 * size), and bits beyond the address size are undefined.  Sign
	 * extend the displacement so that a negative displacement isn't
	 * treated as a huge positive one when the instruction's address
	 * size is smaller than KVM's native size; otherwise the segment
	 * limit checks below would inject spurious #GP/#SS faults for
	 * 32-bit nested hypervisors using negative displacements.
	 */
|
|
|
if (addr_size == 1)
|
|
|
|
off = (gva_t)sign_extend64(off, 31);
|
|
|
|
else if (addr_size == 0)
|
|
|
|
off = (gva_t)sign_extend64(off, 15);
|
|
|
|
if (base_is_valid)
|
|
|
|
off += kvm_register_read(vcpu, base_reg);
|
|
|
|
if (index_is_valid)
|
|
|
|
	off += kvm_register_read(vcpu, index_reg) << scaling;
|
|
|
|
vmx_get_segment(vcpu, &s, seg_reg);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The effective address, i.e. @off, of a memory operand is truncated
|
|
|
|
* based on the address size of the instruction. Note that this is
|
|
|
|
* the *effective address*, i.e. the address prior to accounting for
|
|
|
|
* the segment's base.
|
|
|
|
*/
|
|
|
|
if (addr_size == 1) /* 32 bit */
|
|
|
|
off &= 0xffffffff;
|
|
|
|
else if (addr_size == 0) /* 16 bit */
|
|
|
|
off &= 0xffff;
|
|
|
|
|
|
|
|
/* Checks for #GP/#SS exceptions. */
|
|
|
|
exn = false;
|
|
|
|
if (is_long_mode(vcpu)) {
|
|
|
|
/*
|
|
|
|
* The virtual/linear address is never truncated in 64-bit
|
|
|
|
* mode, e.g. a 32-bit address size can yield a 64-bit virtual
|
|
|
|
* address when using FS/GS with a non-zero base.
|
|
|
|
*/
|
|
|
|
*ret = s.base + off;
|
|
|
|
|
|
|
|
/* Long mode: #GP(0)/#SS(0) if the memory address is in a
|
|
|
|
* non-canonical form. This is the only check on the memory
|
|
|
|
* destination for long mode!
|
|
|
|
*/
|
|
|
|
exn = is_noncanonical_address(*ret, vcpu);
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* When not in long mode, the virtual/linear address is
|
|
|
|
* unconditionally truncated to 32 bits regardless of the
|
|
|
|
* address size.
|
|
|
|
*/
|
|
|
|
*ret = (s.base + off) & 0xffffffff;
|
|
|
|
|
|
|
|
/* Protected mode: apply checks for segment validity in the
|
|
|
|
* following order:
|
|
|
|
* - segment type check (#GP(0) may be thrown)
|
|
|
|
* - usability check (#GP(0)/#SS(0))
|
|
|
|
* - limit check (#GP(0)/#SS(0))
|
|
|
|
*/
|
|
|
|
if (wr)
|
|
|
|
/* #GP(0) if the destination operand is located in a
|
|
|
|
* read-only data segment or any code segment.
|
|
|
|
*/
|
|
|
|
exn = ((s.type & 0xa) == 0 || (s.type & 8));
|
|
|
|
else
|
|
|
|
/* #GP(0) if the source operand is located in an
|
|
|
|
* execute-only code segment
|
|
|
|
*/
|
|
|
|
exn = ((s.type & 0xa) == 8);
|
|
|
|
if (exn) {
|
|
|
|
kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
|
|
|
|
*/
|
|
|
|
exn = (s.unusable != 0);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Protected mode: #GP(0)/#SS(0) if the memory operand is
|
|
|
|
* outside the segment limit. All CPUs that support VMX ignore
|
|
|
|
* limit checks for flat segments, i.e. segments with base==0,
|
|
|
|
* limit==0xffffffff and of type expand-up data or code.
|
|
|
|
*/
|
|
|
|
if (!(s.base == 0 && s.limit == 0xffffffff &&
|
|
|
|
((s.type & 8) || !(s.type & 4))))
|
|
|
|
exn = exn || (off + sizeof(u64) > s.limit);
|
|
|
|
}
|
|
|
|
if (exn) {
|
|
|
|
kvm_queue_exception_e(vcpu,
|
|
|
|
seg_reg == VCPU_SREG_SS ?
|
|
|
|
SS_VECTOR : GP_VECTOR,
|
|
|
|
0);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
|
|
|
|
{
|
|
|
|
gva_t gva;
|
|
|
|
struct x86_exception e;
|
|
|
|
|
|
|
|
if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
|
|
|
|
vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
|
|
|
|
kvm_inject_page_fault(vcpu, &e);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Allocate a shadow VMCS and associate it with the currently loaded
|
|
|
|
* VMCS, unless such a shadow VMCS already exists. The newly allocated
|
|
|
|
* VMCS is also VMCLEARed, so that it is ready for use.
|
|
|
|
*/
|
|
|
|
static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We should allocate a shadow vmcs for vmcs01 only when L1
|
|
|
|
* executes VMXON and free it when L1 executes VMXOFF.
|
|
|
|
* As it is invalid to execute VMXON twice, we shouldn't reach
|
|
|
|
	 * here when vmcs01 already has an allocated shadow vmcs.
|
|
|
|
*/
|
|
|
|
WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);
|
|
|
|
|
|
|
|
if (!loaded_vmcs->shadow_vmcs) {
|
|
|
|
loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
|
|
|
|
if (loaded_vmcs->shadow_vmcs)
|
|
|
|
vmcs_clear(loaded_vmcs->shadow_vmcs);
|
|
|
|
}
|
|
|
|
return loaded_vmcs->shadow_vmcs;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int enter_vmx_operation(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
int r;
|
|
|
|
|
|
|
|
r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
|
|
|
|
if (r < 0)
|
|
|
|
goto out_vmcs02;
|
|
|
|
|
|
|
|
vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
|
|
|
|
if (!vmx->nested.cached_vmcs12)
|
|
|
|
goto out_cached_vmcs12;
|
|
|
|
|
|
|
|
vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
|
|
|
|
if (!vmx->nested.cached_shadow_vmcs12)
|
|
|
|
goto out_cached_shadow_vmcs12;
|
|
|
|
|
|
|
|
if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
|
|
|
|
goto out_shadow_vmcs;
|
|
|
|
|
|
|
|
hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
|
|
|
|
HRTIMER_MODE_REL_PINNED);
|
|
|
|
vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
|
|
|
|
|
|
|
|
vmx->nested.vpid02 = allocate_vpid();
|
|
|
|
|
|
|
|
vmx->nested.vmcs02_initialized = false;
|
|
|
|
vmx->nested.vmxon = true;
|
|
|
|
|
|
|
|
if (pt_mode == PT_MODE_HOST_GUEST) {
|
|
|
|
vmx->pt_desc.guest.ctl = 0;
|
|
|
|
pt_update_intercept_for_msr(vmx);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
out_shadow_vmcs:
|
|
|
|
kfree(vmx->nested.cached_shadow_vmcs12);
|
|
|
|
|
|
|
|
out_cached_shadow_vmcs12:
|
|
|
|
kfree(vmx->nested.cached_vmcs12);
|
|
|
|
|
|
|
|
out_cached_vmcs12:
|
|
|
|
free_loaded_vmcs(&vmx->nested.vmcs02);
|
|
|
|
|
|
|
|
out_vmcs02:
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Emulate the VMXON instruction.
|
|
|
|
* Currently, we just remember that VMX is active, and do not save or even
|
|
|
|
* inspect the argument to VMXON (the so-called "VMXON pointer") because we
|
|
|
|
* do not currently need to store anything in that guest-allocated memory
|
|
|
|
 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
|
|
|
|
* argument is different from the VMXON pointer (which the spec says they do).
|
|
|
|
*/
|
|
|
|
static int handle_vmon(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
gpa_t vmptr;
|
|
|
|
struct page *page;
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
|
|
|
|
| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The Intel VMX Instruction Reference lists a bunch of bits that are
|
|
|
|
* prerequisite to running VMXON, most notably cr4.VMXE must be set to
|
|
|
|
* 1 (see vmx_set_cr4() for when we allow the guest to set this).
|
|
|
|
* Otherwise, we should fail with #UD. But most faulting conditions
|
|
|
|
* have already been checked by hardware, prior to the VM-exit for
|
|
|
|
* VMXON. We do test guest cr4.VMXE because processor CR4 always has
|
|
|
|
* that bit set to 1 in non-root mode.
|
|
|
|
*/
|
|
|
|
if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
|
|
|
|
kvm_queue_exception(vcpu, UD_VECTOR);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* CPL=0 must be checked manually. */
|
|
|
|
if (vmx_get_cpl(vcpu)) {
|
|
|
|
kvm_inject_gp(vcpu, 0);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (vmx->nested.vmxon)
|
|
|
|
return nested_vmx_failValid(vcpu,
|
|
|
|
VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
|
|
|
|
|
|
|
|
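	/*
	 * VMXON faults with #GP unless IA32_FEATURE_CONTROL is locked with
	 * VMX enabled outside SMX operation.
	 */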
if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
|
|
|
|
!= VMXON_NEEDED_FEATURES) {
|
|
|
|
kvm_inject_gp(vcpu, 0);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (nested_vmx_get_vmptr(vcpu, &vmptr))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SDM 3: 24.11.5
|
|
|
|
* The first 4 bytes of VMXON region contain the supported
|
|
|
|
* VMCS revision identifier
|
|
|
|
*
|
|
|
|
* Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
|
|
|
|
* which replaces physical address width with 32
|
|
|
|
*/
|
|
|
|
if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
|
|
|
|
return nested_vmx_failInvalid(vcpu);
|
|
|
|
|
|
|
|
page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
|
|
|
|
if (is_error_page(page))
|
|
|
|
return nested_vmx_failInvalid(vcpu);
|
|
|
|
|
|
|
|
if (*(u32 *)kmap(page) != VMCS12_REVISION) {
|
|
|
|
kunmap(page);
|
|
|
|
kvm_release_page_clean(page);
|
|
|
|
return nested_vmx_failInvalid(vcpu);
|
|
|
|
}
|
|
|
|
kunmap(page);
|
|
|
|
kvm_release_page_clean(page);
|
|
|
|
|
|
|
|
vmx->nested.vmxon_ptr = vmptr;
|
|
|
|
ret = enter_vmx_operation(vcpu);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
return nested_vmx_succeed(vcpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
|
|
|
|
if (vmx->nested.current_vmptr == -1ull)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (enable_shadow_vmcs) {
|
|
|
|
/* copy to memory all shadowed fields in case
|
|
|
|
they were modified */
|
|
|
|
copy_shadow_to_vmcs12(vmx);
|
|
|
|
vmx->nested.need_vmcs12_sync = false;
|
|
|
|
vmx_disable_shadow_vmcs(vmx);
|
|
|
|
}
|
|
|
|
vmx->nested.posted_intr_nv = -1;
|
|
|
|
|
|
|
|
/* Flush VMCS12 to guest memory */
|
|
|
|
kvm_vcpu_write_guest_page(vcpu,
|
|
|
|
vmx->nested.current_vmptr >> PAGE_SHIFT,
|
|
|
|
vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
|
|
|
|
|
|
|
|
kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
|
|
|
|
|
|
|
|
vmx->nested.current_vmptr = -1ull;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Emulate the VMXOFF instruction */
|
|
|
|
static int handle_vmoff(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
if (!nested_vmx_check_permission(vcpu))
|
|
|
|
return 1;
|
|
|
|
free_nested(vcpu);
|
|
|
|
return nested_vmx_succeed(vcpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Emulate the VMCLEAR instruction */
|
|
|
|
static int handle_vmclear(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
u32 zero = 0;
|
|
|
|
gpa_t vmptr;
|
|
|
|
|
|
|
|
if (!nested_vmx_check_permission(vcpu))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (nested_vmx_get_vmptr(vcpu, &vmptr))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
|
|
|
|
return nested_vmx_failValid(vcpu,
|
|
|
|
VMXERR_VMCLEAR_INVALID_ADDRESS);
|
|
|
|
|
|
|
|
if (vmptr == vmx->nested.vmxon_ptr)
|
|
|
|
return nested_vmx_failValid(vcpu,
|
|
|
|
VMXERR_VMCLEAR_VMXON_POINTER);
|
|
|
|
|
|
|
|
if (vmx->nested.hv_evmcs_page) {
|
|
|
|
if (vmptr == vmx->nested.hv_evmcs_vmptr)
|
|
|
|
nested_release_evmcs(vcpu);
|
|
|
|
} else {
|
|
|
|
if (vmptr == vmx->nested.current_vmptr)
|
|
|
|
nested_release_vmcs12(vcpu);
|
|
|
|
|
|
|
|
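		/*
		 * VMCLEAR of a VMCS that isn't cached by KVM is emulated by
		 * simply zeroing its launch state in guest memory.
		 */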
kvm_vcpu_write_guest(vcpu,
|
|
|
|
vmptr + offsetof(struct vmcs12,
|
|
|
|
launch_state),
|
|
|
|
&zero, sizeof(zero));
|
|
|
|
}
|
|
|
|
|
|
|
|
return nested_vmx_succeed(vcpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
|
|
|
|
|
|
|
|
/* Emulate the VMLAUNCH instruction */
|
|
|
|
static int handle_vmlaunch(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
return nested_vmx_run(vcpu, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Emulate the VMRESUME instruction */
|
|
|
|
static int handle_vmresume(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
|
|
|
|
return nested_vmx_run(vcpu, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int handle_vmread(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
unsigned long field;
|
|
|
|
u64 field_value;
|
|
|
|
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
|
|
|
|
u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
|
|
|
|
gva_t gva = 0;
|
|
|
|
struct vmcs12 *vmcs12;
|
|
|
|
|
|
|
|
if (!nested_vmx_check_permission(vcpu))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
|
|
|
|
return nested_vmx_failInvalid(vcpu);
|
|
|
|
|
|
|
|
if (!is_guest_mode(vcpu))
|
|
|
|
vmcs12 = get_vmcs12(vcpu);
|
|
|
|
else {
|
|
|
|
/*
|
|
|
|
* When vmcs->vmcs_link_pointer is -1ull, any VMREAD
|
|
|
|
* to shadowed-field sets the ALU flags for VMfailInvalid.
|
|
|
|
*/
|
|
|
|
if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
|
|
|
|
return nested_vmx_failInvalid(vcpu);
|
|
|
|
vmcs12 = get_shadow_vmcs12(vcpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Decode instruction info and find the field to read */
|
|
|
|
field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
|
|
|
|
/* Read the field, zero-extended to a u64 field_value */
|
|
|
|
if (vmcs12_read_any(vmcs12, field, &field_value) < 0)
|
|
|
|
return nested_vmx_failValid(vcpu,
|
|
|
|
VMXERR_UNSUPPORTED_VMCS_COMPONENT);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now copy part of this value to register or memory, as requested.
|
|
|
|
* Note that the number of bits actually copied is 32 or 64 depending
|
|
|
|
* on the guest's mode (32 or 64 bit), not on the given field's length.
|
|
|
|
*/
|
|
|
|
if (vmx_instruction_info & (1u << 10)) {
|
|
|
|
kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
|
|
|
|
field_value);
|
|
|
|
} else {
|
|
|
|
if (get_vmx_mem_address(vcpu, exit_qualification,
|
|
|
|
vmx_instruction_info, true, &gva))
|
|
|
|
return 1;
|
|
|
|
/* _system ok, nested_vmx_check_permission has verified cpl=0 */
|
|
|
|
kvm_write_guest_virt_system(vcpu, gva, &field_value,
|
|
|
|
(is_long_mode(vcpu) ? 8 : 4), NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
return nested_vmx_succeed(vcpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int handle_vmwrite(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
unsigned long field;
|
|
|
|
gva_t gva;
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
|
|
|
|
u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
|
|
|
|
|
|
|
|
/* The value to write might be 32 or 64 bits, depending on L1's long
|
|
|
|
* mode, and eventually we need to write that into a field of several
|
|
|
|
* possible lengths. The code below first zero-extends the value to 64
|
|
|
|
* bit (field_value), and then copies only the appropriate number of
|
|
|
|
* bits into the vmcs12 field.
|
|
|
|
*/
|
|
|
|
u64 field_value = 0;
|
|
|
|
struct x86_exception e;
|
|
|
|
struct vmcs12 *vmcs12;
|
|
|
|
|
|
|
|
if (!nested_vmx_check_permission(vcpu))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (vmx->nested.current_vmptr == -1ull)
|
|
|
|
return nested_vmx_failInvalid(vcpu);
|
|
|
|
|
|
|
|
if (vmx_instruction_info & (1u << 10))
|
|
|
|
field_value = kvm_register_readl(vcpu,
|
|
|
|
(((vmx_instruction_info) >> 3) & 0xf));
|
|
|
|
else {
|
|
|
|
if (get_vmx_mem_address(vcpu, exit_qualification,
|
|
|
|
vmx_instruction_info, false, &gva))
|
|
|
|
return 1;
|
|
|
|
if (kvm_read_guest_virt(vcpu, gva, &field_value,
|
|
|
|
(is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
|
|
|
|
kvm_inject_page_fault(vcpu, &e);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
|
|
|
|
/*
|
|
|
|
* If the vCPU supports "VMWRITE to any supported field in the
|
|
|
|
* VMCS," then the "read-only" fields are actually read/write.
|
|
|
|
*/
|
|
|
|
if (vmcs_field_readonly(field) &&
|
|
|
|
!nested_cpu_has_vmwrite_any_field(vcpu))
|
|
|
|
return nested_vmx_failValid(vcpu,
|
|
|
|
VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
|
|
|
|
|
|
|
|
if (!is_guest_mode(vcpu))
|
|
|
|
vmcs12 = get_vmcs12(vcpu);
|
|
|
|
else {
|
|
|
|
/*
|
|
|
|
* When vmcs->vmcs_link_pointer is -1ull, any VMWRITE
|
|
|
|
* to shadowed-field sets the ALU flags for VMfailInvalid.
|
|
|
|
*/
|
|
|
|
if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
|
|
|
|
return nested_vmx_failInvalid(vcpu);
|
|
|
|
vmcs12 = get_shadow_vmcs12(vcpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (vmcs12_write_any(vmcs12, field, field_value) < 0)
|
|
|
|
return nested_vmx_failValid(vcpu,
|
|
|
|
VMXERR_UNSUPPORTED_VMCS_COMPONENT);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Do not track vmcs12 dirty-state if in guest-mode
|
|
|
|
* as we actually dirty shadow vmcs12 instead of vmcs12.
|
|
|
|
*/
|
|
|
|
if (!is_guest_mode(vcpu)) {
|
|
|
|
switch (field) {
|
|
|
|
#define SHADOW_FIELD_RW(x) case x:
|
|
|
|
#include "vmcs_shadow_fields.h"
|
|
|
|
/*
|
|
|
|
* The fields that can be updated by L1 without a vmexit are
|
|
|
|
* always updated in the vmcs02, the others go down the slow
|
|
|
|
* path of prepare_vmcs02.
|
|
|
|
*/
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
vmx->nested.dirty_vmcs12 = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nested_vmx_succeed(vcpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
|
|
|
|
{
|
|
|
|
vmx->nested.current_vmptr = vmptr;
|
|
|
|
if (enable_shadow_vmcs) {
|
|
|
|
vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
|
|
|
|
SECONDARY_EXEC_SHADOW_VMCS);
|
|
|
|
vmcs_write64(VMCS_LINK_POINTER,
|
|
|
|
__pa(vmx->vmcs01.shadow_vmcs));
|
|
|
|
vmx->nested.need_vmcs12_sync = true;
|
|
|
|
}
|
|
|
|
vmx->nested.dirty_vmcs12 = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Emulate the VMPTRLD instruction */
|
|
|
|
static int handle_vmptrld(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
gpa_t vmptr;
|
|
|
|
|
|
|
|
if (!nested_vmx_check_permission(vcpu))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (nested_vmx_get_vmptr(vcpu, &vmptr))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu)))
|
|
|
|
return nested_vmx_failValid(vcpu,
|
|
|
|
VMXERR_VMPTRLD_INVALID_ADDRESS);
|
|
|
|
|
|
|
|
if (vmptr == vmx->nested.vmxon_ptr)
|
|
|
|
return nested_vmx_failValid(vcpu,
|
|
|
|
VMXERR_VMPTRLD_VMXON_POINTER);
|
|
|
|
|
|
|
|
/* Forbid normal VMPTRLD if Enlightened version was used */
|
|
|
|
if (vmx->nested.hv_evmcs)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (vmx->nested.current_vmptr != vmptr) {
|
|
|
|
struct vmcs12 *new_vmcs12;
|
|
|
|
struct page *page;
|
|
|
|
|
|
|
|
page = kvm_vcpu_gpa_to_page(vcpu, vmptr);
|
|
|
|
if (is_error_page(page)) {
|
|
|
|
/*
|
|
|
|
* Reads from an unbacked page return all 1s,
|
|
|
|
* which means that the 32 bits located at the
|
|
|
|
* given physical address won't match the required
|
|
|
|
* VMCS12_REVISION identifier.
|
|
|
|
*/
|
|
|
|
return nested_vmx_failValid(vcpu,
|
|
|
|
VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
|
|
|
|
}
|
|
|
|
new_vmcs12 = kmap(page);
|
|
|
|
if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
|
|
|
|
(new_vmcs12->hdr.shadow_vmcs &&
|
|
|
|
!nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
|
|
|
|
kunmap(page);
|
|
|
|
kvm_release_page_clean(page);
|
|
|
|
return nested_vmx_failValid(vcpu,
|
|
|
|
VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
|
|
|
|
}
|
|
|
|
|
|
|
|
nested_release_vmcs12(vcpu);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Load VMCS12 from guest memory since it is not already
|
|
|
|
* cached.
|
|
|
|
*/
|
|
|
|
memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
|
|
|
|
kunmap(page);
|
|
|
|
kvm_release_page_clean(page);
|
|
|
|
|
|
|
|
set_current_vmptr(vmx, vmptr);
|
|
|
|
}
|
|
|
|
|
|
|
|
return nested_vmx_succeed(vcpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Emulate the VMPTRST instruction */
|
|
|
|
static int handle_vmptrst(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
unsigned long exit_qual = vmcs_readl(EXIT_QUALIFICATION);
|
|
|
|
u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
|
|
|
|
gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
|
|
|
|
struct x86_exception e;
|
|
|
|
gva_t gva;
|
|
|
|
|
|
|
|
if (!nested_vmx_check_permission(vcpu))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (get_vmx_mem_address(vcpu, exit_qual, instr_info, true, &gva))
|
|
|
|
return 1;
|
|
|
|
/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
|
|
|
|
if (kvm_write_guest_virt_system(vcpu, gva, (void *)¤t_vmptr,
|
|
|
|
sizeof(gpa_t), &e)) {
|
|
|
|
kvm_inject_page_fault(vcpu, &e);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return nested_vmx_succeed(vcpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Emulate the INVEPT instruction */
|
|
|
|
static int handle_invept(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
u32 vmx_instruction_info, types;
|
|
|
|
unsigned long type;
|
|
|
|
gva_t gva;
|
|
|
|
struct x86_exception e;
|
|
|
|
struct {
|
|
|
|
u64 eptp, gpa;
|
|
|
|
} operand;
|
|
|
|
|
|
|
|
if (!(vmx->nested.msrs.secondary_ctls_high &
|
|
|
|
SECONDARY_EXEC_ENABLE_EPT) ||
|
|
|
|
!(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
|
|
|
|
kvm_queue_exception(vcpu, UD_VECTOR);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!nested_vmx_check_permission(vcpu))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
|
|
|
|
type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
|
|
|
|
|
|
|
|
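	/* Only the INVEPT types advertised to L1 in the EPT caps MSR are valid. */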
types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
|
|
|
|
|
|
|
|
if (type >= 32 || !(types & (1 << type)))
|
|
|
|
return nested_vmx_failValid(vcpu,
|
|
|
|
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
|
|
|
|
|
|
|
|
/* According to the Intel VMX instruction reference, the memory
|
|
|
|
* operand is read even if it isn't needed (e.g., for type==global)
|
|
|
|
*/
|
|
|
|
if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
|
|
|
|
vmx_instruction_info, false, &gva))
|
|
|
|
return 1;
|
|
|
|
if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
|
|
|
|
kvm_inject_page_fault(vcpu, &e);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (type) {
|
|
|
|
case VMX_EPT_EXTENT_GLOBAL:
|
|
|
|
/*
|
|
|
|
* TODO: track mappings and invalidate
|
|
|
|
* single context requests appropriately
|
|
|
|
*/
|
|
|
|
case VMX_EPT_EXTENT_CONTEXT:
|
|
|
|
kvm_mmu_sync_roots(vcpu);
|
|
|
|
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
BUG_ON(1);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return nested_vmx_succeed(vcpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int handle_invvpid(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
u32 vmx_instruction_info;
|
|
|
|
unsigned long type, types;
|
|
|
|
gva_t gva;
|
|
|
|
struct x86_exception e;
|
|
|
|
struct {
|
|
|
|
u64 vpid;
|
|
|
|
u64 gla;
|
|
|
|
} operand;
|
|
|
|
u16 vpid02;
|
|
|
|
|
|
|
|
if (!(vmx->nested.msrs.secondary_ctls_high &
|
|
|
|
SECONDARY_EXEC_ENABLE_VPID) ||
|
|
|
|
!(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
|
|
|
|
kvm_queue_exception(vcpu, UD_VECTOR);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!nested_vmx_check_permission(vcpu))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
|
|
|
|
type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
|
|
|
|
|
|
|
|
types = (vmx->nested.msrs.vpid_caps &
|
|
|
|
VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
|
|
|
|
|
|
|
|
if (type >= 32 || !(types & (1 << type)))
|
|
|
|
return nested_vmx_failValid(vcpu,
|
|
|
|
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
|
|
|
|
|
|
|
|
	/* According to the Intel VMX instruction reference, the memory
|
|
|
|
* operand is read even if it isn't needed (e.g., for type==global)
|
|
|
|
*/
|
|
|
|
if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
|
|
|
|
vmx_instruction_info, false, &gva))
|
|
|
|
return 1;
|
|
|
|
if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
|
|
|
|
kvm_inject_page_fault(vcpu, &e);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
if (operand.vpid >> 16)
|
|
|
|
return nested_vmx_failValid(vcpu,
|
|
|
|
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
|
|
|
|
|
|
|
|
vpid02 = nested_get_vpid02(vcpu);
|
|
|
|
switch (type) {
|
|
|
|
case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
|
|
|
|
if (!operand.vpid ||
|
|
|
|
is_noncanonical_address(operand.gla, vcpu))
|
|
|
|
return nested_vmx_failValid(vcpu,
|
|
|
|
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
|
|
|
|
if (cpu_has_vmx_invvpid_individual_addr()) {
|
|
|
|
__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR,
|
|
|
|
vpid02, operand.gla);
|
|
|
|
} else
|
|
|
|
__vmx_flush_tlb(vcpu, vpid02, false);
|
|
|
|
break;
|
|
|
|
case VMX_VPID_EXTENT_SINGLE_CONTEXT:
|
|
|
|
case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
|
|
|
|
if (!operand.vpid)
|
|
|
|
return nested_vmx_failValid(vcpu,
|
|
|
|
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
|
|
|
|
__vmx_flush_tlb(vcpu, vpid02, false);
|
|
|
|
break;
|
|
|
|
case VMX_VPID_EXTENT_ALL_CONTEXT:
|
|
|
|
__vmx_flush_tlb(vcpu, vpid02, false);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
WARN_ON_ONCE(1);
|
|
|
|
return kvm_skip_emulated_instruction(vcpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
return nested_vmx_succeed(vcpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
|
|
|
|
struct vmcs12 *vmcs12)
|
|
|
|
{
|
|
|
|
u32 index = vcpu->arch.regs[VCPU_REGS_RCX];
|
|
|
|
u64 address;
|
|
|
|
bool accessed_dirty;
|
|
|
|
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
|
|
|
|
|
|
|
|
if (!nested_cpu_has_eptp_switching(vmcs12) ||
|
|
|
|
!nested_cpu_has_ept(vmcs12))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
if (index >= VMFUNC_EPTP_ENTRIES)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
|
|
|
|
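	/* Read the selected 8-byte entry from the EPTP list page in L1 memory. */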
if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
|
|
|
|
&address, index * 8, 8))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
accessed_dirty = !!(address & VMX_EPTP_AD_ENABLE_BIT);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the (L2) guest does a vmfunc to the currently
|
|
|
|
* active ept pointer, we don't have to do anything else
|
|
|
|
*/
|
|
|
|
if (vmcs12->ept_pointer != address) {
|
|
|
|
if (!valid_ept_address(vcpu, address))
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
kvm_mmu_unload(vcpu);
|
|
|
|
mmu->ept_ad = accessed_dirty;
|
|
|
|
mmu->mmu_role.base.ad_disabled = !accessed_dirty;
|
|
|
|
vmcs12->ept_pointer = address;
|
|
|
|
/*
|
|
|
|
* TODO: Check what's the correct approach in case
|
|
|
|
* mmu reload fails. Currently, we just let the next
|
|
|
|
* reload potentially fail
|
|
|
|
*/
|
|
|
|
kvm_mmu_reload(vcpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int handle_vmfunc(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
struct vmcs12 *vmcs12;
|
|
|
|
u32 function = vcpu->arch.regs[VCPU_REGS_RAX];
|
|
|
|
|
|
|
|
/*
|
|
|
|
* VMFUNC is only supported for nested guests, but we always enable the
|
|
|
|
* secondary control for simplicity; for non-nested mode, fake that we
|
|
|
|
* didn't by injecting #UD.
|
|
|
|
*/
|
|
|
|
if (!is_guest_mode(vcpu)) {
|
|
|
|
kvm_queue_exception(vcpu, UD_VECTOR);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
vmcs12 = get_vmcs12(vcpu);
|
|
|
|
if ((vmcs12->vm_function_control & (1 << function)) == 0)
|
|
|
|
goto fail;
|
|
|
|
|
|
|
|
switch (function) {
|
|
|
|
case 0:
|
|
|
|
if (nested_vmx_eptp_switching(vcpu, vmcs12))
|
|
|
|
goto fail;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
return kvm_skip_emulated_instruction(vcpu);
|
|
|
|
|
|
|
|
fail:
|
|
|
|
nested_vmx_vmexit(vcpu, vmx->exit_reason,
|
|
|
|
vmcs_read32(VM_EXIT_INTR_INFO),
|
|
|
|
vmcs_readl(EXIT_QUALIFICATION));
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
|
|
|
|
struct vmcs12 *vmcs12)
|
|
|
|
{
|
|
|
|
unsigned long exit_qualification;
|
|
|
|
gpa_t bitmap, last_bitmap;
|
|
|
|
unsigned int port;
|
|
|
|
int size;
|
|
|
|
u8 b;
|
|
|
|
|
|
|
|
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
|
|
|
|
return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
|
|
|
|
|
|
|
|
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
|
|
|
|
|
|
|
|
port = exit_qualification >> 16;
|
|
|
|
size = (exit_qualification & 7) + 1;
|
|
|
|
|
|
|
|
last_bitmap = (gpa_t)-1;
|
|
|
|
b = -1;
|
|
|
|
|
|
|
|
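	/*
	 * Check each port covered by the access: ports 0x0000-0x7fff map to
	 * io_bitmap_a and ports 0x8000-0xffff to io_bitmap_b, one bit per port.
	 */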
while (size > 0) {
|
|
|
|
if (port < 0x8000)
|
|
|
|
bitmap = vmcs12->io_bitmap_a;
|
|
|
|
else if (port < 0x10000)
|
|
|
|
bitmap = vmcs12->io_bitmap_b;
|
|
|
|
else
|
|
|
|
return true;
|
|
|
|
bitmap += (port & 0x7fff) / 8;
|
|
|
|
|
|
|
|
if (last_bitmap != bitmap)
|
|
|
|
if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
|
|
|
|
return true;
|
|
|
|
if (b & (1 << (port & 7)))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
port++;
|
|
|
|
size--;
|
|
|
|
last_bitmap = bitmap;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
 * Return 1 if we should exit from L2 to L1 to handle an MSR access,
|
|
|
|
* rather than handle it ourselves in L0. I.e., check whether L1 expressed
|
|
|
|
* disinterest in the current event (read or write a specific MSR) by using an
|
|
|
|
* MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
|
|
|
|
*/
|
|
|
|
static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
|
|
|
|
struct vmcs12 *vmcs12, u32 exit_reason)
|
|
|
|
{
|
|
|
|
u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
|
|
|
|
gpa_t bitmap;
|
|
|
|
|
|
|
|
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The MSR_BITMAP page is divided into four 1024-byte bitmaps,
|
|
|
|
* for the four combinations of read/write and low/high MSR numbers.
|
|
|
|
* First we need to figure out which of the four to use:
|
|
|
|
*/
|
|
|
|
bitmap = vmcs12->msr_bitmap;
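	/*
	 * Layout: low-MSR reads at offset 0, high-MSR reads at 1024,
	 * low-MSR writes at 2048, high-MSR writes at 3072.
	 */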
|
|
|
|
if (exit_reason == EXIT_REASON_MSR_WRITE)
|
|
|
|
bitmap += 2048;
|
|
|
|
if (msr_index >= 0xc0000000) {
|
|
|
|
msr_index -= 0xc0000000;
|
|
|
|
bitmap += 1024;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Then read the msr_index'th bit from this bitmap: */
|
|
|
|
if (msr_index < 1024*8) {
|
|
|
|
unsigned char b;
|
|
|
|
if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
|
|
|
|
return true;
|
|
|
|
return 1 & (b >> (msr_index & 7));
|
|
|
|
} else
|
|
|
|
return true; /* let L1 handle the wrong parameter */
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return 1 if we should exit from L2 to L1 to handle a CR access exit,
|
|
|
|
* rather than handle it ourselves in L0. I.e., check if L1 wanted to
|
|
|
|
* intercept (via guest_host_mask etc.) the current event.
|
|
|
|
*/
|
|
|
|
static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
|
|
|
|
struct vmcs12 *vmcs12)
|
|
|
|
{
|
|
|
|
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
|
|
|
|
int cr = exit_qualification & 15;
|
|
|
|
int reg;
|
|
|
|
unsigned long val;
|
|
|
|
|
|
|
|
switch ((exit_qualification >> 4) & 3) {
|
|
|
|
case 0: /* mov to cr */
|
|
|
|
reg = (exit_qualification >> 8) & 15;
|
|
|
|
val = kvm_register_readl(vcpu, reg);
|
|
|
|
switch (cr) {
|
|
|
|
case 0:
|
|
|
|
if (vmcs12->cr0_guest_host_mask &
|
|
|
|
(val ^ vmcs12->cr0_read_shadow))
|
|
|
|
return true;
|
|
|
|
break;
|
|
|
|
case 3:
|
|
|
|
if ((vmcs12->cr3_target_count >= 1 &&
|
|
|
|
vmcs12->cr3_target_value0 == val) ||
|
|
|
|
(vmcs12->cr3_target_count >= 2 &&
|
|
|
|
vmcs12->cr3_target_value1 == val) ||
|
|
|
|
(vmcs12->cr3_target_count >= 3 &&
|
|
|
|
vmcs12->cr3_target_value2 == val) ||
|
|
|
|
(vmcs12->cr3_target_count >= 4 &&
|
|
|
|
vmcs12->cr3_target_value3 == val))
|
|
|
|
return false;
|
|
|
|
if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
|
|
|
|
return true;
|
|
|
|
break;
|
|
|
|
case 4:
|
|
|
|
if (vmcs12->cr4_guest_host_mask &
|
|
|
|
(vmcs12->cr4_read_shadow ^ val))
|
|
|
|
return true;
|
|
|
|
break;
|
|
|
|
case 8:
|
|
|
|
if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
|
|
|
|
return true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 2: /* clts */
|
|
|
|
if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
|
|
|
|
(vmcs12->cr0_read_shadow & X86_CR0_TS))
|
|
|
|
return true;
|
|
|
|
break;
|
|
|
|
case 1: /* mov from cr */
|
|
|
|
switch (cr) {
|
|
|
|
case 3:
|
|
|
|
if (vmcs12->cpu_based_vm_exec_control &
|
|
|
|
CPU_BASED_CR3_STORE_EXITING)
|
|
|
|
return true;
|
|
|
|
break;
|
|
|
|
case 8:
|
|
|
|
if (vmcs12->cpu_based_vm_exec_control &
|
|
|
|
CPU_BASED_CR8_STORE_EXITING)
|
|
|
|
return true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 3: /* lmsw */
|
|
|
|
/*
|
|
|
|
* lmsw can change bits 1..3 of cr0, and only set bit 0 of
|
|
|
|
* cr0. Other attempted changes are ignored, with no exit.
|
|
|
|
*/
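/*
 * Example (added): if L1 owns CR0.TS, its CR0 read shadow has TS clear,
 * and L2's lmsw source operand sets TS, the mask/xor test below sees the
 * difference and the exit is reflected to L1.
 */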
|
|
|
|
val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
|
|
|
|
if (vmcs12->cr0_guest_host_mask & 0xe &
|
|
|
|
(val ^ vmcs12->cr0_read_shadow))
|
|
|
|
return true;
|
|
|
|
if ((vmcs12->cr0_guest_host_mask & 0x1) &&
|
|
|
|
!(vmcs12->cr0_read_shadow & 0x1) &&
|
|
|
|
(val & 0x1))
|
|
|
|
return true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
|
|
|
|
struct vmcs12 *vmcs12, gpa_t bitmap)
|
|
|
|
{
|
|
|
|
u32 vmx_instruction_info;
|
|
|
|
unsigned long field;
|
|
|
|
u8 b;
|
|
|
|
|
|
|
|
if (!nested_cpu_has_shadow_vmcs(vmcs12))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
/* Decode instruction info and find the field to access */
|
|
|
|
vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
|
|
|
|
field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
|
|
|
|
|
|
|
|
/* Out-of-range fields always cause a VM exit from L2 to L1 */
|
|
|
|
if (field >> 15)
|
|
|
|
return true;
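/*
 * Example (added): GUEST_RIP is encoded as 0x681e, so its intercept bit
 * is bit 6 of byte 0xd03 of the bitmap read below.
 */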
|
|
|
|
|
|
|
|
if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return 1 & (b >> (field & 7));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return true if we should exit from L2 to L1 to handle an exit, or false if we
|
|
|
|
* should handle it ourselves in L0 (and then continue L2). Only call this
|
|
|
|
* when in is_guest_mode (L2).
|
|
|
|
*/
|
|
|
|
bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
|
|
|
|
{
|
|
|
|
u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
|
|
|
|
|
|
|
|
if (vmx->nested.nested_run_pending)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
if (unlikely(vmx->fail)) {
|
|
|
|
pr_info_ratelimited("%s failed vm entry %x\n", __func__,
|
|
|
|
vmcs_read32(VM_INSTRUCTION_ERROR));
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The host physical addresses of some pages of guest memory
|
|
|
|
* are loaded into the vmcs02 (e.g. vmcs12's Virtual APIC
|
|
|
|
* Page). The CPU may write to these pages via their host
|
|
|
|
* physical address while L2 is running, bypassing any
|
|
|
|
* address-translation-based dirty tracking (e.g. EPT write
|
|
|
|
* protection).
|
|
|
|
*
|
|
|
|
* Mark them dirty on every exit from L2 to prevent them from
|
|
|
|
* getting out of sync with dirty tracking.
|
|
|
|
*/
|
|
|
|
nested_mark_vmcs12_pages_dirty(vcpu);
|
|
|
|
|
|
|
|
trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
|
|
|
|
vmcs_readl(EXIT_QUALIFICATION),
|
|
|
|
vmx->idt_vectoring_info,
|
|
|
|
intr_info,
|
|
|
|
vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
|
|
|
|
KVM_ISA_VMX);
|
|
|
|
|
|
|
|
switch (exit_reason) {
|
|
|
|
case EXIT_REASON_EXCEPTION_NMI:
|
|
|
|
if (is_nmi(intr_info))
|
|
|
|
return false;
|
|
|
|
else if (is_page_fault(intr_info))
|
|
|
|
return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept;
|
|
|
|
else if (is_debug(intr_info) &&
|
|
|
|
vcpu->guest_debug &
|
|
|
|
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
|
|
|
|
return false;
|
|
|
|
else if (is_breakpoint(intr_info) &&
|
|
|
|
vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
|
|
|
|
return false;
|
|
|
|
return vmcs12->exception_bitmap &
|
|
|
|
(1u << (intr_info & INTR_INFO_VECTOR_MASK));
|
|
|
|
case EXIT_REASON_EXTERNAL_INTERRUPT:
|
|
|
|
return false;
|
|
|
|
case EXIT_REASON_TRIPLE_FAULT:
|
|
|
|
return true;
|
|
|
|
case EXIT_REASON_PENDING_INTERRUPT:
|
|
|
|
return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
|
|
|
|
case EXIT_REASON_NMI_WINDOW:
|
|
|
|
return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
|
|
|
|
case EXIT_REASON_TASK_SWITCH:
|
|
|
|
return true;
|
|
|
|
case EXIT_REASON_CPUID:
|
|
|
|
return true;
|
|
|
|
case EXIT_REASON_HLT:
|
|
|
|
return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
|
|
|
|
case EXIT_REASON_INVD:
|
|
|
|
return true;
|
|
|
|
case EXIT_REASON_INVLPG:
|
|
|
|
return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
|
|
|
|
case EXIT_REASON_RDPMC:
|
|
|
|
return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
|
|
|
|
case EXIT_REASON_RDRAND:
|
|
|
|
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
|
|
|
|
case EXIT_REASON_RDSEED:
|
|
|
|
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
|
|
|
|
case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
|
|
|
|
return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
|
|
|
|
case EXIT_REASON_VMREAD:
|
|
|
|
return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
|
|
|
|
vmcs12->vmread_bitmap);
|
|
|
|
case EXIT_REASON_VMWRITE:
|
|
|
|
return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
|
|
|
|
vmcs12->vmwrite_bitmap);
|
|
|
|
case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
|
|
|
|
case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
|
|
|
|
case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
|
|
|
|
case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
|
|
|
|
case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
|
|
|
|
/*
|
|
|
|
* VMX instructions trap unconditionally. This allows L1 to
|
|
|
|
* emulate them for its L2 guest, i.e., allows 3-level nesting!
|
|
|
|
*/
|
|
|
|
return true;
|
|
|
|
case EXIT_REASON_CR_ACCESS:
|
|
|
|
return nested_vmx_exit_handled_cr(vcpu, vmcs12);
|
|
|
|
case EXIT_REASON_DR_ACCESS:
|
|
|
|
return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
|
|
|
|
case EXIT_REASON_IO_INSTRUCTION:
|
|
|
|
return nested_vmx_exit_handled_io(vcpu, vmcs12);
|
|
|
|
case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
|
|
|
|
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
|
|
|
|
case EXIT_REASON_MSR_READ:
|
|
|
|
case EXIT_REASON_MSR_WRITE:
|
|
|
|
return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
|
|
|
|
case EXIT_REASON_INVALID_STATE:
|
|
|
|
return true;
|
|
|
|
case EXIT_REASON_MWAIT_INSTRUCTION:
|
|
|
|
return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
|
|
|
|
case EXIT_REASON_MONITOR_TRAP_FLAG:
|
|
|
|
return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
|
|
|
|
case EXIT_REASON_MONITOR_INSTRUCTION:
|
|
|
|
return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
|
|
|
|
case EXIT_REASON_PAUSE_INSTRUCTION:
|
|
|
|
return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
|
|
|
|
nested_cpu_has2(vmcs12,
|
|
|
|
SECONDARY_EXEC_PAUSE_LOOP_EXITING);
|
|
|
|
case EXIT_REASON_MCE_DURING_VMENTRY:
|
|
|
|
return false;
|
|
|
|
case EXIT_REASON_TPR_BELOW_THRESHOLD:
|
|
|
|
return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
|
|
|
|
case EXIT_REASON_APIC_ACCESS:
|
|
|
|
case EXIT_REASON_APIC_WRITE:
|
|
|
|
case EXIT_REASON_EOI_INDUCED:
|
|
|
|
/*
|
|
|
|
* The controls for "virtualize APIC accesses," "APIC-
|
|
|
|
* register virtualization," and "virtual-interrupt
|
|
|
|
* delivery" only come from vmcs12.
|
|
|
|
*/
|
|
|
|
return true;
|
|
|
|
case EXIT_REASON_EPT_VIOLATION:
|
|
|
|
/*
|
|
|
|
* L0 always deals with the EPT violation. If nested EPT is
|
|
|
|
* used, and the nested mmu code discovers that the address is
|
|
|
|
* missing in the guest EPT table (EPT12), the EPT violation
|
|
|
|
* will be injected with nested_ept_inject_page_fault()
|
|
|
|
*/
|
|
|
|
return false;
|
|
|
|
case EXIT_REASON_EPT_MISCONFIG:
|
|
|
|
/*
|
|
|
|
* L2 never directly uses L1's EPT, but rather L0's own EPT
|
|
|
|
* table (shadow on EPT) or a merged EPT table that L0 built
|
|
|
|
* (EPT on EPT). So any problems with the structure of the
|
|
|
|
* table are L0's fault.
|
|
|
|
*/
|
|
|
|
return false;
|
|
|
|
case EXIT_REASON_INVPCID:
|
|
|
|
return
|
|
|
|
nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
|
|
|
|
nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
|
|
|
|
case EXIT_REASON_WBINVD:
|
|
|
|
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
|
|
|
|
case EXIT_REASON_XSETBV:
|
|
|
|
return true;
|
|
|
|
case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
|
|
|
|
/*
|
|
|
|
* This should never happen, since it is not possible to
|
|
|
|
* set XSS to a non-zero value---neither in L1 nor in L2.
|
|
|
|
* If it were, XSS would have to be checked against
|
|
|
|
* the XSS exit bitmap in vmcs12.
|
|
|
|
*/
|
|
|
|
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
|
|
|
|
case EXIT_REASON_PREEMPTION_TIMER:
|
|
|
|
return false;
|
|
|
|
case EXIT_REASON_PML_FULL:
|
|
|
|
/* We emulate PML support to L1. */
|
|
|
|
return false;
|
|
|
|
case EXIT_REASON_VMFUNC:
|
|
|
|
/* VM functions are emulated through L2->L0 vmexits. */
|
|
|
|
return false;
|
|
|
|
case EXIT_REASON_ENCLS:
|
|
|
|
/* SGX is never exposed to L1 */
|
|
|
|
return false;
|
|
|
|
default:
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
|
|
|
|
struct kvm_nested_state __user *user_kvm_nested_state,
|
|
|
|
u32 user_data_size)
|
|
|
|
{
|
|
|
|
struct vcpu_vmx *vmx;
|
|
|
|
struct vmcs12 *vmcs12;
|
|
|
|
struct kvm_nested_state kvm_state = {
|
|
|
|
.flags = 0,
|
|
|
|
.format = 0,
|
|
|
|
.size = sizeof(kvm_state),
|
|
|
|
.vmx.vmxon_pa = -1ull,
|
|
|
|
.vmx.vmcs_pa = -1ull,
|
|
|
|
};
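/*
 * When called without a vcpu, only the worst-case buffer size is
 * reported: the header plus room for vmcs12 and a shadow vmcs12.
 */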
|
|
|
|
|
|
|
|
if (!vcpu)
|
|
|
|
return kvm_state.size + 2 * VMCS12_SIZE;
|
|
|
|
|
|
|
|
vmx = to_vmx(vcpu);
|
|
|
|
vmcs12 = get_vmcs12(vcpu);
|
|
|
|
|
|
|
|
if (nested_vmx_allowed(vcpu) && vmx->nested.enlightened_vmcs_enabled)
|
|
|
|
kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
|
|
|
|
|
|
|
|
if (nested_vmx_allowed(vcpu) &&
|
|
|
|
(vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
|
|
|
|
kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
|
|
|
|
kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr;
|
|
|
|
|
|
|
|
if (vmx_has_valid_vmcs12(vcpu)) {
|
|
|
|
kvm_state.size += VMCS12_SIZE;
|
|
|
|
|
|
|
|
if (is_guest_mode(vcpu) &&
|
|
|
|
nested_cpu_has_shadow_vmcs(vmcs12) &&
|
|
|
|
vmcs12->vmcs_link_pointer != -1ull)
|
|
|
|
kvm_state.size += VMCS12_SIZE;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (vmx->nested.smm.vmxon)
|
|
|
|
kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
|
|
|
|
|
|
|
|
if (vmx->nested.smm.guest_mode)
|
|
|
|
kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
|
|
|
|
|
|
|
|
if (is_guest_mode(vcpu)) {
|
|
|
|
kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
|
|
|
|
|
|
|
|
if (vmx->nested.nested_run_pending)
|
|
|
|
kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (user_data_size < kvm_state.size)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
if (!vmx_has_valid_vmcs12(vcpu))
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* When running L2, the authoritative vmcs12 state is in the
|
|
|
|
* vmcs02. When running L1, the authoritative vmcs12 state is
|
|
|
|
* in the shadow or enlightened vmcs linked to vmcs01, unless
|
|
|
|
* need_vmcs12_sync is set, in which case the authoritative
|
|
|
|
* vmcs12 state is in the vmcs12 already.
|
|
|
|
*/
|
|
|
|
if (is_guest_mode(vcpu)) {
|
|
|
|
sync_vmcs12(vcpu, vmcs12);
|
|
|
|
} else if (!vmx->nested.need_vmcs12_sync) {
|
|
|
|
if (vmx->nested.hv_evmcs)
|
|
|
|
copy_enlightened_to_vmcs12(vmx);
|
|
|
|
else if (enable_shadow_vmcs)
|
|
|
|
copy_shadow_to_vmcs12(vmx);
|
|
|
|
}
|
|
|
|
|
2019-01-25 04:48:20 +07:00
|
|
|
/*
|
|
|
|
* Copy over the full allocated size of vmcs12 rather than just the size
|
|
|
|
* of the struct.
|
|
|
|
*/
|
|
|
|
if (copy_to_user(user_kvm_nested_state->data, vmcs12, VMCS12_SIZE))
|
2018-12-04 04:53:18 +07:00
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
if (nested_cpu_has_shadow_vmcs(vmcs12) &&
|
|
|
|
vmcs12->vmcs_link_pointer != -1ull) {
|
|
|
|
if (copy_to_user(user_kvm_nested_state->data + VMCS12_SIZE,
|
2019-01-25 04:48:20 +07:00
|
|
|
get_shadow_vmcs12(vcpu), VMCS12_SIZE))
|
2018-12-04 04:53:18 +07:00
|
|
|
return -EFAULT;
|
|
|
|
}
|
|
|
|
|
|
|
|
out:
|
|
|
|
return kvm_state.size;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Forcibly leave nested mode in order to be able to reset the VCPU later on.
|
|
|
|
*/
|
|
|
|
void vmx_leave_nested(struct kvm_vcpu *vcpu)
|
|
|
|
{
|
|
|
|
if (is_guest_mode(vcpu)) {
|
|
|
|
to_vmx(vcpu)->nested.nested_run_pending = 0;
|
|
|
|
nested_vmx_vmexit(vcpu, -1, 0, 0);
|
|
|
|
}
|
|
|
|
free_nested(vcpu);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
|
|
|
|
struct kvm_nested_state __user *user_kvm_nested_state,
|
|
|
|
struct kvm_nested_state *kvm_state)
|
|
|
|
{
|
|
|
|
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
|
|
|
struct vmcs12 *vmcs12;
|
|
|
|
u32 exit_qual;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (kvm_state->format != 0)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (kvm_state->flags & KVM_STATE_NESTED_EVMCS)
|
|
|
|
nested_enable_evmcs(vcpu, NULL);
|
|
|
|
|
|
|
|
if (!nested_vmx_allowed(vcpu))
|
|
|
|
return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL;
|
|
|
|
|
|
|
|
if (kvm_state->vmx.vmxon_pa == -1ull) {
|
|
|
|
if (kvm_state->vmx.smm.flags)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (kvm_state->vmx.vmcs_pa != -1ull)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
vmx_leave_nested(vcpu);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
|
|
|
|
(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (kvm_state->vmx.smm.flags &
|
|
|
|
~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SMM temporarily disables VMX, so we cannot be in guest mode,
|
|
|
|
* nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
|
|
|
|
* must be zero.
|
|
|
|
*/
|
|
|
|
if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
|
|
|
|
!(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
vmx_leave_nested(vcpu);
|
|
|
|
if (kvm_state->vmx.vmxon_pa == -1ull)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa;
|
|
|
|
ret = enter_vmx_operation(vcpu);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
/* Empty 'VMXON' state is permitted */
|
|
|
|
if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (kvm_state->vmx.vmcs_pa != -1ull) {
|
|
|
|
if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa ||
|
|
|
|
!page_address_valid(vcpu, kvm_state->vmx.vmcs_pa))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa);
|
|
|
|
} else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
|
|
|
|
/*
|
|
|
|
* Sync eVMCS upon entry as we may not have
|
|
|
|
* HV_X64_MSR_VP_ASSIST_PAGE set up yet.
|
|
|
|
*/
|
|
|
|
vmx->nested.need_vmcs12_sync = true;
|
|
|
|
} else {
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
|
|
|
|
vmx->nested.smm.vmxon = true;
|
|
|
|
vmx->nested.vmxon = false;
|
|
|
|
|
|
|
|
if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
|
|
|
|
vmx->nested.smm.guest_mode = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
vmcs12 = get_vmcs12(vcpu);
|
|
|
|
if (copy_from_user(vmcs12, user_kvm_nested_state->data, sizeof(*vmcs12)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
if (vmcs12->hdr.revision_id != VMCS12_REVISION)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
vmx->nested.nested_run_pending =
|
|
|
|
!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
|
|
|
|
|
|
|
|
if (nested_cpu_has_shadow_vmcs(vmcs12) &&
|
|
|
|
vmcs12->vmcs_link_pointer != -1ull) {
|
|
|
|
struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
|
|
|
|
|
|
|
|
if (kvm_state->size < sizeof(*kvm_state) + 2 * sizeof(*vmcs12))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (copy_from_user(shadow_vmcs12,
|
|
|
|
user_kvm_nested_state->data + VMCS12_SIZE,
|
|
|
|
sizeof(*vmcs12)))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
|
|
|
|
!shadow_vmcs12->hdr.shadow_vmcs)
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
2018-12-13 01:30:06 +07:00
|
|
|
if (nested_vmx_check_vmentry_prereqs(vcpu, vmcs12) ||
|
|
|
|
nested_vmx_check_vmentry_postreqs(vcpu, vmcs12, &exit_qual))
|
2018-12-04 04:53:18 +07:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
vmx->nested.dirty_vmcs12 = true;
|
|
|
|
ret = nested_vmx_enter_non_root_mode(vcpu, false);
|
|
|
|
if (ret)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
void nested_vmx_vcpu_setup(void)
|
|
|
|
{
|
|
|
|
if (enable_shadow_vmcs) {
|
|
|
|
/*
|
|
|
|
* At vCPU creation, "VMWRITE to any supported field
|
|
|
|
* in the VMCS" is supported, so use the more
|
|
|
|
* permissive vmx_vmread_bitmap to specify both read
|
|
|
|
* and write permissions for the shadow VMCS.
|
|
|
|
*/
|
|
|
|
vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
|
|
|
|
vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmread_bitmap));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
|
|
|
|
* returned for the various VMX controls MSRs when nested VMX is enabled.
|
|
|
|
* The same values should also be used to verify that vmcs12 control fields are
|
|
|
|
* valid during nested entry from L1 to L2.
|
|
|
|
* Each of these control msrs has a low and high 32-bit half: A low bit is on
|
|
|
|
* if the corresponding bit in the (32-bit) control field *must* be on, and a
|
|
|
|
* bit in the high half is on if the corresponding bit in the control field
|
|
|
|
* may be on. See also vmx_control_verify().
|
|
|
|
*/
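/*
 * Concretely (added illustration): a vmcs12 control value 'val' is
 * consistent with such a (low, high) pair when ((val & high) | low) == val,
 * i.e. no bit outside the allowed-1 set is on and every must-be-1 bit is
 * present.
 */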
|
|
|
|
void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
|
|
|
|
bool apicv)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Note that as a general rule, the high half of the MSRs (bits in
|
|
|
|
* the control fields which may be 1) should be initialized by the
|
|
|
|
* intersection of the underlying hardware's MSR (i.e., features which
|
|
|
|
* can be supported) and the list of features we want to expose -
|
|
|
|
* because they are known to be properly supported in our code.
|
|
|
|
* Also, usually, the low half of the MSRs (bits which must be 1) can
|
|
|
|
* be set to 0, meaning that L1 may turn off any of these bits. The
|
|
|
|
* reason is that if one of these bits is necessary, it will appear
|
|
|
|
* in vmcs01, and prepare_vmcs02, which bitwise-or's the control
|
|
|
|
* fields of vmcs01 and vmcs12, will keep these bits set - and
|
|
|
|
* nested_vmx_exit_reflected() will not pass related exits to L1.
|
|
|
|
* These rules have exceptions below.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* pin-based controls */
|
|
|
|
rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
|
|
|
|
msrs->pinbased_ctls_low,
|
|
|
|
msrs->pinbased_ctls_high);
|
|
|
|
msrs->pinbased_ctls_low |=
|
|
|
|
PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
|
|
|
|
msrs->pinbased_ctls_high &=
|
|
|
|
PIN_BASED_EXT_INTR_MASK |
|
|
|
|
PIN_BASED_NMI_EXITING |
|
|
|
|
PIN_BASED_VIRTUAL_NMIS |
|
|
|
|
(apicv ? PIN_BASED_POSTED_INTR : 0);
|
|
|
|
msrs->pinbased_ctls_high |=
|
|
|
|
PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
|
|
|
|
PIN_BASED_VMX_PREEMPTION_TIMER;
|
|
|
|
|
|
|
|
/* exit controls */
|
|
|
|
rdmsr(MSR_IA32_VMX_EXIT_CTLS,
|
|
|
|
msrs->exit_ctls_low,
|
|
|
|
msrs->exit_ctls_high);
|
|
|
|
msrs->exit_ctls_low =
|
|
|
|
VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
|
|
|
|
|
|
|
|
msrs->exit_ctls_high &=
|
|
|
|
#ifdef CONFIG_X86_64
|
|
|
|
VM_EXIT_HOST_ADDR_SPACE_SIZE |
|
|
|
|
#endif
|
|
|
|
VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
|
|
|
|
msrs->exit_ctls_high |=
|
|
|
|
VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
|
|
|
|
VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
|
|
|
|
VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
|
|
|
|
|
|
|
|
/* We support free control of debug control saving. */
|
|
|
|
msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
|
|
|
|
|
|
|
|
/* entry controls */
|
|
|
|
rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
|
|
|
|
msrs->entry_ctls_low,
|
|
|
|
msrs->entry_ctls_high);
|
|
|
|
msrs->entry_ctls_low =
|
|
|
|
VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
|
|
|
|
msrs->entry_ctls_high &=
|
|
|
|
#ifdef CONFIG_X86_64
|
|
|
|
VM_ENTRY_IA32E_MODE |
|
|
|
|
#endif
|
|
|
|
VM_ENTRY_LOAD_IA32_PAT;
|
|
|
|
msrs->entry_ctls_high |=
|
|
|
|
(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
|
|
|
|
|
|
|
|
/* We support free control of debug control loading. */
|
|
|
|
msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
|
|
|
|
|
|
|
|
/* cpu-based controls */
|
|
|
|
rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
|
|
|
|
msrs->procbased_ctls_low,
|
|
|
|
msrs->procbased_ctls_high);
|
|
|
|
msrs->procbased_ctls_low =
|
|
|
|
CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
|
|
|
|
msrs->procbased_ctls_high &=
|
|
|
|
CPU_BASED_VIRTUAL_INTR_PENDING |
|
|
|
|
CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
|
|
|
|
CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
|
|
|
|
CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
|
|
|
|
CPU_BASED_CR3_STORE_EXITING |
|
|
|
|
#ifdef CONFIG_X86_64
|
|
|
|
CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
|
|
|
|
#endif
|
|
|
|
CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
|
|
|
|
CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
|
|
|
|
CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
|
|
|
|
CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
|
|
|
|
CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
|
|
|
|
/*
|
|
|
|
* We can allow some features even when not supported by the
|
|
|
|
* hardware. For example, L1 can specify an MSR bitmap - and we
|
|
|
|
* can use it to avoid exits to L1 - even when L0 runs L2
|
|
|
|
* without MSR bitmaps.
|
|
|
|
*/
|
|
|
|
msrs->procbased_ctls_high |=
|
|
|
|
CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
|
|
|
|
CPU_BASED_USE_MSR_BITMAPS;
|
|
|
|
|
|
|
|
/* We support free control of CR3 access interception. */
|
|
|
|
msrs->procbased_ctls_low &=
|
|
|
|
~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* secondary cpu-based controls. Do not include those that
|
|
|
|
* depend on CPUID bits; they are added later by vmx_cpuid_update.
|
|
|
|
*/
|
2019-02-07 17:42:14 +07:00
|
|
|
if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
|
|
|
|
rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
|
|
|
|
msrs->secondary_ctls_low,
|
|
|
|
msrs->secondary_ctls_high);
|
|
|
|
|
2018-12-04 04:53:18 +07:00
|
|
|
msrs->secondary_ctls_low = 0;
|
|
|
|
msrs->secondary_ctls_high &=
|
|
|
|
SECONDARY_EXEC_DESC |
|
|
|
|
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
|
|
|
|
SECONDARY_EXEC_APIC_REGISTER_VIRT |
|
|
|
|
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
|
|
|
|
SECONDARY_EXEC_WBINVD_EXITING;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* We can emulate "VMCS shadowing," even if the hardware
|
|
|
|
* doesn't support it.
|
|
|
|
*/
|
|
|
|
msrs->secondary_ctls_high |=
|
|
|
|
SECONDARY_EXEC_SHADOW_VMCS;
|
|
|
|
|
|
|
|
if (enable_ept) {
|
|
|
|
/* nested EPT: emulate EPT also to L1 */
|
|
|
|
msrs->secondary_ctls_high |=
|
|
|
|
SECONDARY_EXEC_ENABLE_EPT;
|
|
|
|
msrs->ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
|
|
|
|
VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
|
|
|
|
if (cpu_has_vmx_ept_execute_only())
|
|
|
|
msrs->ept_caps |=
|
|
|
|
VMX_EPT_EXECUTE_ONLY_BIT;
|
|
|
|
msrs->ept_caps &= ept_caps;
|
|
|
|
msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
|
|
|
|
VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
|
|
|
|
VMX_EPT_1GB_PAGE_BIT;
|
|
|
|
if (enable_ept_ad_bits) {
|
|
|
|
msrs->secondary_ctls_high |=
|
|
|
|
SECONDARY_EXEC_ENABLE_PML;
|
|
|
|
msrs->ept_caps |= VMX_EPT_AD_BIT;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cpu_has_vmx_vmfunc()) {
|
|
|
|
msrs->secondary_ctls_high |=
|
|
|
|
SECONDARY_EXEC_ENABLE_VMFUNC;
|
|
|
|
/*
|
|
|
|
* Advertise EPTP switching unconditionally
|
|
|
|
* since we emulate it
|
|
|
|
*/
|
|
|
|
if (enable_ept)
|
|
|
|
msrs->vmfunc_controls =
|
|
|
|
VMX_VMFUNC_EPTP_SWITCHING;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Old versions of KVM use the single-context version without
|
|
|
|
* checking for support, so declare that it is supported even
|
|
|
|
* though it is treated as global context. The alternative would be
|
|
|
|
* to fail the single-context invvpid, which is worse.
|
|
|
|
*/
|
|
|
|
if (enable_vpid) {
|
|
|
|
msrs->secondary_ctls_high |=
|
|
|
|
SECONDARY_EXEC_ENABLE_VPID;
|
|
|
|
msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
|
|
|
|
VMX_VPID_EXTENT_SUPPORTED_MASK;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (enable_unrestricted_guest)
|
|
|
|
msrs->secondary_ctls_high |=
|
|
|
|
SECONDARY_EXEC_UNRESTRICTED_GUEST;
|
|
|
|
|
|
|
|
if (flexpriority_enabled)
|
|
|
|
msrs->secondary_ctls_high |=
|
|
|
|
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
|
|
|
|
|
|
|
|
/* miscellaneous data */
|
|
|
|
rdmsr(MSR_IA32_VMX_MISC,
|
|
|
|
msrs->misc_low,
|
|
|
|
msrs->misc_high);
|
|
|
|
msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
|
|
|
|
msrs->misc_low |=
|
|
|
|
MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
|
|
|
|
VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
|
|
|
|
VMX_MISC_ACTIVITY_HLT;
|
|
|
|
msrs->misc_high = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This MSR reports some information about VMX support. We
|
|
|
|
* should return information about the VMX we emulate for the
|
|
|
|
* guest, and the VMCS structure we give it - not about the
|
|
|
|
* VMX support of the underlying hardware.
|
|
|
|
*/
|
|
|
|
msrs->basic =
|
|
|
|
VMCS12_REVISION |
|
|
|
|
VMX_BASIC_TRUE_CTLS |
|
|
|
|
((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
|
|
|
|
(VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
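/*
 * Layout reminder (added): bits 30:0 of this MSR carry the VMCS revision
 * id, bits 44:32 the VMCS region size, bits 53:50 the memory type
 * (6 = write-back) and bit 55 advertises the TRUE control MSRs.
 */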
|
|
|
|
|
|
|
|
if (cpu_has_vmx_basic_inout())
|
|
|
|
msrs->basic |= VMX_BASIC_INOUT;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* These MSRs specify bits which the guest must keep fixed on
|
|
|
|
* while L1 is in VMXON mode (in L1's root mode, or running an L2).
|
|
|
|
* We picked the standard core2 setting.
|
|
|
|
*/
|
|
|
|
#define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
|
|
|
|
#define VMXON_CR4_ALWAYSON X86_CR4_VMXE
|
|
|
|
msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
|
|
|
|
msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
|
|
|
|
|
|
|
|
/* These MSRs specify bits which the guest must keep fixed off. */
|
|
|
|
rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
|
|
|
|
rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
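/*
 * Illustration (added): with these four values a guest CR0/CR4 value
 * 'val' is acceptable for VMX operation when
 * (val | fixed0) == val && (val & fixed1) == val.
 */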
|
|
|
|
|
|
|
|
/* highest index: VMX_PREEMPTION_TIMER_VALUE */
|
|
|
|
msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
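/*
 * The shift above is because a field encoding keeps its index in
 * bits 9:1; bit 0 is the access type.
 */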
|
|
|
|
}
|
|
|
|
|
|
|
|
void nested_vmx_hardware_unsetup(void)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (enable_shadow_vmcs) {
|
|
|
|
for (i = 0; i < VMX_BITMAP_NR; i++)
|
|
|
|
free_page((unsigned long)vmx_bitmap[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (!cpu_has_vmx_shadow_vmcs())
|
|
|
|
enable_shadow_vmcs = 0;
|
|
|
|
if (enable_shadow_vmcs) {
|
|
|
|
for (i = 0; i < VMX_BITMAP_NR; i++) {
|
2019-02-12 02:02:52 +07:00
|
|
|
/*
|
|
|
|
* The vmx_bitmap is not tied to a VM and so should
|
|
|
|
* not be charged to a memcg.
|
|
|
|
*/
|
2018-12-04 04:53:18 +07:00
|
|
|
vmx_bitmap[i] = (unsigned long *)
|
|
|
|
__get_free_page(GFP_KERNEL);
|
|
|
|
if (!vmx_bitmap[i]) {
|
|
|
|
nested_vmx_hardware_unsetup();
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
init_vmcs_shadow_fields();
|
|
|
|
}
|
|
|
|
|
|
|
|
exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear;
|
|
|
|
exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch;
|
|
|
|
exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld;
|
|
|
|
exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst;
|
|
|
|
exit_handlers[EXIT_REASON_VMREAD] = handle_vmread;
|
|
|
|
exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume;
|
|
|
|
exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite;
|
|
|
|
exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff;
|
|
|
|
exit_handlers[EXIT_REASON_VMON] = handle_vmon;
|
|
|
|
exit_handlers[EXIT_REASON_INVEPT] = handle_invept;
|
|
|
|
exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid;
|
|
|
|
exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc;
|
|
|
|
|
|
|
|
kvm_x86_ops->check_nested_events = vmx_check_nested_events;
|
|
|
|
kvm_x86_ops->get_nested_state = vmx_get_nested_state;
|
|
|
|
kvm_x86_ops->set_nested_state = vmx_set_nested_state;
|
|
|
|
kvm_x86_ops->get_vmcs12_pages = nested_get_vmcs12_pages;
|
|
|
|
kvm_x86_ops->nested_enable_evmcs = nested_enable_evmcs;
|
2018-12-11 00:21:55 +07:00
|
|
|
kvm_x86_ops->nested_get_evmcs_version = nested_get_evmcs_version;
|
2018-12-04 04:53:18 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|