Merge branch 'kvm-amd-fixes' into HEAD
This topic branch will be included in both kvm/master and kvm/next (for 5.8) in order to simplify testing of kvm/next.
commit f6bfd9c8ff
@@ -578,6 +578,7 @@ struct kvm_vcpu_arch {
         unsigned long cr4;
         unsigned long cr4_guest_owned_bits;
         unsigned long cr8;
+        u32 host_pkru;
         u32 pkru;
         u32 hflags;
         u64 efer;
@@ -1093,8 +1094,6 @@ struct kvm_x86_ops {
         void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
         void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
         void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
-        u64 (*get_dr6)(struct kvm_vcpu *vcpu);
-        void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
         void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
         void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
         void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
@@ -1449,6 +1448,7 @@ bool kvm_rdpmc(struct kvm_vcpu *vcpu);
 
 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
+void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, unsigned long payload);
 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
@@ -1427,7 +1427,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *current_vcpu, u64 ingpa,
          */
         kvm_make_vcpus_request_mask(kvm,
                                     KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP,
-                                    vcpu_mask, &hv_vcpu->tlb_flush);
+                                    NULL, vcpu_mask, &hv_vcpu->tlb_flush);
 
 ret_success:
         /* We always do full TLB flush, set rep_done = rep_cnt. */
@@ -19,6 +19,7 @@
 #include <linux/kernel.h>
 
 #include <asm/msr-index.h>
+#include <asm/debugreg.h>
 
 #include "kvm_emulate.h"
 #include "trace.h"
@@ -267,7 +268,7 @@ void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
         svm->vmcb->save.rsp = nested_vmcb->save.rsp;
         svm->vmcb->save.rip = nested_vmcb->save.rip;
         svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
-        svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
+        svm->vcpu.arch.dr6 = nested_vmcb->save.dr6;
         svm->vmcb->save.cpl = nested_vmcb->save.cpl;
 
         svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
@@ -482,7 +483,7 @@ int nested_svm_vmexit(struct vcpu_svm *svm)
         nested_vmcb->save.rsp = vmcb->save.rsp;
         nested_vmcb->save.rax = vmcb->save.rax;
         nested_vmcb->save.dr7 = vmcb->save.dr7;
-        nested_vmcb->save.dr6 = vmcb->save.dr6;
+        nested_vmcb->save.dr6 = svm->vcpu.arch.dr6;
         nested_vmcb->save.cpl = vmcb->save.cpl;
 
         nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
@@ -606,26 +607,45 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
 /* DB exceptions for our internal use must not cause vmexit */
 static int nested_svm_intercept_db(struct vcpu_svm *svm)
 {
-        unsigned long dr6;
+        unsigned long dr6 = svm->vmcb->save.dr6;
 
+        /* Always catch it and pass it to userspace if debugging. */
+        if (svm->vcpu.guest_debug &
+            (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
+                return NESTED_EXIT_HOST;
+
         /* if we're not singlestepping, it's not ours */
         if (!svm->nmi_singlestep)
-                return NESTED_EXIT_DONE;
+                goto reflected_db;
 
         /* if it's not a singlestep exception, it's not ours */
-        if (kvm_get_dr(&svm->vcpu, 6, &dr6))
-                return NESTED_EXIT_DONE;
         if (!(dr6 & DR6_BS))
-                return NESTED_EXIT_DONE;
+                goto reflected_db;
 
         /* if the guest is singlestepping, it should get the vmexit */
         if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
                 disable_nmi_singlestep(svm);
-                return NESTED_EXIT_DONE;
+                goto reflected_db;
         }
 
         /* it's ours, the nested hypervisor must not see this one */
         return NESTED_EXIT_HOST;
+
+reflected_db:
+        /*
+         * Synchronize guest DR6 here just like in kvm_deliver_exception_payload;
+         * it will be moved into the nested VMCB by nested_svm_vmexit. Once
+         * exceptions will be moved to svm_check_nested_events, all this stuff
+         * will just go away and we could just return NESTED_EXIT_HOST
+         * unconditionally. db_interception will queue the exception, which
+         * will be processed by svm_check_nested_events if a nested vmexit is
+         * required, and we will just use kvm_deliver_exception_payload to copy
+         * the payload to DR6 before vmexit.
+         */
+        WARN_ON(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT);
+        svm->vcpu.arch.dr6 &= ~(DR_TRAP_BITS | DR6_RTM);
+        svm->vcpu.arch.dr6 |= dr6 & ~DR6_FIXED_1;
+        return NESTED_EXIT_DONE;
 }
 
 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
@@ -682,6 +702,9 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
                 if (svm->nested.intercept_exceptions & excp_bits) {
                         if (exit_code == SVM_EXIT_EXCP_BASE + DB_VECTOR)
                                 vmexit = nested_svm_intercept_db(svm);
+                        else if (exit_code == SVM_EXIT_EXCP_BASE + BP_VECTOR &&
+                                 svm->vcpu.guest_debug & KVM_GUESTDBG_USE_SW_BP)
+                                vmexit = NESTED_EXIT_HOST;
                         else
                                 vmexit = NESTED_EXIT_DONE;
                 }
@@ -1672,17 +1672,14 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
         mark_dirty(svm->vmcb, VMCB_ASID);
 }
 
-static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
+static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value)
 {
-        return to_svm(vcpu)->vmcb->save.dr6;
-}
+        struct vmcb *vmcb = svm->vmcb;
 
-static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
-{
-        struct vcpu_svm *svm = to_svm(vcpu);
-
-        svm->vmcb->save.dr6 = value;
-        mark_dirty(svm->vmcb, VMCB_DR);
+        if (unlikely(value != vmcb->save.dr6)) {
+                vmcb->save.dr6 = value;
+                mark_dirty(vmcb, VMCB_DR);
+        }
 }
 
 static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
@@ -1693,9 +1690,12 @@ static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
         get_debugreg(vcpu->arch.db[1], 1);
         get_debugreg(vcpu->arch.db[2], 2);
         get_debugreg(vcpu->arch.db[3], 3);
-        vcpu->arch.dr6 = svm_get_dr6(vcpu);
+        /*
+         * We cannot reset svm->vmcb->save.dr6 to DR6_FIXED_1|DR6_RTM here,
+         * because db_interception might need it. We can do it before vmentry.
+         */
+        vcpu->arch.dr6 = svm->vmcb->save.dr6;
         vcpu->arch.dr7 = svm->vmcb->save.dr7;
-
         vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
         set_dr_intercepts(svm);
 }
@@ -1739,7 +1739,8 @@ static int db_interception(struct vcpu_svm *svm)
         if (!(svm->vcpu.guest_debug &
               (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
                 !svm->nmi_singlestep) {
-                kvm_queue_exception(&svm->vcpu, DB_VECTOR);
+                u32 payload = (svm->vmcb->save.dr6 ^ DR6_RTM) & ~DR6_FIXED_1;
+                kvm_queue_exception_p(&svm->vcpu, DB_VECTOR, payload);
                 return 1;
         }
 
@@ -3317,6 +3318,15 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 
         svm->vmcb->save.cr2 = vcpu->arch.cr2;
 
+        /*
+         * Run with all-zero DR6 unless needed, so that we can get the exact cause
+         * of a #DB.
+         */
+        if (unlikely(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
+                svm_set_dr6(svm, vcpu->arch.dr6);
+        else
+                svm_set_dr6(svm, DR6_FIXED_1 | DR6_RTM);
+
         clgi();
         kvm_load_guest_xsave_state(vcpu);
 
@@ -3931,8 +3941,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
         .set_idt = svm_set_idt,
         .get_gdt = svm_get_gdt,
         .set_gdt = svm_set_gdt,
-        .get_dr6 = svm_get_dr6,
-        .set_dr6 = svm_set_dr6,
         .set_dr7 = svm_set_dr7,
         .sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
         .cache_reg = svm_cache_reg,
@@ -1372,7 +1372,6 @@ void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
         vmx_vcpu_pi_load(vcpu, cpu);
 
-        vmx->host_pkru = read_pkru();
         vmx->host_debugctlmsr = get_debugctlmsr();
 }
 
@@ -4677,15 +4676,13 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
                 dr6 = vmcs_readl(EXIT_QUALIFICATION);
                 if (!(vcpu->guest_debug &
                       (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
-                        vcpu->arch.dr6 &= ~DR_TRAP_BITS;
-                        vcpu->arch.dr6 |= dr6 | DR6_RTM;
                         if (is_icebp(intr_info))
                                 WARN_ON(!skip_emulated_instruction(vcpu));
 
-                        kvm_queue_exception(vcpu, DB_VECTOR);
+                        kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
                         return 1;
                 }
-                kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
+                kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
                 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
                 /* fall through */
         case BP_VECTOR:
@@ -4929,16 +4926,14 @@ static int handle_dr(struct kvm_vcpu *vcpu)
                  * guest debugging itself.
                  */
                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
-                        vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
+                        vcpu->run->debug.arch.dr6 = DR6_BD | DR6_RTM | DR6_FIXED_1;
                         vcpu->run->debug.arch.dr7 = dr7;
                         vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
                         vcpu->run->debug.arch.exception = DB_VECTOR;
                         vcpu->run->exit_reason = KVM_EXIT_DEBUG;
                         return 0;
                 } else {
-                        vcpu->arch.dr6 &= ~DR_TRAP_BITS;
-                        vcpu->arch.dr6 |= DR6_BD | DR6_RTM;
-                        kvm_queue_exception(vcpu, DB_VECTOR);
+                        kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BD);
                         return 1;
                 }
         }
@@ -4969,15 +4964,6 @@
         return kvm_skip_emulated_instruction(vcpu);
 }
 
-static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
-{
-        return vcpu->arch.dr6;
-}
-
-static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
-{
-}
-
 static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
 {
         get_debugreg(vcpu->arch.db[0], 0);
@@ -6577,11 +6563,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
         kvm_load_guest_xsave_state(vcpu);
 
-        if (static_cpu_has(X86_FEATURE_PKU) &&
-            kvm_read_cr4_bits(vcpu, X86_CR4_PKE) &&
-            vcpu->arch.pkru != vmx->host_pkru)
-                __write_pkru(vcpu->arch.pkru);
-
         pt_guest_enter(vmx);
 
         if (vcpu_to_pmu(vcpu)->version)
@@ -6671,18 +6652,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 
         pt_guest_exit(vmx);
 
-        /*
-         * eager fpu is enabled if PKEY is supported and CR4 is switched
-         * back on host, so it is safe to read guest PKRU from current
-         * XSAVE.
-         */
-        if (static_cpu_has(X86_FEATURE_PKU) &&
-            kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
-                vcpu->arch.pkru = rdpkru();
-                if (vcpu->arch.pkru != vmx->host_pkru)
-                        __write_pkru(vmx->host_pkru);
-        }
-
         kvm_load_host_xsave_state(vcpu);
 
         vmx->nested.nested_run_pending = 0;
@@ -7740,8 +7709,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
         .set_idt = vmx_set_idt,
         .get_gdt = vmx_get_gdt,
         .set_gdt = vmx_set_gdt,
-        .get_dr6 = vmx_get_dr6,
-        .set_dr6 = vmx_set_dr6,
         .set_dr7 = vmx_set_dr7,
         .sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
         .cache_reg = vmx_cache_reg,
@@ -572,11 +572,12 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 }
 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
 
-static void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
-                                  unsigned long payload)
+void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
+                           unsigned long payload)
 {
         kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
 }
+EXPORT_SYMBOL_GPL(kvm_queue_exception_p);
 
 static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
                                     u32 error_code, unsigned long payload)
@@ -836,11 +837,25 @@ void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu)
                     vcpu->arch.ia32_xss != host_xss)
                         wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss);
         }
+
+        if (static_cpu_has(X86_FEATURE_PKU) &&
+            (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) ||
+             (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU)) &&
+            vcpu->arch.pkru != vcpu->arch.host_pkru)
+                __write_pkru(vcpu->arch.pkru);
 }
 EXPORT_SYMBOL_GPL(kvm_load_guest_xsave_state);
 
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu)
 {
+        if (static_cpu_has(X86_FEATURE_PKU) &&
+            (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) ||
+             (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) {
+                vcpu->arch.pkru = rdpkru();
+                if (vcpu->arch.pkru != vcpu->arch.host_pkru)
+                        __write_pkru(vcpu->arch.host_pkru);
+        }
+
         if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) {
 
                 if (vcpu->arch.xcr0 != host_xcr0)
|
||||
}
|
||||
}
|
||||
|
||||
static void kvm_update_dr6(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
|
||||
kvm_x86_ops.set_dr6(vcpu, vcpu->arch.dr6);
|
||||
}
|
||||
|
||||
static void kvm_update_dr7(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
unsigned long dr7;
|
||||
@@ -1090,7 +1099,6 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
                 if (val & 0xffffffff00000000ULL)
                         return -1; /* #GP */
                 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
-                kvm_update_dr6(vcpu);
                 break;
         case 5:
                 /* fall through */
@@ -1126,10 +1134,7 @@ int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
         case 4:
                 /* fall through */
         case 6:
-                if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
-                        *val = vcpu->arch.dr6;
-                else
-                        *val = kvm_x86_ops.get_dr6(vcpu);
+                *val = vcpu->arch.dr6;
                 break;
         case 5:
                 /* fall through */
@@ -3558,6 +3563,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
         kvm_x86_ops.vcpu_load(vcpu, cpu);
 
+        /* Save host pkru register if supported */
+        vcpu->arch.host_pkru = read_pkru();
+
         /* Apply any externally detected TSC adjustments (due to suspend) */
         if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
                 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
@@ -4009,7 +4017,6 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
         memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
         kvm_update_dr0123(vcpu);
         vcpu->arch.dr6 = dbgregs->dr6;
-        kvm_update_dr6(vcpu);
         vcpu->arch.dr7 = dbgregs->dr7;
         kvm_update_dr7(vcpu);
 
@@ -6659,7 +6666,7 @@ static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu)
 
         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
                 kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM;
-                kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
+                kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu);
                 kvm_run->debug.arch.exception = DB_VECTOR;
                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
                 return 0;
@@ -6719,9 +6726,7 @@ static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
                                            vcpu->arch.db);
 
                 if (dr6 != 0) {
-                        vcpu->arch.dr6 &= ~DR_TRAP_BITS;
-                        vcpu->arch.dr6 |= dr6 | DR6_RTM;
-                        kvm_queue_exception(vcpu, DB_VECTOR);
+                        kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
                         *r = 1;
                         return true;
                 }
@@ -8042,7 +8047,7 @@ void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
         zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 
         kvm_make_vcpus_request_mask(kvm, KVM_REQ_SCAN_IOAPIC,
-                                    vcpu_bitmap, cpus);
+                                    NULL, vcpu_bitmap, cpus);
 
         free_cpumask_var(cpus);
 }
@@ -8072,6 +8077,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_update_apicv);
  */
 void kvm_request_apicv_update(struct kvm *kvm, bool activate, ulong bit)
 {
+        struct kvm_vcpu *except;
         unsigned long old, new, expected;
 
         if (!kvm_x86_ops.check_apicv_inhibit_reasons ||
|
||||
trace_kvm_apicv_update_request(activate, bit);
|
||||
if (kvm_x86_ops.pre_update_apicv_exec_ctrl)
|
||||
kvm_x86_ops.pre_update_apicv_exec_ctrl(kvm, activate);
|
||||
kvm_make_all_cpus_request(kvm, KVM_REQ_APICV_UPDATE);
|
||||
|
||||
/*
|
||||
* Sending request to update APICV for all other vcpus,
|
||||
* while update the calling vcpu immediately instead of
|
||||
* waiting for another #VMEXIT to handle the request.
|
||||
*/
|
||||
except = kvm_get_running_vcpu();
|
||||
kvm_make_all_cpus_request_except(kvm, KVM_REQ_APICV_UPDATE,
|
||||
except);
|
||||
if (except)
|
||||
kvm_vcpu_update_apicv(except);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_request_apicv_update);
|
||||
|
||||
@@ -8420,7 +8436,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
                 kvm_x86_ops.sync_dirty_debug_regs(vcpu);
                 kvm_update_dr0123(vcpu);
-                kvm_update_dr6(vcpu);
                 kvm_update_dr7(vcpu);
                 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
         }
@@ -9481,7 +9496,6 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
         memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
         kvm_update_dr0123(vcpu);
         vcpu->arch.dr6 = DR6_INIT;
-        kvm_update_dr6(vcpu);
         vcpu->arch.dr7 = DR7_FIXED_1;
         kvm_update_dr7(vcpu);
 
@@ -813,8 +813,11 @@ void kvm_flush_remote_tlbs(struct kvm *kvm);
 void kvm_reload_remote_mmus(struct kvm *kvm);
 
 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
+                                 struct kvm_vcpu *except,
                                  unsigned long *vcpu_bitmap, cpumask_var_t tmp);
 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
+bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
+                                      struct kvm_vcpu *except);
 bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
                                 unsigned long *vcpu_bitmap);
 
@@ -28,6 +28,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
 TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test
+TEST_GEN_PROGS_x86_64 += x86_64/debug_regs
 TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
 TEST_GEN_PROGS_x86_64 += demand_paging_test
 TEST_GEN_PROGS_x86_64 += dirty_log_test
@@ -143,6 +143,8 @@ struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid);
 void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
 int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid);
 void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid);
+void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid,
+                          struct kvm_guest_debug *debug);
 void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
                        struct kvm_mp_state *mp_state);
 void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs);
@@ -1201,6 +1201,15 @@ void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
                     ret, errno);
 }
 
+void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid,
+                          struct kvm_guest_debug *debug)
+{
+        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
+        int ret = ioctl(vcpu->fd, KVM_SET_GUEST_DEBUG, debug);
+
+        TEST_ASSERT(ret == 0, "KVM_SET_GUEST_DEBUG failed: %d", ret);
+}
+
 /*
  * VM VCPU Set MP State
  *
tools/testing/selftests/kvm/x86_64/debug_regs.c (new file, 202 lines)
@@ -0,0 +1,202 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * KVM guest debug register tests
 *
 * Copyright (C) 2020, Red Hat, Inc.
 */
#include <stdio.h>
#include <string.h>
#include "kvm_util.h"
#include "processor.h"

#define VCPU_ID 0

#define DR6_BD (1 << 13)
#define DR7_GD (1 << 13)

/* For testing data access debug BP */
uint32_t guest_value;

extern unsigned char sw_bp, hw_bp, write_data, ss_start, bd_start;

static void guest_code(void)
{
        /*
         * Software BP tests.
         *
         * NOTE: sw_bp need to be before the cmd here, because int3 is an
         * exception rather than a normal trap for KVM_SET_GUEST_DEBUG (we
         * capture it using the vcpu exception bitmap).
         */
        asm volatile("sw_bp: int3");

        /* Hardware instruction BP test */
        asm volatile("hw_bp: nop");

        /* Hardware data BP test */
        asm volatile("mov $1234,%%rax;\n\t"
                     "mov %%rax,%0;\n\t write_data:"
                     : "=m" (guest_value) : : "rax");

        /* Single step test, covers 2 basic instructions and 2 emulated */
        asm volatile("ss_start: "
                     "xor %%rax,%%rax\n\t"
                     "cpuid\n\t"
                     "movl $0x1a0,%%ecx\n\t"
                     "rdmsr\n\t"
                     : : : "rax", "ecx");

        /* DR6.BD test */
        asm volatile("bd_start: mov %%dr0, %%rax" : : : "rax");
        GUEST_DONE();
}

#define CLEAR_DEBUG() memset(&debug, 0, sizeof(debug))
#define APPLY_DEBUG() vcpu_set_guest_debug(vm, VCPU_ID, &debug)
#define CAST_TO_RIP(v) ((unsigned long long)&(v))
#define SET_RIP(v) do { \
                vcpu_regs_get(vm, VCPU_ID, &regs); \
                regs.rip = (v); \
                vcpu_regs_set(vm, VCPU_ID, &regs); \
        } while (0)
#define MOVE_RIP(v) SET_RIP(regs.rip + (v));

int main(void)
{
        struct kvm_guest_debug debug;
        unsigned long long target_dr6, target_rip;
        struct kvm_regs regs;
        struct kvm_run *run;
        struct kvm_vm *vm;
        struct ucall uc;
        uint64_t cmd;
        int i;
        /* Instruction lengths starting at ss_start */
        int ss_size[4] = {
                3,  /* xor */
                2,  /* cpuid */
                5,  /* mov */
                2,  /* rdmsr */
        };

        if (!kvm_check_cap(KVM_CAP_SET_GUEST_DEBUG)) {
                print_skip("KVM_CAP_SET_GUEST_DEBUG not supported");
                return 0;
        }

        vm = vm_create_default(VCPU_ID, 0, guest_code);
        vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
        run = vcpu_state(vm, VCPU_ID);

        /* Test software BPs - int3 */
        CLEAR_DEBUG();
        debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
        APPLY_DEBUG();
        vcpu_run(vm, VCPU_ID);
        TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
                    run->debug.arch.exception == BP_VECTOR &&
                    run->debug.arch.pc == CAST_TO_RIP(sw_bp),
                    "INT3: exit %d exception %d rip 0x%llx (should be 0x%llx)",
                    run->exit_reason, run->debug.arch.exception,
                    run->debug.arch.pc, CAST_TO_RIP(sw_bp));
        MOVE_RIP(1);

        /* Test instruction HW BP over DR[0-3] */
        for (i = 0; i < 4; i++) {
                CLEAR_DEBUG();
                debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
                debug.arch.debugreg[i] = CAST_TO_RIP(hw_bp);
                debug.arch.debugreg[7] = 0x400 | (1UL << (2*i+1));
                APPLY_DEBUG();
                vcpu_run(vm, VCPU_ID);
                target_dr6 = 0xffff0ff0 | (1UL << i);
                TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
                            run->debug.arch.exception == DB_VECTOR &&
                            run->debug.arch.pc == CAST_TO_RIP(hw_bp) &&
                            run->debug.arch.dr6 == target_dr6,
                            "INS_HW_BP (DR%d): exit %d exception %d rip 0x%llx "
                            "(should be 0x%llx) dr6 0x%llx (should be 0x%llx)",
                            i, run->exit_reason, run->debug.arch.exception,
                            run->debug.arch.pc, CAST_TO_RIP(hw_bp),
                            run->debug.arch.dr6, target_dr6);
        }
        /* Skip "nop" */
        MOVE_RIP(1);

        /* Test data access HW BP over DR[0-3] */
        for (i = 0; i < 4; i++) {
                CLEAR_DEBUG();
                debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
                debug.arch.debugreg[i] = CAST_TO_RIP(guest_value);
                debug.arch.debugreg[7] = 0x00000400 | (1UL << (2*i+1)) |
                    (0x000d0000UL << (4*i));
                APPLY_DEBUG();
                vcpu_run(vm, VCPU_ID);
                target_dr6 = 0xffff0ff0 | (1UL << i);
                TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
                            run->debug.arch.exception == DB_VECTOR &&
                            run->debug.arch.pc == CAST_TO_RIP(write_data) &&
                            run->debug.arch.dr6 == target_dr6,
                            "DATA_HW_BP (DR%d): exit %d exception %d rip 0x%llx "
                            "(should be 0x%llx) dr6 0x%llx (should be 0x%llx)",
                            i, run->exit_reason, run->debug.arch.exception,
                            run->debug.arch.pc, CAST_TO_RIP(write_data),
                            run->debug.arch.dr6, target_dr6);
                /* Rollback the 4-bytes "mov" */
                MOVE_RIP(-7);
        }
        /* Skip the 4-bytes "mov" */
        MOVE_RIP(7);

        /* Test single step */
        target_rip = CAST_TO_RIP(ss_start);
        target_dr6 = 0xffff4ff0ULL;
        vcpu_regs_get(vm, VCPU_ID, &regs);
        for (i = 0; i < (sizeof(ss_size) / sizeof(ss_size[0])); i++) {
                target_rip += ss_size[i];
                CLEAR_DEBUG();
                debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
                debug.arch.debugreg[7] = 0x00000400;
                APPLY_DEBUG();
                vcpu_run(vm, VCPU_ID);
                TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
                            run->debug.arch.exception == DB_VECTOR &&
                            run->debug.arch.pc == target_rip &&
                            run->debug.arch.dr6 == target_dr6,
                            "SINGLE_STEP[%d]: exit %d exception %d rip 0x%llx "
                            "(should be 0x%llx) dr6 0x%llx (should be 0x%llx)",
                            i, run->exit_reason, run->debug.arch.exception,
                            run->debug.arch.pc, target_rip, run->debug.arch.dr6,
                            target_dr6);
        }

        /* Finally test global disable */
        CLEAR_DEBUG();
        debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        debug.arch.debugreg[7] = 0x400 | DR7_GD;
        APPLY_DEBUG();
        vcpu_run(vm, VCPU_ID);
        target_dr6 = 0xffff0ff0 | DR6_BD;
        TEST_ASSERT(run->exit_reason == KVM_EXIT_DEBUG &&
                    run->debug.arch.exception == DB_VECTOR &&
                    run->debug.arch.pc == CAST_TO_RIP(bd_start) &&
                    run->debug.arch.dr6 == target_dr6,
                    "DR7.GD: exit %d exception %d rip 0x%llx "
                    "(should be 0x%llx) dr6 0x%llx (should be 0x%llx)",
                    run->exit_reason, run->debug.arch.exception,
                    run->debug.arch.pc, target_rip, run->debug.arch.dr6,
                    target_dr6);

        /* Disable all debug controls, run to the end */
        CLEAR_DEBUG();
        APPLY_DEBUG();

        vcpu_run(vm, VCPU_ID);
        TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, "KVM_EXIT_IO");
        cmd = get_ucall(vm, VCPU_ID, &uc);
        TEST_ASSERT(cmd == UCALL_DONE, "UCALL_DONE");

        kvm_vm_free(vm);

        return 0;
}
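For readers who want to exercise the interface outside the selftest harness, the fragment below is a minimal, illustrative sketch of the bare KVM_SET_GUEST_DEBUG call that the vcpu_set_guest_debug() helper above wraps. It is not part of this commit, and it assumes a vCPU file descriptor already obtained through the usual KVM_CREATE_VM/KVM_CREATE_VCPU sequence.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Ask KVM to single-step the guest: after this succeeds, the next KVM_RUN
 * on vcpu_fd should exit with KVM_EXIT_DEBUG after one guest instruction.
 * Illustrative only; error handling is left to the caller.
 */
static int enable_single_step(int vcpu_fd)
{
        struct kvm_guest_debug debug;

        memset(&debug, 0, sizeof(debug));
        debug.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
        return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &debug);
}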
@@ -259,6 +259,7 @@ static inline bool kvm_kick_many_cpus(const struct cpumask *cpus, bool wait)
 }
 
 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
+                                 struct kvm_vcpu *except,
                                  unsigned long *vcpu_bitmap, cpumask_var_t tmp)
 {
         int i, cpu, me;
@@ -268,7 +269,8 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
         me = get_cpu();
 
         kvm_for_each_vcpu(i, vcpu, kvm) {
-                if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
+                if ((vcpu_bitmap && !test_bit(i, vcpu_bitmap)) ||
+                    vcpu == except)
                         continue;
 
                 kvm_make_request(req, vcpu);
@@ -288,19 +290,25 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
         return called;
 }
 
-bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
+bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
+                                      struct kvm_vcpu *except)
 {
         cpumask_var_t cpus;
         bool called;
 
         zalloc_cpumask_var(&cpus, GFP_ATOMIC);
 
-        called = kvm_make_vcpus_request_mask(kvm, req, NULL, cpus);
+        called = kvm_make_vcpus_request_mask(kvm, req, except, NULL, cpus);
 
         free_cpumask_var(cpus);
         return called;
 }
 
+bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
+{
+        return kvm_make_all_cpus_request_except(kvm, req, NULL);
+}
+
 #ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {