Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-24 10:20:49 +07:00)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Paolo Bonzini:
 "A couple bugfixes, and mostly selftests changes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  selftests/kvm: make platform_info_test pass on AMD
  Revert "KVM: x86/mmu: Zap only the relevant pages when removing a memslot"
  selftests: kvm: fix state save/load on processors without XSAVE
  selftests: kvm: fix vmx_set_nested_state_test
  selftests: kvm: provide common function to enable eVMCS
  selftests: kvm: do not try running the VM in vmx_set_nested_state_test
  KVM: x86: svm: remove redundant assignment of var new_entry
  MAINTAINERS: add KVM x86 reviewers
  MAINTAINERS: change list for KVM/s390
  kvm: x86: skip populating logical dest map if apic is not sw enabled

Signature of the merged tag:

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.22 (GNU/Linux)

iQEcBAABAgAGBQJdXX+WAAoJEL/70l94x66DmBoH/RT2qD8Z16Nwlz/8XbFmUCtb
4B0eCaiTpnmDuF2T9nghMitg/CHjFHJJK7bLSkq9C4Fb9HTEfDabb4TRMENOVTB3
u9bK2s+/WG2uWKt1rP6uhSgVumwPQ/5JY30N59nEZ+ZgY6bJ+U3pUfZnPb4FeaVi
EKYXDeToEXWVHJyv+HpkJ+2YV7gQ43qZO3x6papzPAbJp5yN7YDETDylPLF7y7CL
njZ+8z4HaVoJH/T3E2wySEcFO2W9Pc7YH8fIFbvGmPMuOMB5AabBj3mnq/38qsVn
n/SPMrq81DqNH9xeCQXR/7NdPr0ifx830K74pHJ/uCcwHjzB9KWSkv8lbyBPuEU=
=zArX
-----END PGP SIGNATURE-----
commit bb7ba8069d
 MAINTAINERS | 19 ++++++++++---------
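The selftests portion of this pull replaces each test's open-coded KVM_ENABLE_CAP(KVM_CAP_HYPERV_ENLIGHTENED_VMCS) setup with the common vcpu_enable_evmcs() helper introduced in the diff below. As a reference, here is a minimal sketch of a caller after this series; the include list, the "evmcs.h" header name, the guest_code stub, and the VCPU_ID value are illustrative assumptions modeled on the selftests in this diff, not lines taken from it.

#include <stdio.h>
#include <stdlib.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#include "evmcs.h"	/* assumed header name declaring vcpu_enable_evmcs() */

#define VCPU_ID 5	/* arbitrary vCPU id, as in the nested-state test */

static void guest_code(void)
{
	/* illustrative stub; a real test would GUEST_SYNC()/GUEST_DONE() here */
}

int main(void)
{
	struct kvm_vm *vm;

	/* Skip cleanly when the host has no enlightened VMCS support. */
	if (!kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
		printf("KVM_CAP_HYPERV_ENLIGHTENED_VMCS not available, skipping\n");
		exit(KSFT_SKIP);
	}

	vm = vm_create_default(VCPU_ID, 0, guest_code);

	/*
	 * One call replaces the per-test kvm_enable_cap boilerplate; the
	 * helper also asserts that KVM reports a sane eVMCS version range.
	 */
	vcpu_enable_evmcs(vm, VCPU_ID);

	kvm_vm_free(vm);
	return 0;
}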
@@ -8832,14 +8832,6 @@ F: virt/kvm/*
 F: tools/kvm/
 F: tools/testing/selftests/kvm/
 
-KERNEL VIRTUAL MACHINE FOR AMD-V (KVM/amd)
-M: Joerg Roedel <joro@8bytes.org>
-L: kvm@vger.kernel.org
-W: http://www.linux-kvm.org/
-S: Maintained
-F: arch/x86/include/asm/svm.h
-F: arch/x86/kvm/svm.c
-
 KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
 M: Marc Zyngier <maz@kernel.org>
 R: James Morse <james.morse@arm.com>
@@ -8882,7 +8874,7 @@ M: Christian Borntraeger <borntraeger@de.ibm.com>
 M: Janosch Frank <frankja@linux.ibm.com>
 R: David Hildenbrand <david@redhat.com>
 R: Cornelia Huck <cohuck@redhat.com>
-L: linux-s390@vger.kernel.org
+L: kvm@vger.kernel.org
 W: http://www.ibm.com/developerworks/linux/linux390/
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
 S: Supported
@@ -8897,6 +8889,11 @@ F: tools/testing/selftests/kvm/*/s390x/
 KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
 M: Paolo Bonzini <pbonzini@redhat.com>
 M: Radim Krčmář <rkrcmar@redhat.com>
+R: Sean Christopherson <sean.j.christopherson@intel.com>
+R: Vitaly Kuznetsov <vkuznets@redhat.com>
+R: Wanpeng Li <wanpengli@tencent.com>
+R: Jim Mattson <jmattson@google.com>
+R: Joerg Roedel <joro@8bytes.org>
 L: kvm@vger.kernel.org
 W: http://www.linux-kvm.org
 T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
@@ -8904,8 +8901,12 @@ S: Supported
 F: arch/x86/kvm/
 F: arch/x86/kvm/*/
 F: arch/x86/include/uapi/asm/kvm*
+F: arch/x86/include/uapi/asm/vmx.h
+F: arch/x86/include/uapi/asm/svm.h
 F: arch/x86/include/asm/kvm*
 F: arch/x86/include/asm/pvclock-abi.h
+F: arch/x86/include/asm/svm.h
+F: arch/x86/include/asm/vmx.h
 F: arch/x86/kernel/kvm.c
 F: arch/x86/kernel/kvmclock.c
 
@@ -216,6 +216,9 @@ static void recalculate_apic_map(struct kvm *kvm)
 		if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
 			new->phys_map[xapic_id] = apic;
 
+		if (!kvm_apic_sw_enabled(apic))
+			continue;
+
 		ldr = kvm_lapic_get_reg(apic, APIC_LDR);
 
 		if (apic_x2apic_mode(apic)) {
@@ -258,6 +261,8 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
 			static_key_slow_dec_deferred(&apic_sw_disabled);
 		else
 			static_key_slow_inc(&apic_sw_disabled.key);
+
+		recalculate_apic_map(apic->vcpu->kvm);
 	}
 }
 
@@ -5653,38 +5653,7 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
 			struct kvm_memory_slot *slot,
 			struct kvm_page_track_notifier_node *node)
 {
-	struct kvm_mmu_page *sp;
-	LIST_HEAD(invalid_list);
-	unsigned long i;
-	bool flush;
-	gfn_t gfn;
-
-	spin_lock(&kvm->mmu_lock);
-
-	if (list_empty(&kvm->arch.active_mmu_pages))
-		goto out_unlock;
-
-	flush = slot_handle_all_level(kvm, slot, kvm_zap_rmapp, false);
-
-	for (i = 0; i < slot->npages; i++) {
-		gfn = slot->base_gfn + i;
-
-		for_each_valid_sp(kvm, sp, gfn) {
-			if (sp->gfn != gfn)
-				continue;
-
-			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
-		}
-		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
-			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
-			flush = false;
-			cond_resched_lock(&kvm->mmu_lock);
-		}
-	}
-	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
-
-out_unlock:
-	spin_unlock(&kvm->mmu_lock);
+	kvm_mmu_zap_all(kvm);
 }
 
 void kvm_mmu_init_vm(struct kvm *kvm)
@@ -1714,7 +1714,6 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
 	if (!entry)
 		return -EINVAL;
 
-	new_entry = READ_ONCE(*entry);
 	new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
 			      AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
 			      AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
@@ -220,6 +220,8 @@ struct hv_enlightened_vmcs {
 struct hv_enlightened_vmcs *current_evmcs;
 struct hv_vp_assist_page *current_vp_assist;
 
+int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id);
+
 static inline int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist)
 {
 	u64 val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) |
@@ -1060,9 +1060,11 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
 	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i",
 		    r);
 
-	r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
-	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
-		    r);
+	if (kvm_check_cap(KVM_CAP_XCRS)) {
+		r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
+		TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
+			    r);
+	}
 
 	r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs);
 	TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i",
@@ -1103,9 +1105,11 @@ void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *s
 	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
 		    r);
 
-	r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
-	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
-		    r);
+	if (kvm_check_cap(KVM_CAP_XCRS)) {
+		r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
+		TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
+			    r);
+	}
 
 	r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs);
 	TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i",
@@ -12,6 +12,26 @@
 
 bool enable_evmcs;
 
+int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
+{
+	uint16_t evmcs_ver;
+
+	struct kvm_enable_cap enable_evmcs_cap = {
+		.cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
+		.args[0] = (unsigned long)&evmcs_ver
+	};
+
+	vcpu_ioctl(vm, vcpu_id, KVM_ENABLE_CAP, &enable_evmcs_cap);
+
+	/* KVM should return supported EVMCS version range */
+	TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
+		    (evmcs_ver & 0xff) > 0,
+		    "Incorrect EVMCS version range: %x:%x\n",
+		    evmcs_ver & 0xff, evmcs_ver >> 8);
+
+	return evmcs_ver;
+}
+
 /* Allocate memory regions for nested VMX tests.
  *
  * Input Args:
@@ -79,11 +79,6 @@ int main(int argc, char *argv[])
 	struct kvm_x86_state *state;
 	struct ucall uc;
 	int stage;
-	uint16_t evmcs_ver;
-	struct kvm_enable_cap enable_evmcs_cap = {
-		.cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
-		.args[0] = (unsigned long)&evmcs_ver
-	};
 
 	/* Create VM */
 	vm = vm_create_default(VCPU_ID, 0, guest_code);
@@ -96,13 +91,7 @@ int main(int argc, char *argv[])
 		exit(KSFT_SKIP);
 	}
 
-	vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
-
-	/* KVM should return supported EVMCS version range */
-	TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
-		    (evmcs_ver & 0xff) > 0,
-		    "Incorrect EVMCS version range: %x:%x\n",
-		    evmcs_ver & 0xff, evmcs_ver >> 8);
+	vcpu_enable_evmcs(vm, VCPU_ID);
 
 	run = vcpu_state(vm, VCPU_ID);
 
@@ -146,7 +135,7 @@ int main(int argc, char *argv[])
 		kvm_vm_restart(vm, O_RDWR);
 		vm_vcpu_add(vm, VCPU_ID);
 		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
-		vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
+		vcpu_enable_evmcs(vm, VCPU_ID);
 		vcpu_load_state(vm, VCPU_ID, state);
 		run = vcpu_state(vm, VCPU_ID);
 		free(state);
@@ -18,6 +18,7 @@
 #include "test_util.h"
 #include "kvm_util.h"
 #include "processor.h"
+#include "vmx.h"
 
 #define VCPU_ID 0
 
@@ -106,12 +107,7 @@ int main(int argc, char *argv[])
 {
 	struct kvm_vm *vm;
 	int rv;
-	uint16_t evmcs_ver;
 	struct kvm_cpuid2 *hv_cpuid_entries;
-	struct kvm_enable_cap enable_evmcs_cap = {
-		.cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
-		.args[0] = (unsigned long)&evmcs_ver
-	};
 
 	/* Tell stdout not to buffer its content */
 	setbuf(stdout, NULL);
@@ -136,14 +132,14 @@ int main(int argc, char *argv[])
 
 	free(hv_cpuid_entries);
 
-	rv = _vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);
-
-	if (rv) {
+	if (!kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
 		fprintf(stderr,
 			"Enlightened VMCS is unsupported, skip related test\n");
 		goto vm_free;
 	}
 
+	vcpu_enable_evmcs(vm, VCPU_ID);
+
 	hv_cpuid_entries = kvm_get_supported_hv_cpuid(vm);
 	if (!hv_cpuid_entries)
 		return 1;
@@ -99,8 +99,8 @@ int main(int argc, char *argv[])
 	msr_platform_info = vcpu_get_msr(vm, VCPU_ID, MSR_PLATFORM_INFO);
 	vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO,
 		msr_platform_info | MSR_PLATFORM_INFO_MAX_TURBO_RATIO);
-	test_msr_platform_info_disabled(vm);
 	test_msr_platform_info_enabled(vm);
+	test_msr_platform_info_disabled(vm);
 	vcpu_set_msr(vm, VCPU_ID, MSR_PLATFORM_INFO, msr_platform_info);
 
 	kvm_vm_free(vm);
@@ -25,24 +25,17 @@
 #define VMCS12_REVISION 0x11e57ed0
 #define VCPU_ID 5
 
+bool have_evmcs;
+
 void test_nested_state(struct kvm_vm *vm, struct kvm_nested_state *state)
 {
-	volatile struct kvm_run *run;
-
 	vcpu_nested_state_set(vm, VCPU_ID, state, false);
-	run = vcpu_state(vm, VCPU_ID);
-	vcpu_run(vm, VCPU_ID);
-	TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
-		"Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n",
-		run->exit_reason,
-		exit_reason_str(run->exit_reason));
 }
 
 void test_nested_state_expect_errno(struct kvm_vm *vm,
 				    struct kvm_nested_state *state,
 				    int expected_errno)
 {
-	volatile struct kvm_run *run;
 	int rv;
 
 	rv = vcpu_nested_state_set(vm, VCPU_ID, state, true);
@@ -50,12 +43,6 @@ void test_nested_state_expect_errno(struct kvm_vm *vm,
 		"Expected %s (%d) from vcpu_nested_state_set but got rv: %i errno: %s (%d)",
 		strerror(expected_errno), expected_errno, rv, strerror(errno),
 		errno);
-	run = vcpu_state(vm, VCPU_ID);
-	vcpu_run(vm, VCPU_ID);
-	TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN,
-		"Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s),\n",
-		run->exit_reason,
-		exit_reason_str(run->exit_reason));
 }
 
 void test_nested_state_expect_einval(struct kvm_vm *vm,
@@ -90,8 +77,9 @@ void set_default_vmx_state(struct kvm_nested_state *state, int size)
 {
 	memset(state, 0, size);
 	state->flags = KVM_STATE_NESTED_GUEST_MODE |
-			KVM_STATE_NESTED_RUN_PENDING |
-			KVM_STATE_NESTED_EVMCS;
+			KVM_STATE_NESTED_RUN_PENDING;
+	if (have_evmcs)
+		state->flags |= KVM_STATE_NESTED_EVMCS;
 	state->format = 0;
 	state->size = size;
 	state->hdr.vmx.vmxon_pa = 0x1000;
@@ -141,13 +129,19 @@ void test_vmx_nested_state(struct kvm_vm *vm)
 	/*
 	 * Setting vmxon_pa == -1ull and vmcs_pa == -1ull exits early without
 	 * setting the nested state but flags other than eVMCS must be clear.
+	 * The eVMCS flag can be set if the enlightened VMCS capability has
+	 * been enabled.
 	 */
 	set_default_vmx_state(state, state_sz);
 	state->hdr.vmx.vmxon_pa = -1ull;
 	state->hdr.vmx.vmcs12_pa = -1ull;
 	test_nested_state_expect_einval(vm, state);
 
-	state->flags = KVM_STATE_NESTED_EVMCS;
+	state->flags &= KVM_STATE_NESTED_EVMCS;
+	if (have_evmcs) {
+		test_nested_state_expect_einval(vm, state);
+		vcpu_enable_evmcs(vm, VCPU_ID);
+	}
 	test_nested_state(vm, state);
 
 	/* It is invalid to have vmxon_pa == -1ull and SMM flags non-zero. */
@@ -232,6 +226,8 @@ int main(int argc, char *argv[])
 	struct kvm_nested_state state;
 	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
+	have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);
+
 	if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) {
 		printf("KVM_CAP_NESTED_STATE not available, skipping test\n");
 		exit(KSFT_SKIP);