KVM fixes for v4.15-rc7
s390:
* Two fixes for potential bitmap overruns in the cmma migration code

x86:
* Clear guest provided GPRs to defeat the Project Zero PoC for CVE 2017-5715

-----BEGIN PGP SIGNATURE-----
iQEcBAABCAAGBQJaUTJ4AAoJEED/6hsPKofohk0IAJAFlMG66u5MxC0kSM61U4Zf
1vkzRwAkBbcN82LpGQKbqabVyTq0F3aLipyOn6WO5SN0K5m+OI2OV/aAroPyX8bI
F7nWIqTXLhJ9X6KXINFvyavHMprvWl8PA72tR/B/7GhhfShrZ2wGgqhl0vv/kCUK
/8q+5e693yJqw8ceemin9a6kPJrLpmjeH+Oy24KIlGbvJWV4UrIE86pRHnAnBtg8
L7Vbxn5+ezKmakvBh+zF8NKcD1zHDcmQZHoYFPsQT0vX5GPoYqT2bcO6gsh1Grmp
8ti6KkrnP+j2A/OEna4LBWfwKI/1xHXneB22BYrAxvNjHt+R4JrjaPpx82SEB4Y=
=URMR
-----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull KVM fixes from Radim Krčmář:

 "s390:
  - Two fixes for potential bitmap overruns in the cmma migration code

  x86:
  - Clear guest provided GPRs to defeat the Project Zero PoC for CVE 2017-5715"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  kvm: vmx: Scrub hardware GPRs at VM-exit
  KVM: s390: prevent buffer overrun on memory hotplug during migration
  KVM: s390: fix cmma migration for multiple memory slots
commit 5b6c02f383
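Background on the x86 item above: CVE 2017-5715 (branch target injection, Spectre variant 2) lets a guest mistrain the host's indirect branch predictor, and whatever guest-chosen values are still live in host registers at VM-exit become inputs to the speculatively executed code. The sketch below only illustrates the gadget shape such a value could reach; spec_gadget, secret and probe are invented names, the predictor mistraining and the cache-timing measurement are not shown, and this is not code from the PoC or the kernel.

#include <stdint.h>

/* Host data a guest must not be able to infer. */
static uint8_t secret[64];
/* Probe buffer whose cache footprint encodes the byte that was loaded. */
static uint8_t probe[256 * 4096];

/*
 * If a mistrained indirect branch lands here speculatively while 'idx'
 * still holds a guest-controlled register value, the dependent loads
 * leave a cache trace derived from host memory. (A real gadget would
 * use an unmasked index; the mask keeps this sketch safe to execute.)
 * Zeroing guest GPRs at VM-exit removes 'idx' as an attacker input.
 */
void spec_gadget(uint64_t idx)
{
	volatile uint8_t sink = probe[secret[idx & 63] * 4096];
	(void)sink;
}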
@@ -792,11 +792,12 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
 	if (kvm->arch.use_cmma) {
 		/*
-		 * Get the last slot. They should be sorted by base_gfn, so the
-		 * last slot is also the one at the end of the address space.
-		 * We have verified above that at least one slot is present.
+		 * Get the first slot. They are reverse sorted by base_gfn, so
+		 * the first slot is also the one at the end of the address
+		 * space. We have verified above that at least one slot is
+		 * present.
 		 */
-		ms = slots->memslots + slots->used_slots - 1;
+		ms = slots->memslots;
 		/* round up so we only use full longs */
 		ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
 		/* allocate enough bytes to store all the bits */
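The change above follows from how this kernel keeps its memslot array: slots are reverse sorted by base_gfn, so the slot that reaches the highest guest frame number sits at index 0, and sizing the migration bitmap from slots->memslots + used_slots - 1 under-allocates as soon as more than one slot exists. Below is a minimal user-space sketch of the sizing rule, with a simplified slot structure standing in for struct kvm_memory_slot and made-up values.

#include <stdio.h>

#define BITS_PER_LONG 64
#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

struct slot { unsigned long base_gfn, npages; };

int main(void)
{
	/* Reverse sorted by base_gfn: index 0 ends the guest address space. */
	struct slot slots[] = { { 0x100000, 0x800 }, { 0x0, 0x1000 } };

	struct slot *ms = &slots[0];   /* first slot == highest base_gfn */
	unsigned long ram_pages = ROUNDUP(ms->base_gfn + ms->npages, BITS_PER_LONG);

	/* One dirty bit per guest page, rounded up to whole longs. */
	printf("bitmap must cover %lu pages (%lu bytes)\n",
	       ram_pages, ram_pages / 8);
	return 0;
}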
@@ -1006,7 +1006,7 @@ static inline int do_essa(struct kvm_vcpu *vcpu, const int orc)
 		cbrlo[entries] = gfn << PAGE_SHIFT;
 	}

-	if (orc) {
+	if (orc && gfn < ms->bitmap_size) {
 		/* increment only if we are really flipping the bit to 1 */
 		if (!test_and_set_bit(gfn, ms->pgste_bitmap))
 			atomic64_inc(&ms->dirty_pages);
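The added gfn < ms->bitmap_size test matters because memory hot-plugged after migration has started is not covered by the bitmap that was sized when migration began; without the check, an ESSA on such a page would set a bit past the allocation. A hedged stand-alone sketch of the same guard follows; the structure, field names and helper are illustrative, not the kernel's.

#include <stdbool.h>
#include <limits.h>

struct migration_state {
	unsigned long *bitmap;       /* one dirty bit per guest page */
	unsigned long  bitmap_size;  /* number of valid bits in 'bitmap' */
};

/* Mark gfn dirty, refusing frames the bitmap was never sized for. */
static bool mark_dirty_checked(struct migration_state *ms, unsigned long gfn)
{
	const unsigned long bits = sizeof(unsigned long) * CHAR_BIT;

	if (gfn >= ms->bitmap_size)   /* hot-plugged page: not tracked */
		return false;
	ms->bitmap[gfn / bits] |= 1UL << (gfn % bits);
	return true;
}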
@@ -4985,6 +4985,25 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 		"mov %%r13, %c[r13](%[svm]) \n\t"
 		"mov %%r14, %c[r14](%[svm]) \n\t"
 		"mov %%r15, %c[r15](%[svm]) \n\t"
 #endif
+		/*
+		 * Clear host registers marked as clobbered to prevent
+		 * speculative use.
+		 */
+		"xor %%" _ASM_BX ", %%" _ASM_BX " \n\t"
+		"xor %%" _ASM_CX ", %%" _ASM_CX " \n\t"
+		"xor %%" _ASM_DX ", %%" _ASM_DX " \n\t"
+		"xor %%" _ASM_SI ", %%" _ASM_SI " \n\t"
+		"xor %%" _ASM_DI ", %%" _ASM_DI " \n\t"
+#ifdef CONFIG_X86_64
+		"xor %%r8, %%r8 \n\t"
+		"xor %%r9, %%r9 \n\t"
+		"xor %%r10, %%r10 \n\t"
+		"xor %%r11, %%r11 \n\t"
+		"xor %%r12, %%r12 \n\t"
+		"xor %%r13, %%r13 \n\t"
+		"xor %%r14, %%r14 \n\t"
+		"xor %%r15, %%r15 \n\t"
+#endif
 		"pop %%" _ASM_BP
 		:
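The SVM exit path above zeroes every clobbered GPR right after the guest's register values have been saved, so nothing guest-controlled stays live while host code (and any mispredicted branch in it) runs. Below is a compressed sketch of the same idiom as a free-standing GCC inline-asm helper; the function name and the register subset are illustrative. On x86_64, xor-ing the 32-bit alias already clears the full 64-bit register, which is presumably why the VMX hunk further down uses %%r8d and friends.

/* Build-tested shape for GCC on x86_64; illustrative only. */
static inline void scrub_guest_visible_gprs(void)
{
	/* Zero registers that may still hold guest-chosen values. */
	asm volatile("xor %%r8d,  %%r8d\n\t"
		     "xor %%r9d,  %%r9d\n\t"
		     "xor %%r10d, %%r10d\n\t"
		     "xor %%r11d, %%r11d\n\t"
		     ::: "r8", "r9", "r10", "r11", "cc");
}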
@@ -9415,6 +9415,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		/* Save guest registers, load host registers, keep flags */
 		"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
 		"pop %0 \n\t"
+		"setbe %c[fail](%0)\n\t"
 		"mov %%" _ASM_AX ", %c[rax](%0) \n\t"
 		"mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
 		__ASM_SIZE(pop) " %c[rcx](%0) \n\t"
@@ -9431,12 +9432,23 @@
 		"mov %%r13, %c[r13](%0) \n\t"
 		"mov %%r14, %c[r14](%0) \n\t"
 		"mov %%r15, %c[r15](%0) \n\t"
+		"xor %%r8d, %%r8d \n\t"
+		"xor %%r9d, %%r9d \n\t"
+		"xor %%r10d, %%r10d \n\t"
+		"xor %%r11d, %%r11d \n\t"
+		"xor %%r12d, %%r12d \n\t"
+		"xor %%r13d, %%r13d \n\t"
+		"xor %%r14d, %%r14d \n\t"
+		"xor %%r15d, %%r15d \n\t"
 #endif
 		"mov %%cr2, %%" _ASM_AX " \n\t"
 		"mov %%" _ASM_AX ", %c[cr2](%0) \n\t"

+		"xor %%eax, %%eax \n\t"
+		"xor %%ebx, %%ebx \n\t"
+		"xor %%esi, %%esi \n\t"
+		"xor %%edi, %%edi \n\t"
 		"pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
-		"setbe %c[fail](%0) \n\t"
 		".pushsection .rodata \n\t"
 		".global vmx_return \n\t"
 		"vmx_return: " _ASM_PTR " 2b \n\t"
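Two details in the VMX hunks interact: setbe materializes the VM-entry failure condition from the flags VMX leaves behind, and xor rewrites those flags, so the earlier hunk moves setbe ahead of the new clearing sequence instead of leaving it near the end. Below is a hedged x86_64/GCC sketch of that ordering constraint; the cmp stands in for an instruction that reports its result in CF/ZF the way VMLAUNCH/VMRESUME do, and none of this is the vmx_vcpu_run asm itself.

#include <stdio.h>

int main(void)
{
	unsigned char fail;
	unsigned long scratch = 2;

	asm volatile(
		"cmp $1, %[reg]\n\t"      /* 2 - 1: CF=0, ZF=0 -> "entry succeeded" */
		"setbe %[fail]\n\t"       /* capture the verdict first (fail = 0)   */
		"xor %[reg], %[reg]\n\t"  /* xor sets ZF; a later setbe would lie   */
		: [fail] "=m"(fail), [reg] "+r"(scratch)
		:
		: "cc");

	printf("failure flag captured before the xor: %u\n", fail);
	return 0;
}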