From cd758a9b57ee85f0733c759e60f42b969c81f27b Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 26 Aug 2019 16:20:47 +1000 Subject: [PATCH 01/12] KVM: PPC: Book3S HV: Use __gfn_to_pfn_memslot in HPT page fault handler This makes the same changes in the page fault handler for HPT guests that commits 31c8b0d0694a ("KVM: PPC: Book3S HV: Use __gfn_to_pfn_memslot() in page fault handler", 2018-03-01), 71d29f43b633 ("KVM: PPC: Book3S HV: Don't use compound_order to determine host mapping size", 2018-09-11) and 6579804c4317 ("KVM: PPC: Book3S HV: Avoid crash from THP collapse during radix page fault", 2018-10-04) made for the page fault handler for radix guests. In summary, where we used to call get_user_pages_fast() and then do special handling for VM_PFNMAP vmas, we now call __get_user_pages_fast() and then __gfn_to_pfn_memslot() if that fails, followed by reading the Linux PTE to get the host PFN, host page size and mapping attributes. This also brings in the change from SetPageDirty() to set_page_dirty_lock() which was done for the radix page fault handler in commit c3856aeb2940 ("KVM: PPC: Book3S HV: Fix handling of large pages in radix page fault handler", 2018-02-23). Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_64_mmu_hv.c | 119 +++++++++++++--------------- 1 file changed, 57 insertions(+), 62 deletions(-) diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 6c372f5c61b6..3aecec890d6f 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -485,18 +485,18 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, __be64 *hptep; unsigned long mmu_seq, psize, pte_size; unsigned long gpa_base, gfn_base; - unsigned long gpa, gfn, hva, pfn; + unsigned long gpa, gfn, hva, pfn, hpa; struct kvm_memory_slot *memslot; unsigned long *rmap; struct revmap_entry *rev; - struct page *page, *pages[1]; - long index, ret, npages; + struct page *page; + long index, ret; bool is_ci; - unsigned int writing, write_ok; - struct vm_area_struct *vma; + bool writing, write_ok; + unsigned int shift; unsigned long rcbits; long mmio_update; - struct mm_struct *mm; + pte_t pte, *ptep; if (kvm_is_radix(kvm)) return kvmppc_book3s_radix_page_fault(run, vcpu, ea, dsisr); @@ -570,59 +570,62 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, smp_rmb(); ret = -EFAULT; - is_ci = false; - pfn = 0; page = NULL; - mm = kvm->mm; - pte_size = PAGE_SIZE; writing = (dsisr & DSISR_ISSTORE) != 0; /* If writing != 0, then the HPTE must allow writing, if we get here */ write_ok = writing; hva = gfn_to_hva_memslot(memslot, gfn); - npages = get_user_pages_fast(hva, 1, writing ? FOLL_WRITE : 0, pages); - if (npages < 1) { - /* Check if it's an I/O mapping */ - down_read(&mm->mmap_sem); - vma = find_vma(mm, hva); - if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end && - (vma->vm_flags & VM_PFNMAP)) { - pfn = vma->vm_pgoff + - ((hva - vma->vm_start) >> PAGE_SHIFT); - pte_size = psize; - is_ci = pte_ci(__pte((pgprot_val(vma->vm_page_prot)))); - write_ok = vma->vm_flags & VM_WRITE; - } - up_read(&mm->mmap_sem); - if (!pfn) - goto out_put; + + /* + * Do a fast check first, since __gfn_to_pfn_memslot doesn't + * do it with !atomic && !async, which is how we call it. + * We always ask for write permission since the common case + * is that the page is writable. 
+ */ + if (__get_user_pages_fast(hva, 1, 1, &page) == 1) { + write_ok = true; } else { - page = pages[0]; - pfn = page_to_pfn(page); - if (PageHuge(page)) { - page = compound_head(page); - pte_size <<= compound_order(page); - } - /* if the guest wants write access, see if that is OK */ - if (!writing && hpte_is_writable(r)) { - pte_t *ptep, pte; - unsigned long flags; - /* - * We need to protect against page table destruction - * hugepage split and collapse. - */ - local_irq_save(flags); - ptep = find_current_mm_pte(mm->pgd, hva, NULL, NULL); - if (ptep) { - pte = kvmppc_read_update_linux_pte(ptep, 1); - if (__pte_write(pte)) - write_ok = 1; - } - local_irq_restore(flags); + /* Call KVM generic code to do the slow-path check */ + pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL, + writing, &write_ok); + if (is_error_noslot_pfn(pfn)) + return -EFAULT; + page = NULL; + if (pfn_valid(pfn)) { + page = pfn_to_page(pfn); + if (PageReserved(page)) + page = NULL; } } + /* + * Read the PTE from the process' radix tree and use that + * so we get the shift and attribute bits. + */ + local_irq_disable(); + ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift); + /* + * If the PTE disappeared temporarily due to a THP + * collapse, just return and let the guest try again. + */ + if (!ptep) { + local_irq_enable(); + if (page) + put_page(page); + return RESUME_GUEST; + } + pte = *ptep; + local_irq_enable(); + hpa = pte_pfn(pte) << PAGE_SHIFT; + pte_size = PAGE_SIZE; + if (shift) + pte_size = 1ul << shift; + is_ci = pte_ci(pte); + if (psize > pte_size) goto out_put; + if (pte_size > psize) + hpa |= hva & (pte_size - psize); /* Check WIMG vs. the actual page we're accessing */ if (!hpte_cache_flags_ok(r, is_ci)) { @@ -636,14 +639,13 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, } /* - * Set the HPTE to point to pfn. - * Since the pfn is at PAGE_SIZE granularity, make sure we + * Set the HPTE to point to hpa. + * Since the hpa is at PAGE_SIZE granularity, make sure we * don't mask out lower-order bits if psize < PAGE_SIZE. */ if (psize < PAGE_SIZE) psize = PAGE_SIZE; - r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) | - ((pfn << PAGE_SHIFT) & ~(psize - 1)); + r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) | hpa; if (hpte_is_writable(r) && !write_ok) r = hpte_make_readonly(r); ret = RESUME_GUEST; @@ -708,20 +710,13 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, asm volatile("ptesync" : : : "memory"); preempt_enable(); if (page && hpte_is_writable(r)) - SetPageDirty(page); + set_page_dirty_lock(page); out_put: trace_kvm_page_fault_exit(vcpu, hpte, ret); - if (page) { - /* - * We drop pages[0] here, not page because page might - * have been set to the head page of a compound, but - * we have to drop the reference on the correct tail - * page to match the get inside gup() - */ - put_page(pages[0]); - } + if (page) + put_page(page); return ret; out_unlock: From afd313564cf1904bbdb52052c80a2522b55782d3 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 18 Feb 2020 15:36:50 +1100 Subject: [PATCH 02/12] KVM: PPC: Book3S HV: Use RADIX_PTE_INDEX_SIZE in Radix MMU code In kvmppc_unmap_free_pte() in book3s_64_mmu_radix.c, we use the non-constant value PTE_INDEX_SIZE to clear a PTE page. We can instead use the constant RADIX_PTE_INDEX_SIZE, because we know this code will only be running when the Radix MMU is active. Note that we already use RADIX_PTE_INDEX_SIZE for the allocation of kvm_pte_cache. 
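For reference, a standalone sketch (not kernel code) of the arithmetic this relies on, assuming the 4K-page radix configuration where RADIX_PTE_INDEX_SIZE is 9: the memset then clears 2^9 eight-byte entries, i.e. one full 4096-byte PTE page.

#include <stdio.h>

#define RADIX_PTE_INDEX_SIZE 9	/* assumption: 4K-page radix value of the kernel constant */

int main(void)
{
	/* mirrors: memset(pte, 0, sizeof(long) << RADIX_PTE_INDEX_SIZE); */
	unsigned long bytes = sizeof(long) << RADIX_PTE_INDEX_SIZE;

	printf("bytes cleared per radix PTE page: %lu\n", bytes);	/* 4096 on LP64 */
	return 0;
}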
Signed-off-by: Michael Ellerman
Reviewed-by: Leonardo Bras
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kvm/book3s_64_mmu_radix.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 803940d79b73..134fbc1f029f 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -425,7 +425,7 @@ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
 				  unsigned int lpid)
 {
 	if (full) {
-		memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
+		memset(pte, 0, sizeof(long) << RADIX_PTE_INDEX_SIZE);
 	} else {
 		pte_t *p = pte;
 		unsigned long it;

From 1dff3064c764b5a51c367b949b341d2e38972bec Mon Sep 17 00:00:00 2001
From: Gustavo Romero
Date: Fri, 21 Feb 2020 11:29:50 -0500
Subject: [PATCH 03/12] KVM: PPC: Book3S HV: Treat TM-related invalid form instructions on P9 like the valid ones

On P9 DD2.2, due to a CPU defect, some TM instructions need to be emulated by KVM. This is handled at first by the hardware raising a softpatch interrupt when certain TM instructions that need KVM assistance are executed in the guest.

Although some TM instructions are invalid forms per the Power ISA, they can raise a softpatch interrupt too. For instance, the 'tresume.' instruction as defined in the ISA must have bit 31 set (1), but an instruction that matches the 'tresume.' PO and XO opcode fields yet has bit 31 not set (0), like 0x7cfe9ddc, also raises a softpatch interrupt. Similarly for the 'treclaim.' and 'trechkpt.' instructions with bit 31 = 0, i.e. 0x7c00075c and 0x7c0007dc, respectively.

Hence, if code like the following is executed in the guest, it will raise a softpatch interrupt just like a 'tresume.' when the TM facility is enabled ('tabort. 0' in the example is used only to enable the TM facility):

int main() { asm("tabort.
0; .long 0x7cfe9ddc;"); } Currently in such a case KVM throws a complete trace like: [345523.705984] WARNING: CPU: 24 PID: 64413 at arch/powerpc/kvm/book3s_hv_tm.c:211 kvmhv_p9_tm_emulation+0x68/0x620 [kvm_hv] [345523.705985] Modules linked in: kvm_hv(E) xt_conntrack ipt_REJECT nf_reject_ipv4 xt_tcpudp ip6table_mangle ip6table_nat iptable_mangle iptable_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 ebtable_filter ebtables ip6table_filter ip6_tables iptable_filter bridge stp llc sch_fq_codel ipmi_powernv at24 vmx_crypto ipmi_devintf ipmi_msghandler ibmpowernv uio_pdrv_genirq kvm opal_prd uio leds_powernv ib_iser rdma_cm iw_cm ib_cm ib_core iscsi_tcp libiscsi_tcp libiscsi scsi_transport_iscsi ip_tables x_tables autofs4 btrfs blake2b_generic zstd_compress raid10 raid456 async_raid6_recov async_memcpy async_pq async_xor async_tx libcrc32c xor raid6_pq raid1 raid0 multipath linear tg3 crct10dif_vpmsum crc32c_vpmsum ipr [last unloaded: kvm_hv] [345523.706030] CPU: 24 PID: 64413 Comm: CPU 0/KVM Tainted: G W E 5.5.0+ #1 [345523.706031] NIP: c0080000072cb9c0 LR: c0080000072b5e80 CTR: c0080000085c7850 [345523.706034] REGS: c000000399467680 TRAP: 0700 Tainted: G W E (5.5.0+) [345523.706034] MSR: 900000010282b033 CR: 24022428 XER: 00000000 [345523.706042] CFAR: c0080000072b5e7c IRQMASK: 0 GPR00: c0080000072b5e80 c000000399467910 c0080000072db500 c000000375ccc720 GPR04: c000000375ccc720 00000003fbec0000 0000a10395dda5a6 0000000000000000 GPR08: 000000007cfe9ddc 7cfe9ddc000005dc 7cfe9ddc7c0005dc c0080000072cd530 GPR12: c0080000085c7850 c0000003fffeb800 0000000000000001 00007dfb737f0000 GPR16: c0002001edcca558 0000000000000000 0000000000000000 0000000000000001 GPR20: c000000001b21258 c0002001edcca558 0000000000000018 0000000000000000 GPR24: 0000000001000000 ffffffffffffffff 0000000000000001 0000000000001500 GPR28: c0002001edcc4278 c00000037dd80000 800000050280f033 c000000375ccc720 [345523.706062] NIP [c0080000072cb9c0] kvmhv_p9_tm_emulation+0x68/0x620 [kvm_hv] [345523.706065] LR [c0080000072b5e80] kvmppc_handle_exit_hv.isra.53+0x3e8/0x798 [kvm_hv] [345523.706066] Call Trace: [345523.706069] [c000000399467910] [c000000399467940] 0xc000000399467940 (unreliable) [345523.706071] [c000000399467950] [c000000399467980] 0xc000000399467980 [345523.706075] [c0000003994679f0] [c0080000072bd1c4] kvmhv_run_single_vcpu+0xa1c/0xb80 [kvm_hv] [345523.706079] [c000000399467ac0] [c0080000072bd8e0] kvmppc_vcpu_run_hv+0x5b8/0xb00 [kvm_hv] [345523.706087] [c000000399467b90] [c0080000085c93cc] kvmppc_vcpu_run+0x34/0x48 [kvm] [345523.706095] [c000000399467bb0] [c0080000085c582c] kvm_arch_vcpu_ioctl_run+0x244/0x420 [kvm] [345523.706101] [c000000399467c40] [c0080000085b7498] kvm_vcpu_ioctl+0x3d0/0x7b0 [kvm] [345523.706105] [c000000399467db0] [c0000000004adf9c] ksys_ioctl+0x13c/0x170 [345523.706107] [c000000399467e00] [c0000000004adff8] sys_ioctl+0x28/0x80 [345523.706111] [c000000399467e20] [c00000000000b278] system_call+0x5c/0x68 [345523.706112] Instruction dump: [345523.706114] 419e0390 7f8a4840 409d0048 6d497c00 2f89075d 419e021c 6d497c00 2f8907dd [345523.706119] 419e01c0 6d497c00 2f8905dd 419e00a4 <0fe00000> 38210040 38600000 ebc1fff0 and then treats the executed instruction as a 'nop'. However the POWER9 User's Manual, in section "4.6.10 Book II Invalid Forms", informs that for TM instructions bit 31 is in fact ignored, thus for the TM-related invalid forms ignoring bit 31 and handling them like the valid forms is an acceptable way to handle them. POWER8 behaves the same way too. 
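To make the encoding concrete, here is a small standalone sketch (not part of the patch) of why masking with PO_XOP_OPCODE_MASK, introduced below, makes the invalid form hit the same switch case as the valid one. The PPC_INST_TSR value is inferred from the encodings quoted above (the bit-31-clear form plus bit 31 set) and should be treated as an assumption:

#include <stdio.h>
#include <stdint.h>

#define PO_XOP_OPCODE_MASK 0xfc0007fe	/* keeps PO and XO fields, drops bit 31 */
#define PPC_INST_TSR       0x7c0005dd	/* assumption: tsr./tresume. encoding with bit 31 = 1 */

int main(void)
{
	uint32_t invalid_form = 0x7cfe9ddc;	/* the encoding from the example above */

	if ((invalid_form & PO_XOP_OPCODE_MASK) ==
	    (PPC_INST_TSR & PO_XOP_OPCODE_MASK))
		printf("0x7cfe9ddc matches the tsr./tresume. case: 0x%08x\n",
		       invalid_form & PO_XOP_OPCODE_MASK);	/* prints 0x7c0005dc */
	return 0;
}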
This commit changes the handling of the cases described here: the TM-related invalid forms that can generate a softpatch interrupt are now treated just like their valid forms (w/ bit 31 = 1) instead of as a 'nop', and any other unrecognized case is now reported to the host and treated as an illegal instruction instead of producing a warning trace and being treated as a 'nop'.

Signed-off-by: Gustavo Romero
Reviewed-by: Segher Boessenkool
Acked-by: Michael Neuling
Reviewed-by: Leonardo Bras
Signed-off-by: Paul Mackerras
---
 arch/powerpc/include/asm/kvm_asm.h      |  3 +++
 arch/powerpc/kvm/book3s_hv_tm.c         | 28 ++++++++++++++++++++-----
 arch/powerpc/kvm/book3s_hv_tm_builtin.c | 16 ++++++++++++--
 3 files changed, 40 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 635fb154b33f..a3633560493b 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -150,4 +150,7 @@
 #define KVM_INST_FETCH_FAILED	-1
 
+/* Extract PO and XOP opcode fields */
+#define PO_XOP_OPCODE_MASK 0xfc0007fe
+
 #endif /* __POWERPC_KVM_ASM_H__ */

diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c
index 0db937497169..cc90b8b82329 100644
--- a/arch/powerpc/kvm/book3s_hv_tm.c
+++ b/arch/powerpc/kvm/book3s_hv_tm.c
@@ -3,6 +3,8 @@
  * Copyright 2017 Paul Mackerras, IBM Corp.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kvm_host.h>
 
 #include <asm/kvm_ppc.h>
@@ -44,7 +46,18 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
 	u64 newmsr, bescr;
 	int ra, rs;
 
-	switch (instr & 0xfc0007ff) {
+	/*
+	 * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
+	 * in these instructions, so masking bit 31 out doesn't change these
+	 * instructions. For treclaim., tsr., and trechkpt. instructions if bit
+	 * 31 = 0 then they are per ISA invalid forms, however P9 UM, in section
+	 * 4.6.10 Book II Invalid Forms, informs specifically that ignoring bit
+	 * 31 is an acceptable way to handle these invalid forms that have
+	 * bit 31 = 0. Moreover, for emulation purposes both forms (w/ and wo/
+	 * bit 31 set) can generate a softpatch interrupt. Hence both forms
+	 * are handled below for these instructions so they behave the same way.
+	 */
+	switch (instr & PO_XOP_OPCODE_MASK) {
 	case PPC_INST_RFID:
 		/* XXX do we need to check for PR=0 here? */
 		newmsr = vcpu->arch.shregs.srr1;
@@ -105,7 +118,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
 		vcpu->arch.shregs.msr = newmsr;
 		return RESUME_GUEST;
 
-	case PPC_INST_TSR:
+	/* ignore bit 31, see comment above */
+	case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
 		/* check for PR=1 and arch 2.06 bit set in PCR */
 		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
 			/* generate an illegal instruction interrupt */
@@ -140,7 +154,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
 		vcpu->arch.shregs.msr = msr;
 		return RESUME_GUEST;
 
-	case PPC_INST_TRECLAIM:
+	/* ignore bit 31, see comment above */
+	case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK):
 		/* check for TM disabled in the HFSCR or MSR */
 		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
 			/* generate an illegal instruction interrupt */
@@ -176,7 +191,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
 		vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
 		return RESUME_GUEST;
 
-	case PPC_INST_TRECHKPT:
+	/* ignore bit 31, see comment above */
+	case (PPC_INST_TRECHKPT & PO_XOP_OPCODE_MASK):
 		/* XXX do we need to check for PR=0 here? */
 		/* check for TM disabled in the HFSCR or MSR */
 		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
@@ -208,6 +224,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
 	}
 
 	/* What should we do here? We didn't recognize the instruction */
-	WARN_ON_ONCE(1);
+	kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+	pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation", instr);
+
 	return RESUME_GUEST;
 }

diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
index 217246279dfa..fad931f224ef 100644
--- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
@@ -23,7 +23,18 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
 	u64 newmsr, msr, bescr;
 	int rs;
 
-	switch (instr & 0xfc0007ff) {
+	/*
+	 * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
+	 * in these instructions, so masking bit 31 out doesn't change these
+	 * instructions. For the tsr. instruction if bit 31 = 0 then it is per
+	 * ISA an invalid form, however P9 UM, in section 4.6.10 Book II Invalid
+	 * Forms, informs specifically that ignoring bit 31 is an acceptable way
+	 * to handle TM-related invalid forms that have bit 31 = 0. Moreover,
+	 * for emulation purposes both forms (w/ and wo/ bit 31 set) can
+	 * generate a softpatch interrupt. Hence both forms are handled below
+	 * for tsr. to make them behave the same way.
+	 */
+	switch (instr & PO_XOP_OPCODE_MASK) {
 	case PPC_INST_RFID:
 		/* XXX do we need to check for PR=0 here? */
 		newmsr = vcpu->arch.shregs.srr1;
@@ -73,7 +84,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
 		vcpu->arch.shregs.msr = newmsr;
 		return 1;
 
-	case PPC_INST_TSR:
+	/* ignore bit 31, see comment above */
+	case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
 		/* we know the MSR has the TS field = S (0b01) here */
 		msr = vcpu->arch.shregs.msr;
 		/* check for PR=1 and arch 2.06 bit set in PCR */

From 8fc6ba0a205e9adfaf976077be976f1d4dcd45eb Mon Sep 17 00:00:00 2001
From: Michael Roth
Date: Tue, 10 Mar 2020 16:11:28 -0500
Subject: [PATCH 04/12] KVM: PPC: Book3S HV: Fix H_CEDE return code for nested guests

The h_cede_tm kvm-unit-test currently fails when run inside an L1 guest via the guest/nested hypervisor.

  ./run-tests.sh -v
  ...
  TESTNAME=h_cede_tm TIMEOUT=90s ACCEL= ./powerpc/run powerpc/tm.elf -smp 2,threads=2 -machine cap-htm=on -append "h_cede_tm"
  FAIL h_cede_tm (2 tests, 1 unexpected failures)

While the test relates to transactional memory instructions, the actual failure is due to the return code of the H_CEDE hypercall, which is reported as 224 instead of 0. This happens even when no TM instructions are issued.

224 is the value placed in r3 to execute a hypercall for H_CEDE, and r3 is where the caller expects the return code to be placed upon return.

In the case of a guest running under a nested hypervisor, issuing H_CEDE causes a return from H_ENTER_NESTED. In this case H_CEDE is specially handled immediately rather than later in kvmppc_pseries_do_hcall() as with most other hcalls, but we forget to set the return code for the caller, which is why kvm-unit-test sees the 224 return code and reports an error.

Guest kernels generally don't check the return value of H_CEDE, which likely explains why this hasn't caused issues outside of kvm-unit-tests so far.

Fix this by setting r3 to 0 after we finish processing the H_CEDE.
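To see why the guest observed 224 specifically, here is a toy model (illustrative C only; the struct and function names are hypothetical, not KVM code) of the PAPR convention that r3 carries the hcall number in and the return code out, with H_CEDE being 0xe0, i.e. 224 decimal:

#include <stdio.h>

#define H_CEDE    0xe0	/* hcall number: 0xe0 == 224 decimal */
#define H_SUCCESS 0

struct regs { unsigned long r3; };	/* r3: hcall number in, return code out */

static void cede_buggy(struct regs *r) { (void)r; /* forgets to set the rc */ }
static void cede_fixed(struct regs *r) { r->r3 = H_SUCCESS; }

int main(void)
{
	struct regs r = { .r3 = H_CEDE };

	cede_buggy(&r);
	printf("buggy: caller sees rc=%lu\n", r.r3);	/* 224 */

	r.r3 = H_CEDE;
	cede_fixed(&r);
	printf("fixed: caller sees rc=%lu\n", r.r3);	/* 0 */
	return 0;
}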
RHBZ: 1778556 Fixes: 4bad77799fed ("KVM: PPC: Book3S HV: Handle hypercalls correctly when nested") Cc: linuxppc-dev@ozlabs.org Cc: David Gibson Cc: Paul Mackerras Signed-off-by: Michael Roth Reviewed-by: David Gibson Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index fbc55a12b691..18675bd48e90 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -3615,6 +3615,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested && kvmppc_get_gpr(vcpu, 3) == H_CEDE) { kvmppc_nested_cede(vcpu); + kvmppc_set_gpr(vcpu, 3, 0); trap = 0; } } else { From 8fc6ba0a205e9adfaf976077be976f1d4dcd45eb Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 10 Mar 2020 21:51:30 -0700 Subject: [PATCH 05/12] KVM: PPC: Use fallthrough; Convert the various uses of fallthrough comments to fallthrough; Done via script Link: https://lore.kernel.org/lkml/b56602fcf79f849e733e7b521bb0e17895d390fa.1582230379.git.joe.com/ Signed-off-by: Joe Perches Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_32_mmu.c | 2 +- arch/powerpc/kvm/book3s_64_mmu.c | 2 +- arch/powerpc/kvm/book3s_pr.c | 2 +- arch/powerpc/kvm/booke.c | 6 +++--- arch/powerpc/kvm/powerpc.c | 1 - 5 files changed, 6 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c index f21e73492ce3..3fbd570f9c1e 100644 --- a/arch/powerpc/kvm/book3s_32_mmu.c +++ b/arch/powerpc/kvm/book3s_32_mmu.c @@ -234,7 +234,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, case 2: case 6: pte->may_write = true; - /* fall through */ + fallthrough; case 3: case 5: case 7: diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index 599133256a95..26b8b27a3755 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c @@ -311,7 +311,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, case 2: case 6: gpte->may_write = true; - /* fall through */ + fallthrough; case 3: case 5: case 7: diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 3bc2f5da8fa1..1d9c554746a9 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -740,7 +740,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) && ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)) pte.raddr &= ~SPLIT_HACK_MASK; - /* fall through */ + fallthrough; case MSR_IR: vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index c9f4b374dc56..10e2d76d8546 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -421,11 +421,11 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, case BOOKE_IRQPRIO_DATA_STORAGE: case BOOKE_IRQPRIO_ALIGNMENT: update_dear = true; - /* fall through */ + fallthrough; case BOOKE_IRQPRIO_INST_STORAGE: case BOOKE_IRQPRIO_PROGRAM: update_esr = true; - /* fall through */ + fallthrough; case BOOKE_IRQPRIO_ITLB_MISS: case BOOKE_IRQPRIO_SYSCALL: case BOOKE_IRQPRIO_FP_UNAVAIL: @@ -459,7 +459,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, case BOOKE_IRQPRIO_DECREMENTER: case BOOKE_IRQPRIO_FIT: keep_irq = true; - /* fall through */ + fallthrough; case BOOKE_IRQPRIO_EXTERNAL: case BOOKE_IRQPRIO_DBELL: allowed = 
vcpu->arch.shared->msr & MSR_EE; diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 62ee66d5eb6f..6729c13161f7 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -524,7 +524,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = 1; break; case KVM_CAP_PPC_GUEST_DEBUG_SSTEP: - /* fall through */ case KVM_CAP_PPC_PAIRED_SINGLES: case KVM_CAP_PPC_OSI: case KVM_CAP_PPC_GET_PVINFO: From b2fa4f9088db497fb1b4c6b71e64045b3597ed1f Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Wed, 18 Mar 2020 18:43:30 +0100 Subject: [PATCH 06/12] KVM: PPC: Book3S PR: Fix kernel crash with PR KVM With PR KVM, shutting down a VM causes the host kernel to crash: [ 314.219284] BUG: Unable to handle kernel data access on read at 0xc00800000176c638 [ 314.219299] Faulting instruction address: 0xc008000000d4ddb0 cpu 0x0: Vector: 300 (Data Access) at [c00000036da077a0] pc: c008000000d4ddb0: kvmppc_mmu_pte_flush_all+0x68/0xd0 [kvm_pr] lr: c008000000d4dd94: kvmppc_mmu_pte_flush_all+0x4c/0xd0 [kvm_pr] sp: c00000036da07a30 msr: 900000010280b033 dar: c00800000176c638 dsisr: 40000000 current = 0xc00000036d4c0000 paca = 0xc000000001a00000 irqmask: 0x03 irq_happened: 0x01 pid = 1992, comm = qemu-system-ppc Linux version 5.6.0-master-gku+ (greg@palmb) (gcc version 7.5.0 (Ubuntu 7.5.0-3ubuntu1~18.04)) #17 SMP Wed Mar 18 13:49:29 CET 2020 enter ? for help [c00000036da07ab0] c008000000d4fbe0 kvmppc_mmu_destroy_pr+0x28/0x60 [kvm_pr] [c00000036da07ae0] c0080000009eab8c kvmppc_mmu_destroy+0x34/0x50 [kvm] [c00000036da07b00] c0080000009e50c0 kvm_arch_vcpu_destroy+0x108/0x140 [kvm] [c00000036da07b30] c0080000009d1b50 kvm_vcpu_destroy+0x28/0x80 [kvm] [c00000036da07b60] c0080000009e4434 kvm_arch_destroy_vm+0xbc/0x190 [kvm] [c00000036da07ba0] c0080000009d9c2c kvm_put_kvm+0x1d4/0x3f0 [kvm] [c00000036da07c00] c0080000009da760 kvm_vm_release+0x38/0x60 [kvm] [c00000036da07c30] c000000000420be0 __fput+0xe0/0x310 [c00000036da07c90] c0000000001747a0 task_work_run+0x150/0x1c0 [c00000036da07cf0] c00000000014896c do_exit+0x44c/0xd00 [c00000036da07dc0] c0000000001492f4 do_group_exit+0x64/0xd0 [c00000036da07e00] c000000000149384 sys_exit_group+0x24/0x30 [c00000036da07e20] c00000000000b9d0 system_call+0x5c/0x68 This is caused by a use-after-free in kvmppc_mmu_pte_flush_all() which dereferences vcpu->arch.book3s which was previously freed by kvmppc_core_vcpu_free_pr(). This happens because kvmppc_mmu_destroy() is called after kvmppc_core_vcpu_free() since commit ff030fdf5573 ("KVM: PPC: Move kvm_vcpu_init() invocation to common code"). The kvmppc_mmu_destroy() helper calls one of the following depending on the KVM backend: - kvmppc_mmu_destroy_hv() which does nothing (Book3s HV) - kvmppc_mmu_destroy_pr() which undoes the effects of kvmppc_mmu_init() (Book3s PR 32-bit) - kvmppc_mmu_destroy_pr() which undoes the effects of kvmppc_mmu_init() (Book3s PR 64-bit) - kvmppc_mmu_destroy_e500() which does nothing (BookE e500/e500mc) It turns out that this is only relevant to PR KVM actually. And both 32 and 64 backends need vcpu->arch.book3s to be valid when calling kvmppc_mmu_destroy_pr(). So instead of calling kvmppc_mmu_destroy() from kvm_arch_vcpu_destroy(), call kvmppc_mmu_destroy_pr() at the beginning of kvmppc_core_vcpu_free_pr(). This is consistent with kvmppc_mmu_init() being the last call in kvmppc_core_vcpu_create_pr(). 
For the same reason, if kvmppc_core_vcpu_create_pr() returns an error then this means that kvmppc_mmu_init() was either not called or failed, in which case kvmppc_mmu_destroy() should not be called. Drop the line in the error path of kvm_arch_vcpu_create(). Fixes: ff030fdf5573 ("KVM: PPC: Move kvm_vcpu_init() invocation to common code") Signed-off-by: Greg Kurz Reviewed-by: Sean Christopherson Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_pr.c | 1 + arch/powerpc/kvm/powerpc.c | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 1d9c554746a9..9b112bd243d9 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -1817,6 +1817,7 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); + kvmppc_mmu_destroy_pr(vcpu); free_page((unsigned long)vcpu->arch.shared & PAGE_MASK); #ifdef CONFIG_KVM_BOOK3S_32_HANDLER kfree(vcpu->arch.shadow_vcpu); diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 6729c13161f7..e229a81016d0 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -750,7 +750,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) return 0; out_vcpu_uninit: - kvmppc_mmu_destroy(vcpu); kvmppc_subarch_vcpu_uninit(vcpu); return err; } @@ -783,7 +782,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) kvmppc_core_vcpu_free(vcpu); - kvmppc_mmu_destroy(vcpu); kvmppc_subarch_vcpu_uninit(vcpu); } From 3f1268dda8e47f808f4f50f24715b84d4b228bf3 Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Wed, 18 Mar 2020 18:43:36 +0100 Subject: [PATCH 07/12] KVM: PPC: Book3S PR: Move kvmppc_mmu_init() into PR KVM This is only relevant to PR KVM. Make it obvious by moving the function declaration to the Book3s header and rename it with a _pr suffix. 
Signed-off-by: Greg Kurz Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_ppc.h | 1 - arch/powerpc/kvm/book3s.h | 1 + arch/powerpc/kvm/book3s_32_mmu_host.c | 2 +- arch/powerpc/kvm/book3s_64_mmu_host.c | 2 +- arch/powerpc/kvm/book3s_pr.c | 2 +- 5 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 406ec46304d5..0b80e3420fef 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -108,7 +108,6 @@ extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid); extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu); -extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu); extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index, diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h index 3a4613985949..eae259ee49af 100644 --- a/arch/powerpc/kvm/book3s.h +++ b/arch/powerpc/kvm/book3s.h @@ -16,6 +16,7 @@ extern int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, extern int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva); extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte); +extern int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu); extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu); extern int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int inst, int *advance); diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c index d4cb3bcf41b6..e8e7b2c530d1 100644 --- a/arch/powerpc/kvm/book3s_32_mmu_host.c +++ b/arch/powerpc/kvm/book3s_32_mmu_host.c @@ -356,7 +356,7 @@ void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu) /* From mm/mmu_context_hash32.c */ #define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff) -int kvmppc_mmu_init(struct kvm_vcpu *vcpu) +int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); int err; diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index 044dd49eeb9d..e452158a18d7 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -384,7 +384,7 @@ void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu) __destroy_context(to_book3s(vcpu)->context_id[0]); } -int kvmppc_mmu_init(struct kvm_vcpu *vcpu) +int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu) { struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); int err; diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 9b112bd243d9..ec042e0327b9 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -1795,7 +1795,7 @@ static int kvmppc_core_vcpu_create_pr(struct kvm_vcpu *vcpu) vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE; - err = kvmppc_mmu_init(vcpu); + err = kvmppc_mmu_init_pr(vcpu); if (err < 0) goto free_shared_page; From 6fef0c6bbe4987fc94c14f52782b224ddaf3530b Mon Sep 17 00:00:00 2001 From: Greg Kurz Date: Wed, 18 Mar 2020 18:43:42 +0100 Subject: [PATCH 08/12] KVM: PPC: Kill kvmppc_ops::mmu_destroy() and kvmppc_mmu_destroy() These are only used by HV KVM and BookE, and in both cases they are nops. 
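As a toy illustration of the pattern being removed (hypothetical names, not the kernel's types): once every implementation behind a function-pointer hook is a nop, the hook, its implementations, and the dispatching wrapper can all be deleted instead of being kept as dead indirection.

#include <stddef.h>

struct toy_ops {
	void (*mmu_destroy)(void *vcpu);	/* the always-nop hook */
};

static void mmu_destroy_nop(void *vcpu) { (void)vcpu; }	/* what every backend supplied */

int main(void)
{
	struct toy_ops ops = { .mmu_destroy = mmu_destroy_nop };

	ops.mmu_destroy(NULL);	/* before: an indirect call that does nothing */
	/* after this patch: the hook, the nops, and the wrapper are gone entirely */
	return 0;
}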
Signed-off-by: Greg Kurz Signed-off-by: Paul Mackerras --- arch/powerpc/include/asm/kvm_ppc.h | 2 -- arch/powerpc/kvm/book3s.c | 5 ----- arch/powerpc/kvm/book3s_hv.c | 6 ------ arch/powerpc/kvm/book3s_pr.c | 1 - arch/powerpc/kvm/booke.c | 5 ----- arch/powerpc/kvm/booke.h | 2 -- arch/powerpc/kvm/e500.c | 1 - arch/powerpc/kvm/e500_mmu.c | 4 ---- arch/powerpc/kvm/e500mc.c | 1 - 9 files changed, 27 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 0b80e3420fef..e716862d56b9 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -107,7 +107,6 @@ extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, unsigned int gtlb_idx); extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode); extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid); -extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu); extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr); extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index, @@ -288,7 +287,6 @@ struct kvmppc_ops { int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end); int (*test_age_hva)(struct kvm *kvm, unsigned long hva); void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte); - void (*mmu_destroy)(struct kvm_vcpu *vcpu); void (*free_memslot)(struct kvm_memory_slot *slot); int (*init_vm)(struct kvm *kvm); void (*destroy_vm)(struct kvm *kvm); diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 0adaf4791a6d..5690a1f9b976 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -858,11 +858,6 @@ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) return 0; } -void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) -{ - vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu); -} - int kvmppc_core_init_vm(struct kvm *kvm) { diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 18675bd48e90..85e75b12e513 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4555,11 +4555,6 @@ void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask) } } -static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu) -{ - return; -} - void kvmppc_setup_partition_table(struct kvm *kvm) { unsigned long dw0, dw1; @@ -5523,7 +5518,6 @@ static struct kvmppc_ops kvm_ops_hv = { .age_hva = kvm_age_hva_hv, .test_age_hva = kvm_test_age_hva_hv, .set_spte_hva = kvm_set_spte_hva_hv, - .mmu_destroy = kvmppc_mmu_destroy_hv, .free_memslot = kvmppc_core_free_memslot_hv, .init_vm = kvmppc_core_init_vm_hv, .destroy_vm = kvmppc_core_destroy_vm_hv, diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index ec042e0327b9..a0f6813f4560 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -2087,7 +2087,6 @@ static struct kvmppc_ops kvm_ops_pr = { .age_hva = kvm_age_hva_pr, .test_age_hva = kvm_test_age_hva_pr, .set_spte_hva = kvm_set_spte_hva_pr, - .mmu_destroy = kvmppc_mmu_destroy_pr, .free_memslot = kvmppc_core_free_memslot_pr, .init_vm = kvmppc_core_init_vm_pr, .destroy_vm = kvmppc_core_destroy_vm_pr, diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 10e2d76d8546..6c18ea88fd25 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -2073,11 +2073,6 @@ void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu) kvmppc_clear_dbsr(); } -void kvmppc_mmu_destroy(struct kvm_vcpu 
*vcpu) -{ - vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu); -} - int kvmppc_core_init_vm(struct kvm *kvm) { return kvm->arch.kvm_ops->init_vm(kvm); diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h index 9d3169fbce55..65b4d337d337 100644 --- a/arch/powerpc/kvm/booke.h +++ b/arch/powerpc/kvm/booke.h @@ -94,7 +94,6 @@ enum int_class { void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type); -extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu); extern int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int inst, int *advance); @@ -102,7 +101,6 @@ extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_val); extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val); -extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu); extern int kvmppc_core_emulate_op_e500(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int inst, int *advance); diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c index f2b4feaff6d2..7e8b69015d20 100644 --- a/arch/powerpc/kvm/e500.c +++ b/arch/powerpc/kvm/e500.c @@ -490,7 +490,6 @@ static struct kvmppc_ops kvm_ops_e500 = { .vcpu_put = kvmppc_core_vcpu_put_e500, .vcpu_create = kvmppc_core_vcpu_create_e500, .vcpu_free = kvmppc_core_vcpu_free_e500, - .mmu_destroy = kvmppc_mmu_destroy_e500, .init_vm = kvmppc_core_init_vm_e500, .destroy_vm = kvmppc_core_destroy_vm_e500, .emulate_op = kvmppc_core_emulate_op_e500, diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c index 2d910b87e441..e131fbecdcc4 100644 --- a/arch/powerpc/kvm/e500_mmu.c +++ b/arch/powerpc/kvm/e500_mmu.c @@ -533,10 +533,6 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index, return get_tlb_raddr(gtlbe) | (eaddr & pgmask); } -void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu) -{ -} - /*****************************************/ static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500) diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c index e6b06cb2b92c..1c189b5aadcc 100644 --- a/arch/powerpc/kvm/e500mc.c +++ b/arch/powerpc/kvm/e500mc.c @@ -376,7 +376,6 @@ static struct kvmppc_ops kvm_ops_e500mc = { .vcpu_put = kvmppc_core_vcpu_put_e500mc, .vcpu_create = kvmppc_core_vcpu_create_e500mc, .vcpu_free = kvmppc_core_vcpu_free_e500mc, - .mmu_destroy = kvmppc_mmu_destroy_e500, .init_vm = kvmppc_core_init_vm_e500mc, .destroy_vm = kvmppc_core_destroy_vm_e500mc, .emulate_op = kvmppc_core_emulate_op_e500, From 9bee484b280a059c1faa10ae174af4f4af02c805 Mon Sep 17 00:00:00 2001 From: Fabiano Rosas Date: Thu, 19 Mar 2020 19:55:10 -0300 Subject: [PATCH 09/12] KVM: PPC: Book3S HV: Skip kvmppc_uvmem_free if Ultravisor is not supported kvmppc_uvmem_init checks for Ultravisor support and returns early if it is not present. 
Calling kvmppc_uvmem_free at module exit will cause an Oops:

$ modprobe -r kvm-hv

  Oops: Kernel access of bad area, sig: 11 [#1]
  NIP:  c000000000789e90 LR: c000000000789e8c CTR: c000000000401030
  REGS: c000003fa7bab9a0 TRAP: 0300   Not tainted  (5.6.0-rc6-00033-g6c90b86a745a-dirty)
  MSR:  9000000000009033 CR: 24002282  XER: 00000000
  CFAR: c000000000dae880 DAR: 0000000000000008 DSISR: 40000000 IRQMASK: 1
  GPR00: c000000000789e8c c000003fa7babc30 c0000000016fe500 0000000000000000
  GPR04: 0000000000000000 0000000000000006 0000000000000000 c000003faf205c00
  GPR08: 0000000000000000 0000000000000001 000000008000002d c00800000ddde140
  GPR12: c000000000401030 c000003ffffd9080 0000000000000001 0000000000000000
  GPR16: 0000000000000000 0000000000000000 000000013aad0074 000000013aaac978
  GPR20: 000000013aad0070 0000000000000000 00007fffd1b37158 0000000000000000
  GPR24: 000000014fef0d58 0000000000000000 000000014fef0cf0 0000000000000001
  GPR28: 0000000000000000 0000000000000000 c0000000018b2a60 0000000000000000
  NIP [c000000000789e90] percpu_ref_kill_and_confirm+0x40/0x170
  LR [c000000000789e8c] percpu_ref_kill_and_confirm+0x3c/0x170
  Call Trace:
  [c000003fa7babc30] [c000003faf2064d4] 0xc000003faf2064d4 (unreliable)
  [c000003fa7babcb0] [c000000000400e8c] dev_pagemap_kill+0x6c/0x80
  [c000003fa7babcd0] [c000000000401064] memunmap_pages+0x34/0x2f0
  [c000003fa7babd50] [c00800000dddd548] kvmppc_uvmem_free+0x30/0x80 [kvm_hv]
  [c000003fa7babd80] [c00800000ddcef18] kvmppc_book3s_exit_hv+0x20/0x78 [kvm_hv]
  [c000003fa7babda0] [c0000000002084d0] sys_delete_module+0x1d0/0x2c0
  [c000003fa7babe20] [c00000000000b9d0] system_call+0x5c/0x68
  Instruction dump:
  3fc2001b fb81ffe0 fba1ffe8 fbe1fff8 7c7f1b78 7c9c2378 3bde4560 7fc3f378
  f8010010 f821ff81 486249a1 60000000 7c7d1b78 712a0002 40820084
  ---[ end trace 5774ef4dc2c98279 ]---

So this patch checks if kvmppc_uvmem_init actually allocated anything before running kvmppc_uvmem_free.

Fixes: ca9f4942670c ("KVM: PPC: Book3S HV: Support for running secure guests")
Cc: stable@vger.kernel.org # v5.5+
Reported-by: Greg Kurz
Signed-off-by: Fabiano Rosas
Tested-by: Greg Kurz
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kvm/book3s_hv_uvmem.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 79b1202b1c62..9d26614b2a77 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -806,6 +806,9 @@ int kvmppc_uvmem_init(void)
 
 void kvmppc_uvmem_free(void)
 {
+	if (!kvmppc_uvmem_bitmap)
+		return;
+
 	memunmap_pages(&kvmppc_uvmem_pgmap);
 	release_mem_region(kvmppc_uvmem_pgmap.res.start,
 			   resource_size(&kvmppc_uvmem_pgmap.res));

From 8c47b6ff29e3d88484fe59d02f9db6de7e44e310 Mon Sep 17 00:00:00 2001
From: Laurent Dufour
Date: Fri, 20 Mar 2020 11:26:42 +0100
Subject: [PATCH 10/12] KVM: PPC: Book3S HV: Check caller of H_SVM_* Hcalls

The Hcalls named H_SVM_* are reserved for the Ultravisor. However, nothing prevents a malicious VM or SVM from calling them. This could lead to weird results and should be filtered out.

Checking the Secure bit of the calling MSR ensures that the call is coming from either the Ultravisor or an SVM. But any system call made from an SVM goes through the Ultravisor, and the Ultravisor should filter out such malicious calls. This way, only the Ultravisor is able to make such an Hcall.
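Below, each H_SVM_* case gains the same guard. As a standalone sketch of that gating pattern (the MSR_S bit position and the H_UNSUPPORTED value are assumptions mirroring the kernel's reg.h and hvcall.h, and h_svm_init_start() is a stand-in backend):

#include <stdio.h>

#define MSR_S		(1ul << 22)	/* assumption: Secure bit, as in reg.h */
#define H_SUCCESS	0
#define H_UNSUPPORTED	(-67l)		/* assumption: hvcall.h error value */

static long h_svm_init_start(void) { return H_SUCCESS; }	/* stand-in backend */

/* Honour the hcall only if the interrupted context (SRR1 image) was secure. */
static long handle_h_svm_init_start(unsigned long srr1)
{
	long ret = H_UNSUPPORTED;

	if (srr1 & MSR_S)
		ret = h_svm_init_start();
	return ret;
}

int main(void)
{
	printf("secure caller:     %ld\n", handle_h_svm_init_start(MSR_S));
	printf("non-secure caller: %ld\n", handle_h_svm_init_start(0));
	return 0;
}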
Cc: Bharata B Rao
Cc: Benjamin Herrenschmidt
Cc: Michael Ellerman
Signed-off-by: Laurent Dufour
Reviewed-by: Ram Pai
Signed-off-by: Paul Mackerras
---
 arch/powerpc/kvm/book3s_hv.c | 32 +++++++++++++++++++++-----------
 1 file changed, 21 insertions(+), 11 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 85e75b12e513..a308de610cdf 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1073,25 +1073,35 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 					 kvmppc_get_gpr(vcpu, 6));
 		break;
 	case H_SVM_PAGE_IN:
-		ret = kvmppc_h_svm_page_in(vcpu->kvm,
-					   kvmppc_get_gpr(vcpu, 4),
-					   kvmppc_get_gpr(vcpu, 5),
-					   kvmppc_get_gpr(vcpu, 6));
+		ret = H_UNSUPPORTED;
+		if (kvmppc_get_srr1(vcpu) & MSR_S)
+			ret = kvmppc_h_svm_page_in(vcpu->kvm,
+						   kvmppc_get_gpr(vcpu, 4),
+						   kvmppc_get_gpr(vcpu, 5),
+						   kvmppc_get_gpr(vcpu, 6));
 		break;
 	case H_SVM_PAGE_OUT:
-		ret = kvmppc_h_svm_page_out(vcpu->kvm,
-					    kvmppc_get_gpr(vcpu, 4),
-					    kvmppc_get_gpr(vcpu, 5),
-					    kvmppc_get_gpr(vcpu, 6));
+		ret = H_UNSUPPORTED;
+		if (kvmppc_get_srr1(vcpu) & MSR_S)
+			ret = kvmppc_h_svm_page_out(vcpu->kvm,
+						    kvmppc_get_gpr(vcpu, 4),
+						    kvmppc_get_gpr(vcpu, 5),
+						    kvmppc_get_gpr(vcpu, 6));
 		break;
 	case H_SVM_INIT_START:
-		ret = kvmppc_h_svm_init_start(vcpu->kvm);
+		ret = H_UNSUPPORTED;
+		if (kvmppc_get_srr1(vcpu) & MSR_S)
+			ret = kvmppc_h_svm_init_start(vcpu->kvm);
 		break;
 	case H_SVM_INIT_DONE:
-		ret = kvmppc_h_svm_init_done(vcpu->kvm);
+		ret = H_UNSUPPORTED;
+		if (kvmppc_get_srr1(vcpu) & MSR_S)
+			ret = kvmppc_h_svm_init_done(vcpu->kvm);
 		break;
 	case H_SVM_INIT_ABORT:
-		ret = kvmppc_h_svm_init_abort(vcpu->kvm);
+		ret = H_UNSUPPORTED;
+		if (kvmppc_get_srr1(vcpu) & MSR_S)
+			ret = kvmppc_h_svm_init_abort(vcpu->kvm);
 		break;
 	default:

From 377f02d487b5f74a2411fa01316ba4aff1819629 Mon Sep 17 00:00:00 2001
From: Laurent Dufour
Date: Fri, 20 Mar 2020 11:26:43 +0100
Subject: [PATCH 11/12] KVM: PPC: Book3S HV: H_SVM_INIT_START must call UV_RETURN

When the call to UV_REGISTER_MEM_SLOT fails, for instance because there is not enough free secure memory, the Hypervisor (HV) has to call UV_RETURN to report the error to the Ultravisor (UV). Then the UV will call H_SVM_INIT_ABORT to abort the securing phase and go back to the calling VM.

If kvm->arch.secure_guest is not set, the return path calls rfid, but there is no valid context to get back to in the SVM since the Hcall was routed in by the Ultravisor.

Move the setting of kvm->arch.secure_guest earlier in kvmppc_h_svm_init_start() so that in the return path, UV_RETURN will be called instead of rfid.
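Reduced to a toy model (hypothetical names; the real selection happens in the interrupt return path), the fix is the classic "set the state that steers the error path before the first early return":

#include <stdio.h>

#define KVMPPC_SECURE_INIT_START 0x1	/* stand-in for the kernel flag */

static unsigned long secure_guest;	/* models kvm->arch.secure_guest */

static const char *return_mechanism(void)
{
	return secure_guest ? "UV_RETURN" : "rfid";
}

static long h_svm_init_start(int uv_register_fails)
{
	secure_guest = KVMPPC_SECURE_INIT_START;	/* now set before any early return */
	if (uv_register_fails)
		return -1;	/* error path still exits via UV_RETURN */
	return 0;
}

int main(void)
{
	h_svm_init_start(1);
	printf("error path returns via %s\n", return_mechanism());	/* UV_RETURN */
	return 0;
}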
Cc: Bharata B Rao Cc: Benjamin Herrenschmidt Cc: Michael Ellerman Signed-off-by: Laurent Dufour Reviewed-by: Ram Pai Tested-by: Fabiano Rosas Signed-off-by: Paul Mackerras --- arch/powerpc/kvm/book3s_hv_uvmem.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index 9d26614b2a77..53b88cae3e73 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -209,6 +209,8 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm) int ret = H_SUCCESS; int srcu_idx; + kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START; + if (!kvmppc_uvmem_bitmap) return H_UNSUPPORTED; @@ -233,7 +235,6 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm) goto out; } } - kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_START; out: srcu_read_unlock(&kvm->srcu, srcu_idx); return ret; From 9a5788c615f52f6d7bf0b61986a632d4ec86791d Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Thu, 19 Mar 2020 15:29:55 +1100 Subject: [PATCH 12/12] KVM: PPC: Book3S HV: Add a capability for enabling secure guests At present, on Power systems with Protected Execution Facility hardware and an ultravisor, a KVM guest can transition to being a secure guest at will. Userspace (QEMU) has no way of knowing whether a host system is capable of running secure guests. This will present a problem in future when the ultravisor is capable of migrating secure guests from one host to another, because virtualization management software will have no way to ensure that secure guests only run in domains where all of the hosts can support secure guests. This adds a VM capability which has two functions: (a) userspace can query it to find out whether the host can support secure guests, and (b) userspace can enable it for a guest, which allows that guest to become a secure guest. If userspace does not enable it, KVM will return an error when the ultravisor does the hypercall that indicates that the guest is starting to transition to a secure guest. The ultravisor will then abort the transition and the guest will terminate. Signed-off-by: Paul Mackerras Reviewed-by: David Gibson Reviewed-by: Ram Pai --- Documentation/virt/kvm/api.rst | 17 +++++++++++++++++ arch/powerpc/include/asm/kvm_book3s_uvmem.h | 6 ++++++ arch/powerpc/include/asm/kvm_host.h | 1 + arch/powerpc/include/asm/kvm_ppc.h | 1 + arch/powerpc/kvm/book3s_hv.c | 16 ++++++++++++++++ arch/powerpc/kvm/book3s_hv_uvmem.c | 13 +++++++++++++ arch/powerpc/kvm/powerpc.c | 14 ++++++++++++++ include/uapi/linux/kvm.h | 1 + 8 files changed, 69 insertions(+) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 158d1186d103..a9255002aa1c 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -5779,6 +5779,23 @@ it hard or impossible to use it correctly. The availability of KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 signals that those bugs are fixed. Userspace should not try to use KVM_CAP_MANUAL_DIRTY_LOG_PROTECT. +7.19 KVM_CAP_PPC_SECURE_GUEST +------------------------------ + +:Architectures: ppc + +This capability indicates that KVM is running on a host that has +ultravisor firmware and thus can support a secure guest. On such a +system, a guest can ask the ultravisor to make it a secure guest, +one whose memory is inaccessible to the host except for pages which +are explicitly requested to be shared with the host. The ultravisor +notifies KVM when a guest requests to become a secure guest, and KVM +has the opportunity to veto the transition. 
+ +If present, this capability can be enabled for a VM, meaning that KVM +will allow the transition to secure guest mode. Otherwise KVM will +veto the transition. + 8. Other capabilities. ====================== diff --git a/arch/powerpc/include/asm/kvm_book3s_uvmem.h b/arch/powerpc/include/asm/kvm_book3s_uvmem.h index 5a9834e0e2d1..9cb7d8be2366 100644 --- a/arch/powerpc/include/asm/kvm_book3s_uvmem.h +++ b/arch/powerpc/include/asm/kvm_book3s_uvmem.h @@ -5,6 +5,7 @@ #ifdef CONFIG_PPC_UV int kvmppc_uvmem_init(void); void kvmppc_uvmem_free(void); +bool kvmppc_uvmem_available(void); int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot); void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot); @@ -30,6 +31,11 @@ static inline int kvmppc_uvmem_init(void) static inline void kvmppc_uvmem_free(void) { } +static inline bool kvmppc_uvmem_available(void) +{ + return false; +} + static inline int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot) { diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 6e8b8ffd06ad..f99b4333dfba 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -303,6 +303,7 @@ struct kvm_arch { u8 radix; u8 fwnmi_enabled; u8 secure_guest; + u8 svm_enabled; bool threads_indep; bool nested_enable; pgd_t *pgtable; diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index e716862d56b9..94f5a32acaf1 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -313,6 +313,7 @@ struct kvmppc_ops { int size); int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr, int size); + int (*enable_svm)(struct kvm *kvm); int (*svm_off)(struct kvm *kvm); }; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index a308de610cdf..fa6e4fc7d0e4 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -5428,6 +5428,21 @@ static void unpin_vpa_reset(struct kvm *kvm, struct kvmppc_vpa *vpa) vpa->update_pending = 0; } +/* + * Enable a guest to become a secure VM, or test whether + * that could be enabled. + * Called when the KVM_CAP_PPC_SECURE_GUEST capability is + * tested (kvm == NULL) or enabled (kvm != NULL). + */ +static int kvmhv_enable_svm(struct kvm *kvm) +{ + if (!kvmppc_uvmem_available()) + return -EINVAL; + if (kvm) + kvm->arch.svm_enabled = 1; + return 0; +} + /* * IOCTL handler to turn off secure mode of guest * @@ -5548,6 +5563,7 @@ static struct kvmppc_ops kvm_ops_hv = { .enable_nested = kvmhv_enable_nested, .load_from_eaddr = kvmhv_load_from_eaddr, .store_to_eaddr = kvmhv_store_to_eaddr, + .enable_svm = kvmhv_enable_svm, .svm_off = kvmhv_svm_off, }; diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index 53b88cae3e73..6ed98e70097d 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -113,6 +113,15 @@ struct kvmppc_uvmem_page_pvt { bool skip_page_out; }; +bool kvmppc_uvmem_available(void) +{ + /* + * If kvmppc_uvmem_bitmap != NULL, then there is an ultravisor + * and our data structures have been initialized successfully. 
+ */ + return !!kvmppc_uvmem_bitmap; +} + int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot) { struct kvmppc_uvmem_slot *p; @@ -218,6 +227,10 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm) if (!kvm_is_radix(kvm)) return H_UNSUPPORTED; + /* NAK the transition to secure if not enabled */ + if (!kvm->arch.svm_enabled) + return H_AUTHORITY; + srcu_idx = srcu_read_lock(&kvm->srcu); slots = kvm_memslots(kvm); kvm_for_each_memslot(memslot, slots) { diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index e229a81016d0..c48862d86adc 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -668,6 +668,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) || (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)); break; +#endif +#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) + case KVM_CAP_PPC_SECURE_GUEST: + r = hv_enabled && kvmppc_hv_ops->enable_svm && + !kvmppc_hv_ops->enable_svm(NULL); + break; #endif default: r = 0; @@ -2166,6 +2172,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm, break; r = kvm->arch.kvm_ops->enable_nested(kvm); break; +#endif +#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) + case KVM_CAP_PPC_SECURE_GUEST: + r = -EINVAL; + if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm) + break; + r = kvm->arch.kvm_ops->enable_svm(kvm); + break; #endif default: r = -EINVAL; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 5e6234cb25a6..428c7dde6b4b 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1016,6 +1016,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_ARM_INJECT_EXT_DABT 178 #define KVM_CAP_S390_VCPU_RESETS 179 #define KVM_CAP_S390_PROTECTED 180 +#define KVM_CAP_PPC_SECURE_GUEST 181 #ifdef KVM_CAP_IRQ_ROUTING
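Finally, a hedged userspace sketch of how a VMM could consume the capability added above, following section 7.19: query KVM_CAP_PPC_SECURE_GUEST on the VM to learn whether secure guests are supported, then enable it to permit the transition. Error handling is minimal and the setup is the standard /dev/kvm ioctl flow.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0) { perror("open /dev/kvm"); return 1; }

	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
	if (vm < 0) { perror("KVM_CREATE_VM"); return 1; }

	/* (a) query: does this host have an ultravisor and secure-guest support? */
	if (ioctl(vm, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SECURE_GUEST) <= 0) {
		fprintf(stderr, "secure guests not supported on this host\n");
		return 1;
	}

	/* (b) enable: allow this guest to transition to secure mode */
	struct kvm_enable_cap cap;
	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_PPC_SECURE_GUEST;
	if (ioctl(vm, KVM_ENABLE_CAP, &cap) < 0) { perror("KVM_ENABLE_CAP"); return 1; }

	printf("secure guest transitions enabled\n");
	return 0;
}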