From 27025a602cb9d8b0fa5162b465334ef059a503b6 Mon Sep 17 00:00:00 2001
From: Liu Ping Fan
Date: Tue, 19 Nov 2013 14:12:48 +0800
Subject: [PATCH 01/45] powerpc: kvm: optimize "sc 1" as fast return

In some scenarios, e.g. the OpenStack CI, a PR guest can trigger "sc 1"
frequently. This patch optimizes that path by directly delivering
BOOK3S_INTERRUPT_SYSCALL to the HV guest, so powernv can return to the
HV guest without a heavy exit, i.e. with no need to swap the TLB, HTAB,
etc.

Signed-off-by: Liu Ping Fan
Signed-off-by: Alexander Graf
---
 arch/powerpc/kvm/book3s_hv.c            | 10 ++++------
 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 19 ++++++++++++++++++-
 2 files changed, 22 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 072287f1c3bc..93203bbe5714 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -669,12 +669,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		/* hcall - punt to userspace */
 		int i;
 
-		if (vcpu->arch.shregs.msr & MSR_PR) {
-			/* sc 1 from userspace - reflect to guest syscall */
-			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
-			r = RESUME_GUEST;
-			break;
-		}
+		/* hypercall with MSR_PR has already been handled in rmode,
+		 * and never reaches here.
+		 */
+
 		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
 		for (i = 0; i < 9; ++i)
 			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bc8de75b1925..d5ddc2d10748 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -686,6 +686,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 5:	mtspr	SPRN_SRR0, r6
 	mtspr	SPRN_SRR1, r7
 
+/*
+ * Required state:
+ * R4 = vcpu
+ * R10: value for HSRR0
+ * R11: value for HSRR1
+ * R13 = PACA
+ */
 fast_guest_return:
 	li	r0,0
 	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
@@ -1471,7 +1478,8 @@ kvmppc_hisi:
 hcall_try_real_mode:
 	ld	r3,VCPU_GPR(R3)(r9)
 	andi.	r0,r11,MSR_PR
-	bne	guest_exit_cont
+	/* sc 1 from userspace - reflect to guest syscall */
+	bne	sc_1_fast_return
 	clrrdi	r3,r3,2
 	cmpldi	r3,hcall_real_table_end - hcall_real_table
 	bge	guest_exit_cont
@@ -1492,6 +1500,15 @@ hcall_try_real_mode:
 	ld	r11,VCPU_MSR(r4)
 	b	fast_guest_return
 
+sc_1_fast_return:
+	mtspr	SPRN_SRR0,r10
+	mtspr	SPRN_SRR1,r11
+	li	r10, BOOK3S_INTERRUPT_SYSCALL
+	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
+	rotldi	r11, r11, 63
+	mr	r4,r9
+	b	fast_guest_return
+
 /* We've attempted a real mode hcall, but it's punted it back
  * to userspace. We need to restore some clobbered volatiles
  * before resuming the pass-it-to-qemu path */

From 398a76c677a2612c1b03a8d20fbf116e3778ebec Mon Sep 17 00:00:00 2001
From: Alexander Graf
Date: Mon, 9 Dec 2013 13:53:42 +0100
Subject: [PATCH 02/45] KVM: PPC: Add devname:kvm aliases for modules

Systems that support automatic loading of kernel modules through device
aliases should try to automatically load kvm when /dev/kvm gets opened.

Add code to support that magic for all PPC kvm targets, even the ones
that don't support modules yet.
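For context, the two aliases added below cooperate in two stages. This is
an illustrative sketch (not part of the patch) of how they expand, based
on include/linux/miscdevice.h of this kernel generation, where MISC_MAJOR
is 10 and KVM_MINOR is 232:

/* Roughly how the misc-device alias macro expands: */
#define MODULE_ALIAS_MISCDEV(minor)				\
	MODULE_ALIAS("char-major-" __stringify(MISC_MAJOR)	\
		     "-" __stringify(minor))

MODULE_ALIAS_MISCDEV(KVM_MINOR);  /* => MODULE_ALIAS("char-major-10-232") */
MODULE_ALIAS("devname:kvm");      /* recorded in modules.devname by depmod */

depmod records the devname alias in modules.devname, from which udev or
systemd can create a static /dev/kvm node at boot before any kvm module is
loaded; opening that still-unbound character device then makes the kernel
issue request_module("char-major-10-232"), which modprobe resolves to the
right kvm module via the first alias.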
Signed-off-by: Alexander Graf --- arch/powerpc/kvm/44x.c | 4 ++++ arch/powerpc/kvm/book3s.c | 8 ++++++++ arch/powerpc/kvm/book3s_hv.c | 3 +++ arch/powerpc/kvm/book3s_pr.c | 3 +++ arch/powerpc/kvm/e500.c | 4 ++++ arch/powerpc/kvm/e500mc.c | 4 ++++ 6 files changed, 26 insertions(+) diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c index 93221e87b911..9cb4b0a36031 100644 --- a/arch/powerpc/kvm/44x.c +++ b/arch/powerpc/kvm/44x.c @@ -21,6 +21,8 @@ #include #include #include +#include +#include #include #include @@ -231,3 +233,5 @@ static void __exit kvmppc_44x_exit(void) module_init(kvmppc_44x_init); module_exit(kvmppc_44x_exit); +MODULE_ALIAS_MISCDEV(KVM_MINOR); +MODULE_ALIAS("devname:kvm"); diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 8912608b7e1b..48cf91bc862f 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include #include @@ -879,3 +881,9 @@ static void kvmppc_book3s_exit(void) module_init(kvmppc_book3s_init); module_exit(kvmppc_book3s_exit); + +/* On 32bit this is our one and only kernel module */ +#ifdef CONFIG_KVM_BOOK3S_32 +MODULE_ALIAS_MISCDEV(KVM_MINOR); +MODULE_ALIAS("devname:kvm"); +#endif diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 93203bbe5714..088a6e54c998 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -2216,3 +2217,5 @@ static void kvmppc_book3s_exit_hv(void) module_init(kvmppc_book3s_init_hv); module_exit(kvmppc_book3s_exit_hv); MODULE_LICENSE("GPL"); +MODULE_ALIAS_MISCDEV(KVM_MINOR); +MODULE_ALIAS("devname:kvm"); diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index fe14ca3dd171..21bf7c5c9545 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -41,6 +41,7 @@ #include #include #include +#include #include "book3s.h" @@ -1584,4 +1585,6 @@ module_init(kvmppc_book3s_init_pr); module_exit(kvmppc_book3s_exit_pr); MODULE_LICENSE("GPL"); +MODULE_ALIAS_MISCDEV(KVM_MINOR); +MODULE_ALIAS("devname:kvm"); #endif diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c index 497b142f651c..2e02ed849f36 100644 --- a/arch/powerpc/kvm/e500.c +++ b/arch/powerpc/kvm/e500.c @@ -16,6 +16,8 @@ #include #include #include +#include +#include #include #include @@ -573,3 +575,5 @@ static void __exit kvmppc_e500_exit(void) module_init(kvmppc_e500_init); module_exit(kvmppc_e500_exit); +MODULE_ALIAS_MISCDEV(KVM_MINOR); +MODULE_ALIAS("devname:kvm"); diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c index 4132cd2fc171..17e456279224 100644 --- a/arch/powerpc/kvm/e500mc.c +++ b/arch/powerpc/kvm/e500mc.c @@ -16,6 +16,8 @@ #include #include #include +#include +#include #include #include @@ -391,3 +393,5 @@ static void __exit kvmppc_e500mc_exit(void) module_init(kvmppc_e500mc_init); module_exit(kvmppc_e500mc_exit); +MODULE_ALIAS_MISCDEV(KVM_MINOR); +MODULE_ALIAS("devname:kvm"); From 458ff3c099a1266991208f2c009afc2405e5b6bc Mon Sep 17 00:00:00 2001 From: Gleb Natapov Date: Sun, 1 Sep 2013 15:53:46 +0300 Subject: [PATCH 03/45] KVM: PPC: fix couple of memory leaks in MPIC/XICS devices XICS failed to free xics structure on error path. MPIC destroy handler forgot to delete kvm_device structure. 
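Both bugs are instances of the usual kvm_device ownership rules: the create
hook must free anything it allocated on every failure exit, and the destroy
hook owns (and must free) the struct kvm_device itself. A minimal sketch of
the corrected create-side pattern, with a hypothetical example_register()
standing in for the real registration step:

static int example_create(struct kvm_device *dev, u32 type)
{
	struct example_state *s;
	int ret;

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	ret = example_register(s);	/* hypothetical step; may fail */
	if (ret) {
		kfree(s);		/* the kfree() XICS was missing */
		return ret;
	}
	dev->private = s;
	return 0;
}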
Signed-off-by: Gleb Natapov
Acked-by: Paul Mackerras
Signed-off-by: Alexander Graf
---
 arch/powerpc/kvm/book3s_xics.c | 4 +++-
 arch/powerpc/kvm/mpic.c        | 1 +
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 02a17dcf1610..d1acd32a64c0 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -1246,8 +1246,10 @@ static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
 	kvm->arch.xics = xics;
 	mutex_unlock(&kvm->lock);
 
-	if (ret)
+	if (ret) {
+		kfree(xics);
 		return ret;
+	}
 
 	xics_debugfs_init(xics);
 
diff --git a/arch/powerpc/kvm/mpic.c b/arch/powerpc/kvm/mpic.c
index 2861ae9eaae6..efbd9962a209 100644
--- a/arch/powerpc/kvm/mpic.c
+++ b/arch/powerpc/kvm/mpic.c
@@ -1635,6 +1635,7 @@ static void mpic_destroy(struct kvm_device *dev)
 
 	dev->kvm->arch.mpic = NULL;
 	kfree(opp);
+	kfree(dev);
 }
 
 static int mpic_set_default_irq_routing(struct openpic *opp)

From 1820a8d2163554dcd0272cc095338499db4a4dc3 Mon Sep 17 00:00:00 2001
From: Bharat Bhushan
Date: Tue, 8 Oct 2013 09:32:19 +0530
Subject: [PATCH 04/45] kvm/powerpc: rename kvm_hypercall() to epapr_hypercall()

kvm_hypercall() contains nothing KVM-specific, so rename it to
epapr_hypercall(). Also move it to
arch/powerpc/include/asm/epapr_hcalls.h.

Signed-off-by: Bharat Bhushan
Signed-off-by: Alexander Graf
---
 arch/powerpc/include/asm/epapr_hcalls.h | 46 +++++++++++++++++++++++++
 arch/powerpc/include/asm/kvm_para.h     | 23 ++++---------
 arch/powerpc/kernel/kvm.c               | 41 ++--------------------
 3 files changed, 54 insertions(+), 56 deletions(-)

diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h
index 86b0ac79990c..eec87f9494bc 100644
--- a/arch/powerpc/include/asm/epapr_hcalls.h
+++ b/arch/powerpc/include/asm/epapr_hcalls.h
@@ -460,5 +460,51 @@ static inline unsigned int ev_idle(void)
 	return r3;
 }
+
+#ifdef CONFIG_EPAPR_PARAVIRT
+static inline unsigned long epapr_hypercall(unsigned long *in,
+					    unsigned long *out,
+					    unsigned long nr)
+{
+	unsigned long register r0 asm("r0");
+	unsigned long register r3 asm("r3") = in[0];
+	unsigned long register r4 asm("r4") = in[1];
+	unsigned long register r5 asm("r5") = in[2];
+	unsigned long register r6 asm("r6") = in[3];
+	unsigned long register r7 asm("r7") = in[4];
+	unsigned long register r8 asm("r8") = in[5];
+	unsigned long register r9 asm("r9") = in[6];
+	unsigned long register r10 asm("r10") = in[7];
+	unsigned long register r11 asm("r11") = nr;
+	unsigned long register r12 asm("r12");
+
+	asm volatile("bl	epapr_hypercall_start"
+		     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
+		       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
+		       "=r"(r12)
+		     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
+		       "r"(r9), "r"(r10), "r"(r11)
+		     : "memory", "cc", "xer", "ctr", "lr");
+
+	out[0] = r4;
+	out[1] = r5;
+	out[2] = r6;
+	out[3] = r7;
+	out[4] = r8;
+	out[5] = r9;
+	out[6] = r10;
+	out[7] = r11;
+
+	return r3;
+}
+#else
+static unsigned long epapr_hypercall(unsigned long *in,
+				     unsigned long *out,
+				     unsigned long nr)
+{
+	return EV_UNIMPLEMENTED;
+}
+#endif
+
 #endif /* !__ASSEMBLY__ */
 #endif /* _EPAPR_HCALLS_H */
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
index 2b119654b4c1..c18660ef26d8 100644
--- a/arch/powerpc/include/asm/kvm_para.h
+++ b/arch/powerpc/include/asm/kvm_para.h
@@ -39,10 +39,6 @@ static inline int kvm_para_available(void)
 	return 1;
 }
 
-extern unsigned long kvm_hypercall(unsigned long *in,
-				   unsigned long *out,
- unsigned long nr); - #else static inline int kvm_para_available(void) @@ -50,13 +46,6 @@ static inline int kvm_para_available(void) return 0; } -static unsigned long kvm_hypercall(unsigned long *in, - unsigned long *out, - unsigned long nr) -{ - return EV_UNIMPLEMENTED; -} - #endif static inline long kvm_hypercall0_1(unsigned int nr, unsigned long *r2) @@ -65,7 +54,7 @@ static inline long kvm_hypercall0_1(unsigned int nr, unsigned long *r2) unsigned long out[8]; unsigned long r; - r = kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); + r = epapr_hypercall(in, out, KVM_HCALL_TOKEN(nr)); *r2 = out[0]; return r; @@ -76,7 +65,7 @@ static inline long kvm_hypercall0(unsigned int nr) unsigned long in[8]; unsigned long out[8]; - return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); + return epapr_hypercall(in, out, KVM_HCALL_TOKEN(nr)); } static inline long kvm_hypercall1(unsigned int nr, unsigned long p1) @@ -85,7 +74,7 @@ static inline long kvm_hypercall1(unsigned int nr, unsigned long p1) unsigned long out[8]; in[0] = p1; - return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); + return epapr_hypercall(in, out, KVM_HCALL_TOKEN(nr)); } static inline long kvm_hypercall2(unsigned int nr, unsigned long p1, @@ -96,7 +85,7 @@ static inline long kvm_hypercall2(unsigned int nr, unsigned long p1, in[0] = p1; in[1] = p2; - return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); + return epapr_hypercall(in, out, KVM_HCALL_TOKEN(nr)); } static inline long kvm_hypercall3(unsigned int nr, unsigned long p1, @@ -108,7 +97,7 @@ static inline long kvm_hypercall3(unsigned int nr, unsigned long p1, in[0] = p1; in[1] = p2; in[2] = p3; - return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); + return epapr_hypercall(in, out, KVM_HCALL_TOKEN(nr)); } static inline long kvm_hypercall4(unsigned int nr, unsigned long p1, @@ -122,7 +111,7 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1, in[1] = p2; in[2] = p3; in[3] = p4; - return kvm_hypercall(in, out, KVM_HCALL_TOKEN(nr)); + return epapr_hypercall(in, out, KVM_HCALL_TOKEN(nr)); } diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c index db28032e320e..6a0175297b0d 100644 --- a/arch/powerpc/kernel/kvm.c +++ b/arch/powerpc/kernel/kvm.c @@ -413,13 +413,13 @@ static void kvm_map_magic_page(void *data) { u32 *features = data; - ulong in[8]; + ulong in[8] = {0}; ulong out[8]; in[0] = KVM_MAGIC_PAGE; in[1] = KVM_MAGIC_PAGE; - kvm_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE)); + epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE)); *features = out[0]; } @@ -711,43 +711,6 @@ static void kvm_use_magic_page(void) kvm_patching_worked ? 
"worked" : "failed"); } -unsigned long kvm_hypercall(unsigned long *in, - unsigned long *out, - unsigned long nr) -{ - unsigned long register r0 asm("r0"); - unsigned long register r3 asm("r3") = in[0]; - unsigned long register r4 asm("r4") = in[1]; - unsigned long register r5 asm("r5") = in[2]; - unsigned long register r6 asm("r6") = in[3]; - unsigned long register r7 asm("r7") = in[4]; - unsigned long register r8 asm("r8") = in[5]; - unsigned long register r9 asm("r9") = in[6]; - unsigned long register r10 asm("r10") = in[7]; - unsigned long register r11 asm("r11") = nr; - unsigned long register r12 asm("r12"); - - asm volatile("bl epapr_hypercall_start" - : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6), - "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11), - "=r"(r12) - : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8), - "r"(r9), "r"(r10), "r"(r11) - : "memory", "cc", "xer", "ctr", "lr"); - - out[0] = r4; - out[1] = r5; - out[2] = r6; - out[3] = r7; - out[4] = r8; - out[5] = r9; - out[6] = r10; - out[7] = r11; - - return r3; -} -EXPORT_SYMBOL_GPL(kvm_hypercall); - static __init void kvm_free_tmp(void) { free_reserved_area(&kvm_tmp[kvm_tmp_index], From b1f0d94c26b64e814243b736f47e7ef40d96432c Mon Sep 17 00:00:00 2001 From: Bharat Bhushan Date: Tue, 8 Oct 2013 09:32:20 +0530 Subject: [PATCH 05/45] kvm/powerpc: move kvm_hypercall0() and friends to epapr_hypercall0() kvm_hypercall0() and friends have nothing KVM specific so moved to epapr_hypercall0() and friends. Also they are moved from arch/powerpc/include/asm/kvm_para.h to arch/powerpc/include/asm/epapr_hcalls.h Signed-off-by: Bharat Bhushan Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/epapr_hcalls.h | 65 +++++++++++++++++++++++ arch/powerpc/include/asm/kvm_para.h | 69 +------------------------ 2 files changed, 66 insertions(+), 68 deletions(-) diff --git a/arch/powerpc/include/asm/epapr_hcalls.h b/arch/powerpc/include/asm/epapr_hcalls.h index eec87f9494bc..334459ad145b 100644 --- a/arch/powerpc/include/asm/epapr_hcalls.h +++ b/arch/powerpc/include/asm/epapr_hcalls.h @@ -506,5 +506,70 @@ static unsigned long epapr_hypercall(unsigned long *in, } #endif +static inline long epapr_hypercall0_1(unsigned int nr, unsigned long *r2) +{ + unsigned long in[8]; + unsigned long out[8]; + unsigned long r; + + r = epapr_hypercall(in, out, nr); + *r2 = out[0]; + + return r; +} + +static inline long epapr_hypercall0(unsigned int nr) +{ + unsigned long in[8]; + unsigned long out[8]; + + return epapr_hypercall(in, out, nr); +} + +static inline long epapr_hypercall1(unsigned int nr, unsigned long p1) +{ + unsigned long in[8]; + unsigned long out[8]; + + in[0] = p1; + return epapr_hypercall(in, out, nr); +} + +static inline long epapr_hypercall2(unsigned int nr, unsigned long p1, + unsigned long p2) +{ + unsigned long in[8]; + unsigned long out[8]; + + in[0] = p1; + in[1] = p2; + return epapr_hypercall(in, out, nr); +} + +static inline long epapr_hypercall3(unsigned int nr, unsigned long p1, + unsigned long p2, unsigned long p3) +{ + unsigned long in[8]; + unsigned long out[8]; + + in[0] = p1; + in[1] = p2; + in[2] = p3; + return epapr_hypercall(in, out, nr); +} + +static inline long epapr_hypercall4(unsigned int nr, unsigned long p1, + unsigned long p2, unsigned long p3, + unsigned long p4) +{ + unsigned long in[8]; + unsigned long out[8]; + + in[0] = p1; + in[1] = p2; + in[2] = p3; + in[3] = p4; + return epapr_hypercall(in, out, nr); +} #endif /* !__ASSEMBLY__ */ #endif /* _EPAPR_HCALLS_H */ diff --git 
a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h index c18660ef26d8..336a91acb8b1 100644 --- a/arch/powerpc/include/asm/kvm_para.h +++ b/arch/powerpc/include/asm/kvm_para.h @@ -48,73 +48,6 @@ static inline int kvm_para_available(void) #endif -static inline long kvm_hypercall0_1(unsigned int nr, unsigned long *r2) -{ - unsigned long in[8]; - unsigned long out[8]; - unsigned long r; - - r = epapr_hypercall(in, out, KVM_HCALL_TOKEN(nr)); - *r2 = out[0]; - - return r; -} - -static inline long kvm_hypercall0(unsigned int nr) -{ - unsigned long in[8]; - unsigned long out[8]; - - return epapr_hypercall(in, out, KVM_HCALL_TOKEN(nr)); -} - -static inline long kvm_hypercall1(unsigned int nr, unsigned long p1) -{ - unsigned long in[8]; - unsigned long out[8]; - - in[0] = p1; - return epapr_hypercall(in, out, KVM_HCALL_TOKEN(nr)); -} - -static inline long kvm_hypercall2(unsigned int nr, unsigned long p1, - unsigned long p2) -{ - unsigned long in[8]; - unsigned long out[8]; - - in[0] = p1; - in[1] = p2; - return epapr_hypercall(in, out, KVM_HCALL_TOKEN(nr)); -} - -static inline long kvm_hypercall3(unsigned int nr, unsigned long p1, - unsigned long p2, unsigned long p3) -{ - unsigned long in[8]; - unsigned long out[8]; - - in[0] = p1; - in[1] = p2; - in[2] = p3; - return epapr_hypercall(in, out, KVM_HCALL_TOKEN(nr)); -} - -static inline long kvm_hypercall4(unsigned int nr, unsigned long p1, - unsigned long p2, unsigned long p3, - unsigned long p4) -{ - unsigned long in[8]; - unsigned long out[8]; - - in[0] = p1; - in[1] = p2; - in[2] = p3; - in[3] = p4; - return epapr_hypercall(in, out, KVM_HCALL_TOKEN(nr)); -} - - static inline unsigned int kvm_arch_para_features(void) { unsigned long r; @@ -122,7 +55,7 @@ static inline unsigned int kvm_arch_para_features(void) if (!kvm_para_available()) return 0; - if(kvm_hypercall0_1(KVM_HC_FEATURES, &r)) + if(epapr_hypercall0_1(KVM_HCALL_TOKEN(KVM_HC_FEATURES), &r)) return 0; return r; From 09548fdaf32ce77a68e7f9a8a3098c1306b04858 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Tue, 15 Oct 2013 20:43:01 +1100 Subject: [PATCH 06/45] KVM: PPC: Use load_fp/vr_state rather than load_up_fpu/altivec The load_up_fpu and load_up_altivec functions were never intended to be called from C, and do things like modifying the MSR value in their callers' stack frames, which are assumed to be interrupt frames. In addition, on 32-bit Book S they require the MMU to be off. This makes KVM use the new load_fp_state() and load_vr_state() functions instead of load_up_fpu/altivec. This means we can remove the assembler glue in book3s_rmhandlers.S, and potentially fixes a bug on Book E, where load_up_fpu was called directly from C. 
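The C-callable replacement idiom, visible throughout the hunks below, pairs
the enable_kernel_*() helpers (which do the MSR and lazy-state bookkeeping
correctly from process context) with the new state loaders; schematically:

	/* instead of the kvmppc_load_up_fpu() asm glue: */
	enable_kernel_fp();
	load_fp_state(&t->fp_state);	/* FPRs + FPSCR from the struct */

	/* instead of kvmppc_load_up_altivec(): */
	enable_kernel_altivec();
	load_vr_state(&t->vr_state);	/* VRs + VSCR */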
Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/kvm_book3s.h | 3 -- arch/powerpc/include/asm/switch_to.h | 2 -- arch/powerpc/kvm/book3s_exports.c | 4 --- arch/powerpc/kvm/book3s_pr.c | 18 ++++++---- arch/powerpc/kvm/book3s_rmhandlers.S | 47 --------------------------- arch/powerpc/kvm/booke.h | 3 +- 6 files changed, 14 insertions(+), 63 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 4a594b76674d..8bb870694616 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -186,9 +186,6 @@ extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, extern void kvmppc_entry_trampoline(void); extern void kvmppc_hv_entry_trampoline(void); -extern void kvmppc_load_up_fpu(void); -extern void kvmppc_load_up_altivec(void); -extern void kvmppc_load_up_vsx(void); extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst); extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst); extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd); diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 9ee12610af02..971ca336f451 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h @@ -25,10 +25,8 @@ static inline void save_tar(struct thread_struct *prev) static inline void save_tar(struct thread_struct *prev) {} #endif -extern void load_up_fpu(void); extern void enable_kernel_fp(void); extern void enable_kernel_altivec(void); -extern void load_up_altivec(struct task_struct *); extern int emulate_altivec(struct pt_regs *); extern void __giveup_vsx(struct task_struct *); extern void giveup_vsx(struct task_struct *); diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c index 852989a9bad3..20d4ea8e656d 100644 --- a/arch/powerpc/kvm/book3s_exports.c +++ b/arch/powerpc/kvm/book3s_exports.c @@ -25,9 +25,5 @@ EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline); #endif #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline); -EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu); -#ifdef CONFIG_ALTIVEC -EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec); -#endif #endif diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 21bf7c5c9545..d63a91f825d3 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -691,7 +691,8 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, #endif t->fp_state.fpscr = vcpu->arch.fpscr; t->fpexc_mode = 0; - kvmppc_load_up_fpu(); + enable_kernel_fp(); + load_fp_state(&t->fp_state); } if (msr & MSR_VEC) { @@ -699,7 +700,8 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr)); t->vr_state.vscr = vcpu->arch.vscr; t->vrsave = -1; - kvmppc_load_up_altivec(); + enable_kernel_altivec(); + load_vr_state(&t->vr_state); #endif } @@ -722,11 +724,15 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu) if (!lost_ext) return; - if (lost_ext & MSR_FP) - kvmppc_load_up_fpu(); + if (lost_ext & MSR_FP) { + enable_kernel_fp(); + load_fp_state(¤t->thread.fp_state); + } #ifdef CONFIG_ALTIVEC - if (lost_ext & MSR_VEC) - kvmppc_load_up_altivec(); + if (lost_ext & MSR_VEC) { + enable_kernel_altivec(); + load_vr_state(¤t->thread.vr_state); + } #endif current->thread.regs->msr |= lost_ext; } diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S 
b/arch/powerpc/kvm/book3s_rmhandlers.S index a38c4c9edab8..c78ffbc371a5 100644 --- a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S @@ -166,51 +166,4 @@ _GLOBAL(kvmppc_entry_trampoline) mtsrr1 r6 RFI -#if defined(CONFIG_PPC_BOOK3S_32) -#define STACK_LR INT_FRAME_SIZE+4 - -/* load_up_xxx have to run with MSR_DR=0 on Book3S_32 */ -#define MSR_EXT_START \ - PPC_STL r20, _NIP(r1); \ - mfmsr r20; \ - LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE); \ - andc r3,r20,r3; /* Disable DR,EE */ \ - mtmsr r3; \ - sync - -#define MSR_EXT_END \ - mtmsr r20; /* Enable DR,EE */ \ - sync; \ - PPC_LL r20, _NIP(r1) - -#elif defined(CONFIG_PPC_BOOK3S_64) -#define STACK_LR _LINK -#define MSR_EXT_START -#define MSR_EXT_END -#endif - -/* - * Activate current's external feature (FPU/Altivec/VSX) - */ -#define define_load_up(what) \ - \ -_GLOBAL(kvmppc_load_up_ ## what); \ - PPC_STLU r1, -INT_FRAME_SIZE(r1); \ - mflr r3; \ - PPC_STL r3, STACK_LR(r1); \ - MSR_EXT_START; \ - \ - bl FUNC(load_up_ ## what); \ - \ - MSR_EXT_END; \ - PPC_LL r3, STACK_LR(r1); \ - mtlr r3; \ - addi r1, r1, INT_FRAME_SIZE; \ - blr - -define_load_up(fpu) -#ifdef CONFIG_ALTIVEC -define_load_up(altivec) -#endif - #include "book3s_segment.S" diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h index 09bfd9bc7cf8..fe59f225327f 100644 --- a/arch/powerpc/kvm/booke.h +++ b/arch/powerpc/kvm/booke.h @@ -136,7 +136,8 @@ static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu) { #ifdef CONFIG_PPC_FPU if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) { - load_up_fpu(); + enable_kernel_fp(); + load_fp_state(¤t->thread.fp_state); current->thread.regs->msr |= MSR_FP; } #endif From efff19122315f1431f6b02cd2983b15f5d3957bd Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Tue, 15 Oct 2013 20:43:02 +1100 Subject: [PATCH 07/45] KVM: PPC: Store FP/VSX/VMX state in thread_fp/vr_state structures This uses struct thread_fp_state and struct thread_vr_state to store the floating-point, VMX/Altivec and VSX state, rather than flat arrays. This makes transferring the state to/from the thread_struct simpler and allows us to unify the get/set_one_reg implementations for the VSX registers. 
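For reference, the thread-state structures now embedded in the vcpu look
roughly like this (simplified from asm/processor.h of this kernel
generation; TS_FPRWIDTH is 2 when VSX is configured, so each FP register
occupies one half of a 16-byte VSX-sized slot):

struct thread_fp_state {
	u64	fpr[32][TS_FPRWIDTH] __attribute__((aligned(16)));
	u64	fpscr;
};

struct thread_vr_state {
	vector128	vr[32] __attribute__((aligned(16)));
	vector128	vscr __attribute__((aligned(16)));
};

Hence the accessor the patch introduces for the plain FP view of a
register, with TS_FPROFFSET selecting the FP half of the slot:

#define VCPU_FPR(vcpu, i)	(vcpu)->arch.fp.fpr[i][TS_FPROFFSET]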
Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/kvm_host.h | 12 +- arch/powerpc/kernel/asm-offsets.c | 11 +- arch/powerpc/kvm/book3s.c | 38 +++-- arch/powerpc/kvm/book3s_hv.c | 42 ------ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 4 +- arch/powerpc/kvm/book3s_paired_singles.c | 169 +++++++++++------------ arch/powerpc/kvm/book3s_pr.c | 63 +-------- arch/powerpc/kvm/booke.c | 8 +- arch/powerpc/kvm/powerpc.c | 4 +- 9 files changed, 131 insertions(+), 220 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 237d1d25b448..3ca0b430eaee 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -410,8 +410,7 @@ struct kvm_vcpu_arch { ulong gpr[32]; - u64 fpr[32]; - u64 fpscr; + struct thread_fp_state fp; #ifdef CONFIG_SPE ulong evr[32]; @@ -420,12 +419,7 @@ struct kvm_vcpu_arch { u64 acc; #endif #ifdef CONFIG_ALTIVEC - vector128 vr[32]; - vector128 vscr; -#endif - -#ifdef CONFIG_VSX - u64 vsr[64]; + struct thread_vr_state vr; #endif #ifdef CONFIG_KVM_BOOKE_HV @@ -619,6 +613,8 @@ struct kvm_vcpu_arch { #endif }; +#define VCPU_FPR(vcpu, i) (vcpu)->arch.fp.fpr[i][TS_FPROFFSET] + /* Values for vcpu->arch.state */ #define KVMPPC_VCPU_NOTREADY 0 #define KVMPPC_VCPU_RUNNABLE 1 diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 2ea5cc033ec8..8403f9031d93 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -425,14 +425,11 @@ int main(void) DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid)); DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave)); - DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr)); - DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr)); + DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fp.fpr)); + DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fp.fpscr)); #ifdef CONFIG_ALTIVEC - DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr)); - DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr)); -#endif -#ifdef CONFIG_VSX - DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr)); + DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr.vr)); + DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vr.vscr)); #endif DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 48cf91bc862f..94e597e6f15c 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -577,10 +577,10 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) break; case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: i = reg->id - KVM_REG_PPC_FPR0; - val = get_reg_val(reg->id, vcpu->arch.fpr[i]); + val = get_reg_val(reg->id, VCPU_FPR(vcpu, i)); break; case KVM_REG_PPC_FPSCR: - val = get_reg_val(reg->id, vcpu->arch.fpscr); + val = get_reg_val(reg->id, vcpu->arch.fp.fpscr); break; #ifdef CONFIG_ALTIVEC case KVM_REG_PPC_VR0 ... 
KVM_REG_PPC_VR31: @@ -588,19 +588,30 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) r = -ENXIO; break; } - val.vval = vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0]; + val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0]; break; case KVM_REG_PPC_VSCR: if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { r = -ENXIO; break; } - val = get_reg_val(reg->id, vcpu->arch.vscr.u[3]); + val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]); break; case KVM_REG_PPC_VRSAVE: val = get_reg_val(reg->id, vcpu->arch.vrsave); break; #endif /* CONFIG_ALTIVEC */ +#ifdef CONFIG_VSX + case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: + if (cpu_has_feature(CPU_FTR_VSX)) { + long int i = reg->id - KVM_REG_PPC_VSR0; + val.vsxval[0] = vcpu->arch.fp.fpr[i][0]; + val.vsxval[1] = vcpu->arch.fp.fpr[i][1]; + } else { + r = -ENXIO; + } + break; +#endif /* CONFIG_VSX */ case KVM_REG_PPC_DEBUG_INST: { u32 opcode = INS_TW; r = copy_to_user((u32 __user *)(long)reg->addr, @@ -656,10 +667,10 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) break; case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: i = reg->id - KVM_REG_PPC_FPR0; - vcpu->arch.fpr[i] = set_reg_val(reg->id, val); + VCPU_FPR(vcpu, i) = set_reg_val(reg->id, val); break; case KVM_REG_PPC_FPSCR: - vcpu->arch.fpscr = set_reg_val(reg->id, val); + vcpu->arch.fp.fpscr = set_reg_val(reg->id, val); break; #ifdef CONFIG_ALTIVEC case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31: @@ -667,14 +678,14 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) r = -ENXIO; break; } - vcpu->arch.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; + vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval; break; case KVM_REG_PPC_VSCR: if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { r = -ENXIO; break; } - vcpu->arch.vscr.u[3] = set_reg_val(reg->id, val); + vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val); break; case KVM_REG_PPC_VRSAVE: if (!cpu_has_feature(CPU_FTR_ALTIVEC)) { @@ -684,6 +695,17 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg) vcpu->arch.vrsave = set_reg_val(reg->id, val); break; #endif /* CONFIG_ALTIVEC */ +#ifdef CONFIG_VSX + case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: + if (cpu_has_feature(CPU_FTR_VSX)) { + long int i = reg->id - KVM_REG_PPC_VSR0; + vcpu->arch.fp.fpr[i][0] = val.vsxval[0]; + vcpu->arch.fp.fpr[i][1] = val.vsxval[1]; + } else { + r = -ENXIO; + } + break; +#endif /* CONFIG_VSX */ #ifdef CONFIG_KVM_XICS case KVM_REG_PPC_ICP_STATE: if (!vcpu->arch.icp) { diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 088a6e54c998..461f55566167 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -811,27 +811,6 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_SDAR: *val = get_reg_val(id, vcpu->arch.sdar); break; -#ifdef CONFIG_VSX - case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: - if (cpu_has_feature(CPU_FTR_VSX)) { - /* VSX => FP reg i is stored in arch.vsr[2*i] */ - long int i = id - KVM_REG_PPC_FPR0; - *val = get_reg_val(id, vcpu->arch.vsr[2 * i]); - } else { - /* let generic code handle it */ - r = -EINVAL; - } - break; - case KVM_REG_PPC_VSR0 ... 
KVM_REG_PPC_VSR31: - if (cpu_has_feature(CPU_FTR_VSX)) { - long int i = id - KVM_REG_PPC_VSR0; - val->vsxval[0] = vcpu->arch.vsr[2 * i]; - val->vsxval[1] = vcpu->arch.vsr[2 * i + 1]; - } else { - r = -ENXIO; - } - break; -#endif /* CONFIG_VSX */ case KVM_REG_PPC_VPA_ADDR: spin_lock(&vcpu->arch.vpa_update_lock); *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); @@ -914,27 +893,6 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_SDAR: vcpu->arch.sdar = set_reg_val(id, *val); break; -#ifdef CONFIG_VSX - case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31: - if (cpu_has_feature(CPU_FTR_VSX)) { - /* VSX => FP reg i is stored in arch.vsr[2*i] */ - long int i = id - KVM_REG_PPC_FPR0; - vcpu->arch.vsr[2 * i] = set_reg_val(id, *val); - } else { - /* let generic code handle it */ - r = -EINVAL; - } - break; - case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: - if (cpu_has_feature(CPU_FTR_VSX)) { - long int i = id - KVM_REG_PPC_VSR0; - vcpu->arch.vsr[2 * i] = val->vsxval[0]; - vcpu->arch.vsr[2 * i + 1] = val->vsxval[1]; - } else { - r = -ENXIO; - } - break; -#endif /* CONFIG_VSX */ case KVM_REG_PPC_VPA_ADDR: addr = set_reg_val(id, *val); r = -EINVAL; diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index d5ddc2d10748..47fd536fb13b 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1889,7 +1889,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) BEGIN_FTR_SECTION reg = 0 .rept 32 - li r6,reg*16+VCPU_VSRS + li r6,reg*16+VCPU_FPRS STXVD2X(reg,R6,R3) reg = reg + 1 .endr @@ -1951,7 +1951,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) BEGIN_FTR_SECTION reg = 0 .rept 32 - li r7,reg*16+VCPU_VSRS + li r7,reg*16+VCPU_FPRS LXVD2X(reg,R7,R4) reg = reg + 1 .endr diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c index a59a25a13218..c1abd95063f4 100644 --- a/arch/powerpc/kvm/book3s_paired_singles.c +++ b/arch/powerpc/kvm/book3s_paired_singles.c @@ -160,7 +160,7 @@ static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt) { - kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt]); + kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]); } static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store) @@ -207,11 +207,11 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu, /* put in registers */ switch (ls_type) { case FPU_LS_SINGLE: - kvm_cvt_fd((u32*)tmp, &vcpu->arch.fpr[rs]); + kvm_cvt_fd((u32*)tmp, &VCPU_FPR(vcpu, rs)); vcpu->arch.qpr[rs] = *((u32*)tmp); break; case FPU_LS_DOUBLE: - vcpu->arch.fpr[rs] = *((u64*)tmp); + VCPU_FPR(vcpu, rs) = *((u64*)tmp); break; } @@ -233,18 +233,18 @@ static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu, switch (ls_type) { case FPU_LS_SINGLE: - kvm_cvt_df(&vcpu->arch.fpr[rs], (u32*)tmp); + kvm_cvt_df(&VCPU_FPR(vcpu, rs), (u32*)tmp); val = *((u32*)tmp); len = sizeof(u32); break; case FPU_LS_SINGLE_LOW: - *((u32*)tmp) = vcpu->arch.fpr[rs]; - val = vcpu->arch.fpr[rs] & 0xffffffff; + *((u32*)tmp) = VCPU_FPR(vcpu, rs); + val = VCPU_FPR(vcpu, rs) & 0xffffffff; len = sizeof(u32); break; case FPU_LS_DOUBLE: - *((u64*)tmp) = vcpu->arch.fpr[rs]; - val = vcpu->arch.fpr[rs]; + *((u64*)tmp) = VCPU_FPR(vcpu, rs); + val = VCPU_FPR(vcpu, rs); len = sizeof(u64); break; default: @@ -301,7 +301,7 @@ static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu, emulated = EMULATE_DONE; /* put in registers */ - kvm_cvt_fd(&tmp[0], &vcpu->arch.fpr[rs]); + 
kvm_cvt_fd(&tmp[0], &VCPU_FPR(vcpu, rs)); vcpu->arch.qpr[rs] = tmp[1]; dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0], @@ -319,7 +319,7 @@ static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu, u32 tmp[2]; int len = w ? sizeof(u32) : sizeof(u64); - kvm_cvt_df(&vcpu->arch.fpr[rs], &tmp[0]); + kvm_cvt_df(&VCPU_FPR(vcpu, rs), &tmp[0]); tmp[1] = vcpu->arch.qpr[rs]; r = kvmppc_st(vcpu, &addr, len, tmp, true); @@ -512,7 +512,6 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc, u32 *src2, u32 *src3)) { u32 *qpr = vcpu->arch.qpr; - u64 *fpr = vcpu->arch.fpr; u32 ps0_out; u32 ps0_in1, ps0_in2, ps0_in3; u32 ps1_in1, ps1_in2, ps1_in3; @@ -521,20 +520,20 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc, WARN_ON(rc); /* PS0 */ - kvm_cvt_df(&fpr[reg_in1], &ps0_in1); - kvm_cvt_df(&fpr[reg_in2], &ps0_in2); - kvm_cvt_df(&fpr[reg_in3], &ps0_in3); + kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1); + kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2); + kvm_cvt_df(&VCPU_FPR(vcpu, reg_in3), &ps0_in3); if (scalar & SCALAR_LOW) ps0_in2 = qpr[reg_in2]; - func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3); + func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2, &ps0_in3); dprintk(KERN_INFO "PS3 ps0 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n", ps0_in1, ps0_in2, ps0_in3, ps0_out); if (!(scalar & SCALAR_NO_PS0)) - kvm_cvt_fd(&ps0_out, &fpr[reg_out]); + kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out)); /* PS1 */ ps1_in1 = qpr[reg_in1]; @@ -545,7 +544,7 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc, ps1_in2 = ps0_in2; if (!(scalar & SCALAR_NO_PS1)) - func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3); + func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in1, &ps1_in2, &ps1_in3); dprintk(KERN_INFO "PS3 ps1 -> f(0x%x, 0x%x, 0x%x) = 0x%x\n", ps1_in1, ps1_in2, ps1_in3, qpr[reg_out]); @@ -561,7 +560,6 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc, u32 *src2)) { u32 *qpr = vcpu->arch.qpr; - u64 *fpr = vcpu->arch.fpr; u32 ps0_out; u32 ps0_in1, ps0_in2; u32 ps1_out; @@ -571,20 +569,20 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc, WARN_ON(rc); /* PS0 */ - kvm_cvt_df(&fpr[reg_in1], &ps0_in1); + kvm_cvt_df(&VCPU_FPR(vcpu, reg_in1), &ps0_in1); if (scalar & SCALAR_LOW) ps0_in2 = qpr[reg_in2]; else - kvm_cvt_df(&fpr[reg_in2], &ps0_in2); + kvm_cvt_df(&VCPU_FPR(vcpu, reg_in2), &ps0_in2); - func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2); + func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in1, &ps0_in2); if (!(scalar & SCALAR_NO_PS0)) { dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n", ps0_in1, ps0_in2, ps0_out); - kvm_cvt_fd(&ps0_out, &fpr[reg_out]); + kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out)); } /* PS1 */ @@ -594,7 +592,7 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc, if (scalar & SCALAR_HIGH) ps1_in2 = ps0_in2; - func(&vcpu->arch.fpscr, &ps1_out, &ps1_in1, &ps1_in2); + func(&vcpu->arch.fp.fpscr, &ps1_out, &ps1_in1, &ps1_in2); if (!(scalar & SCALAR_NO_PS1)) { qpr[reg_out] = ps1_out; @@ -612,7 +610,6 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc, u32 *dst, u32 *src1)) { u32 *qpr = vcpu->arch.qpr; - u64 *fpr = vcpu->arch.fpr; u32 ps0_out, ps0_in; u32 ps1_in; @@ -620,17 +617,17 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc, WARN_ON(rc); /* PS0 */ - kvm_cvt_df(&fpr[reg_in], &ps0_in); - func(&vcpu->arch.fpscr, &ps0_out, &ps0_in); + kvm_cvt_df(&VCPU_FPR(vcpu, reg_in), &ps0_in); + func(&vcpu->arch.fp.fpscr, &ps0_out, &ps0_in); dprintk(KERN_INFO 
"PS1 ps0 -> f(0x%x) = 0x%x\n", ps0_in, ps0_out); - kvm_cvt_fd(&ps0_out, &fpr[reg_out]); + kvm_cvt_fd(&ps0_out, &VCPU_FPR(vcpu, reg_out)); /* PS1 */ ps1_in = qpr[reg_in]; - func(&vcpu->arch.fpscr, &qpr[reg_out], &ps1_in); + func(&vcpu->arch.fp.fpscr, &qpr[reg_out], &ps1_in); dprintk(KERN_INFO "PS1 ps1 -> f(0x%x) = 0x%x\n", ps1_in, qpr[reg_out]); @@ -649,10 +646,10 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) int ax_rc = inst_get_field(inst, 21, 25); short full_d = inst_get_field(inst, 16, 31); - u64 *fpr_d = &vcpu->arch.fpr[ax_rd]; - u64 *fpr_a = &vcpu->arch.fpr[ax_ra]; - u64 *fpr_b = &vcpu->arch.fpr[ax_rb]; - u64 *fpr_c = &vcpu->arch.fpr[ax_rc]; + u64 *fpr_d = &VCPU_FPR(vcpu, ax_rd); + u64 *fpr_a = &VCPU_FPR(vcpu, ax_ra); + u64 *fpr_b = &VCPU_FPR(vcpu, ax_rb); + u64 *fpr_c = &VCPU_FPR(vcpu, ax_rc); bool rcomp = (inst & 1) ? true : false; u32 cr = kvmppc_get_cr(vcpu); @@ -674,11 +671,11 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) /* Do we need to clear FE0 / FE1 here? Don't think so. */ #ifdef DEBUG - for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { + for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) { u32 f; - kvm_cvt_df(&vcpu->arch.fpr[i], &f); + kvm_cvt_df(&VCPU_FPR(vcpu, i), &f); dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n", - i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]); + i, f, VCPU_FPR(vcpu, i), i, vcpu->arch.qpr[i]); } #endif @@ -764,8 +761,8 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) break; } case OP_4X_PS_NEG: - vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; - vcpu->arch.fpr[ax_rd] ^= 0x8000000000000000ULL; + VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb); + VCPU_FPR(vcpu, ax_rd) ^= 0x8000000000000000ULL; vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; vcpu->arch.qpr[ax_rd] ^= 0x80000000; break; @@ -775,7 +772,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) break; case OP_4X_PS_MR: WARN_ON(rcomp); - vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; + VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb); vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; break; case OP_4X_PS_CMPO1: @@ -784,44 +781,44 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) break; case OP_4X_PS_NABS: WARN_ON(rcomp); - vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; - vcpu->arch.fpr[ax_rd] |= 0x8000000000000000ULL; + VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb); + VCPU_FPR(vcpu, ax_rd) |= 0x8000000000000000ULL; vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; vcpu->arch.qpr[ax_rd] |= 0x80000000; break; case OP_4X_PS_ABS: WARN_ON(rcomp); - vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; - vcpu->arch.fpr[ax_rd] &= ~0x8000000000000000ULL; + VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rb); + VCPU_FPR(vcpu, ax_rd) &= ~0x8000000000000000ULL; vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; vcpu->arch.qpr[ax_rd] &= ~0x80000000; break; case OP_4X_PS_MERGE00: WARN_ON(rcomp); - vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra]; - /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ - kvm_cvt_df(&vcpu->arch.fpr[ax_rb], + VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra); + /* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */ + kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb), &vcpu->arch.qpr[ax_rd]); break; case OP_4X_PS_MERGE01: WARN_ON(rcomp); - vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra]; + VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_ra); vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; break; case OP_4X_PS_MERGE10: WARN_ON(rcomp); - /* vcpu->arch.fpr[ax_rd] = 
vcpu->arch.qpr[ax_ra]; */ + /* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */ kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], - &vcpu->arch.fpr[ax_rd]); - /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ - kvm_cvt_df(&vcpu->arch.fpr[ax_rb], + &VCPU_FPR(vcpu, ax_rd)); + /* vcpu->arch.qpr[ax_rd] = VCPU_FPR(vcpu, ax_rb); */ + kvm_cvt_df(&VCPU_FPR(vcpu, ax_rb), &vcpu->arch.qpr[ax_rd]); break; case OP_4X_PS_MERGE11: WARN_ON(rcomp); - /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */ + /* VCPU_FPR(vcpu, ax_rd) = vcpu->arch.qpr[ax_ra]; */ kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], - &vcpu->arch.fpr[ax_rd]); + &VCPU_FPR(vcpu, ax_rd)); vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; break; } @@ -856,7 +853,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) case OP_4A_PS_SUM1: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH, fps_fadds); - vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rc]; + VCPU_FPR(vcpu, ax_rd) = VCPU_FPR(vcpu, ax_rc); break; case OP_4A_PS_SUM0: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, @@ -1106,45 +1103,45 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) case 59: switch (inst_get_field(inst, 21, 30)) { case OP_59_FADDS: - fpd_fadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); + fpd_fadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FSUBS: - fpd_fsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); + fpd_fsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FDIVS: - fpd_fdivs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); + fpd_fdivs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FRES: - fpd_fres(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); + fpd_fres(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FRSQRTES: - fpd_frsqrtes(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); + fpd_frsqrtes(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; } switch (inst_get_field(inst, 26, 30)) { case OP_59_FMULS: - fpd_fmuls(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c); + fpd_fmuls(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FMSUBS: - fpd_fmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); + fpd_fmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FMADDS: - fpd_fmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); + fpd_fmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FNMSUBS: - fpd_fnmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); + fpd_fnmsubs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FNMADDS: - fpd_fnmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); + fpd_fnmadds(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; } @@ -1159,12 +1156,12 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) break; case OP_63_MFFS: /* XXX missing CR */ - *fpr_d = vcpu->arch.fpscr; + *fpr_d = vcpu->arch.fp.fpscr; break; case OP_63_MTFSF: /* XXX missing fm bits */ /* XXX missing CR */ - vcpu->arch.fpscr = *fpr_b; + vcpu->arch.fp.fpscr = *fpr_b; break; case OP_63_FCMPU: { @@ -1172,7 +1169,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) u32 cr0_mask = 0xf0000000; 
u32 cr_shift = inst_get_field(inst, 6, 8) * 4; - fpd_fcmpu(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b); + fpd_fcmpu(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b); cr &= ~(cr0_mask >> cr_shift); cr |= (cr & cr0_mask) >> cr_shift; break; @@ -1183,40 +1180,40 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) u32 cr0_mask = 0xf0000000; u32 cr_shift = inst_get_field(inst, 6, 8) * 4; - fpd_fcmpo(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b); + fpd_fcmpo(&vcpu->arch.fp.fpscr, &tmp_cr, fpr_a, fpr_b); cr &= ~(cr0_mask >> cr_shift); cr |= (cr & cr0_mask) >> cr_shift; break; } case OP_63_FNEG: - fpd_fneg(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); + fpd_fneg(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); break; case OP_63_FMR: *fpr_d = *fpr_b; break; case OP_63_FABS: - fpd_fabs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); + fpd_fabs(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); break; case OP_63_FCPSGN: - fpd_fcpsgn(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); + fpd_fcpsgn(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); break; case OP_63_FDIV: - fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); + fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); break; case OP_63_FADD: - fpd_fadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); + fpd_fadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); break; case OP_63_FSUB: - fpd_fsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); + fpd_fsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_b); break; case OP_63_FCTIW: - fpd_fctiw(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); + fpd_fctiw(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); break; case OP_63_FCTIWZ: - fpd_fctiwz(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); + fpd_fctiwz(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); break; case OP_63_FRSP: - fpd_frsp(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); + fpd_frsp(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_63_FRSQRTE: @@ -1224,39 +1221,39 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) double one = 1.0f; /* fD = sqrt(fB) */ - fpd_fsqrt(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); + fpd_fsqrt(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_b); /* fD = 1.0f / fD */ - fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, (u64*)&one, fpr_d); + fpd_fdiv(&vcpu->arch.fp.fpscr, &cr, fpr_d, (u64*)&one, fpr_d); break; } } switch (inst_get_field(inst, 26, 30)) { case OP_63_FMUL: - fpd_fmul(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c); + fpd_fmul(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c); break; case OP_63_FSEL: - fpd_fsel(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); + fpd_fsel(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); break; case OP_63_FMSUB: - fpd_fmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); + fpd_fmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); break; case OP_63_FMADD: - fpd_fmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); + fpd_fmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); break; case OP_63_FNMSUB: - fpd_fnmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); + fpd_fnmsub(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); break; case OP_63_FNMADD: - fpd_fnmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); + fpd_fnmadd(&vcpu->arch.fp.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); break; } break; } #ifdef DEBUG - for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { + for (i = 0; i < ARRAY_SIZE(vcpu->arch.fp.fpr); i++) { u32 f; - kvm_cvt_df(&vcpu->arch.fpr[i], &f); + kvm_cvt_df(&VCPU_FPR(vcpu, i), &f); dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f); } #endif diff 
--git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index d63a91f825d3..2bb425b22461 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -545,12 +545,6 @@ static inline int get_fpr_index(int i) void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) { struct thread_struct *t = ¤t->thread; - u64 *vcpu_fpr = vcpu->arch.fpr; -#ifdef CONFIG_VSX - u64 *vcpu_vsx = vcpu->arch.vsr; -#endif - u64 *thread_fpr = &t->fp_state.fpr[0][0]; - int i; /* * VSX instructions can access FP and vector registers, so if @@ -575,24 +569,14 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) */ if (current->thread.regs->msr & MSR_FP) giveup_fpu(current); - for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) - vcpu_fpr[i] = thread_fpr[get_fpr_index(i)]; - - vcpu->arch.fpscr = t->fp_state.fpscr; - -#ifdef CONFIG_VSX - if (cpu_has_feature(CPU_FTR_VSX)) - for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++) - vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1]; -#endif + vcpu->arch.fp = t->fp_state; } #ifdef CONFIG_ALTIVEC if (msr & MSR_VEC) { if (current->thread.regs->msr & MSR_VEC) giveup_altivec(current); - memcpy(vcpu->arch.vr, t->vr_state.vr, sizeof(vcpu->arch.vr)); - vcpu->arch.vscr = t->vr_state.vscr; + vcpu->arch.vr = t->vr_state; } #endif @@ -640,12 +624,6 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, ulong msr) { struct thread_struct *t = ¤t->thread; - u64 *vcpu_fpr = vcpu->arch.fpr; -#ifdef CONFIG_VSX - u64 *vcpu_vsx = vcpu->arch.vsr; -#endif - u64 *thread_fpr = &t->fp_state.fpr[0][0]; - int i; /* When we have paired singles, we emulate in software */ if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) @@ -683,13 +661,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, #endif if (msr & MSR_FP) { - for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) - thread_fpr[get_fpr_index(i)] = vcpu_fpr[i]; -#ifdef CONFIG_VSX - for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++) - thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i]; -#endif - t->fp_state.fpscr = vcpu->arch.fpscr; + t->fp_state = vcpu->arch.fp; t->fpexc_mode = 0; enable_kernel_fp(); load_fp_state(&t->fp_state); @@ -697,8 +669,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, if (msr & MSR_VEC) { #ifdef CONFIG_ALTIVEC - memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr)); - t->vr_state.vscr = vcpu->arch.vscr; + t->vr_state = vcpu->arch.vr; t->vrsave = -1; enable_kernel_altivec(); load_vr_state(&t->vr_state); @@ -1118,19 +1089,6 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_HIOR: *val = get_reg_val(id, to_book3s(vcpu)->hior); break; -#ifdef CONFIG_VSX - case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: { - long int i = id - KVM_REG_PPC_VSR0; - - if (!cpu_has_feature(CPU_FTR_VSX)) { - r = -ENXIO; - break; - } - val->vsxval[0] = vcpu->arch.fpr[i]; - val->vsxval[1] = vcpu->arch.vsr[i]; - break; - } -#endif /* CONFIG_VSX */ default: r = -EINVAL; break; @@ -1149,19 +1107,6 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, to_book3s(vcpu)->hior = set_reg_val(id, *val); to_book3s(vcpu)->hior_explicit = true; break; -#ifdef CONFIG_VSX - case KVM_REG_PPC_VSR0 ... 
KVM_REG_PPC_VSR31: { - long int i = id - KVM_REG_PPC_VSR0; - - if (!cpu_has_feature(CPU_FTR_VSX)) { - r = -ENXIO; - break; - } - vcpu->arch.fpr[i] = val->vsxval[0]; - vcpu->arch.vsr[i] = val->vsxval[1]; - break; - } -#endif /* CONFIG_VSX */ default: r = -EINVAL; break; diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 53e65a210b9a..0033465ecc3f 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -707,9 +707,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) fpexc_mode = current->thread.fpexc_mode; /* Restore guest FPU state to thread */ - memcpy(current->thread.fp_state.fpr, vcpu->arch.fpr, - sizeof(vcpu->arch.fpr)); - current->thread.fp_state.fpscr = vcpu->arch.fpscr; + current->thread.fp_state = vcpu->arch.fp; /* * Since we can't trap on MSR_FP in GS-mode, we consider the guest @@ -745,9 +743,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) vcpu->fpu_active = 0; /* Save guest FPU state from thread */ - memcpy(vcpu->arch.fpr, current->thread.fp_state.fpr, - sizeof(vcpu->arch.fpr)); - vcpu->arch.fpscr = current->thread.fp_state.fpscr; + vcpu->arch.fp = current->thread.fp_state; /* Restore userspace FPU state from stack */ current->thread.fp_state = fp; diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 9ae97686e9f4..7ca9e0a80499 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -656,14 +656,14 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); break; case KVM_MMIO_REG_FPR: - vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; + VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; break; #ifdef CONFIG_PPC_BOOK3S case KVM_MMIO_REG_QPR: vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; break; case KVM_MMIO_REG_FQPR: - vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; + VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; break; #endif From 99dae3bad28d8fdd32b7bfdd5e2ec7bb2d4d019d Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Tue, 15 Oct 2013 20:43:03 +1100 Subject: [PATCH 08/45] KVM: PPC: Load/save FP/VMX/VSX state directly to/from vcpu struct Now that we have the vcpu floating-point and vector state stored in the same type of struct as the main kernel uses, we can load that state directly from the vcpu struct instead of having extra copies to/from the thread_struct. Similarly, when the guest state needs to be saved, we can have it saved it directly to the vcpu struct by setting the current->thread.fp_save_area and current->thread.vr_save_area pointers. That also means that we don't need to back up and restore userspace's FP/vector state. This all makes the code simpler and faster. Note that it's not necessary to save or modify current->thread.fpexc_mode, since nothing in KVM uses or is affected by its value. Nor is it necessary to touch used_vr or used_vsr. 
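The key enabler (a sketch of the flow, using calls that appear in the hunks
below) is that the kernel's FP/VMX save path writes to thread.fp_save_area
and thread.vr_save_area when those pointers are set, and to the usual
thread_struct state otherwise:

	/* entering the guest: load guest state and redirect future saves */
	enable_kernel_fp();
	load_fp_state(&vcpu->arch.fp);
	current->thread.fp_save_area = &vcpu->arch.fp;

	/* on the way out, giveup_fpu(current) now saves the guest FP state
	 * straight into vcpu->arch.fp; then stop redirecting: */
	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
	current->thread.fp_save_area = NULL;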
Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_pr.c | 72 ++++++++---------------------------- arch/powerpc/kvm/booke.c | 16 -------- arch/powerpc/kvm/booke.h | 4 +- 3 files changed, 19 insertions(+), 73 deletions(-) diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 2bb425b22461..aedba681bb94 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -567,16 +567,16 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr) * both the traditional FP registers and the added VSX * registers into thread.fp_state.fpr[]. */ - if (current->thread.regs->msr & MSR_FP) + if (t->regs->msr & MSR_FP) giveup_fpu(current); - vcpu->arch.fp = t->fp_state; + t->fp_save_area = NULL; } #ifdef CONFIG_ALTIVEC if (msr & MSR_VEC) { if (current->thread.regs->msr & MSR_VEC) giveup_altivec(current); - vcpu->arch.vr = t->vr_state; + t->vr_save_area = NULL; } #endif @@ -661,22 +661,20 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, #endif if (msr & MSR_FP) { - t->fp_state = vcpu->arch.fp; - t->fpexc_mode = 0; enable_kernel_fp(); - load_fp_state(&t->fp_state); + load_fp_state(&vcpu->arch.fp); + t->fp_save_area = &vcpu->arch.fp; } if (msr & MSR_VEC) { #ifdef CONFIG_ALTIVEC - t->vr_state = vcpu->arch.vr; - t->vrsave = -1; enable_kernel_altivec(); - load_vr_state(&t->vr_state); + load_vr_state(&vcpu->arch.vr); + t->vr_save_area = &vcpu->arch.vr; #endif } - current->thread.regs->msr |= msr; + t->regs->msr |= msr; vcpu->arch.guest_owned_ext |= msr; kvmppc_recalc_shadow_msr(vcpu); @@ -697,12 +695,12 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu) if (lost_ext & MSR_FP) { enable_kernel_fp(); - load_fp_state(¤t->thread.fp_state); + load_fp_state(&vcpu->arch.fp); } #ifdef CONFIG_ALTIVEC if (lost_ext & MSR_VEC) { enable_kernel_altivec(); - load_vr_state(¤t->thread.vr_state); + load_vr_state(&vcpu->arch.vr); } #endif current->thread.regs->msr |= lost_ext; @@ -1204,17 +1202,9 @@ static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu) static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) { int ret; - struct thread_fp_state fp; - int fpexc_mode; #ifdef CONFIG_ALTIVEC - struct thread_vr_state vr; unsigned long uninitialized_var(vrsave); - int used_vr; #endif -#ifdef CONFIG_VSX - int used_vsr; -#endif - ulong ext_msr; /* Check if we can run the vcpu at all */ if (!vcpu->arch.sane) { @@ -1236,33 +1226,22 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) goto out; } - /* Save FPU state in stack */ + /* Save FPU state in thread_struct */ if (current->thread.regs->msr & MSR_FP) giveup_fpu(current); - fp = current->thread.fp_state; - fpexc_mode = current->thread.fpexc_mode; #ifdef CONFIG_ALTIVEC - /* Save Altivec state in stack */ - used_vr = current->thread.used_vr; - if (used_vr) { - if (current->thread.regs->msr & MSR_VEC) - giveup_altivec(current); - vr = current->thread.vr_state; - vrsave = current->thread.vrsave; - } + /* Save Altivec state in thread_struct */ + if (current->thread.regs->msr & MSR_VEC) + giveup_altivec(current); #endif #ifdef CONFIG_VSX - /* Save VSX state in stack */ - used_vsr = current->thread.used_vsr; - if (used_vsr && (current->thread.regs->msr & MSR_VSX)) + /* Save VSX state in thread_struct */ + if (current->thread.regs->msr & MSR_VSX) __giveup_vsx(current); #endif - /* Remember the MSR with disabled extensions */ - ext_msr = current->thread.regs->msr; - /* Preload FPU if it's enabled */ if (vcpu->arch.shared->msr & 
MSR_FP) kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); @@ -1277,25 +1256,6 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) /* Make sure we save the guest FPU/Altivec/VSX state */ kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); - current->thread.regs->msr = ext_msr; - - /* Restore FPU/VSX state from stack */ - current->thread.fp_state = fp; - current->thread.fpexc_mode = fpexc_mode; - -#ifdef CONFIG_ALTIVEC - /* Restore Altivec state from stack */ - if (used_vr && current->thread.used_vr) { - current->thread.vr_state = vr; - current->thread.vrsave = vrsave; - } - current->thread.used_vr = used_vr; -#endif - -#ifdef CONFIG_VSX - current->thread.used_vsr = used_vsr; -#endif - out: vcpu->mode = OUTSIDE_GUEST_MODE; return ret; diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 0033465ecc3f..a983ccaf3cce 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -682,10 +682,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) { int ret, s; struct thread_struct thread; -#ifdef CONFIG_PPC_FPU - struct thread_fp_state fp; - int fpexc_mode; -#endif if (!vcpu->arch.sane) { kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; @@ -703,11 +699,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) #ifdef CONFIG_PPC_FPU /* Save userspace FPU state in stack */ enable_kernel_fp(); - fp = current->thread.fp_state; - fpexc_mode = current->thread.fpexc_mode; - - /* Restore guest FPU state to thread */ - current->thread.fp_state = vcpu->arch.fp; /* * Since we can't trap on MSR_FP in GS-mode, we consider the guest @@ -741,13 +732,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) kvmppc_save_guest_fp(vcpu); vcpu->fpu_active = 0; - - /* Save guest FPU state from thread */ - vcpu->arch.fp = current->thread.fp_state; - - /* Restore userspace FPU state from stack */ - current->thread.fp_state = fp; - current->thread.fpexc_mode = fpexc_mode; #endif out: diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h index fe59f225327f..b632cd35919b 100644 --- a/arch/powerpc/kvm/booke.h +++ b/arch/powerpc/kvm/booke.h @@ -137,7 +137,8 @@ static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu) #ifdef CONFIG_PPC_FPU if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) { enable_kernel_fp(); - load_fp_state(¤t->thread.fp_state); + load_fp_state(&vcpu->arch.fp); + current->thread.fp_save_area = &vcpu->arch.fp; current->thread.regs->msr |= MSR_FP; } #endif @@ -152,6 +153,7 @@ static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu) #ifdef CONFIG_PPC_FPU if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP)) giveup_fpu(current); + current->thread.fp_save_area = NULL; #endif } From 595e4f7e697e2e6fb252f6be6a83d5b9460b59a3 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Tue, 15 Oct 2013 20:43:04 +1100 Subject: [PATCH 09/45] KVM: PPC: Book3S HV: Use load/store_fp_state functions in HV guest entry/exit This modifies kvmppc_load_fp and kvmppc_save_fp to use the generic FP/VSX and VMX load/store functions instead of open-coding the FP/VSX/VMX load/store instructions. Since kvmppc_load/save_fp don't follow C calling conventions, we make them private symbols within book3s_hv_rmhandlers.S. 
Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/kernel/asm-offsets.c | 2 - arch/powerpc/kvm/book3s_hv_rmhandlers.S | 86 +++++++------------------ 2 files changed, 22 insertions(+), 66 deletions(-) diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 8403f9031d93..5e64c3d2149f 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -426,10 +426,8 @@ int main(void) DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave)); DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fp.fpr)); - DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fp.fpscr)); #ifdef CONFIG_ALTIVEC DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr.vr)); - DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vr.vscr)); #endif DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 47fd536fb13b..acbd1d6afba0 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1261,7 +1261,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) /* save FP state */ mr r3, r9 - bl .kvmppc_save_fp + bl kvmppc_save_fp /* Increment yield count if they have a VPA */ ld r8, VCPU_VPA(r9) /* do they have a VPA? */ @@ -1691,7 +1691,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) std r31, VCPU_GPR(R31)(r3) /* save FP state */ - bl .kvmppc_save_fp + bl kvmppc_save_fp /* * Take a nap until a decrementer or external interrupt occurs, @@ -1869,8 +1869,12 @@ kvmppc_read_intr: /* * Save away FP, VMX and VSX registers. * r3 = vcpu pointer + * N.B. r30 and r31 are volatile across this function, + * thus it is not callable from C. */ -_GLOBAL(kvmppc_save_fp) +kvmppc_save_fp: + mflr r30 + mr r31,r3 mfmsr r5 ori r8,r5,MSR_FP #ifdef CONFIG_ALTIVEC @@ -1885,42 +1889,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) #endif mtmsrd r8 isync -#ifdef CONFIG_VSX -BEGIN_FTR_SECTION - reg = 0 - .rept 32 - li r6,reg*16+VCPU_FPRS - STXVD2X(reg,R6,R3) - reg = reg + 1 - .endr -FTR_SECTION_ELSE -#endif - reg = 0 - .rept 32 - stfd reg,reg*8+VCPU_FPRS(r3) - reg = reg + 1 - .endr -#ifdef CONFIG_VSX -ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX) -#endif - mffs fr0 - stfd fr0,VCPU_FPSCR(r3) - + addi r3,r3,VCPU_FPRS + bl .store_fp_state #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION - reg = 0 - .rept 32 - li r6,reg*16+VCPU_VRS - stvx reg,r6,r3 - reg = reg + 1 - .endr - mfvscr vr0 - li r6,VCPU_VSCR - stvx vr0,r6,r3 + addi r3,r31,VCPU_VRS + bl .store_vr_state END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif mfspr r6,SPRN_VRSAVE stw r6,VCPU_VRSAVE(r3) + mtlr r30 mtmsrd r5 isync blr @@ -1928,9 +1907,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) /* * Load up FP, VMX and VSX registers * r4 = vcpu pointer + * N.B. r30 and r31 are volatile across this function, + * thus it is not callable from C. 
*/ - .globl kvmppc_load_fp kvmppc_load_fp: + mflr r30 + mr r31,r4 mfmsr r9 ori r8,r9,MSR_FP #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION oris r8,r8,MSR_VEC@h END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif #ifdef CONFIG_VSX BEGIN_FTR_SECTION oris r8,r8,MSR_VSX@h END_FTR_SECTION_IFSET(CPU_FTR_VSX) #endif mtmsrd r8 isync - lfd fr0,VCPU_FPSCR(r4) - MTFSF_L(fr0) -#ifdef CONFIG_VSX -BEGIN_FTR_SECTION - reg = 0 - .rept 32 - li r7,reg*16+VCPU_FPRS - LXVD2X(reg,R7,R4) - reg = reg + 1 - .endr -FTR_SECTION_ELSE -#endif - reg = 0 - .rept 32 - lfd reg,reg*8+VCPU_FPRS(r4) - reg = reg + 1 - .endr -#ifdef CONFIG_VSX -ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX) -#endif - + addi r3,r4,VCPU_FPRS + bl .load_fp_state #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION - li r7,VCPU_VSCR - lvx vr0,r7,r4 - mtvscr vr0 - reg = 0 - .rept 32 - li r7,reg*16+VCPU_VRS - lvx reg,r7,r4 - reg = reg + 1 - .endr + addi r3,r31,VCPU_VRS + bl .load_vr_state END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif lwz r7,VCPU_VRSAVE(r4) mtspr SPRN_VRSAVE,r7 + mtlr r30 + mr r4,r31 blr /* From 30a91fe24b7a6475d22d22fb0f772318ed435a1d Mon Sep 17 00:00:00 2001 From: Bharat Bhushan Date: Fri, 15 Nov 2013 11:01:13 +0530 Subject: [PATCH 10/45] kvm: booke: clear host tlb reference flag on guest tlb invalidation On booke, "struct tlbe_ref" contains the host tlb mapping information (pfn: the guest-pfn to pfn mapping, flags: the attributes of that mapping) for a guest tlb entry. When a guest creates a TLB entry, the corresponding "struct tlbe_ref" is set to point to a valid "pfn" and the mapping attributes are set in its "flags" field. When a guest TLB entry is invalidated, the "flags" field of the corresponding "struct tlbe_ref" is updated to mark it no longer valid, and some attribute bits are cleared selectively: if E500_TLB_BITMAP was set we clear E500_TLB_BITMAP, and likewise for E500_TLB_TLB0. Ideally we should clear the complete "flags", since the entry is invalid and nothing in it can be reused. The other part of the problem is that when the same entry is set up again, the flags are or-ed in rather than assigned. So far this worked, because the selective clearing happened to clear exactly the flags that were set during mapping; but as soon as more attribute bits are added, the selective clearing would have to be extended each time, which is error-prone and unnecessary. Signed-off-by: Bharat Bhushan Reviewed-by: Scott Wood Signed-off-by: Alexander Graf --- arch/powerpc/kvm/e500_mmu_host.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index ecf2247b13be..a45e25cd78fc 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -231,15 +231,15 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID); } - /* Already invalidated in between */ - if (!(ref->flags & E500_TLB_VALID)) - return; - - /* Guest tlbe is backed by at most one host tlbe per shadow pid. 
*/ - kvmppc_e500_tlbil_one(vcpu_e500, gtlbe); + /* + * If TLB entry is still valid then it's a TLB0 entry, and thus + * backed by at most one host tlbe per shadow pid + */ + if (ref->flags & E500_TLB_VALID) + kvmppc_e500_tlbil_one(vcpu_e500, gtlbe); /* Mark the TLB as not backed by the host anymore */ - ref->flags &= ~E500_TLB_VALID; + ref->flags = 0; } static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe) @@ -252,7 +252,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, pfn_t pfn) { ref->pfn = pfn; - ref->flags |= E500_TLB_VALID; + ref->flags = E500_TLB_VALID; /* Mark the page accessed */ kvm_set_pfn_accessed(pfn); From 7c85e6b39ce880869929958bd7b95f72db03a9af Mon Sep 17 00:00:00 2001 From: Bharat Bhushan Date: Fri, 15 Nov 2013 11:01:14 +0530 Subject: [PATCH 11/45] kvm: book3s: rename lookup_linux_pte() to lookup_linux_pte_and_update() lookup_linux_pte() does more than a lookup: it also updates the pte. For clarity, it is renamed to lookup_linux_pte_and_update(). Signed-off-by: Bharat Bhushan Reviewed-by: Scott Wood Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 9c515440ad1a..daa19a043677 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -134,7 +134,7 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index, unlock_rmap(rmap); } -static pte_t lookup_linux_pte(pgd_t *pgdir, unsigned long hva, +static pte_t lookup_linux_pte_and_update(pgd_t *pgdir, unsigned long hva, int writing, unsigned long *pte_sizep) { pte_t *ptep; @@ -231,7 +231,8 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, /* Look up the Linux PTE for the backing page */ pte_size = psize; - pte = lookup_linux_pte(pgdir, hva, writing, &pte_size); + pte = lookup_linux_pte_and_update(pgdir, hva, writing, + &pte_size); if (pte_present(pte)) { if (writing && !pte_write(pte)) /* make the actual HPTE be read-only */ @@ -671,7 +672,8 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags, memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn); if (memslot) { hva = __gfn_to_hva_memslot(memslot, gfn); - pte = lookup_linux_pte(pgdir, hva, 1, &psize); + pte = lookup_linux_pte_and_update(pgdir, hva, + 1, &psize); if (pte_present(pte) && !pte_write(pte)) r = hpte_make_readonly(r); } From f5e3fe091f5238459752a81b478398b7cb22e575 Mon Sep 17 00:00:00 2001 From: Bharat Bhushan Date: Fri, 15 Nov 2013 11:01:15 +0530 Subject: [PATCH 12/45] kvm: powerpc: define a linux pte lookup function We need to search the linux "pte" to get its attributes for setting up a TLB entry in KVM. This patch defines a lookup_linux_ptep() function which returns the pte pointer.
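One detail worth calling out: pte_sizep is both an input (the minimum mapping size the caller needs) and an output (the mapping size actually found), and the lookup fails when the host mapping is smaller than requested. A small userspace model of just that size logic, with the page-table walk stubbed out (the real kernel walk returns a pte pointer, not this toy struct):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Stub walk result: shift == 0 means a normal page, a non-zero
 * shift means a hugepage of size 1UL << shift. */
struct walk_result { int found; unsigned int shift; };

static int lookup_size_ok(struct walk_result w, unsigned long *pte_sizep)
{
	unsigned long wanted = *pte_sizep;

	if (!w.found)
		return 0;
	*pte_sizep = w.shift ? (1UL << w.shift) : PAGE_SIZE;
	/* Fail if the host mapping is smaller than the caller needs. */
	return wanted <= *pte_sizep;
}

int main(void)
{
	unsigned long size = 16384; /* caller needs at least 16K */
	struct walk_result w = { .found = 1, .shift = 24 }; /* 16M page */

	if (lookup_size_ok(w, &size))
		printf("usable mapping of size %lu\n", size);
	return 0;
}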
Signed-off-by: Bharat Bhushan Reviewed-by: Scott Wood Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/pgtable.h | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index 7d6eacf249cf..b60ceb8d86c8 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -223,6 +223,27 @@ extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, #endif pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift); + +static inline pte_t *lookup_linux_ptep(pgd_t *pgdir, unsigned long hva, + unsigned long *pte_sizep) +{ + pte_t *ptep; + unsigned long ps = *pte_sizep; + unsigned int shift; + + ptep = find_linux_pte_or_hugepte(pgdir, hva, &shift); + if (!ptep) + return NULL; + if (shift) + *pte_sizep = 1ul << shift; + else + *pte_sizep = PAGE_SIZE; + + if (ps > *pte_sizep) + return NULL; + + return ptep; +} #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ From 08c9a188d0d0fc0f0c5e17d89a06bb59c493110f Mon Sep 17 00:00:00 2001 From: Bharat Bhushan Date: Mon, 18 Nov 2013 11:18:54 +0530 Subject: [PATCH 13/45] kvm: powerpc: use caching attributes as per linux pte KVM uses the same WIM tlb attributes as the corresponding qemu pte. For this we now search the linux pte for the requested page and get the caching/coherency attributes from it. Signed-off-by: Bharat Bhushan Reviewed-by: Scott Wood Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/kvm_host.h | 2 +- arch/powerpc/kvm/booke.c | 1 + arch/powerpc/kvm/e500.h | 8 ++++-- arch/powerpc/kvm/e500_mmu_host.c | 43 +++++++++++++++++------------ 4 files changed, 33 insertions(+), 21 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 3ca0b430eaee..2c2ca5faf7f2 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -540,6 +540,7 @@ struct kvm_vcpu_arch { #endif gpa_t paddr_accessed; gva_t vaddr_accessed; + pgd_t *pgdir; u8 io_gpr; /* GPR used as IO source/target */ u8 mmio_is_bigendian; @@ -597,7 +598,6 @@ struct kvm_vcpu_arch { struct list_head run_list; struct task_struct *run_task; struct kvm_run *kvm_run; - pgd_t *pgdir; spinlock_t vpa_update_lock; struct kvmppc_vpa vpa; diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index a983ccaf3cce..54ee1c01798f 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -717,6 +717,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) thread.debug = current->thread.debug; current->thread.debug = vcpu->arch.shadow_dbg_reg; + vcpu->arch.pgdir = current->mm->pgd; kvmppc_fix_ee_before_entry(); ret = __kvmppc_vcpu_run(kvm_run, vcpu); diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h index 4fd9650eb018..a326178bdea5 100644 --- a/arch/powerpc/kvm/e500.h +++ b/arch/powerpc/kvm/e500.h @@ -31,11 +31,13 @@ enum vcpu_ftr { #define E500_TLB_NUM 2 /* entry is mapped somewhere in host TLB */ -#define E500_TLB_VALID (1 << 0) +#define E500_TLB_VALID (1 << 31) /* TLB1 entry is mapped by host TLB1, tracked by bitmaps */ -#define E500_TLB_BITMAP (1 << 1) +#define E500_TLB_BITMAP (1 << 30) /* TLB1 entry is mapped by host TLB0 */ -#define E500_TLB_TLB0 (1 << 2) +#define E500_TLB_TLB0 (1 << 29) +/* bits [6-5] MAS2_X1 and MAS2_X0 and [4-0] bits for WIMGE */ +#define E500_TLB_MAS2_ATTR (0x7f) struct tlbe_ref { pfn_t pfn; /* valid only for TLB0, except briefly */ diff --git a/arch/powerpc/kvm/e500_mmu_host.c 
b/arch/powerpc/kvm/e500_mmu_host.c index a45e25cd78fc..dd2cc03f406f 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c @@ -65,15 +65,6 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode) return mas3; } -static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode) -{ -#ifdef CONFIG_SMP - return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M; -#else - return mas2 & MAS2_ATTRIB_MASK; -#endif -} - /* * writing shadow tlb entry to host TLB */ @@ -249,11 +240,14 @@ static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe) static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref, struct kvm_book3e_206_tlb_entry *gtlbe, - pfn_t pfn) + pfn_t pfn, unsigned int wimg) { ref->pfn = pfn; ref->flags = E500_TLB_VALID; + /* Use guest supplied MAS2_G and MAS2_E */ + ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg; + /* Mark the page accessed */ kvm_set_pfn_accessed(pfn); @@ -316,8 +310,7 @@ static void kvmppc_e500_setup_stlbe( /* Force IPROT=0 for all guest mappings. */ stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID; - stlbe->mas2 = (gvaddr & MAS2_EPN) | - e500_shadow_mas2_attrib(gtlbe->mas2, pr); + stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR); stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | e500_shadow_mas3_attrib(gtlbe->mas7_3, pr); @@ -339,6 +332,10 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, int ret = 0; unsigned long mmu_seq; struct kvm *kvm = vcpu_e500->vcpu.kvm; + unsigned long tsize_pages = 0; + pte_t *ptep; + unsigned int wimg = 0; + pgd_t *pgdir; /* used to check for invalidations in progress */ mmu_seq = kvm->mmu_notifier_seq; @@ -405,7 +402,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, */ for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) { - unsigned long gfn_start, gfn_end, tsize_pages; + unsigned long gfn_start, gfn_end; tsize_pages = 1 << (tsize - 2); gfn_start = gfn & ~(tsize_pages - 1); @@ -447,11 +444,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, } if (likely(!pfnmap)) { - unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT); + tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT); pfn = gfn_to_pfn_memslot(slot, gfn); if (is_error_noslot_pfn(pfn)) { - printk(KERN_ERR "Couldn't get real page for gfn %lx!\n", - (long)gfn); + if (printk_ratelimit()) + pr_err("%s: real page not found for gfn %lx\n", + __func__, (long)gfn); return -EINVAL; } @@ -466,7 +464,18 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, goto out; } - kvmppc_e500_ref_setup(ref, gtlbe, pfn); + + pgdir = vcpu_e500->vcpu.arch.pgdir; + ptep = lookup_linux_ptep(pgdir, hva, &tsize_pages); + if (pte_present(*ptep)) + wimg = (*ptep >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK; + else { + if (printk_ratelimit()) + pr_err("%s: pte not present: gfn %lx, pfn %lx\n", + __func__, (long)gfn, pfn); + return -EINVAL; + } + kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg); kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, ref, gvaddr, stlbe); From 9bd880a2c882d2181b1eaba0aed422cced8f0e8a Mon Sep 17 00:00:00 2001 From: Tiejun Chen Date: Wed, 23 Oct 2013 09:26:48 +0800 Subject: [PATCH 14/45] KVM: PPC: Book3E HV: call RECONCILE_IRQ_STATE to sync the software state Rather than calling hard_irq_disable() when we're back in C code we can just call RECONCILE_IRQ_STATE to soft disable IRQs while we're already in hard disabled state. This should be functionally equivalent to the code before, but cleaner and faster. 
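For readers following along: on 64-bit Book3E the kernel tracks interrupts at two levels, the hardware MSR[EE] bit and the PACA's soft-enable/irq-happened bookkeeping (lazy EE). The exception entry has already cleared EE, so only the bookkeeping needs to be brought in sync. A rough userspace model of that invariant follows; the field names echo the PACA, but everything else (including the flag value) is invented for illustration:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define PACA_IRQ_HARD_DIS 1 /* model value, not the kernel's */

static bool hw_ee;               /* hardware MSR[EE] */
static bool soft_enabled = true; /* what Linux believes */
static int irq_happened;

/* Models hard_irq_disable(): touch the hardware, then the bookkeeping. */
static void hard_irq_disable_model(void)
{
	hw_ee = false; /* costs an mfmsr/mtmsrd round trip in real life */
	soft_enabled = false;
	irq_happened |= PACA_IRQ_HARD_DIS;
}

/* Models RECONCILE_IRQ_STATE: hardware is already hard-disabled
 * (we arrived through an exception), so update bookkeeping only. */
static void reconcile_irq_state_model(void)
{
	assert(!hw_ee); /* exception entry already cleared EE */
	soft_enabled = false;
	irq_happened |= PACA_IRQ_HARD_DIS;
}

int main(void)
{
	hw_ee = false;                /* guest exit: EE off in hardware */
	reconcile_irq_state_model();  /* no MSR round trip needed */
	printf("soft_enabled=%d irq_happened=%d\n", soft_enabled,
	       irq_happened);
	(void)hard_irq_disable_model; /* shown only for contrast */
	return 0;
}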
Signed-off-by: Tiejun Chen [agraf: fix comment, commit message] Signed-off-by: Alexander Graf --- arch/powerpc/kvm/booke.c | 11 ----------- arch/powerpc/kvm/bookehv_interrupts.S | 11 +++++++++++ 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 54ee1c01798f..6a8c32ec4173 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -879,17 +879,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, int s; int idx; -#ifdef CONFIG_PPC64 - WARN_ON(local_paca->irq_happened != 0); -#endif - - /* - * We enter with interrupts disabled in hardware, but - * we need to call hard_irq_disable anyway to ensure that - * the software state is kept in sync. - */ - hard_irq_disable(); - /* update before a new last_exit_type is rewritten */ kvmppc_update_timing_stats(vcpu); diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S index e8ed7d659c55..be3de1dd7bb3 100644 --- a/arch/powerpc/kvm/bookehv_interrupts.S +++ b/arch/powerpc/kvm/bookehv_interrupts.S @@ -33,6 +33,8 @@ #ifdef CONFIG_64BIT #include +#include +#include #else #include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */ #endif @@ -465,6 +467,15 @@ _GLOBAL(kvmppc_resume_host) mtspr SPRN_EPCR, r3 isync +#ifdef CONFIG_64BIT + /* + * We enter with interrupts disabled in hardware, but + * we need to call RECONCILE_IRQ_STATE to ensure + * that the software state is kept in sync. + */ + RECONCILE_IRQ_STATE(r3,r5) +#endif + /* Switch to kernel stack and jump to handler. */ PPC_LL r3, HOST_RUN(r1) mr r5, r14 /* intno */ From 47d45d9f53a7c478fc83dff7b421cb4bc3ad9f94 Mon Sep 17 00:00:00 2001 From: Zhouyi Zhou Date: Mon, 2 Dec 2013 18:21:58 +0800 Subject: [PATCH 15/45] KVM: PPC: NULL return of kvmppc_mmu_hpte_cache_next should be handled NULL return of kvmppc_mmu_hpte_cache_next should be handled Signed-off-by: Zhouyi Zhou Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_32_mmu_host.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c index 3a0abd2e5a15..5fac89dfe4cd 100644 --- a/arch/powerpc/kvm/book3s_32_mmu_host.c +++ b/arch/powerpc/kvm/book3s_32_mmu_host.c @@ -243,6 +243,11 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte, /* Now tell our Shadow PTE code about the new page */ pte = kvmppc_mmu_hpte_cache_next(vcpu); + if (!pte) { + kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT); + r = -EAGAIN; + goto out; + } dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n", orig_pte->may_write ? 'w' : '-', From 7a8ff56be68239bd36a2b639cb40bfbcfc58dad3 Mon Sep 17 00:00:00 2001 From: Alexander Graf Date: Thu, 9 Jan 2014 11:10:44 +0100 Subject: [PATCH 16/45] KVM: PPC: Unify kvmppc_get_last_inst and sc We had code duplication between the inline functions to get our last instruction on normal interrupts and system call interrupts. Unify both helper functions towards a single implementation. 
Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/kvm_book3s.h | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 8bb870694616..f8b23201c105 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -264,10 +264,8 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) return vcpu->arch.pc; } -static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) +static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc) { - ulong pc = kvmppc_get_pc(vcpu); - /* Load the instruction manually if it failed to do so in the * exit path */ if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) @@ -276,6 +274,11 @@ static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) return vcpu->arch.last_inst; } +static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) +{ + return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu)); +} + /* * Like kvmppc_get_last_inst(), but for fetching a sc instruction. * Because the sc instruction sets SRR0 to point to the following @@ -283,14 +286,7 @@ static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) */ static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu) { - ulong pc = kvmppc_get_pc(vcpu) - 4; - - /* Load the instruction manually if it failed to do so in the * exit path */ - if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) - kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false); - - return vcpu->arch.last_inst; + return kvmppc_get_last_inst_internal(vcpu, kvmppc_get_pc(vcpu) - 4); } static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) From 7feb6bb8e6dbd129c11fc93bf206daa156bf1c0f Mon Sep 17 00:00:00 2001 From: Michael Mueller Date: Fri, 28 Jun 2013 13:30:24 +0200 Subject: [PATCH 17/45] KVM: s390: enable Transactional Execution This patch enables transactional execution for KVM guests on s390 systems zec12 or later. We rework the allocation of the page containing the sie_block to also back the Interception Transaction Diagnostic Block. If available, the TE facilities will be enabled. Setting bits 73 and 50 in the vfacilities bitmask reveals the HW facilities Transactional Memory and Constraint Transactional Memory, respectively, to the KVM guest. Furthermore, the patch restores the Program-Interruption TDB from the Interception TDB in case a program interception has occurred and the ITDB has a valid format.
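The single-page allocation works because the layout is fixed: the sie_block occupies the first 0x200 bytes, the ITDB sits at offset 0x600, and the whole sie_page is exactly one 4K page, so one get_zeroed_page() backs both. Those invariants can be checked at compile time; a sketch with stub types whose sizes are taken from the hunks below (the real field layouts are elided):

#include <assert.h>
#include <stddef.h>

/* Same overall size as struct kvm_s390_sie_block (ends at 0x200). */
struct sie_block_stub { unsigned char bytes[0x200]; };

struct itdb_stub { unsigned char data[256]; };

struct sie_page_stub {
	struct sie_block_stub sie_block;
	unsigned char reserved200[1024]; /* 0x0200 */
	struct itdb_stub itdb;           /* 0x0600 */
	unsigned char reserved700[2304]; /* 0x0700 */
};

static_assert(offsetof(struct sie_page_stub, itdb) == 0x600,
	      "ITDB must sit at 0x600 within the SIE page");
static_assert(sizeof(struct sie_page_stub) == 4096,
	      "sie_page must be exactly one 4K page");

int main(void) { return 0; }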
Signed-off-by: Michael Mueller Signed-off-by: Christian Borntraeger --- arch/s390/include/asm/kvm_host.h | 15 ++++++++++++++- arch/s390/kvm/intercept.c | 11 +++++++++++ arch/s390/kvm/kvm-s390.c | 17 +++++++++++------ arch/s390/kvm/kvm-s390.h | 6 ++++++ 4 files changed, 42 insertions(+), 7 deletions(-) diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index d5bc3750616e..eef3dd3fd9a9 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h @@ -106,9 +106,22 @@ struct kvm_s390_sie_block { __u64 gbea; /* 0x0180 */ __u8 reserved188[24]; /* 0x0188 */ __u32 fac; /* 0x01a0 */ - __u8 reserved1a4[92]; /* 0x01a4 */ + __u8 reserved1a4[68]; /* 0x01a4 */ + __u64 itdba; /* 0x01e8 */ + __u8 reserved1f0[16]; /* 0x01f0 */ } __attribute__((packed)); +struct kvm_s390_itdb { + __u8 data[256]; +} __packed; + +struct sie_page { + struct kvm_s390_sie_block sie_block; + __u8 reserved200[1024]; /* 0x0200 */ + struct kvm_s390_itdb itdb; /* 0x0600 */ + __u8 reserved700[2304]; /* 0x0700 */ +} __packed; + struct kvm_vcpu_stat { u32 exit_userspace; u32 exit_null; diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index 5ddbbde6f65c..eeb1ac7d8fa4 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -112,6 +112,17 @@ static int handle_instruction(struct kvm_vcpu *vcpu) static int handle_prog(struct kvm_vcpu *vcpu) { vcpu->stat.exit_program_interruption++; + + /* Restore ITDB to Program-Interruption TDB in guest memory */ + if (IS_TE_ENABLED(vcpu) && + !(current->thread.per_flags & PER_FLAG_NO_TE) && + IS_ITDB_VALID(vcpu)) { + copy_to_guest(vcpu, TDB_ADDR, vcpu->arch.sie_block->itdba, + sizeof(struct kvm_s390_itdb)); + memset((void *) vcpu->arch.sie_block->itdba, 0, + sizeof(struct kvm_s390_itdb)); + } + trace_kvm_s390_intercept_prog(vcpu, vcpu->arch.sie_block->iprcc); return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc); } diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 1bb1ddaf93c0..0084c2c272e3 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -395,6 +395,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) CPUSTAT_STOPPED | CPUSTAT_GED); vcpu->arch.sie_block->ecb = 6; + if (test_vfacility(50) && test_vfacility(73)) + vcpu->arch.sie_block->ecb |= 0x10; + vcpu->arch.sie_block->ecb2 = 8; vcpu->arch.sie_block->eca = 0xC1002001U; vcpu->arch.sie_block->fac = (int) (long) vfacilities; @@ -411,6 +414,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) { struct kvm_vcpu *vcpu; + struct sie_page *sie_page; int rc = -EINVAL; if (id >= KVM_MAX_VCPUS) @@ -422,12 +426,13 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, if (!vcpu) goto out; - vcpu->arch.sie_block = (struct kvm_s390_sie_block *) - get_zeroed_page(GFP_KERNEL); - - if (!vcpu->arch.sie_block) + sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL); + if (!sie_page) goto out_free_cpu; + vcpu->arch.sie_block = &sie_page->sie_block; + vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb; + vcpu->arch.sie_block->icpua = id; if (!kvm_is_ucontrol(kvm)) { if (!kvm->arch.sca) { @@ -1178,8 +1183,8 @@ static int __init kvm_s390_init(void) return -ENOMEM; } memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16); - vfacilities[0] &= 0xff82fff3f47c0000UL; - vfacilities[1] &= 0x001c000000000000UL; + vfacilities[0] &= 0xff82fff3f47c2000UL; + vfacilities[1] &= 0x005c000000000000UL; return 0; } diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index 
095cf51b16ec..f9559b0bd620 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h @@ -26,6 +26,12 @@ extern unsigned long *vfacilities; int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu); +/* Transactional Memory Execution related macros */ +#define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & 0x10)) +#define TDB_ADDR 0x1800UL +#define TDB_FORMAT1 1 +#define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1)) + #define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\ do { \ debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \ From d208c79d63e06457eef077af770d23dc4cde4d43 Mon Sep 17 00:00:00 2001 From: Thomas Huth Date: Thu, 12 Dec 2013 13:40:40 +0100 Subject: [PATCH 18/45] KVM: s390: Enable the LPP facility for guests The Load-Program-Parameter Facility is available for guests without any further ado, so we should indicate its availability by setting facility bit 40 if it is supported by the host. Signed-off-by: Thomas Huth Reviewed-by: Michael Mueller Acked-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 0084c2c272e3..597114b7ab99 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1183,7 +1183,7 @@ static int __init kvm_s390_init(void) return -ENOMEM; } memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16); - vfacilities[0] &= 0xff82fff3f47c2000UL; + vfacilities[0] &= 0xff82fff3f4fc2000UL; vfacilities[1] &= 0x005c000000000000UL; return 0; } From 19e4735bd7f02bd38db43a8521377b35f236b3b6 Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Tue, 4 Jun 2013 11:04:47 +0200 Subject: [PATCH 19/45] KVM: s390: virtio-ccw: Handle command rejects. A command reject for a ccw may happen if we run on a host not supporting a certain feature. We want to be able to handle this as a special case of command failure, so let's split this off from the generic -EIO error code. Reviewed-by: Thomas Huth Signed-off-by: Cornelia Huck Signed-off-by: Christian Borntraeger --- drivers/s390/kvm/virtio_ccw.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c index d6297176ab85..0fc584832001 100644 --- a/drivers/s390/kvm/virtio_ccw.c +++ b/drivers/s390/kvm/virtio_ccw.c @@ -642,8 +642,15 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev, (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) { /* OK */ } - if (irb_is_error(irb)) - vcdev->err = -EIO; /* XXX - use real error */ + if (irb_is_error(irb)) { + /* Command reject? */ + if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) && + (irb->ecw[0] & SNS0_CMD_REJECT)) + vcdev->err = -EOPNOTSUPP; + else + /* Map everything else to -EIO. 
*/ + vcdev->err = -EIO; + } if (vcdev->curr_io & activity) { switch (activity) { case VIRTIO_CCW_DOING_READ_FEAT: From b94b64c9a79a322802f13f44e9690a7b6a22710e Mon Sep 17 00:00:00 2001 From: Vadim Rozenfeld Date: Thu, 23 Jan 2014 18:12:52 +1100 Subject: [PATCH 20/45] KVM: x86: mark hyper-v hypercall page as dirty Signed-off-by: Vadim Rozenfeld Reviewed-by: Marcelo Tosatti Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0c76f7cfdb32..0fa9c84e36cb 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1840,6 +1840,7 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data) if (__copy_to_user((void __user *)addr, instructions, 4)) return 1; kvm->arch.hv_hypercall = data; + mark_page_dirty(kvm, gfn); break; } case HV_X64_MSR_REFERENCE_TSC: { From b3af1e889ec4909f6b48dabd19a311d9c9f8d58e Mon Sep 17 00:00:00 2001 From: Vadim Rozenfeld Date: Thu, 23 Jan 2014 18:12:53 +1100 Subject: [PATCH 21/45] KVM: x86: mark hyper-v vapic assist page as dirty Signed-off-by: Vadim Rozenfeld Reviewed-by: Marcelo Tosatti Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 0fa9c84e36cb..dc11b4f98577 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1869,19 +1869,21 @@ static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data) { switch (msr) { case HV_X64_MSR_APIC_ASSIST_PAGE: { + u64 gfn; unsigned long addr; if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) { vcpu->arch.hv_vapic = data; break; } - addr = gfn_to_hva(vcpu->kvm, data >> - HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT); + gfn = data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT; + addr = gfn_to_hva(vcpu->kvm, gfn); if (kvm_is_error_hva(addr)) return 1; if (__clear_user((void __user *)addr, PAGE_SIZE)) return 1; vcpu->arch.hv_vapic = data; + mark_page_dirty(vcpu->kvm, gfn); break; } case HV_X64_MSR_EOI: From 58cb628dbe24ce21b884729aebe15acb903dbfb5 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Fri, 24 Jan 2014 16:48:44 +0100 Subject: [PATCH 22/45] KVM: x86: Validate guest writes to MSR_IA32_APICBASE Check for invalid state transitions on guest-initiated updates of MSR_IA32_APICBASE. This addresses both enabling of the x2APIC when it is not supported and all invalid transitions as described in SDM section 10.12.5. It also checks that no reserved bit is set in APICBASE by the guest. Signed-off-by: Jan Kiszka [Use cpuid_maxphyaddr instead of guest_cpuid_get_phys_bits. 
- Paolo] Signed-off-by: Paolo Bonzini --- arch/x86/kvm/cpuid.h | 8 ++++++++ arch/x86/kvm/lapic.h | 2 +- arch/x86/kvm/vmx.c | 9 +++++---- arch/x86/kvm/x86.c | 30 ++++++++++++++++++++++++------ 4 files changed, 38 insertions(+), 11 deletions(-) diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index f1e4895174b2..a2a1bb7ed8c1 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -72,4 +72,12 @@ static inline bool guest_cpuid_has_pcid(struct kvm_vcpu *vcpu) return best && (best->ecx & bit(X86_FEATURE_PCID)); } +static inline bool guest_cpuid_has_x2apic(struct kvm_vcpu *vcpu) +{ + struct kvm_cpuid_entry2 *best; + + best = kvm_find_cpuid_entry(vcpu, 1, 0); + return best && (best->ecx & bit(X86_FEATURE_X2APIC)); +} + #endif diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h index c8b0d0d2da5c..6a11845fd8b9 100644 --- a/arch/x86/kvm/lapic.h +++ b/arch/x86/kvm/lapic.h @@ -65,7 +65,7 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src, struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map); u64 kvm_get_apic_base(struct kvm_vcpu *vcpu); -void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data); +int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info); void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s); int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu); diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 5c8879127cfa..a06f101ef64b 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -4392,7 +4392,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) static void vmx_vcpu_reset(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); - u64 msr; + struct msr_data apic_base_msr; vmx->rmode.vm86_active = 0; @@ -4400,10 +4400,11 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu) vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); kvm_set_cr8(&vmx->vcpu, 0); - msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE; + apic_base_msr.data = 0xfee00000 | MSR_IA32_APICBASE_ENABLE; if (kvm_vcpu_is_bsp(&vmx->vcpu)) - msr |= MSR_IA32_APICBASE_BSP; - kvm_set_apic_base(&vmx->vcpu, msr); + apic_base_msr.data |= MSR_IA32_APICBASE_BSP; + apic_base_msr.host_initiated = true; + kvm_set_apic_base(&vmx->vcpu, &apic_base_msr); vmx_segment_cache_clear(vmx); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index dc11b4f98577..34d0d610aa8a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -257,10 +257,26 @@ u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) } EXPORT_SYMBOL_GPL(kvm_get_apic_base); -void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data) +int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { - /* TODO: reserve bits check */ - kvm_lapic_set_base(vcpu, data); + u64 old_state = vcpu->arch.apic_base & + (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE); + u64 new_state = msr_info->data & + (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE); + u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | + 0x2ff | (guest_cpuid_has_x2apic(vcpu) ? 0 : X2APIC_ENABLE); + + if (!msr_info->host_initiated && + ((msr_info->data & reserved_bits) != 0 || + new_state == X2APIC_ENABLE || + (new_state == MSR_IA32_APICBASE_ENABLE && + old_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) || + (new_state == (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE) && + old_state == 0))) + return 1; + + kvm_lapic_set_base(vcpu, msr_info->data); + return 0; } EXPORT_SYMBOL_GPL(kvm_set_apic_base); @@ -2009,8 +2025,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case 0x200 ... 
0x2ff: return set_msr_mtrr(vcpu, msr, data); case MSR_IA32_APICBASE: - kvm_set_apic_base(vcpu, data); - break; + return kvm_set_apic_base(vcpu, msr_info); case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff: return kvm_x2apic_msr_write(vcpu, msr, data); case MSR_IA32_TSCDEADLINE: @@ -6412,6 +6427,7 @@ EXPORT_SYMBOL_GPL(kvm_task_switch); int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) { + struct msr_data apic_base_msr; int mmu_reset_needed = 0; int pending_vec, max_bits, idx; struct desc_ptr dt; @@ -6435,7 +6451,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, mmu_reset_needed |= vcpu->arch.efer != sregs->efer; kvm_x86_ops->set_efer(vcpu, sregs->efer); - kvm_set_apic_base(vcpu, sregs->apic_base); + apic_base_msr.data = sregs->apic_base; + apic_base_msr.host_initiated = true; + kvm_set_apic_base(vcpu, &apic_base_msr); mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; kvm_x86_ops->set_cr0(vcpu, sregs->cr0); From 736017752d2f6ed0d64f5e15cf48e79779b11c85 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Le=20Goater?= Date: Thu, 9 Jan 2014 11:51:16 +0100 Subject: [PATCH 23/45] KVM: PPC: Book3S: MMIO emulation support for little endian guests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit MMIO emulation reads the last instruction executed by the guest and then emulates. If the guest is running in Little Endian order, or more generally in a different endian order than the host, the instruction needs to be byte-swapped before being emulated. This patch adds a helper routine which tests the endian order of the host and the guest in order to decide whether a byteswap is needed or not. It is then used to byteswap the last instruction of the guest into the endian order of the host before MMIO emulation is performed. Finally, kvmppc_handle_load() and kvmppc_handle_store() are modified to reverse the endianness of the MMIO if required. Signed-off-by: Cédric Le Goater [agraf: add booke handling] Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/kvm_book3s.h | 8 +++++++- arch/powerpc/include/asm/kvm_booke.h | 6 ++++++ arch/powerpc/include/asm/kvm_ppc.h | 7 ++++--- arch/powerpc/kvm/book3s_64_mmu_hv.c | 2 +- arch/powerpc/kvm/emulate.c | 1 - arch/powerpc/kvm/powerpc.c | 28 +++++++++++++++++++++++---- 6 files changed, 42 insertions(+), 10 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index f8b23201c105..1e9c26f45d18 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h @@ -264,6 +264,11 @@ static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) return vcpu->arch.pc; } +static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu) +{ + return (vcpu->arch.shared->msr & MSR_LE) != (MSR_KERNEL & MSR_LE); +} + static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc) { /* Load the instruction manually if it failed to do so in the @@ -271,7 +276,8 @@ static inline u32 kvmppc_get_last_inst_internal(struct kvm_vcpu *vcpu, ulong pc) if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false); - return vcpu->arch.last_inst; + return kvmppc_need_byteswap(vcpu) ? 
swab32(vcpu->arch.last_inst) : + vcpu->arch.last_inst; } static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h index dd8f61510dfd..80d46b5a7efb 100644 --- a/arch/powerpc/include/asm/kvm_booke.h +++ b/arch/powerpc/include/asm/kvm_booke.h @@ -63,6 +63,12 @@ static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu) return vcpu->arch.xer; } +static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu) +{ + /* XXX Would need to check TLB entry */ + return false; +} + static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) { return vcpu->arch.last_inst; diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index c8317fbf92c4..629277df4798 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -54,12 +54,13 @@ extern void kvmppc_handler_highmem(void); extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu); extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int rt, unsigned int bytes, - int is_bigendian); + int is_default_endian); extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int rt, unsigned int bytes, - int is_bigendian); + int is_default_endian); extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, - u64 val, unsigned int bytes, int is_bigendian); + u64 val, unsigned int bytes, + int is_default_endian); extern int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu); diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index f3ff587a8b7d..efb8aa544876 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -558,7 +558,7 @@ static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, * we just return and retry the instruction. */ - if (instruction_is_store(vcpu->arch.last_inst) != !!is_store) + if (instruction_is_store(kvmppc_get_last_inst(vcpu)) != !!is_store) return RESUME_GUEST; /* diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index 2f9a0873b44f..c2b887be2c29 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c @@ -219,7 +219,6 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) * lmw * stmw * - * XXX is_bigendian should depend on MMU mapping or MSR[LE] */ /* XXX Should probably auto-generate instruction decoding for a particular core * from opcode tables in the future. */ diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 7ca9e0a80499..026dfaaa4772 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -673,9 +673,19 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, } int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu, - unsigned int rt, unsigned int bytes, int is_bigendian) + unsigned int rt, unsigned int bytes, + int is_default_endian) { int idx, ret; + int is_bigendian; + + if (kvmppc_need_byteswap(vcpu)) { + /* Default endianness is "little endian". */ + is_bigendian = !is_default_endian; + } else { + /* Default endianness is "big endian". 
*/ + is_bigendian = is_default_endian; + } if (bytes > sizeof(run->mmio.data)) { printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, @@ -711,21 +721,31 @@ EXPORT_SYMBOL_GPL(kvmppc_handle_load); /* Same as above, but sign extends */ int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu, - unsigned int rt, unsigned int bytes, int is_bigendian) + unsigned int rt, unsigned int bytes, + int is_default_endian) { int r; vcpu->arch.mmio_sign_extend = 1; - r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian); + r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian); return r; } int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu, - u64 val, unsigned int bytes, int is_bigendian) + u64 val, unsigned int bytes, int is_default_endian) { void *data = run->mmio.data; int idx, ret; + int is_bigendian; + + if (kvmppc_need_byteswap(vcpu)) { + /* Default endianness is "little endian". */ + is_bigendian = !is_default_endian; + } else { + /* Default endianness is "big endian". */ + is_bigendian = is_default_endian; + } if (bytes > sizeof(run->mmio.data)) { printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, From 48eaef0518a565d3852e301c860e1af6a6db5a84 Mon Sep 17 00:00:00 2001 From: Andreas Schwab Date: Mon, 30 Dec 2013 15:36:56 +0100 Subject: [PATCH 24/45] KVM: PPC: Book3S HV: use xics_wake_cpu only when defined Signed-off-by: Andreas Schwab CC: stable@vger.kernel.org Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_hv.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 461f55566167..7e1813ceabc1 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -86,10 +86,13 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) /* CPU points to the first thread of the core */ if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) { +#ifdef CONFIG_KVM_XICS int real_cpu = cpu + vcpu->arch.ptid; if (paca[real_cpu].kvm_hstate.xics_phys) xics_wake_cpu(real_cpu); - else if (cpu_online(cpu)) + else +#endif + if (cpu_online(cpu)) smp_send_reschedule(cpu); } put_cpu(); @@ -1142,7 +1145,9 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu) smp_wmb(); #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) if (vcpu->arch.ptid) { +#ifdef CONFIG_KVM_XICS xics_wake_cpu(cpu); +#endif ++vc->n_woken; } #endif From 70713fe315ed14cd1bb07d1a7f33e973d136ae3d Mon Sep 17 00:00:00 2001 From: Mihai Caraman Date: Thu, 9 Jan 2014 17:01:05 +0200 Subject: [PATCH 25/45] KVM: PPC: e500: Fix bad address type in deliver_tlb_miss() Use gva_t instead of unsigned int for eaddr in deliver_tlb_miss(). 
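The fix matters because a guest effective address is a 64-bit quantity: passing it through an unsigned int parameter silently drops the upper 32 bits. A short userspace demonstration with a hypothetical address value:

#include <stdio.h>

typedef unsigned long gva_t; /* 64 bits on ppc64 */

static void deliver_with_uint(unsigned int eaddr)
{
	printf("truncated: 0x%x\n", eaddr); /* prints 0xdeadbeef */
}

static void deliver_with_gva(gva_t eaddr)
{
	printf("intact:    0x%lx\n", eaddr); /* full 64-bit value */
}

int main(void)
{
	gva_t ea = 0x12345678deadbeefUL; /* hypothetical guest address */

	deliver_with_uint(ea); /* upper half silently lost */
	deliver_with_gva(ea);
	return 0;
}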
Signed-off-by: Mihai Caraman CC: stable@vger.kernel.org Signed-off-by: Alexander Graf --- arch/powerpc/kvm/e500_mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c index ebca6b88ea5e..50860e919cb8 100644 --- a/arch/powerpc/kvm/e500_mmu.c +++ b/arch/powerpc/kvm/e500_mmu.c @@ -127,7 +127,7 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500, } static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, - unsigned int eaddr, int as) + gva_t eaddr, int as) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); unsigned int victim, tsized; From 6c85f52b10fd60e45c6e30c5b85d116406bd3c9b Mon Sep 17 00:00:00 2001 From: Scott Wood Date: Thu, 9 Jan 2014 19:18:40 -0600 Subject: [PATCH 26/45] kvm/ppc: IRQ disabling cleanup Simplify the handling of lazy EE by going directly from fully-enabled to hard-disabled. This replaces the lazy_irq_pending() check (including its misplaced kvm_guest_exit() call). As suggested by Tiejun Chen, move the interrupt disabling into kvmppc_prepare_to_enter() rather than have each caller do it. Also move the IRQ enabling on heavyweight exit into kvmppc_prepare_to_enter(). Signed-off-by: Scott Wood Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/kvm_ppc.h | 6 ++++++ arch/powerpc/kvm/book3s_pr.c | 14 ++++++-------- arch/powerpc/kvm/booke.c | 12 +++++------- arch/powerpc/kvm/powerpc.c | 26 +++++++++----------------- 4 files changed, 26 insertions(+), 32 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 629277df4798..fcd53f0d34ba 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h @@ -456,6 +456,12 @@ static inline void kvmppc_fix_ee_before_entry(void) trace_hardirqs_on(); #ifdef CONFIG_PPC64 + /* + * To avoid races, the caller must have gone directly from having + * interrupts fully-enabled to hard-disabled. + */ + WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS); + /* Only need to enable IRQs by hard enabling them after this */ local_paca->irq_happened = 0; local_paca->soft_enabled = 1; diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index aedba681bb94..e82fafdaf880 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -999,14 +999,14 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, * and if we really did time things so badly, then we just exit * again due to a host external interrupt. */ - local_irq_disable(); s = kvmppc_prepare_to_enter(vcpu); - if (s <= 0) { - local_irq_enable(); + if (s <= 0) r = s; - } else { + else { + /* interrupts now hard-disabled */ kvmppc_fix_ee_before_entry(); } + kvmppc_handle_lost_ext(vcpu); } @@ -1219,12 +1219,10 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) * really did time things so badly, then we just exit again due to * a host external interrupt. 
*/ - local_irq_disable(); ret = kvmppc_prepare_to_enter(vcpu); - if (ret <= 0) { - local_irq_enable(); + if (ret <= 0) goto out; - } + /* interrupts now hard-disabled */ /* Save FPU state in thread_struct */ if (current->thread.regs->msr & MSR_FP) diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 6a8c32ec4173..07b89c711898 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c @@ -643,7 +643,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) local_irq_enable(); kvm_vcpu_block(vcpu); clear_bit(KVM_REQ_UNHALT, &vcpu->requests); - local_irq_disable(); + hard_irq_disable(); kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); r = 1; @@ -688,13 +688,12 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) return -EINVAL; } - local_irq_disable(); s = kvmppc_prepare_to_enter(vcpu); if (s <= 0) { - local_irq_enable(); ret = s; goto out; } + /* interrupts now hard-disabled */ #ifdef CONFIG_PPC_FPU /* Save userspace FPU state in stack */ @@ -1187,12 +1186,11 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, * aren't already exiting to userspace for some other reason. */ if (!(r & RESUME_HOST)) { - local_irq_disable(); s = kvmppc_prepare_to_enter(vcpu); - if (s <= 0) { - local_irq_enable(); + if (s <= 0) r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); - } else { + else { + /* interrupts now hard-disabled */ kvmppc_fix_ee_before_entry(); } } diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 026dfaaa4772..3cf541a53e2a 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c @@ -68,14 +68,16 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) */ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) { - int r = 1; + int r; + + WARN_ON(irqs_disabled()); + hard_irq_disable(); - WARN_ON_ONCE(!irqs_disabled()); while (true) { if (need_resched()) { local_irq_enable(); cond_resched(); - local_irq_disable(); + hard_irq_disable(); continue; } @@ -101,7 +103,7 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) local_irq_enable(); trace_kvm_check_requests(vcpu); r = kvmppc_core_check_requests(vcpu); - local_irq_disable(); + hard_irq_disable(); if (r > 0) continue; break; @@ -113,22 +115,12 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu) continue; } -#ifdef CONFIG_PPC64 - /* lazy EE magic */ - hard_irq_disable(); - if (lazy_irq_pending()) { - /* Got an interrupt in between, try again */ - local_irq_enable(); - local_irq_disable(); - kvm_guest_exit(); - continue; - } -#endif - kvm_guest_enter(); - break; + return 1; } + /* return to host */ + local_irq_enable(); return r; } EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter); From eee7ff9d2cc0eaaa00496bdf4193144104c7dc63 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Wed, 8 Jan 2014 21:25:19 +1100 Subject: [PATCH 27/45] KVM: PPC: Book3S HV: Don't set DABR on POWER8 POWER8 doesn't have the DABR and DABRX registers; instead it has new DAWR/DAWRX registers, which will be handled in a later patch. 
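The FTR_SECTION guards added by this patch are resolved at boot by the feature-fixup code, but conceptually each behaves like a runtime CPU-feature test around the DABR accesses. A loose C rendering of that guard; the feature constant's value and the register stand-ins here are invented for the sketch:

#include <stdbool.h>

#define CPU_FTR_ARCH_207S (1u << 0) /* model bit, not the kernel's */

static unsigned int cpu_features; /* determined once at boot */

static bool cpu_has_feature(unsigned int f)
{
	return (cpu_features & f) != 0;
}

static unsigned long host_dabr, sprn_dabr; /* stand-in for the SPR */

static void save_host_dabr(void)
{
	/* POWER8 (ARCH_207S) has no DABR; the save is skipped there,
	 * and DAWR handling arrives in a later patch. */
	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		host_dabr = sprn_dabr; /* models mfspr SPRN_DABR */
}

int main(void)
{
	cpu_features = 0; /* pretend POWER7: the DABR path runs */
	save_host_dabr();
	return 0;
}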
Signed-off-by: Michael Neuling Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_hv_interrupts.S | 2 ++ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 13 ++++++++++--- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S index 928142c64cb0..00b7ed41ea17 100644 --- a/arch/powerpc/kvm/book3s_hv_interrupts.S +++ b/arch/powerpc/kvm/book3s_hv_interrupts.S @@ -57,9 +57,11 @@ BEGIN_FTR_SECTION std r3, HSTATE_DSCR(r13) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) +BEGIN_FTR_SECTION /* Save host DABR */ mfspr r3, SPRN_DABR std r3, HSTATE_DABR(r13) +END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) /* Hard-disable interrupts */ mfmsr r10 diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index acbd1d6afba0..66db71c9156a 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -61,11 +61,13 @@ kvmppc_call_hv_entry: /* Back from guest - restore host state and return to caller */ +BEGIN_FTR_SECTION /* Restore host DABR and DABRX */ ld r5,HSTATE_DABR(r13) li r6,7 mtspr SPRN_DABR,r5 mtspr SPRN_DABRX,r6 +END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) /* Restore SPRG3 */ ld r3,PACA_SPRG3(r13) @@ -284,15 +286,17 @@ kvmppc_hv_entry: std r0, PPC_LR_STKOFF(r1) stdu r1, -112(r1) +BEGIN_FTR_SECTION /* Set partition DABR */ /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ li r5,3 ld r6,VCPU_DABR(r4) mtspr SPRN_DABRX,r5 mtspr SPRN_DABR,r6 -BEGIN_FTR_SECTION + BEGIN_FTR_SECTION_NESTED(89) isync -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) + END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89) +END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) /* Load guest PMU registers */ /* R4 is live here (vcpu pointer) */ @@ -1609,6 +1613,9 @@ ignore_hdec: b fast_guest_return _GLOBAL(kvmppc_h_set_dabr) +BEGIN_FTR_SECTION + b 2f +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) std r4,VCPU_DABR(r3) /* Work around P7 bug where DABR can get corrupted on mtspr */ 1: mtspr SPRN_DABR,r4 @@ -1616,7 +1623,7 @@ _GLOBAL(kvmppc_h_set_dabr) cmpd r4, r5 bne 1b isync - li r3,0 +2: li r3,0 blr _GLOBAL(kvmppc_h_cede) From e0b7ec058c0eb7ba8d5d937d81de2bd16db6970e Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 8 Jan 2014 21:25:20 +1100 Subject: [PATCH 28/45] KVM: PPC: Book3S HV: Align physical and virtual CPU thread numbers On a threaded processor such as POWER7, we group VCPUs into virtual cores and arrange that the VCPUs in a virtual core run on the same physical core. Currently we don't enforce any correspondence between virtual thread numbers within a virtual core and physical thread numbers. Physical threads are allocated starting at 0 on a first-come first-served basis to runnable virtual threads (VCPUs). POWER8 implements a new "msgsndp" instruction which guest kernels can use to interrupt other threads in the same core or sub-core. Since the instruction takes the destination physical thread ID as a parameter, it becomes necessary to align the physical thread IDs with the virtual thread IDs, that is, to make sure virtual thread N within a virtual core always runs on physical thread N. This means that it's possible that thread 0, which is where we call __kvmppc_vcore_entry, may end up running some other vcpu than the one whose task called kvmppc_run_core(), or it may end up running no vcpu at all, if for example thread 0 of the virtual core is currently executing in userspace. 
However, we do need thread 0 to be responsible for switching the MMU -- a previous version of this patch that had other threads switching the MMU was found to be responsible for occasional memory corruption and machine check interrupts in the guest on POWER7 machines. To accommodate this, we no longer pass the vcpu pointer to __kvmppc_vcore_entry, but instead let the assembly code load it from the PACA. Since the assembly code will need to know the kvm pointer and the thread ID for threads which don't have a vcpu, we move the thread ID into the PACA and we add a kvm pointer to the virtual core structure. In the case where thread 0 has no vcpu to run, it still calls into kvmppc_hv_entry in order to do the MMU switch, and then naps until either its vcpu is ready to run in the guest, or some other thread needs to exit the guest. In the latter case, thread 0 jumps to the code that switches the MMU back to the host. This control flow means that now we switch the MMU before loading any guest vcpu state. Similarly, on guest exit we now save all the guest vcpu state before switching the MMU back to the host. This has required substantial code movement, making the diff rather large. Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/kvm_book3s_asm.h | 1 + arch/powerpc/include/asm/kvm_host.h | 2 + arch/powerpc/kernel/asm-offsets.c | 3 +- arch/powerpc/kvm/book3s_hv.c | 46 +- arch/powerpc/kvm/book3s_hv_interrupts.S | 6 +- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 1052 +++++++++++---------- 6 files changed, 585 insertions(+), 525 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 0bd9348a4db9..490b34f5d6bf 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h @@ -87,6 +87,7 @@ struct kvmppc_host_state { u8 hwthread_req; u8 hwthread_state; u8 host_ipi; + u8 ptid; struct kvm_vcpu *kvm_vcpu; struct kvmppc_vcore *kvm_vcore; unsigned long xics_phys; diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 2c2ca5faf7f2..b850544dbc3f 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -288,6 +288,7 @@ struct kvmppc_vcore { int n_woken; int nap_count; int napping_threads; + int first_vcpuid; u16 pcpu; u16 last_cpu; u8 vcore_state; @@ -298,6 +299,7 @@ struct kvmppc_vcore { u64 stolen_tb; u64 preempt_tb; struct kvm_vcpu *runner; + struct kvm *kvm; u64 tb_offset; /* guest timebase - host timebase */ ulong lpcr; u32 arch_compat; diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 5e64c3d2149f..332ae66883e4 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -506,7 +506,6 @@ int main(void) DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar)); DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst)); DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap)); - DEFINE(VCPU_PTID, offsetof(struct kvm_vcpu, arch.ptid)); DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar)); DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr)); DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1)); @@ -514,6 +513,7 @@ int main(void) DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count)); DEFINE(VCORE_IN_GUEST, offsetof(struct kvmppc_vcore, in_guest)); DEFINE(VCORE_NAPPING_THREADS, offsetof(struct kvmppc_vcore, napping_threads)); + DEFINE(VCORE_KVM, offsetof(struct kvmppc_vcore, kvm)); 
DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset)); DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr)); DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr)); @@ -583,6 +583,7 @@ int main(void) HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys); HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr); HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi); + HSTATE_FIELD(HSTATE_PTID, ptid); HSTATE_FIELD(HSTATE_MMCR, host_mmcr); HSTATE_FIELD(HSTATE_PMC, host_pmc); HSTATE_FIELD(HSTATE_PURR, host_purr); diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 7e1813ceabc1..7da53cd215db 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -990,6 +990,8 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, init_waitqueue_head(&vcore->wq); vcore->preempt_tb = TB_NIL; vcore->lpcr = kvm->arch.lpcr; + vcore->first_vcpuid = core * threads_per_core; + vcore->kvm = kvm; } kvm->arch.vcores[core] = vcore; kvm->arch.online_vcores++; @@ -1003,6 +1005,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, ++vcore->num_threads; spin_unlock(&vcore->lock); vcpu->arch.vcore = vcore; + vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; vcpu->arch.cpu_type = KVM_CPU_3S_64; kvmppc_sanity_check(vcpu); @@ -1066,7 +1069,7 @@ static void kvmppc_end_cede(struct kvm_vcpu *vcpu) } } -extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu); +extern void __kvmppc_vcore_entry(void); static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, struct kvm_vcpu *vcpu) @@ -1140,15 +1143,16 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu) tpaca = &paca[cpu]; tpaca->kvm_hstate.kvm_vcpu = vcpu; tpaca->kvm_hstate.kvm_vcore = vc; - tpaca->kvm_hstate.napping = 0; + tpaca->kvm_hstate.ptid = vcpu->arch.ptid; vcpu->cpu = vc->pcpu; smp_wmb(); #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) - if (vcpu->arch.ptid) { + if (cpu != smp_processor_id()) { #ifdef CONFIG_KVM_XICS xics_wake_cpu(cpu); #endif - ++vc->n_woken; + if (vcpu->arch.ptid) + ++vc->n_woken; } #endif } @@ -1205,10 +1209,10 @@ static int on_primary_thread(void) */ static void kvmppc_run_core(struct kvmppc_vcore *vc) { - struct kvm_vcpu *vcpu, *vcpu0, *vnext; + struct kvm_vcpu *vcpu, *vnext; long ret; u64 now; - int ptid, i, need_vpa_update; + int i, need_vpa_update; int srcu_idx; struct kvm_vcpu *vcpus_to_update[threads_per_core]; @@ -1245,25 +1249,6 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc) spin_lock(&vc->lock); } - /* - * Assign physical thread IDs, first to non-ceded vcpus - * and then to ceded ones. - */ - ptid = 0; - vcpu0 = NULL; - list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { - if (!vcpu->arch.ceded) { - if (!ptid) - vcpu0 = vcpu; - vcpu->arch.ptid = ptid++; - } - } - if (!vcpu0) - goto out; /* nothing to run; should never happen */ - list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) - if (vcpu->arch.ceded) - vcpu->arch.ptid = ptid++; - /* * Make sure we are running on thread 0, and that * secondary threads are offline. 
@@ -1280,15 +1265,19 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc) kvmppc_create_dtl_entry(vcpu, vc); } + /* Set this explicitly in case thread 0 doesn't have a vcpu */ + get_paca()->kvm_hstate.kvm_vcore = vc; + get_paca()->kvm_hstate.ptid = 0; + vc->vcore_state = VCORE_RUNNING; preempt_disable(); spin_unlock(&vc->lock); kvm_guest_enter(); - srcu_idx = srcu_read_lock(&vcpu0->kvm->srcu); + srcu_idx = srcu_read_lock(&vc->kvm->srcu); - __kvmppc_vcore_entry(NULL, vcpu0); + __kvmppc_vcore_entry(); spin_lock(&vc->lock); /* disable sending of IPIs on virtual external irqs */ @@ -1303,7 +1292,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc) vc->vcore_state = VCORE_EXITING; spin_unlock(&vc->lock); - srcu_read_unlock(&vcpu0->kvm->srcu, srcu_idx); + srcu_read_unlock(&vc->kvm->srcu, srcu_idx); /* make sure updates to secondary vcpu structs are visible now */ smp_mb(); @@ -1411,7 +1400,6 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) if (!signal_pending(current)) { if (vc->vcore_state == VCORE_RUNNING && VCORE_EXIT_COUNT(vc) == 0) { - vcpu->arch.ptid = vc->n_runnable - 1; kvmppc_create_dtl_entry(vcpu, vc); kvmppc_start_thread(vcpu); } else if (vc->vcore_state == VCORE_SLEEPING) { diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S index 00b7ed41ea17..e873796b1a29 100644 --- a/arch/powerpc/kvm/book3s_hv_interrupts.S +++ b/arch/powerpc/kvm/book3s_hv_interrupts.S @@ -35,7 +35,7 @@ ****************************************************************************/ /* Registers: - * r4: vcpu pointer + * none */ _GLOBAL(__kvmppc_vcore_entry) @@ -71,7 +71,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) mtmsrd r10,1 /* Save host PMU registers */ - /* R4 is live here (vcpu pointer) but not r3 or r5 */ li r3, 1 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ mfspr r7, SPRN_MMCR0 /* save MMCR0 */ @@ -136,16 +135,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) * enters the guest with interrupts enabled. */ BEGIN_FTR_SECTION + ld r4, HSTATE_KVM_VCPU(r13) ld r0, VCPU_PENDING_EXC(r4) li r7, (1 << BOOK3S_IRQPRIO_EXTERNAL) oris r7, r7, (1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h and. r0, r0, r7 beq 32f - mr r31, r4 lhz r3, PACAPACAINDEX(r13) bl smp_send_reschedule nop - mr r4, r31 32: END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) #endif /* CONFIG_SMP */ diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 66db71c9156a..8bbe91bdb6da 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -33,6 +33,10 @@ #error Need to fix lppaca and SLB shadow accesses in little endian mode #endif +/* Values in HSTATE_NAPPING(r13) */ +#define NAPPING_CEDE 1 +#define NAPPING_NOVCPU 2 + /* * Call kvmppc_hv_entry in real mode. * Must be called with interrupts hard-disabled. @@ -57,6 +61,7 @@ _GLOBAL(kvmppc_hv_entry_trampoline) RFI kvmppc_call_hv_entry: + ld r4, HSTATE_KVM_VCPU(r13) bl kvmppc_hv_entry /* Back from guest - restore host state and return to caller */ @@ -73,15 +78,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) ld r3,PACA_SPRG3(r13) mtspr SPRN_SPRG3,r3 - /* - * Reload DEC. HDEC interrupts were disabled when - * we reloaded the host's LPCR value. - */ - ld r3, HSTATE_DECEXP(r13) - mftb r4 - subf r4, r4, r3 - mtspr SPRN_DEC, r4 - /* Reload the host's PMU registers */ ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */ lbz r4, LPPACA_PMCINUSE(r3) @@ -116,6 +112,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) isync 23: + /* + * Reload DEC. 
HDEC interrupts were disabled when + * we reloaded the host's LPCR value. + */ + ld r3, HSTATE_DECEXP(r13) + mftb r4 + subf r4, r4, r3 + mtspr SPRN_DEC, r4 + /* * For external and machine check interrupts, we need * to call the Linux handler to process the interrupt. @@ -156,15 +161,82 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 13: b machine_check_fwnmi +kvmppc_primary_no_guest: + /* We handle this much like a ceded vcpu */ + /* set our bit in napping_threads */ + ld r5, HSTATE_KVM_VCORE(r13) + lbz r7, HSTATE_PTID(r13) + li r0, 1 + sld r0, r0, r7 + addi r6, r5, VCORE_NAPPING_THREADS +1: lwarx r3, 0, r6 + or r3, r3, r0 + stwcx. r3, 0, r6 + bne 1b + /* order napping_threads update vs testing entry_exit_count */ + isync + li r12, 0 + lwz r7, VCORE_ENTRY_EXIT(r5) + cmpwi r7, 0x100 + bge kvm_novcpu_exit /* another thread already exiting */ + li r3, NAPPING_NOVCPU + stb r3, HSTATE_NAPPING(r13) + li r3, 1 + stb r3, HSTATE_HWTHREAD_REQ(r13) + + b kvm_do_nap + +kvm_novcpu_wakeup: + ld r1, HSTATE_HOST_R1(r13) + ld r5, HSTATE_KVM_VCORE(r13) + li r0, 0 + stb r0, HSTATE_NAPPING(r13) + stb r0, HSTATE_HWTHREAD_REQ(r13) + + /* see if any other thread is already exiting */ + li r12, 0 + lwz r0, VCORE_ENTRY_EXIT(r5) + cmpwi r0, 0x100 + bge kvm_novcpu_exit + + /* clear our bit in napping_threads */ + lbz r7, HSTATE_PTID(r13) + li r0, 1 + sld r0, r0, r7 + addi r6, r5, VCORE_NAPPING_THREADS +4: lwarx r3, 0, r6 + andc r3, r3, r0 + stwcx. r3, 0, r6 + bne 4b + + /* Check the wake reason in SRR1 to see why we got here */ + mfspr r3, SPRN_SRR1 + rlwinm r3, r3, 44-31, 0x7 /* extract wake reason field */ + cmpwi r3, 4 /* was it an external interrupt? */ + bne kvm_novcpu_exit /* if not, exit the guest */ + + /* extern interrupt - read and handle it */ + li r12, BOOK3S_INTERRUPT_EXTERNAL + bl kvmppc_read_intr + cmpdi r3, 0 + bge kvm_novcpu_exit + li r12, 0 + + /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */ + ld r4, HSTATE_KVM_VCPU(r13) + cmpdi r4, 0 + bne kvmppc_got_guest + +kvm_novcpu_exit: + b hdec_soon + /* - * We come in here when wakened from nap mode on a secondary hw thread. + * We come in here when wakened from nap mode. * Relocation is off and most register values are lost. * r13 points to the PACA. */ .globl kvm_start_guest kvm_start_guest: - ld r1,PACAEMERGSP(r13) - subi r1,r1,STACK_FRAME_OVERHEAD ld r2,PACATOC(r13) li r0,KVM_HWTHREAD_IN_KVM @@ -176,8 +248,13 @@ kvm_start_guest: /* were we napping due to cede? 
*/ lbz r0,HSTATE_NAPPING(r13) - cmpwi r0,0 - bne kvm_end_cede + cmpwi r0,NAPPING_CEDE + beq kvm_end_cede + cmpwi r0,NAPPING_NOVCPU + beq kvm_novcpu_wakeup + + ld r1,PACAEMERGSP(r13) + subi r1,r1,STACK_FRAME_OVERHEAD /* * We weren't napping due to cede, so this must be a secondary @@ -220,7 +297,13 @@ kvm_start_guest: stw r8,HSTATE_SAVED_XIRR(r13) b kvm_no_guest -30: bl kvmppc_hv_entry +30: + /* Set HSTATE_DSCR(r13) to something sensible */ + LOAD_REG_ADDR(r6, dscr_default) + ld r6, 0(r6) + std r6, HSTATE_DSCR(r13) + + bl kvmppc_hv_entry /* Back from the guest, go back to nap */ /* Clear our vcpu pointer so we don't come back in early */ @@ -252,6 +335,7 @@ kvm_start_guest: kvm_no_guest: li r0, KVM_HWTHREAD_IN_NAP stb r0, HSTATE_HWTHREAD_STATE(r13) +kvm_do_nap: li r3, LPCR_PECE0 mfspr r4, SPRN_LPCR rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 @@ -276,7 +360,7 @@ kvmppc_hv_entry: /* Required state: * - * R4 = vcpu pointer + * R4 = vcpu pointer (or NULL) * MSR = ~IR|DR * R13 = PACA * R1 = host R1 @@ -286,6 +370,248 @@ kvmppc_hv_entry: std r0, PPC_LR_STKOFF(r1) stdu r1, -112(r1) + /* Save R1 in the PACA */ + std r1, HSTATE_HOST_R1(r13) + + li r6, KVM_GUEST_MODE_HOST_HV + stb r6, HSTATE_IN_GUEST(r13) + + /* Clear out SLB */ + li r6,0 + slbmte r6,r6 + slbia + ptesync + +BEGIN_FTR_SECTION + b 30f +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) + /* + * POWER7 host -> guest partition switch code. + * We don't have to lock against concurrent tlbies, + * but we do have to coordinate across hardware threads. + */ + /* Increment entry count iff exit count is zero. */ + ld r5,HSTATE_KVM_VCORE(r13) + addi r9,r5,VCORE_ENTRY_EXIT +21: lwarx r3,0,r9 + cmpwi r3,0x100 /* any threads starting to exit? */ + bge secondary_too_late /* if so we're too late to the party */ + addi r3,r3,1 + stwcx. r3,0,r9 + bne 21b + + /* Primary thread switches to guest partition. */ + ld r9,VCORE_KVM(r5) /* pointer to struct kvm */ + lbz r6,HSTATE_PTID(r13) + cmpwi r6,0 + bne 20f + ld r6,KVM_SDR1(r9) + lwz r7,KVM_LPID(r9) + li r0,LPID_RSVD /* switch to reserved LPID */ + mtspr SPRN_LPID,r0 + ptesync + mtspr SPRN_SDR1,r6 /* switch to partition page table */ + mtspr SPRN_LPID,r7 + isync + + /* See if we need to flush the TLB */ + lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */ + clrldi r7,r6,64-6 /* extract bit number (6 bits) */ + srdi r6,r6,6 /* doubleword number */ + sldi r6,r6,3 /* address offset */ + add r6,r6,r9 + addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */ + li r0,1 + sld r0,r0,r7 + ld r7,0(r6) + and. r7,r7,r0 + beq 22f +23: ldarx r7,0,r6 /* if set, clear the bit */ + andc r7,r7,r0 + stdcx. 
r7,0,r6 + bne 23b + li r6,128 /* and flush the TLB */ + mtctr r6 + li r7,0x800 /* IS field = 0b10 */ + ptesync +28: tlbiel r7 + addi r7,r7,0x1000 + bdnz 28b + ptesync + + /* Add timebase offset onto timebase */ +22: ld r8,VCORE_TB_OFFSET(r5) + cmpdi r8,0 + beq 37f + mftb r6 /* current host timebase */ + add r8,r8,r6 + mtspr SPRN_TBU40,r8 /* update upper 40 bits */ + mftb r7 /* check if lower 24 bits overflowed */ + clrldi r6,r6,40 + clrldi r7,r7,40 + cmpld r7,r6 + bge 37f + addis r8,r8,0x100 /* if so, increment upper 40 bits */ + mtspr SPRN_TBU40,r8 + + /* Load guest PCR value to select appropriate compat mode */ +37: ld r7, VCORE_PCR(r5) + cmpdi r7, 0 + beq 38f + mtspr SPRN_PCR, r7 +38: + li r0,1 + stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */ + b 10f + + /* Secondary threads wait for primary to have done partition switch */ +20: lbz r0,VCORE_IN_GUEST(r5) + cmpwi r0,0 + beq 20b + + /* Set LPCR and RMOR. */ +10: ld r8,VCORE_LPCR(r5) + mtspr SPRN_LPCR,r8 + ld r8,KVM_RMOR(r9) + mtspr SPRN_RMOR,r8 + isync + + /* Check if HDEC expires soon */ + mfspr r3,SPRN_HDEC + cmpwi r3,512 /* 1 microsecond */ + li r12,BOOK3S_INTERRUPT_HV_DECREMENTER + blt hdec_soon + b 31f + + /* + * PPC970 host -> guest partition switch code. + * We have to lock against concurrent tlbies, + * using native_tlbie_lock to lock against host tlbies + * and kvm->arch.tlbie_lock to lock against guest tlbies. + * We also have to invalidate the TLB since its + * entries aren't tagged with the LPID. + */ +30: ld r5,HSTATE_KVM_VCORE(r13) + ld r9,VCORE_KVM(r5) /* pointer to struct kvm */ + + /* first take native_tlbie_lock */ + .section ".toc","aw" +toc_tlbie_lock: + .tc native_tlbie_lock[TC],native_tlbie_lock + .previous + ld r3,toc_tlbie_lock@toc(2) +#ifdef __BIG_ENDIAN__ + lwz r8,PACA_LOCK_TOKEN(r13) +#else + lwz r8,PACAPACAINDEX(r13) +#endif +24: lwarx r0,0,r3 + cmpwi r0,0 + bne 24b + stwcx. r8,0,r3 + bne 24b + isync + + ld r5,HSTATE_KVM_VCORE(r13) + ld r7,VCORE_LPCR(r5) /* use vcore->lpcr to store HID4 */ + li r0,0x18f + rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */ + or r0,r7,r0 + ptesync + sync + mtspr SPRN_HID4,r0 /* switch to reserved LPID */ + isync + li r0,0 + stw r0,0(r3) /* drop native_tlbie_lock */ + + /* invalidate the whole TLB */ + li r0,256 + mtctr r0 + li r6,0 +25: tlbiel r6 + addi r6,r6,0x1000 + bdnz 25b + ptesync + + /* Take the guest's tlbie_lock */ + addi r3,r9,KVM_TLBIE_LOCK +24: lwarx r0,0,r3 + cmpwi r0,0 + bne 24b + stwcx. r8,0,r3 + bne 24b + isync + ld r6,KVM_SDR1(r9) + mtspr SPRN_SDR1,r6 /* switch to partition page table */ + + /* Set up HID4 with the guest's LPID etc. */ + sync + mtspr SPRN_HID4,r7 + isync + + /* drop the guest's tlbie_lock */ + li r0,0 + stw r0,0(r3) + + /* Check if HDEC expires soon */ + mfspr r3,SPRN_HDEC + cmpwi r3,10 + li r12,BOOK3S_INTERRUPT_HV_DECREMENTER + blt hdec_soon + + /* Enable HDEC interrupts */ + mfspr r0,SPRN_HID0 + li r3,1 + rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1 + sync + mtspr SPRN_HID0,r0 + mfspr r0,SPRN_HID0 + mfspr r0,SPRN_HID0 + mfspr r0,SPRN_HID0 + mfspr r0,SPRN_HID0 + mfspr r0,SPRN_HID0 + mfspr r0,SPRN_HID0 +31: + /* Do we have a guest vcpu to run? 
*/ + cmpdi r4, 0 + beq kvmppc_primary_no_guest +kvmppc_got_guest: + + /* Load up guest SLB entries */ + lwz r5,VCPU_SLB_MAX(r4) + cmpwi r5,0 + beq 9f + mtctr r5 + addi r6,r4,VCPU_SLB +1: ld r8,VCPU_SLB_E(r6) + ld r9,VCPU_SLB_V(r6) + slbmte r9,r8 + addi r6,r6,VCPU_SLB_SIZE + bdnz 1b +9: + /* Increment yield count if they have a VPA */ + ld r3, VCPU_VPA(r4) + cmpdi r3, 0 + beq 25f + lwz r5, LPPACA_YIELDCOUNT(r3) + addi r5, r5, 1 + stw r5, LPPACA_YIELDCOUNT(r3) + li r6, 1 + stb r6, VCPU_VPA_DIRTY(r4) +25: + +BEGIN_FTR_SECTION + /* Save purr/spurr */ + mfspr r5,SPRN_PURR + mfspr r6,SPRN_SPURR + std r5,HSTATE_PURR(r13) + std r6,HSTATE_SPURR(r13) + ld r7,VCPU_PURR(r4) + ld r8,VCPU_SPURR(r4) + mtspr SPRN_PURR,r7 + mtspr SPRN_SPURR,r8 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) + BEGIN_FTR_SECTION /* Set partition DABR */ /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ @@ -382,18 +708,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) mtspr SPRN_SPRG2, r7 mtspr SPRN_SPRG3, r8 - /* Save R1 in the PACA */ - std r1, HSTATE_HOST_R1(r13) - /* Load up DAR and DSISR */ ld r5, VCPU_DAR(r4) lwz r6, VCPU_DSISR(r4) mtspr SPRN_DAR, r5 mtspr SPRN_DSISR, r6 - li r6, KVM_GUEST_MODE_HOST_HV - stb r6, HSTATE_IN_GUEST(r13) - BEGIN_FTR_SECTION /* Restore AMR and UAMOR, set AMOR to all 1s */ ld r5,VCPU_AMR(r4) @@ -404,236 +724,6 @@ BEGIN_FTR_SECTION mtspr SPRN_AMOR,r7 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) - /* Clear out SLB */ - li r6,0 - slbmte r6,r6 - slbia - ptesync - -BEGIN_FTR_SECTION - b 30f -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) - /* - * POWER7 host -> guest partition switch code. - * We don't have to lock against concurrent tlbies, - * but we do have to coordinate across hardware threads. - */ - /* Increment entry count iff exit count is zero. */ - ld r5,HSTATE_KVM_VCORE(r13) - addi r9,r5,VCORE_ENTRY_EXIT -21: lwarx r3,0,r9 - cmpwi r3,0x100 /* any threads starting to exit? */ - bge secondary_too_late /* if so we're too late to the party */ - addi r3,r3,1 - stwcx. r3,0,r9 - bne 21b - - /* Primary thread switches to guest partition. */ - ld r9,VCPU_KVM(r4) /* pointer to struct kvm */ - lwz r6,VCPU_PTID(r4) - cmpwi r6,0 - bne 20f - ld r6,KVM_SDR1(r9) - lwz r7,KVM_LPID(r9) - li r0,LPID_RSVD /* switch to reserved LPID */ - mtspr SPRN_LPID,r0 - ptesync - mtspr SPRN_SDR1,r6 /* switch to partition page table */ - mtspr SPRN_LPID,r7 - isync - - /* See if we need to flush the TLB */ - lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */ - clrldi r7,r6,64-6 /* extract bit number (6 bits) */ - srdi r6,r6,6 /* doubleword number */ - sldi r6,r6,3 /* address offset */ - add r6,r6,r9 - addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */ - li r0,1 - sld r0,r0,r7 - ld r7,0(r6) - and. r7,r7,r0 - beq 22f -23: ldarx r7,0,r6 /* if set, clear the bit */ - andc r7,r7,r0 - stdcx. 
r7,0,r6 - bne 23b - li r6,128 /* and flush the TLB */ - mtctr r6 - li r7,0x800 /* IS field = 0b10 */ - ptesync -28: tlbiel r7 - addi r7,r7,0x1000 - bdnz 28b - ptesync - - /* Add timebase offset onto timebase */ -22: ld r8,VCORE_TB_OFFSET(r5) - cmpdi r8,0 - beq 37f - mftb r6 /* current host timebase */ - add r8,r8,r6 - mtspr SPRN_TBU40,r8 /* update upper 40 bits */ - mftb r7 /* check if lower 24 bits overflowed */ - clrldi r6,r6,40 - clrldi r7,r7,40 - cmpld r7,r6 - bge 37f - addis r8,r8,0x100 /* if so, increment upper 40 bits */ - mtspr SPRN_TBU40,r8 - - /* Load guest PCR value to select appropriate compat mode */ -37: ld r7, VCORE_PCR(r5) - cmpdi r7, 0 - beq 38f - mtspr SPRN_PCR, r7 -38: - li r0,1 - stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */ - b 10f - - /* Secondary threads wait for primary to have done partition switch */ -20: lbz r0,VCORE_IN_GUEST(r5) - cmpwi r0,0 - beq 20b - - /* Set LPCR and RMOR. */ -10: ld r8,VCORE_LPCR(r5) - mtspr SPRN_LPCR,r8 - ld r8,KVM_RMOR(r9) - mtspr SPRN_RMOR,r8 - isync - - /* Increment yield count if they have a VPA */ - ld r3, VCPU_VPA(r4) - cmpdi r3, 0 - beq 25f - lwz r5, LPPACA_YIELDCOUNT(r3) - addi r5, r5, 1 - stw r5, LPPACA_YIELDCOUNT(r3) - li r6, 1 - stb r6, VCPU_VPA_DIRTY(r4) -25: - /* Check if HDEC expires soon */ - mfspr r3,SPRN_HDEC - cmpwi r3,10 - li r12,BOOK3S_INTERRUPT_HV_DECREMENTER - mr r9,r4 - blt hdec_soon - - /* Save purr/spurr */ - mfspr r5,SPRN_PURR - mfspr r6,SPRN_SPURR - std r5,HSTATE_PURR(r13) - std r6,HSTATE_SPURR(r13) - ld r7,VCPU_PURR(r4) - ld r8,VCPU_SPURR(r4) - mtspr SPRN_PURR,r7 - mtspr SPRN_SPURR,r8 - b 31f - - /* - * PPC970 host -> guest partition switch code. - * We have to lock against concurrent tlbies, - * using native_tlbie_lock to lock against host tlbies - * and kvm->arch.tlbie_lock to lock against guest tlbies. - * We also have to invalidate the TLB since its - * entries aren't tagged with the LPID. - */ -30: ld r9,VCPU_KVM(r4) /* pointer to struct kvm */ - - /* first take native_tlbie_lock */ - .section ".toc","aw" -toc_tlbie_lock: - .tc native_tlbie_lock[TC],native_tlbie_lock - .previous - ld r3,toc_tlbie_lock@toc(2) -#ifdef __BIG_ENDIAN__ - lwz r8,PACA_LOCK_TOKEN(r13) -#else - lwz r8,PACAPACAINDEX(r13) -#endif -24: lwarx r0,0,r3 - cmpwi r0,0 - bne 24b - stwcx. r8,0,r3 - bne 24b - isync - - ld r5,HSTATE_KVM_VCORE(r13) - ld r7,VCORE_LPCR(r5) /* use vcore->lpcr to store HID4 */ - li r0,0x18f - rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */ - or r0,r7,r0 - ptesync - sync - mtspr SPRN_HID4,r0 /* switch to reserved LPID */ - isync - li r0,0 - stw r0,0(r3) /* drop native_tlbie_lock */ - - /* invalidate the whole TLB */ - li r0,256 - mtctr r0 - li r6,0 -25: tlbiel r6 - addi r6,r6,0x1000 - bdnz 25b - ptesync - - /* Take the guest's tlbie_lock */ - addi r3,r9,KVM_TLBIE_LOCK -24: lwarx r0,0,r3 - cmpwi r0,0 - bne 24b - stwcx. r8,0,r3 - bne 24b - isync - ld r6,KVM_SDR1(r9) - mtspr SPRN_SDR1,r6 /* switch to partition page table */ - - /* Set up HID4 with the guest's LPID etc. 
*/ - sync - mtspr SPRN_HID4,r7 - isync - - /* drop the guest's tlbie_lock */ - li r0,0 - stw r0,0(r3) - - /* Check if HDEC expires soon */ - mfspr r3,SPRN_HDEC - cmpwi r3,10 - li r12,BOOK3S_INTERRUPT_HV_DECREMENTER - mr r9,r4 - blt hdec_soon - - /* Enable HDEC interrupts */ - mfspr r0,SPRN_HID0 - li r3,1 - rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1 - sync - mtspr SPRN_HID0,r0 - mfspr r0,SPRN_HID0 - mfspr r0,SPRN_HID0 - mfspr r0,SPRN_HID0 - mfspr r0,SPRN_HID0 - mfspr r0,SPRN_HID0 - mfspr r0,SPRN_HID0 - - /* Load up guest SLB entries */ -31: lwz r5,VCPU_SLB_MAX(r4) - cmpwi r5,0 - beq 9f - mtctr r5 - addi r6,r4,VCPU_SLB -1: ld r8,VCPU_SLB_E(r6) - ld r9,VCPU_SLB_V(r6) - slbmte r9,r8 - addi r6,r6,VCPU_SLB_SIZE - bdnz 1b -9: - /* Restore state of CTRL run bit; assume 1 on entry */ lwz r5,VCPU_CTRL(r4) andi. r5,r5,1 @@ -984,226 +1074,6 @@ BEGIN_FTR_SECTION mtspr SPRN_SPURR,r4 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201) - /* Clear out SLB */ - li r5,0 - slbmte r5,r5 - slbia - ptesync - -hdec_soon: /* r9 = vcpu, r12 = trap, r13 = paca */ -BEGIN_FTR_SECTION - b 32f -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) - /* - * POWER7 guest -> host partition switch code. - * We don't have to lock against tlbies but we do - * have to coordinate the hardware threads. - */ - /* Increment the threads-exiting-guest count in the 0xff00 - bits of vcore->entry_exit_count */ - lwsync - ld r5,HSTATE_KVM_VCORE(r13) - addi r6,r5,VCORE_ENTRY_EXIT -41: lwarx r3,0,r6 - addi r0,r3,0x100 - stwcx. r0,0,r6 - bne 41b - lwsync - - /* - * At this point we have an interrupt that we have to pass - * up to the kernel or qemu; we can't handle it in real mode. - * Thus we have to do a partition switch, so we have to - * collect the other threads, if we are the first thread - * to take an interrupt. To do this, we set the HDEC to 0, - * which causes an HDEC interrupt in all threads within 2ns - * because the HDEC register is shared between all 4 threads. - * However, we don't need to bother if this is an HDEC - * interrupt, since the other threads will already be on their - * way here in that case. - */ - cmpwi r3,0x100 /* Are we the first here? */ - bge 43f - cmpwi r3,1 /* Are any other threads in the guest? */ - ble 43f - cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER - beq 40f - li r0,0 - mtspr SPRN_HDEC,r0 -40: - /* - * Send an IPI to any napping threads, since an HDEC interrupt - * doesn't wake CPUs up from nap. - */ - lwz r3,VCORE_NAPPING_THREADS(r5) - lwz r4,VCPU_PTID(r9) - li r0,1 - sld r0,r0,r4 - andc. r3,r3,r0 /* no sense IPI'ing ourselves */ - beq 43f - mulli r4,r4,PACA_SIZE /* get paca for thread 0 */ - subf r6,r4,r13 -42: andi. r0,r3,1 - beq 44f - ld r8,HSTATE_XICS_PHYS(r6) /* get thread's XICS reg addr */ - li r0,IPI_PRIORITY - li r7,XICS_MFRR - stbcix r0,r7,r8 /* trigger the IPI */ -44: srdi. 
r3,r3,1 - addi r6,r6,PACA_SIZE - bne 42b - - /* Secondary threads wait for primary to do partition switch */ -43: ld r4,VCPU_KVM(r9) /* pointer to struct kvm */ - ld r5,HSTATE_KVM_VCORE(r13) - lwz r3,VCPU_PTID(r9) - cmpwi r3,0 - beq 15f - HMT_LOW -13: lbz r3,VCORE_IN_GUEST(r5) - cmpwi r3,0 - bne 13b - HMT_MEDIUM - b 16f - - /* Primary thread waits for all the secondaries to exit guest */ -15: lwz r3,VCORE_ENTRY_EXIT(r5) - srwi r0,r3,8 - clrldi r3,r3,56 - cmpw r3,r0 - bne 15b - isync - - /* Primary thread switches back to host partition */ - ld r6,KVM_HOST_SDR1(r4) - lwz r7,KVM_HOST_LPID(r4) - li r8,LPID_RSVD /* switch to reserved LPID */ - mtspr SPRN_LPID,r8 - ptesync - mtspr SPRN_SDR1,r6 /* switch to partition page table */ - mtspr SPRN_LPID,r7 - isync - - /* Subtract timebase offset from timebase */ - ld r8,VCORE_TB_OFFSET(r5) - cmpdi r8,0 - beq 17f - mftb r6 /* current host timebase */ - subf r8,r8,r6 - mtspr SPRN_TBU40,r8 /* update upper 40 bits */ - mftb r7 /* check if lower 24 bits overflowed */ - clrldi r6,r6,40 - clrldi r7,r7,40 - cmpld r7,r6 - bge 17f - addis r8,r8,0x100 /* if so, increment upper 40 bits */ - mtspr SPRN_TBU40,r8 - - /* Reset PCR */ -17: ld r0, VCORE_PCR(r5) - cmpdi r0, 0 - beq 18f - li r0, 0 - mtspr SPRN_PCR, r0 -18: - /* Signal secondary CPUs to continue */ - stb r0,VCORE_IN_GUEST(r5) - lis r8,0x7fff /* MAX_INT@h */ - mtspr SPRN_HDEC,r8 - -16: ld r8,KVM_HOST_LPCR(r4) - mtspr SPRN_LPCR,r8 - isync - b 33f - - /* - * PPC970 guest -> host partition switch code. - * We have to lock against concurrent tlbies, and - * we have to flush the whole TLB. - */ -32: ld r4,VCPU_KVM(r9) /* pointer to struct kvm */ - - /* Take the guest's tlbie_lock */ -#ifdef __BIG_ENDIAN__ - lwz r8,PACA_LOCK_TOKEN(r13) -#else - lwz r8,PACAPACAINDEX(r13) -#endif - addi r3,r4,KVM_TLBIE_LOCK -24: lwarx r0,0,r3 - cmpwi r0,0 - bne 24b - stwcx. r8,0,r3 - bne 24b - isync - - ld r7,KVM_HOST_LPCR(r4) /* use kvm->arch.host_lpcr for HID4 */ - li r0,0x18f - rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */ - or r0,r7,r0 - ptesync - sync - mtspr SPRN_HID4,r0 /* switch to reserved LPID */ - isync - li r0,0 - stw r0,0(r3) /* drop guest tlbie_lock */ - - /* invalidate the whole TLB */ - li r0,256 - mtctr r0 - li r6,0 -25: tlbiel r6 - addi r6,r6,0x1000 - bdnz 25b - ptesync - - /* take native_tlbie_lock */ - ld r3,toc_tlbie_lock@toc(2) -24: lwarx r0,0,r3 - cmpwi r0,0 - bne 24b - stwcx. r8,0,r3 - bne 24b - isync - - ld r6,KVM_HOST_SDR1(r4) - mtspr SPRN_SDR1,r6 /* switch to host page table */ - - /* Set up host HID4 value */ - sync - mtspr SPRN_HID4,r7 - isync - li r0,0 - stw r0,0(r3) /* drop native_tlbie_lock */ - - lis r8,0x7fff /* MAX_INT@h */ - mtspr SPRN_HDEC,r8 - - /* Disable HDEC interrupts */ - mfspr r0,SPRN_HID0 - li r3,0 - rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1 - sync - mtspr SPRN_HID0,r0 - mfspr r0,SPRN_HID0 - mfspr r0,SPRN_HID0 - mfspr r0,SPRN_HID0 - mfspr r0,SPRN_HID0 - mfspr r0,SPRN_HID0 - mfspr r0,SPRN_HID0 - - /* load host SLB entries */ -33: ld r8,PACA_SLBSHADOWPTR(r13) - - .rept SLB_NUM_BOLTED - ld r5,SLBSHADOW_SAVEAREA(r8) - ld r6,SLBSHADOW_SAVEAREA+8(r8) - andis. 
r7,r5,SLB_ESID_V@h - beq 1f - slbmte r6,r5 -1: addi r8,r8,16 - .endr - /* Save DEC */ mfspr r5,SPRN_DEC mftb r6 @@ -1221,10 +1091,6 @@ BEGIN_FTR_SECTION mtspr SPRN_AMR,r6 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) - /* Unset guest mode */ - li r0, KVM_GUEST_MODE_NONE - stb r0, HSTATE_IN_GUEST(r13) - /* Switch DSCR back to host value */ BEGIN_FTR_SECTION mfspr r8, SPRN_DSCR @@ -1325,30 +1191,234 @@ BEGIN_FTR_SECTION stw r11, VCPU_PMC + 28(r9) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 22: - ld r0, 112+PPC_LR_STKOFF(r1) - addi r1, r1, 112 - mtlr r0 - blr -secondary_too_late: + /* Clear out SLB */ + li r5,0 + slbmte r5,r5 + slbia + ptesync + +hdec_soon: /* r12 = trap, r13 = paca */ +BEGIN_FTR_SECTION + b 32f +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) + /* + * POWER7 guest -> host partition switch code. + * We don't have to lock against tlbies but we do + * have to coordinate the hardware threads. + */ + /* Increment the threads-exiting-guest count in the 0xff00 + bits of vcore->entry_exit_count */ + lwsync ld r5,HSTATE_KVM_VCORE(r13) + addi r6,r5,VCORE_ENTRY_EXIT +41: lwarx r3,0,r6 + addi r0,r3,0x100 + stwcx. r0,0,r6 + bne 41b + lwsync + + /* + * At this point we have an interrupt that we have to pass + * up to the kernel or qemu; we can't handle it in real mode. + * Thus we have to do a partition switch, so we have to + * collect the other threads, if we are the first thread + * to take an interrupt. To do this, we set the HDEC to 0, + * which causes an HDEC interrupt in all threads within 2ns + * because the HDEC register is shared between all 4 threads. + * However, we don't need to bother if this is an HDEC + * interrupt, since the other threads will already be on their + * way here in that case. + */ + cmpwi r3,0x100 /* Are we the first here? */ + bge 43f + cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER + beq 40f + li r0,0 + mtspr SPRN_HDEC,r0 +40: + /* + * Send an IPI to any napping threads, since an HDEC interrupt + * doesn't wake CPUs up from nap. + */ + lwz r3,VCORE_NAPPING_THREADS(r5) + lbz r4,HSTATE_PTID(r13) + li r0,1 + sld r0,r0,r4 + andc. r3,r3,r0 /* no sense IPI'ing ourselves */ + beq 43f + mulli r4,r4,PACA_SIZE /* get paca for thread 0 */ + subf r6,r4,r13 +42: andi. r0,r3,1 + beq 44f + ld r8,HSTATE_XICS_PHYS(r6) /* get thread's XICS reg addr */ + li r0,IPI_PRIORITY + li r7,XICS_MFRR + stbcix r0,r7,r8 /* trigger the IPI */ +44: srdi. 
r3,r3,1 + addi r6,r6,PACA_SIZE + bne 42b + +secondary_too_late: + /* Secondary threads wait for primary to do partition switch */ +43: ld r5,HSTATE_KVM_VCORE(r13) + ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ + lbz r3,HSTATE_PTID(r13) + cmpwi r3,0 + beq 15f HMT_LOW 13: lbz r3,VCORE_IN_GUEST(r5) cmpwi r3,0 bne 13b HMT_MEDIUM - li r0, KVM_GUEST_MODE_NONE - stb r0, HSTATE_IN_GUEST(r13) - ld r11,PACA_SLBSHADOWPTR(r13) + b 16f + + /* Primary thread waits for all the secondaries to exit guest */ +15: lwz r3,VCORE_ENTRY_EXIT(r5) + srwi r0,r3,8 + clrldi r3,r3,56 + cmpw r3,r0 + bne 15b + isync + + /* Primary thread switches back to host partition */ + ld r6,KVM_HOST_SDR1(r4) + lwz r7,KVM_HOST_LPID(r4) + li r8,LPID_RSVD /* switch to reserved LPID */ + mtspr SPRN_LPID,r8 + ptesync + mtspr SPRN_SDR1,r6 /* switch to partition page table */ + mtspr SPRN_LPID,r7 + isync + + /* Subtract timebase offset from timebase */ + ld r8,VCORE_TB_OFFSET(r5) + cmpdi r8,0 + beq 17f + mftb r6 /* current host timebase */ + subf r8,r8,r6 + mtspr SPRN_TBU40,r8 /* update upper 40 bits */ + mftb r7 /* check if lower 24 bits overflowed */ + clrldi r6,r6,40 + clrldi r7,r7,40 + cmpld r7,r6 + bge 17f + addis r8,r8,0x100 /* if so, increment upper 40 bits */ + mtspr SPRN_TBU40,r8 + + /* Reset PCR */ +17: ld r0, VCORE_PCR(r5) + cmpdi r0, 0 + beq 18f + li r0, 0 + mtspr SPRN_PCR, r0 +18: + /* Signal secondary CPUs to continue */ + stb r0,VCORE_IN_GUEST(r5) + lis r8,0x7fff /* MAX_INT@h */ + mtspr SPRN_HDEC,r8 + +16: ld r8,KVM_HOST_LPCR(r4) + mtspr SPRN_LPCR,r8 + isync + b 33f + + /* + * PPC970 guest -> host partition switch code. + * We have to lock against concurrent tlbies, and + * we have to flush the whole TLB. + */ +32: ld r5,HSTATE_KVM_VCORE(r13) + ld r4,VCORE_KVM(r5) /* pointer to struct kvm */ + + /* Take the guest's tlbie_lock */ +#ifdef __BIG_ENDIAN__ + lwz r8,PACA_LOCK_TOKEN(r13) +#else + lwz r8,PACAPACAINDEX(r13) +#endif + addi r3,r4,KVM_TLBIE_LOCK +24: lwarx r0,0,r3 + cmpwi r0,0 + bne 24b + stwcx. r8,0,r3 + bne 24b + isync + + ld r7,KVM_HOST_LPCR(r4) /* use kvm->arch.host_lpcr for HID4 */ + li r0,0x18f + rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */ + or r0,r7,r0 + ptesync + sync + mtspr SPRN_HID4,r0 /* switch to reserved LPID */ + isync + li r0,0 + stw r0,0(r3) /* drop guest tlbie_lock */ + + /* invalidate the whole TLB */ + li r0,256 + mtctr r0 + li r6,0 +25: tlbiel r6 + addi r6,r6,0x1000 + bdnz 25b + ptesync + + /* take native_tlbie_lock */ + ld r3,toc_tlbie_lock@toc(2) +24: lwarx r0,0,r3 + cmpwi r0,0 + bne 24b + stwcx. r8,0,r3 + bne 24b + isync + + ld r6,KVM_HOST_SDR1(r4) + mtspr SPRN_SDR1,r6 /* switch to host page table */ + + /* Set up host HID4 value */ + sync + mtspr SPRN_HID4,r7 + isync + li r0,0 + stw r0,0(r3) /* drop native_tlbie_lock */ + + lis r8,0x7fff /* MAX_INT@h */ + mtspr SPRN_HDEC,r8 + + /* Disable HDEC interrupts */ + mfspr r0,SPRN_HID0 + li r3,0 + rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1 + sync + mtspr SPRN_HID0,r0 + mfspr r0,SPRN_HID0 + mfspr r0,SPRN_HID0 + mfspr r0,SPRN_HID0 + mfspr r0,SPRN_HID0 + mfspr r0,SPRN_HID0 + mfspr r0,SPRN_HID0 + + /* load host SLB entries */ +33: ld r8,PACA_SLBSHADOWPTR(r13) .rept SLB_NUM_BOLTED - ld r5,SLBSHADOW_SAVEAREA(r11) - ld r6,SLBSHADOW_SAVEAREA+8(r11) + ld r5,SLBSHADOW_SAVEAREA(r8) + ld r6,SLBSHADOW_SAVEAREA+8(r8) andis. 
r7,r5,SLB_ESID_V@h beq 1f slbmte r6,r5 -1: addi r11,r11,16 +1: addi r8,r8,16 .endr - b 22b + + /* Unset guest mode */ + li r0, KVM_GUEST_MODE_NONE + stb r0, HSTATE_IN_GUEST(r13) + + ld r0, 112+PPC_LR_STKOFF(r1) + addi r1, r1, 112 + mtlr r0 + blr /* * Check whether an HDSI is an HPTE not found fault or something else. @@ -1649,7 +1719,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) * up to the host. */ ld r5,HSTATE_KVM_VCORE(r13) - lwz r6,VCPU_PTID(r3) + lbz r6,HSTATE_PTID(r13) lwz r8,VCORE_ENTRY_EXIT(r5) clrldi r8,r8,56 li r0,1 @@ -1662,7 +1732,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) bge kvm_cede_exit stwcx. r4,0,r6 bne 31b - li r0,1 + li r0,NAPPING_CEDE stb r0,HSTATE_NAPPING(r13) /* order napping_threads update vs testing entry_exit_count */ lwsync @@ -1751,7 +1821,7 @@ kvm_end_cede: /* clear our bit in vcore->napping_threads */ 33: ld r5,HSTATE_KVM_VCORE(r13) - lwz r3,VCPU_PTID(r4) + lbz r3,HSTATE_PTID(r13) li r0,1 sld r0,r0,r3 addi r6,r5,VCORE_NAPPING_THREADS From b005255e12a311d2c87ea70a7c7b192b2187c22c Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Wed, 8 Jan 2014 21:25:21 +1100 Subject: [PATCH 29/45] KVM: PPC: Book3S HV: Context-switch new POWER8 SPRs This adds fields to the struct kvm_vcpu_arch to store the new guest-accessible SPRs on POWER8, adds code to the get/set_one_reg functions to allow userspace to access this state, and adds code to the guest entry and exit to context-switch these SPRs between host and guest. Note that DPDES (Directed Privileged Doorbell Exception State) is shared between threads on a core; hence we store it in struct kvmppc_vcore and have the master thread save and restore it. Signed-off-by: Michael Neuling Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/kvm_host.h | 25 +++- arch/powerpc/include/asm/reg.h | 17 +++ arch/powerpc/include/uapi/asm/kvm.h | 1 + arch/powerpc/kernel/asm-offsets.c | 23 ++++ arch/powerpc/kvm/book3s_hv.c | 153 +++++++++++++++++++++++- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 145 ++++++++++++++++++++++ 6 files changed, 361 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index b850544dbc3f..81c92d1d7978 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -304,6 +304,7 @@ struct kvmppc_vcore { ulong lpcr; u32 arch_compat; ulong pcr; + ulong dpdes; /* doorbell state (POWER8) */ }; #define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff) @@ -448,6 +449,7 @@ struct kvm_vcpu_arch { ulong pc; ulong ctr; ulong lr; + ulong tar; ulong xer; u32 cr; @@ -457,13 +459,32 @@ struct kvm_vcpu_arch { ulong guest_owned_ext; ulong purr; ulong spurr; + ulong ic; + ulong vtb; ulong dscr; ulong amr; ulong uamor; + ulong iamr; u32 ctrl; ulong dabr; + ulong dawr; + ulong dawrx; + ulong ciabr; ulong cfar; ulong ppr; + ulong pspb; + ulong fscr; + ulong tfhar; + ulong tfiar; + ulong texasr; + ulong ebbhr; + ulong ebbrr; + ulong bescr; + ulong csigr; + ulong tacr; + ulong tcscr; + ulong acop; + ulong wort; ulong shadow_srr1; #endif u32 vrsave; /* also USPRG0 */ @@ -498,10 +519,12 @@ struct kvm_vcpu_arch { u32 ccr1; u32 dbsr; - u64 mmcr[3]; + u64 mmcr[5]; u32 pmc[8]; + u32 spmc[2]; u64 siar; u64 sdar; + u64 sier; #ifdef CONFIG_KVM_EXIT_TIMING struct mutex exit_timing_lock; diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 5c45787d551e..2f41e6475648 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -223,6 +223,11 @@ #define CTRL_TE 
0x00c00000 /* thread enable */ #define CTRL_RUNLATCH 0x1 #define SPRN_DAWR 0xB4 +#define SPRN_CIABR 0xBB +#define CIABR_PRIV 0x3 +#define CIABR_PRIV_USER 1 +#define CIABR_PRIV_SUPER 2 +#define CIABR_PRIV_HYPER 3 #define SPRN_DAWRX 0xBC #define DAWRX_USER (1UL << 0) #define DAWRX_KERNEL (1UL << 1) @@ -260,6 +265,8 @@ #define SPRN_HRMOR 0x139 /* Real mode offset register */ #define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */ #define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */ +#define SPRN_IC 0x350 /* Virtual Instruction Count */ +#define SPRN_VTB 0x351 /* Virtual Time Base */ /* HFSCR and FSCR bit numbers are the same */ #define FSCR_TAR_LG 8 /* Enable Target Address Register */ #define FSCR_EBB_LG 7 /* Enable Event Based Branching */ @@ -368,6 +375,8 @@ #define DER_EBRKE 0x00000002 /* External Breakpoint Interrupt */ #define DER_DPIE 0x00000001 /* Dev. Port Nonmaskable Request */ #define SPRN_DMISS 0x3D0 /* Data TLB Miss Register */ +#define SPRN_DHDES 0x0B1 /* Directed Hyp. Doorbell Exc. State */ +#define SPRN_DPDES 0x0B0 /* Directed Priv. Doorbell Exc. State */ #define SPRN_EAR 0x11A /* External Address Register */ #define SPRN_HASH1 0x3D2 /* Primary Hash Address Register */ #define SPRN_HASH2 0x3D3 /* Secondary Hash Address Resgister */ @@ -427,6 +436,7 @@ #define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */ #define SPRN_IABR2 0x3FA /* 83xx */ #define SPRN_IBCR 0x135 /* 83xx Insn Breakpoint Control Reg */ +#define SPRN_IAMR 0x03D /* Instr. Authority Mask Reg */ #define SPRN_HID4 0x3F4 /* 970 HID4 */ #define HID4_LPES0 (1ul << (63-0)) /* LPAR env. sel. bit 0 */ #define HID4_RMLS2_SH (63 - 2) /* Real mode limit bottom 2 bits */ @@ -541,6 +551,7 @@ #define SPRN_PIR 0x3FF /* Processor Identification Register */ #endif #define SPRN_TIR 0x1BE /* Thread Identification Register */ +#define SPRN_PSPB 0x09F /* Problem State Priority Boost reg */ #define SPRN_PTEHI 0x3D5 /* 981 7450 PTE HI word (S/W TLB load) */ #define SPRN_PTELO 0x3D6 /* 982 7450 PTE LO word (S/W TLB load) */ #define SPRN_PURR 0x135 /* Processor Utilization of Resources Reg */ @@ -682,6 +693,7 @@ #define SPRN_EBBHR 804 /* Event based branch handler register */ #define SPRN_EBBRR 805 /* Event based branch return register */ #define SPRN_BESCR 806 /* Branch event status and control register */ +#define SPRN_WORT 895 /* Workload optimization register - thread */ #define SPRN_PMC1 787 #define SPRN_PMC2 788 @@ -698,6 +710,11 @@ #define SIER_SIHV 0x1000000 /* Sampled MSR_HV */ #define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */ #define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */ +#define SPRN_TACR 888 +#define SPRN_TCSCR 889 +#define SPRN_CSIGR 890 +#define SPRN_SPMC1 892 +#define SPRN_SPMC2 893 /* When EBB is enabled, some of MMCR0/MMCR2/SIER are user accessible */ #define MMCR0_USER_MASK (MMCR0_FC | MMCR0_PMXE | MMCR0_PMAO) diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index 6836ec79a830..a586fb9b77bd 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -545,6 +545,7 @@ struct kvm_get_htab_header { #define KVM_REG_PPC_TCSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1) #define KVM_REG_PPC_PID (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2) #define KVM_REG_PPC_ACOP (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3) +#define KVM_REG_PPC_WORT (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb4) #define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4) #define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5) diff --git 
a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 332ae66883e4..043900abbbb0 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -432,6 +432,7 @@ int main(void) DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer)); DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr)); DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr)); + DEFINE(VCPU_TAR, offsetof(struct kvm_vcpu, arch.tar)); DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr)); DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc)); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE @@ -484,11 +485,17 @@ int main(void) DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id)); DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr)); DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr)); + DEFINE(VCPU_IC, offsetof(struct kvm_vcpu, arch.ic)); + DEFINE(VCPU_VTB, offsetof(struct kvm_vcpu, arch.vtb)); DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr)); DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr)); DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor)); + DEFINE(VCPU_IAMR, offsetof(struct kvm_vcpu, arch.iamr)); DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl)); DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr)); + DEFINE(VCPU_DAWR, offsetof(struct kvm_vcpu, arch.dawr)); + DEFINE(VCPU_DAWRX, offsetof(struct kvm_vcpu, arch.dawrx)); + DEFINE(VCPU_CIABR, offsetof(struct kvm_vcpu, arch.ciabr)); DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags)); DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec)); DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires)); @@ -497,8 +504,10 @@ int main(void) DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded)); DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr)); DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc)); + DEFINE(VCPU_SPMC, offsetof(struct kvm_vcpu, arch.spmc)); DEFINE(VCPU_SIAR, offsetof(struct kvm_vcpu, arch.siar)); DEFINE(VCPU_SDAR, offsetof(struct kvm_vcpu, arch.sdar)); + DEFINE(VCPU_SIER, offsetof(struct kvm_vcpu, arch.sier)); DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb)); DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max)); DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr)); @@ -508,6 +517,19 @@ int main(void) DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap)); DEFINE(VCPU_CFAR, offsetof(struct kvm_vcpu, arch.cfar)); DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr)); + DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr)); + DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb)); + DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar)); + DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar)); + DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr)); + DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr)); + DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr)); + DEFINE(VCPU_BESCR, offsetof(struct kvm_vcpu, arch.bescr)); + DEFINE(VCPU_CSIGR, offsetof(struct kvm_vcpu, arch.csigr)); + DEFINE(VCPU_TACR, offsetof(struct kvm_vcpu, arch.tacr)); + DEFINE(VCPU_TCSCR, offsetof(struct kvm_vcpu, arch.tcscr)); + DEFINE(VCPU_ACOP, offsetof(struct kvm_vcpu, arch.acop)); + DEFINE(VCPU_WORT, offsetof(struct kvm_vcpu, arch.wort)); DEFINE(VCPU_SHADOW_SRR1, offsetof(struct kvm_vcpu, arch.shadow_srr1)); DEFINE(VCORE_ENTRY_EXIT, offsetof(struct kvmppc_vcore, entry_exit_count)); DEFINE(VCORE_NAP_COUNT, offsetof(struct kvmppc_vcore, nap_count)); @@ -517,6 +539,7 @@ int main(void) DEFINE(VCORE_TB_OFFSET, offsetof(struct kvmppc_vcore, tb_offset)); 
DEFINE(VCORE_LPCR, offsetof(struct kvmppc_vcore, lpcr)); DEFINE(VCORE_PCR, offsetof(struct kvmppc_vcore, pcr)); + DEFINE(VCORE_DPDES, offsetof(struct kvmppc_vcore, dpdes)); DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige)); DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv)); DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb)); diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 7da53cd215db..5b08ddf91d2d 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -800,7 +800,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_UAMOR: *val = get_reg_val(id, vcpu->arch.uamor); break; - case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA: + case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS: i = id - KVM_REG_PPC_MMCR0; *val = get_reg_val(id, vcpu->arch.mmcr[i]); break; @@ -808,12 +808,85 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, i = id - KVM_REG_PPC_PMC1; *val = get_reg_val(id, vcpu->arch.pmc[i]); break; + case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2: + i = id - KVM_REG_PPC_SPMC1; + *val = get_reg_val(id, vcpu->arch.spmc[i]); + break; case KVM_REG_PPC_SIAR: *val = get_reg_val(id, vcpu->arch.siar); break; case KVM_REG_PPC_SDAR: *val = get_reg_val(id, vcpu->arch.sdar); break; + case KVM_REG_PPC_SIER: + *val = get_reg_val(id, vcpu->arch.sier); + break; + case KVM_REG_PPC_IAMR: + *val = get_reg_val(id, vcpu->arch.iamr); + break; + case KVM_REG_PPC_TFHAR: + *val = get_reg_val(id, vcpu->arch.tfhar); + break; + case KVM_REG_PPC_TFIAR: + *val = get_reg_val(id, vcpu->arch.tfiar); + break; + case KVM_REG_PPC_TEXASR: + *val = get_reg_val(id, vcpu->arch.texasr); + break; + case KVM_REG_PPC_FSCR: + *val = get_reg_val(id, vcpu->arch.fscr); + break; + case KVM_REG_PPC_PSPB: + *val = get_reg_val(id, vcpu->arch.pspb); + break; + case KVM_REG_PPC_EBBHR: + *val = get_reg_val(id, vcpu->arch.ebbhr); + break; + case KVM_REG_PPC_EBBRR: + *val = get_reg_val(id, vcpu->arch.ebbrr); + break; + case KVM_REG_PPC_BESCR: + *val = get_reg_val(id, vcpu->arch.bescr); + break; + case KVM_REG_PPC_TAR: + *val = get_reg_val(id, vcpu->arch.tar); + break; + case KVM_REG_PPC_DPDES: + *val = get_reg_val(id, vcpu->arch.vcore->dpdes); + break; + case KVM_REG_PPC_DAWR: + *val = get_reg_val(id, vcpu->arch.dawr); + break; + case KVM_REG_PPC_DAWRX: + *val = get_reg_val(id, vcpu->arch.dawrx); + break; + case KVM_REG_PPC_CIABR: + *val = get_reg_val(id, vcpu->arch.ciabr); + break; + case KVM_REG_PPC_IC: + *val = get_reg_val(id, vcpu->arch.ic); + break; + case KVM_REG_PPC_VTB: + *val = get_reg_val(id, vcpu->arch.vtb); + break; + case KVM_REG_PPC_CSIGR: + *val = get_reg_val(id, vcpu->arch.csigr); + break; + case KVM_REG_PPC_TACR: + *val = get_reg_val(id, vcpu->arch.tacr); + break; + case KVM_REG_PPC_TCSCR: + *val = get_reg_val(id, vcpu->arch.tcscr); + break; + case KVM_REG_PPC_PID: + *val = get_reg_val(id, vcpu->arch.pid); + break; + case KVM_REG_PPC_ACOP: + *val = get_reg_val(id, vcpu->arch.acop); + break; + case KVM_REG_PPC_WORT: + *val = get_reg_val(id, vcpu->arch.wort); + break; case KVM_REG_PPC_VPA_ADDR: spin_lock(&vcpu->arch.vpa_update_lock); *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); @@ -882,7 +955,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_UAMOR: vcpu->arch.uamor = set_reg_val(id, *val); break; - case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA: + case KVM_REG_PPC_MMCR0 ... 
KVM_REG_PPC_MMCRS: i = id - KVM_REG_PPC_MMCR0; vcpu->arch.mmcr[i] = set_reg_val(id, *val); break; @@ -890,12 +963,88 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, i = id - KVM_REG_PPC_PMC1; vcpu->arch.pmc[i] = set_reg_val(id, *val); break; + case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2: + i = id - KVM_REG_PPC_SPMC1; + vcpu->arch.spmc[i] = set_reg_val(id, *val); + break; case KVM_REG_PPC_SIAR: vcpu->arch.siar = set_reg_val(id, *val); break; case KVM_REG_PPC_SDAR: vcpu->arch.sdar = set_reg_val(id, *val); break; + case KVM_REG_PPC_SIER: + vcpu->arch.sier = set_reg_val(id, *val); + break; + case KVM_REG_PPC_IAMR: + vcpu->arch.iamr = set_reg_val(id, *val); + break; + case KVM_REG_PPC_TFHAR: + vcpu->arch.tfhar = set_reg_val(id, *val); + break; + case KVM_REG_PPC_TFIAR: + vcpu->arch.tfiar = set_reg_val(id, *val); + break; + case KVM_REG_PPC_TEXASR: + vcpu->arch.texasr = set_reg_val(id, *val); + break; + case KVM_REG_PPC_FSCR: + vcpu->arch.fscr = set_reg_val(id, *val); + break; + case KVM_REG_PPC_PSPB: + vcpu->arch.pspb = set_reg_val(id, *val); + break; + case KVM_REG_PPC_EBBHR: + vcpu->arch.ebbhr = set_reg_val(id, *val); + break; + case KVM_REG_PPC_EBBRR: + vcpu->arch.ebbrr = set_reg_val(id, *val); + break; + case KVM_REG_PPC_BESCR: + vcpu->arch.bescr = set_reg_val(id, *val); + break; + case KVM_REG_PPC_TAR: + vcpu->arch.tar = set_reg_val(id, *val); + break; + case KVM_REG_PPC_DPDES: + vcpu->arch.vcore->dpdes = set_reg_val(id, *val); + break; + case KVM_REG_PPC_DAWR: + vcpu->arch.dawr = set_reg_val(id, *val); + break; + case KVM_REG_PPC_DAWRX: + vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP; + break; + case KVM_REG_PPC_CIABR: + vcpu->arch.ciabr = set_reg_val(id, *val); + /* Don't allow setting breakpoints in hypervisor code */ + if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) + vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ + break; + case KVM_REG_PPC_IC: + vcpu->arch.ic = set_reg_val(id, *val); + break; + case KVM_REG_PPC_VTB: + vcpu->arch.vtb = set_reg_val(id, *val); + break; + case KVM_REG_PPC_CSIGR: + vcpu->arch.csigr = set_reg_val(id, *val); + break; + case KVM_REG_PPC_TACR: + vcpu->arch.tacr = set_reg_val(id, *val); + break; + case KVM_REG_PPC_TCSCR: + vcpu->arch.tcscr = set_reg_val(id, *val); + break; + case KVM_REG_PPC_PID: + vcpu->arch.pid = set_reg_val(id, *val); + break; + case KVM_REG_PPC_ACOP: + vcpu->arch.acop = set_reg_val(id, *val); + break; + case KVM_REG_PPC_WORT: + vcpu->arch.wort = set_reg_val(id, *val); + break; case KVM_REG_PPC_VPA_ADDR: addr = set_reg_val(id, *val); r = -EINVAL; diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 8bbe91bdb6da..691dd1ef555b 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -460,6 +460,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) beq 38f mtspr SPRN_PCR, r7 38: + +BEGIN_FTR_SECTION + /* DPDES is shared between threads */ + ld r8, VCORE_DPDES(r5) + mtspr SPRN_DPDES, r8 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + li r0,1 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */ b 10f @@ -659,6 +666,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) mtspr SPRN_MMCRA, r6 mtspr SPRN_SIAR, r7 mtspr SPRN_SDAR, r8 +BEGIN_FTR_SECTION + ld r5, VCPU_MMCR + 24(r4) + ld r6, VCPU_SIER(r4) + lwz r7, VCPU_PMC + 24(r4) + lwz r8, VCPU_PMC + 28(r4) + ld r9, VCPU_MMCR + 32(r4) + mtspr SPRN_MMCR2, r5 + mtspr SPRN_SIER, r6 + mtspr SPRN_SPMC1, r7 + mtspr SPRN_SPMC2, r8 + mtspr SPRN_MMCRS, r9 
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) mtspr SPRN_MMCR0, r3 isync @@ -690,6 +709,61 @@ BEGIN_FTR_SECTION mtspr SPRN_DSCR, r5 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) +BEGIN_FTR_SECTION + /* Skip next section on POWER7 or PPC970 */ + b 8f +END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) + /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */ + mfmsr r8 + li r0, 1 + rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG + mtmsrd r8 + + /* Load up POWER8-specific registers */ + ld r5, VCPU_IAMR(r4) + lwz r6, VCPU_PSPB(r4) + ld r7, VCPU_FSCR(r4) + mtspr SPRN_IAMR, r5 + mtspr SPRN_PSPB, r6 + mtspr SPRN_FSCR, r7 + ld r5, VCPU_DAWR(r4) + ld r6, VCPU_DAWRX(r4) + ld r7, VCPU_CIABR(r4) + ld r8, VCPU_TAR(r4) + mtspr SPRN_DAWR, r5 + mtspr SPRN_DAWRX, r6 + mtspr SPRN_CIABR, r7 + mtspr SPRN_TAR, r8 + ld r5, VCPU_IC(r4) + ld r6, VCPU_VTB(r4) + mtspr SPRN_IC, r5 + mtspr SPRN_VTB, r6 + ld r5, VCPU_TFHAR(r4) + ld r6, VCPU_TFIAR(r4) + ld r7, VCPU_TEXASR(r4) + ld r8, VCPU_EBBHR(r4) + mtspr SPRN_TFHAR, r5 + mtspr SPRN_TFIAR, r6 + mtspr SPRN_TEXASR, r7 + mtspr SPRN_EBBHR, r8 + ld r5, VCPU_EBBRR(r4) + ld r6, VCPU_BESCR(r4) + ld r7, VCPU_CSIGR(r4) + ld r8, VCPU_TACR(r4) + mtspr SPRN_EBBRR, r5 + mtspr SPRN_BESCR, r6 + mtspr SPRN_CSIGR, r7 + mtspr SPRN_TACR, r8 + ld r5, VCPU_TCSCR(r4) + ld r6, VCPU_ACOP(r4) + lwz r7, VCPU_GUEST_PID(r4) + ld r8, VCPU_WORT(r4) + mtspr SPRN_TCSCR, r5 + mtspr SPRN_ACOP, r6 + mtspr SPRN_PID, r7 + mtspr SPRN_WORT, r8 +8: + /* * Set the decrementer to the guest decrementer. */ @@ -1081,6 +1155,54 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201) add r5,r5,r6 std r5,VCPU_DEC_EXPIRES(r9) +BEGIN_FTR_SECTION + b 8f +END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) + /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */ + mfmsr r8 + li r0, 1 + rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG + mtmsrd r8 + + /* Save POWER8-specific registers */ + mfspr r5, SPRN_IAMR + mfspr r6, SPRN_PSPB + mfspr r7, SPRN_FSCR + std r5, VCPU_IAMR(r9) + stw r6, VCPU_PSPB(r9) + std r7, VCPU_FSCR(r9) + mfspr r5, SPRN_IC + mfspr r6, SPRN_VTB + mfspr r7, SPRN_TAR + std r5, VCPU_IC(r9) + std r6, VCPU_VTB(r9) + std r7, VCPU_TAR(r9) + mfspr r5, SPRN_TFHAR + mfspr r6, SPRN_TFIAR + mfspr r7, SPRN_TEXASR + mfspr r8, SPRN_EBBHR + std r5, VCPU_TFHAR(r9) + std r6, VCPU_TFIAR(r9) + std r7, VCPU_TEXASR(r9) + std r8, VCPU_EBBHR(r9) + mfspr r5, SPRN_EBBRR + mfspr r6, SPRN_BESCR + mfspr r7, SPRN_CSIGR + mfspr r8, SPRN_TACR + std r5, VCPU_EBBRR(r9) + std r6, VCPU_BESCR(r9) + std r7, VCPU_CSIGR(r9) + std r8, VCPU_TACR(r9) + mfspr r5, SPRN_TCSCR + mfspr r6, SPRN_ACOP + mfspr r7, SPRN_PID + mfspr r8, SPRN_WORT + std r5, VCPU_TCSCR(r9) + std r6, VCPU_ACOP(r9) + stw r7, VCPU_GUEST_PID(r9) + std r8, VCPU_WORT(r9) +8: + /* Save and reset AMR and UAMOR before turning on the MMU */ BEGIN_FTR_SECTION mfspr r5,SPRN_AMR @@ -1190,6 +1312,20 @@ BEGIN_FTR_SECTION stw r10, VCPU_PMC + 24(r9) stw r11, VCPU_PMC + 28(r9) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) +BEGIN_FTR_SECTION + mfspr r4, SPRN_MMCR2 + mfspr r5, SPRN_SIER + mfspr r6, SPRN_SPMC1 + mfspr r7, SPRN_SPMC2 + mfspr r8, SPRN_MMCRS + std r4, VCPU_MMCR + 24(r9) + std r5, VCPU_SIER(r9) + stw r6, VCPU_PMC + 24(r9) + stw r7, VCPU_PMC + 28(r9) + std r8, VCPU_MMCR + 32(r9) + lis r4, 0x8000 + mtspr SPRN_MMCRS, r4 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 22: /* Clear out SLB */ li r5,0 @@ -1290,6 +1426,15 @@ secondary_too_late: mtspr SPRN_LPID,r7 isync +BEGIN_FTR_SECTION + /* DPDES is shared between threads */ + mfspr r7, SPRN_DPDES + std r7, VCORE_DPDES(r5) + /* clear DPDES so we don't get guest doorbells in the host */ + li r8, 0 + mtspr 
SPRN_DPDES, r8 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) + + /* Subtract timebase offset from timebase */ ld r8,VCORE_TB_OFFSET(r5) cmpdi r8,0 From ca252055130b6a1affa12df94a4694c1aafc2a6c Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 8 Jan 2014 21:25:22 +1100 Subject: [PATCH 30/45] KVM: PPC: Book3S HV: Flush the correct number of TLB sets on POWER8 POWER8 has 512 sets in the TLB, compared to 128 for POWER7, so we need to do more tlbiel instructions when flushing the TLB on POWER8. Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 691dd1ef555b..19f8819f90fa 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -430,7 +430,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) andc r7,r7,r0 stdcx. r7,0,r6 bne 23b - li r6,128 /* and flush the TLB */ + /* Flush the TLB of any entries for this LPID */ + /* use arch 2.07S as a proxy for POWER8 */ +BEGIN_FTR_SECTION + li r6,512 /* POWER8 has 512 sets */ +FTR_SECTION_ELSE + li r6,128 /* POWER7 has 128 sets */ +ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S) mtctr r6 li r7,0x800 /* IS field = 0b10 */ ptesync From bd3048b80caace9cf0ae9ad22b2fbb8333b44a97 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 8 Jan 2014 21:25:23 +1100 Subject: [PATCH 31/45] KVM: PPC: Book3S HV: Add handler for HV facility unavailable At present this should never happen, since the host kernel sets HFSCR to allow access to all facilities. It's better to be prepared to handle it cleanly if it does ever happen, though. Signed-off-by: Michael Ellerman Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/kvm_asm.h | 1 + arch/powerpc/kvm/book3s_hv.c | 11 ++++++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index 1bd92fd43cfb..dba8fb244100 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h @@ -99,6 +99,7 @@ #define BOOK3S_INTERRUPT_PERFMON 0xf00 #define BOOK3S_INTERRUPT_ALTIVEC 0xf20 #define BOOK3S_INTERRUPT_VSX 0xf40 +#define BOOK3S_INTERRUPT_H_FAC_UNAVAIL 0xf80 #define BOOK3S_IRQPRIO_SYSTEM_RESET 0 #define BOOK3S_IRQPRIO_DATA_SEGMENT 1 diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 5b08ddf91d2d..1bf681e8a05f 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -706,7 +706,16 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, * we don't emulate any guest instructions at this stage. */ case BOOK3S_INTERRUPT_H_EMUL_ASSIST: - kvmppc_core_queue_program(vcpu, 0x80000); + kvmppc_core_queue_program(vcpu, SRR1_PROGILL); r = RESUME_GUEST; break; + /* + * This occurs if the guest (kernel or userspace) does something that + * is prohibited by HFSCR. We just generate a program interrupt to + * the guest. + */ + case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: + kvmppc_core_queue_program(vcpu, SRR1_PROGILL); + r = RESUME_GUEST; + break; default: From 5557ae0ec77c2b4b5bbce2883c0603054ab66e68 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 8 Jan 2014 21:25:24 +1100 Subject: [PATCH 32/45] KVM: PPC: Book3S HV: Implement architecture compatibility modes for POWER8 This allows us to select architecture 2.05 (POWER6) or 2.06 (POWER7) compatibility modes on a POWER8 processor.
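The architecture bits in the PCR are cumulative: selecting an older architecture level also requires setting the disable bits for every newer level. A rough sketch of the selection logic in the book3s_hv.c hunk below (standalone C for illustration; the helper name is invented, while the PCR_*/PVR_* constants are the kernel's own):

    /* Sketch of the PCR computation done by kvmppc_set_arch_compat() below. */
    static int compat_to_pcr(u32 arch_compat, bool host_is_power8,
                             unsigned long *pcr)
    {
            switch (arch_compat) {
            case PVR_ARCH_205:              /* POWER6 mode */
                    *pcr = PCR_ARCH_206 | PCR_ARCH_205;
                    break;
            case PVR_ARCH_206:              /* POWER7 mode */
            case PVR_ARCH_206p:
                    *pcr = PCR_ARCH_206;
                    break;
            case PVR_ARCH_207:              /* native mode on POWER8 */
                    *pcr = 0;
                    break;
            default:
                    return -EINVAL;
            }
            if (!host_is_power8) {
                    if (!(*pcr & PCR_ARCH_206))
                            return -EINVAL; /* POWER7 can't emulate POWER8 */
                    *pcr &= ~PCR_ARCH_206;  /* bit doesn't exist before POWER8 */
            }
            return 0;
    }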
(Note that transactional memory is disabled for usermode if either or both of the PCR_TM_DIS and PCR_ARCH_206 bits are set.) Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/reg.h | 2 ++ arch/powerpc/kvm/book3s_hv.c | 16 +++++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 2f41e6475648..5a9983147683 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -329,6 +329,8 @@ #define SPRN_PCR 0x152 /* Processor compatibility register */ #define PCR_VEC_DIS (1ul << (63-0)) /* Vec. disable (bit NA since POWER8) */ #define PCR_VSX_DIS (1ul << (63-1)) /* VSX disable (bit NA since POWER8) */ +#define PCR_TM_DIS (1ul << (63-2)) /* Trans. memory disable (POWER8) */ +#define PCR_ARCH_206 0x4 /* Architecture 2.06 */ #define PCR_ARCH_205 0x2 /* Architecture 2.05 */ #define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */ #define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */ diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 1bf681e8a05f..1e9f4b45432b 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -184,14 +184,28 @@ int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat) switch (arch_compat) { case PVR_ARCH_205: - pcr = PCR_ARCH_205; + /* + * If an arch bit is set in PCR, all the defined + * higher-order arch bits also have to be set. + */ + pcr = PCR_ARCH_206 | PCR_ARCH_205; break; case PVR_ARCH_206: case PVR_ARCH_206p: + pcr = PCR_ARCH_206; + break; + case PVR_ARCH_207: break; default: return -EINVAL; } + + if (!cpu_has_feature(CPU_FTR_ARCH_207S)) { + /* POWER7 can't emulate POWER8 */ + if (!(pcr & PCR_ARCH_206)) + return -EINVAL; + pcr &= ~PCR_ARCH_206; + } } spin_lock(&vc->lock); From e3bbbbfa13ea2901050a58b2cb382df7974e7373 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 8 Jan 2014 21:25:25 +1100 Subject: [PATCH 33/45] KVM: PPC: Book3S HV: Consolidate code that checks reason for wake from nap Currently in book3s_hv_rmhandlers.S we have three places where we have woken up from nap mode and we check the reason field in SRR1 to see what event woke us up. This consolidates them into a new function, kvmppc_check_wake_reason. It looks at the wake reason field in SRR1, and if it indicates that an external interrupt caused the wakeup, calls kvmppc_read_intr to check what sort of interrupt it was. This also consolidates the two places where we synthesize an external interrupt (0x500 vector) for the guest. Now, if the guest exit code finds that there was an external interrupt which has been handled (i.e. it was an IPI indicating that there is now an interrupt pending for the guest), it jumps to deliver_guest_interrupt, which is in the last part of the guest entry code, where we synthesize guest external and decrementer interrupts. That code has been streamlined a little and now clears LPCR[MER] when appropriate as well as setting it. The extra clearing of any pending IPI on a secondary, offline CPU thread before going back to nap mode has been removed. It is no longer necessary now that we have code to read and acknowledge IPIs in the guest exit path. This fixes a minor bug in the H_CEDE real-mode handling - previously, if we found that other threads were already exiting the guest when we were about to go to nap mode, we would branch to the cede wakeup path and end up looking in SRR1 for a wakeup reason. 
Now we branch to a point after we have checked the wakeup reason. This also fixes a minor bug in kvmppc_read_intr - previously it could return 0xff rather than 1, in the case where we find that a host IPI is pending after we have cleared the IPI. Now it returns 1. Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 192 ++++++++++-------------- 1 file changed, 77 insertions(+), 115 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 19f8819f90fa..386e141dbf16 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -193,8 +193,10 @@ kvm_novcpu_wakeup: stb r0, HSTATE_NAPPING(r13) stb r0, HSTATE_HWTHREAD_REQ(r13) + /* check the wake reason */ + bl kvmppc_check_wake_reason + /* see if any other thread is already exiting */ - li r12, 0 lwz r0, VCORE_ENTRY_EXIT(r5) cmpwi r0, 0x100 bge kvm_novcpu_exit @@ -204,23 +206,14 @@ kvm_novcpu_wakeup: li r0, 1 sld r0, r0, r7 addi r6, r5, VCORE_NAPPING_THREADS -4: lwarx r3, 0, r6 - andc r3, r3, r0 - stwcx. r3, 0, r6 +4: lwarx r7, 0, r6 + andc r7, r7, r0 + stwcx. r7, 0, r6 bne 4b - /* Check the wake reason in SRR1 to see why we got here */ - mfspr r3, SPRN_SRR1 - rlwinm r3, r3, 44-31, 0x7 /* extract wake reason field */ - cmpwi r3, 4 /* was it an external interrupt? */ - bne kvm_novcpu_exit /* if not, exit the guest */ - - /* extern interrupt - read and handle it */ - li r12, BOOK3S_INTERRUPT_EXTERNAL - bl kvmppc_read_intr + /* See if the wake reason means we need to exit */ cmpdi r3, 0 bge kvm_novcpu_exit - li r12, 0 /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */ ld r4, HSTATE_KVM_VCPU(r13) @@ -264,40 +257,16 @@ kvm_start_guest: */ /* Check the wake reason in SRR1 to see why we got here */ - mfspr r3,SPRN_SRR1 - rlwinm r3,r3,44-31,0x7 /* extract wake reason field */ - cmpwi r3,4 /* was it an external interrupt? */ - bne 27f /* if not */ - ld r5,HSTATE_XICS_PHYS(r13) - li r7,XICS_XIRR /* if it was an external interrupt, */ - lwzcix r8,r5,r7 /* get and ack the interrupt */ - sync - clrldi. r9,r8,40 /* get interrupt source ID. */ - beq 28f /* none there? */ - cmpwi r9,XICS_IPI /* was it an IPI? */ - bne 29f - li r0,0xff - li r6,XICS_MFRR - stbcix r0,r5,r6 /* clear IPI */ - stwcix r8,r5,r7 /* EOI the interrupt */ - sync /* order loading of vcpu after that */ + bl kvmppc_check_wake_reason + cmpdi r3, 0 + bge kvm_no_guest /* get vcpu pointer, NULL if we have no vcpu to run */ ld r4,HSTATE_KVM_VCPU(r13) cmpdi r4,0 /* if we have no vcpu to run, go back to sleep */ beq kvm_no_guest - b 30f -27: /* XXX should handle hypervisor maintenance interrupts etc. here */ - b kvm_no_guest -28: /* SRR1 said external but ICP said nope?? */ - b kvm_no_guest -29: /* External non-IPI interrupt to offline secondary thread? help?? */ - stw r8,HSTATE_SAVED_XIRR(r13) - b kvm_no_guest - -30: /* Set HSTATE_DSCR(r13) to something sensible */ LOAD_REG_ADDR(r6, dscr_default) ld r6, 0(r6) @@ -310,18 +279,6 @@ kvm_start_guest: li r0, 0 std r0, HSTATE_KVM_VCPU(r13) lwsync - /* Clear any pending IPI - we're an offline thread */ - ld r5, HSTATE_XICS_PHYS(r13) - li r7, XICS_XIRR - lwzcix r3, r5, r7 /* ack any pending interrupt */ - rlwinm. r0, r3, 0, 0xffffff /* any pending? 
*/ - beq 37f - sync - li r0, 0xff - li r6, XICS_MFRR - stbcix r0, r5, r6 /* clear the IPI */ - stwcix r3, r5, r7 /* EOI it */ -37: sync /* increment the nap count and then go to nap mode */ ld r4, HSTATE_KVM_VCORE(r13) @@ -818,47 +775,46 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) mtctr r6 mtxer r7 +kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */ ld r10, VCPU_PC(r4) ld r11, VCPU_MSR(r4) -kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */ ld r6, VCPU_SRR0(r4) ld r7, VCPU_SRR1(r4) + mtspr SPRN_SRR0, r6 + mtspr SPRN_SRR1, r7 +deliver_guest_interrupt: /* r11 = vcpu->arch.msr & ~MSR_HV */ rldicl r11, r11, 63 - MSR_HV_LG, 1 rotldi r11, r11, 1 + MSR_HV_LG ori r11, r11, MSR_ME /* Check if we can deliver an external or decrementer interrupt now */ - ld r0,VCPU_PENDING_EXC(r4) - lis r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h - and r0,r0,r8 - cmpdi cr1,r0,0 - andi. r0,r11,MSR_EE - beq cr1,11f + ld r0, VCPU_PENDING_EXC(r4) + rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63 + cmpdi cr1, r0, 0 + andi. r8, r11, MSR_EE BEGIN_FTR_SECTION - mfspr r8,SPRN_LPCR - ori r8,r8,LPCR_MER - mtspr SPRN_LPCR,r8 + mfspr r8, SPRN_LPCR + /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */ + rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH + mtspr SPRN_LPCR, r8 isync END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) beq 5f - li r0,BOOK3S_INTERRUPT_EXTERNAL -12: mr r6,r10 + li r0, BOOK3S_INTERRUPT_EXTERNAL + bne cr1, 12f + mfspr r0, SPRN_DEC + cmpwi r0, 0 + li r0, BOOK3S_INTERRUPT_DECREMENTER + bge 5f + +12: mtspr SPRN_SRR0, r10 mr r10,r0 - mr r7,r11 + mtspr SPRN_SRR1, r11 li r11,(MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ rotldi r11,r11,63 - b 5f -11: beq 5f - mfspr r0,SPRN_DEC - cmpwi r0,0 - li r0,BOOK3S_INTERRUPT_DECREMENTER - blt 12b - - /* Move SRR0 and SRR1 into the respective regs */ -5: mtspr SPRN_SRR0, r6 - mtspr SPRN_SRR1, r7 +5: /* * Required state: @@ -1047,39 +1003,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) /* External interrupt, first check for host_ipi. If this is * set, we know the host wants us out so let's do it now */ -do_ext_interrupt: bl kvmppc_read_intr cmpdi r3, 0 bgt ext_interrupt_to_host - /* Allright, looks like an IPI for the guest, we need to set MER */ /* Check if any CPU is heading out to the host, if so head out too */ ld r5, HSTATE_KVM_VCORE(r13) lwz r0, VCORE_ENTRY_EXIT(r5) cmpwi r0, 0x100 bge ext_interrupt_to_host - /* See if there is a pending interrupt for the guest */ - mfspr r8, SPRN_LPCR - ld r0, VCPU_PENDING_EXC(r9) - /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */ - rldicl. r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63 - rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH - beq 2f - - /* And if the guest EE is set, we can deliver immediately, else - * we return to the guest with MER set - */ - andi. r0, r11, MSR_EE - beq 2f - mtspr SPRN_SRR0, r10 - mtspr SPRN_SRR1, r11 - li r10, BOOK3S_INTERRUPT_EXTERNAL - li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ - rotldi r11, r11, 63 -2: mr r4, r9 - mtspr SPRN_LPCR, r8 - b fast_guest_return + /* Return to guest after delivering any pending interrupt */ + mr r4, r9 + b deliver_guest_interrupt ext_interrupt_to_host: @@ -1887,7 +1823,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) stb r0,HSTATE_NAPPING(r13) /* order napping_threads update vs testing entry_exit_count */ lwsync - mr r4,r3 lwz r7,VCORE_ENTRY_EXIT(r5) cmpwi r7,0x100 bge 33f /* another thread already exiting */ @@ -1940,6 +1875,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) nap b . 
+33: mr r4, r3 + li r3, 0 + li r12, 0 + b 34f + kvm_end_cede: /* get vcpu pointer */ ld r4, HSTATE_KVM_VCPU(r13) @@ -1969,12 +1909,15 @@ kvm_end_cede: ld r29, VCPU_GPR(R29)(r4) ld r30, VCPU_GPR(R30)(r4) ld r31, VCPU_GPR(R31)(r4) + + /* Check the wake reason in SRR1 to see why we got here */ + bl kvmppc_check_wake_reason /* clear our bit in vcore->napping_threads */ -33: ld r5,HSTATE_KVM_VCORE(r13) - lbz r3,HSTATE_PTID(r13) +34: ld r5,HSTATE_KVM_VCORE(r13) + lbz r7,HSTATE_PTID(r13) li r0,1 - sld r0,r0,r3 + sld r0,r0,r7 addi r6,r5,VCORE_NAPPING_THREADS 32: lwarx r7,0,r6 andc r7,r7,r0 @@ -1983,23 +1926,18 @@ kvm_end_cede: li r0,0 stb r0,HSTATE_NAPPING(r13) - /* Check the wake reason in SRR1 to see why we got here */ - mfspr r3, SPRN_SRR1 - rlwinm r3, r3, 44-31, 0x7 /* extract wake reason field */ - cmpwi r3, 4 /* was it an external interrupt? */ - li r12, BOOK3S_INTERRUPT_EXTERNAL + /* See if the wake reason means we need to exit */ + stw r12, VCPU_TRAP(r4) mr r9, r4 - ld r10, VCPU_PC(r9) - ld r11, VCPU_MSR(r9) - beq do_ext_interrupt /* if so */ + cmpdi r3, 0 + bgt guest_exit_cont /* see if any other thread is already exiting */ lwz r0,VCORE_ENTRY_EXIT(r5) cmpwi r0,0x100 - blt kvmppc_cede_reentry /* if not go back to guest */ + bge guest_exit_cont - /* some threads are exiting, so go to the guest exit path */ - b hcall_real_fallback + b kvmppc_cede_reentry /* if not go back to guest */ /* cede when already previously prodded case */ kvm_cede_prodded: @@ -2029,6 +1967,29 @@ machine_check_realmode: rotldi r11, r11, 63 b fast_interrupt_c_return +/* + * Check the reason we woke from nap, and take appropriate action. + * Returns: + * 0 if nothing needs to be done + * 1 if something happened that needs to be handled by the host + * -1 if there was a guest wakeup (IPI) + * + * Also sets r12 to the interrupt vector for any interrupt that needs + * to be handled now by the host (0x500 for external interrupt), or zero. + */ +kvmppc_check_wake_reason: + mfspr r6, SPRN_SRR1 + rlwinm r6, r6, 44-31, 0x7 /* extract wake reason field */ + cmpwi r6, 4 /* was it an external interrupt? */ + li r12, BOOK3S_INTERRUPT_EXTERNAL + beq kvmppc_read_intr /* if so, see what it was */ + li r3, 0 + li r12, 0 + cmpwi r6, 6 /* was it the decrementer? */ + beq 0f + li r3, 1 /* anything else, return 1 */ +0: blr + /* * Determine what sort of external interrupt is pending (if any). * Returns: @@ -2060,7 +2021,6 @@ kvmppc_read_intr: * interrupts directly to the guest */ cmpwi r3, XICS_IPI /* if there is, is it an IPI? */ - li r3, 1 bne 42f /* It's an IPI, clear the MFRR and EOI it */ @@ -2086,12 +2046,14 @@ kvmppc_read_intr: * before exit, it will be picked up by the host ICP driver */ stw r0, HSTATE_SAVED_XIRR(r13) + li r3, 1 b 1b 43: /* We raced with the host, we need to resend that IPI, bummer */ li r0, IPI_PRIORITY stbcix r0, r6, r8 /* set the IPI */ sync + li r3, 1 b 1b /* From aa31e843225769735b79795c955426c9479046a5 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 8 Jan 2014 21:25:26 +1100 Subject: [PATCH 34/45] KVM: PPC: Book3S HV: Handle guest using doorbells for IPIs * SRR1 wake reason field for system reset interrupt on wakeup from nap is now a 4-bit field on P8, compared to 3 bits on P7. * Set PECEDP in LPCR when napping because of H_CEDE so guest doorbells will wake us up. * Waking up from nap because of a guest doorbell interrupt is not a reason to exit the guest. 
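The SRR1 decode that the following diff implements with rlwinm can be modelled in C as (a minimal user-space sketch; the bit positions are inferred from the shift amounts in the patch, not quoted from the ISA):

    #include <stdbool.h>
    #include <stdint.h>

    /* POWER7 keeps a 3-bit wake reason in SRR1 bits 21:19 (LSB = 0);
     * POWER8 widens the field to 4 bits, 21:18. Shifting the P7 value
     * left by one aligns the two encodings, so a single set of
     * comparisons works: 8 = external, 6 = decrementer. */
    static unsigned int wake_reason(uint64_t srr1, bool power8)
    {
            if (power8)
                    return (srr1 >> 18) & 0xf;
            return ((srr1 >> 19) & 0x7) << 1;
    }

Aligning the encodings this way is what lets the P7 path below mask with 0xe rather than 0x7.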
Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/reg.h | 4 +++- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 19 +++++++++++++++---- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 5a9983147683..1248b40107ea 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -307,7 +307,9 @@ #define LPCR_ILE 0x02000000 /* !HV irqs set MSR:LE */ #define LPCR_AIL_0 0x00000000 /* MMU off exception offset 0x0 */ #define LPCR_AIL_3 0x01800000 /* MMU on exception offset 0xc00...4xxx */ -#define LPCR_PECE 0x00007000 /* powersave exit cause enable */ +#define LPCR_PECE 0x0001f000 /* powersave exit cause enable */ +#define LPCR_PECEDP 0x00010000 /* directed priv dbells cause exit */ +#define LPCR_PECEDH 0x00008000 /* directed hyp dbells cause exit */ #define LPCR_PECE0 0x00004000 /* ext. exceptions can cause exit */ #define LPCR_PECE1 0x00002000 /* decrementer can cause exit */ #define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */ diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 386e141dbf16..9e89c7577b4a 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1857,13 +1857,16 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) bl kvmppc_save_fp /* - * Take a nap until a decrementer or external interrupt occurs, - * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR + * Take a nap until a decrementer or external or doobell interrupt + * occurs, with PECE1, PECE0 and PECEDP set in LPCR */ li r0,1 stb r0,HSTATE_HWTHREAD_REQ(r13) mfspr r5,SPRN_LPCR ori r5,r5,LPCR_PECE0 | LPCR_PECE1 +BEGIN_FTR_SECTION + oris r5,r5,LPCR_PECEDP@h +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) mtspr SPRN_LPCR,r5 isync li r0, 0 @@ -1979,14 +1982,22 @@ machine_check_realmode: */ kvmppc_check_wake_reason: mfspr r6, SPRN_SRR1 - rlwinm r6, r6, 44-31, 0x7 /* extract wake reason field */ - cmpwi r6, 4 /* was it an external interrupt? */ +BEGIN_FTR_SECTION + rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */ +FTR_SECTION_ELSE + rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */ +ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S) + cmpwi r6, 8 /* was it an external interrupt? */ li r12, BOOK3S_INTERRUPT_EXTERNAL beq kvmppc_read_intr /* if so, see what it was */ li r3, 0 li r12, 0 cmpwi r6, 6 /* was it the decrementer? */ beq 0f +BEGIN_FTR_SECTION + cmpwi r6, 5 /* privileged doorbell? */ + beq 0f +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) li r3, 1 /* anything else, return 1 */ 0: blr From e0622bd9f2fccc8a801fa7aaf4fa6d7c728c3a78 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 8 Jan 2014 21:25:27 +1100 Subject: [PATCH 35/45] KVM: PPC: Book3S HV: Handle new LPCR bits on POWER8 POWER8 has a bit in the LPCR to enable or disable the PURR and SPURR registers to count when in the guest. Set this bit. POWER8 has a field in the LPCR called AIL (Alternate Interrupt Location) which is used to enable relocation-on interrupts. Allow userspace to set this field. 
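The resulting kvmppc_set_lpcr() update stays a simple masked read-modify-write; in C (a sketch — LPCR_AIL is the reg.h value added below, while the pre-existing LPCR_DPFD and LPCR_TC bits are folded into base_mask because their values do not appear in this series):

    #include <stdbool.h>
    #include <stdint.h>

    #define LPCR_ILE 0x02000000ul   /* !HV irqs set MSR:LE */
    #define LPCR_AIL 0x01800000ul   /* alternate interrupt location */

    static uint64_t update_lpcr(uint64_t old_lpcr, uint64_t new_lpcr,
                                uint64_t base_mask, bool power8)
    {
            uint64_t mask = base_mask | LPCR_ILE;

            if (power8)
                    mask |= LPCR_AIL;   /* userspace may set AIL on POWER8 */
            /* bits outside the mask are preserved, not userspace-writable */
            return (old_lpcr & ~mask) | (new_lpcr & mask);
    }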
Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/reg.h | 2 ++ arch/powerpc/kvm/book3s_hv.c | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 1248b40107ea..05ecb072540c 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -305,8 +305,10 @@ #define LPCR_RMLS 0x1C000000 /* impl dependent rmo limit sel */ #define LPCR_RMLS_SH (63-37) #define LPCR_ILE 0x02000000 /* !HV irqs set MSR:LE */ +#define LPCR_AIL 0x01800000 /* Alternate interrupt location */ #define LPCR_AIL_0 0x00000000 /* MMU off exception offset 0x0 */ #define LPCR_AIL_3 0x01800000 /* MMU on exception offset 0xc00...4xxx */ +#define LPCR_ONL 0x00040000 /* online - PURR/SPURR count */ #define LPCR_PECE 0x0001f000 /* powersave exit cause enable */ #define LPCR_PECEDP 0x00010000 /* directed priv dbells cause exit */ #define LPCR_PECEDH 0x00008000 /* directed hyp dbells cause exit */ diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 1e9f4b45432b..d7f2ec6f1419 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -789,8 +789,11 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr) /* * Userspace can only modify DPFD (default prefetch depth), * ILE (interrupt little-endian) and TC (translation control). + * On POWER8 userspace can also modify AIL (alt. interrupt loc.) */ mask = LPCR_DPFD | LPCR_ILE | LPCR_TC; + if (cpu_has_feature(CPU_FTR_ARCH_207S)) + mask |= LPCR_AIL; vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); spin_unlock(&vc->lock); } @@ -2166,6 +2169,9 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm) LPCR_VPM0 | LPCR_VPM1; kvm->arch.vrma_slb_v = SLB_VSID_B_1T | (VRMA_VSID << SLB_VSID_SHIFT_1T); + /* On POWER8 turn on online bit to enable PURR/SPURR */ + if (cpu_has_feature(CPU_FTR_ARCH_207S)) + lpcr |= LPCR_ONL; } kvm->arch.lpcr = lpcr; From 5d00f66b865e3782c5852cdafe1cea11a292a81e Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 8 Jan 2014 21:25:28 +1100 Subject: [PATCH 36/45] KVM: PPC: Book3S HV: Prepare for host using hypervisor doorbells POWER8 has support for hypervisor doorbell interrupts. Though the kernel doesn't use them for IPIs on the powernv platform yet, it probably will in future, so this makes KVM cope gracefully if a hypervisor doorbell interrupt arrives while in a guest. 
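Building on the wake_reason() sketch above, the POWER8 doorbell handling added below amounts to two extra cases (a sketch; trap stands in for r12, and BOOK3S_INTERRUPT_H_DOORBELL is the 0xe80 vector defined in this patch):

    #define BOOK3S_INTERRUPT_H_DOORBELL 0xe80

    /* Returns 0 if nothing needs to be done (re-enter the guest),
     * 1 if the wakeup must be handled by the host. */
    static int wake_reason_p8_doorbells(unsigned int reason, int *trap)
    {
            switch (reason) {
            case 5:                 /* privileged (guest) doorbell */
                    return 0;
            case 3:                 /* hypervisor doorbell */
                    *trap = BOOK3S_INTERRUPT_H_DOORBELL;
                    return 1;
            default:
                    return 1;       /* anything else: host handles it */
            }
    }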
Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/kvm_asm.h | 1 + arch/powerpc/kvm/book3s_hv.c | 1 + arch/powerpc/kvm/book3s_hv_rmhandlers.S | 7 +++++++ 3 files changed, 9 insertions(+) diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index dba8fb244100..c3815b1e79e8 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h @@ -96,6 +96,7 @@ #define BOOK3S_INTERRUPT_H_DATA_STORAGE 0xe00 #define BOOK3S_INTERRUPT_H_INST_STORAGE 0xe20 #define BOOK3S_INTERRUPT_H_EMUL_ASSIST 0xe40 +#define BOOK3S_INTERRUPT_H_DOORBELL 0xe80 #define BOOK3S_INTERRUPT_PERFMON 0xf00 #define BOOK3S_INTERRUPT_ALTIVEC 0xf20 #define BOOK3S_INTERRUPT_VSX 0xf40 diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index d7f2ec6f1419..216049ff7368 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -651,6 +651,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, r = RESUME_GUEST; break; case BOOK3S_INTERRUPT_EXTERNAL: + case BOOK3S_INTERRUPT_H_DOORBELL: vcpu->stat.ext_intr_exits++; r = RESUME_GUEST; break; diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 9e89c7577b4a..eae4ab9b9135 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1997,10 +1997,17 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S) BEGIN_FTR_SECTION cmpwi r6, 5 /* privileged doorbell? */ beq 0f + cmpwi r6, 3 /* hypervisor doorbell? */ + beq 3f END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) li r3, 1 /* anything else, return 1 */ 0: blr + /* hypervisor doorbell */ +3: li r12, BOOK3S_INTERRUPT_H_DOORBELL + li r3, 1 + blr + /* * Determine what sort of external interrupt is pending (if any). * Returns: From 8563bf52d509213e746295341ab52896b562ca5e Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 8 Jan 2014 21:25:29 +1100 Subject: [PATCH 37/45] KVM: PPC: Book3S HV: Add support for DABRX register on POWER7 The DABRX (DABR extension) register on POWER7 processors provides finer control over which accesses cause a data breakpoint interrupt. It contains 3 bits which indicate whether to enable accesses in user, kernel and hypervisor modes respectively to cause data breakpoint interrupts, plus one bit that enables both real mode and virtual mode accesses to cause interrupts. Currently, KVM sets DABRX to allow both kernel and user accesses to cause interrupts while in the guest. This adds support for the guest to specify other values for DABRX. PAPR defines a H_SET_XDABR hcall to allow the guest to set both DABR and DABRX with one call. This adds a real-mode implementation of H_SET_XDABR, which shares most of its code with the existing H_SET_DABR implementation. To support this, we add a per-vcpu field to store the DABRX value plus code to get and set it via the ONE_REG interface. For Linux guests to use this new hcall, userspace needs to add "hcall-xdabr" to the set of strings in the /chosen/hypertas-functions property in the device tree. If userspace does this and then migrates the guest to a host where the kernel doesn't include this patch, then userspace will need to implement H_SET_XDABR by writing the specified DABR value to the DABR using the ONE_REG interface. In that case, the old kernel will set DABRX to DABRX_USER | DABRX_KERNEL. 
That should still work correctly, at least for Linux guests, since Linux guests cope with getting data breakpoint interrupts in modes that weren't requested by just ignoring the interrupt, and Linux guests never set DABRX_BTI. The other thing this does is to make H_SET_DABR and H_SET_XDABR work on POWER8, which has the DAWR and DAWRX instead of DABR/X. Guests that know about POWER8 should use H_SET_MODE rather than H_SET_[X]DABR, but guests running in POWER7 compatibility mode will still use H_SET_[X]DABR. For them, this adds the logic to convert DABR/X values into DAWR/X values on POWER8. Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- Documentation/virtual/kvm/api.txt | 1 + arch/powerpc/include/asm/kvm_host.h | 1 + arch/powerpc/include/asm/reg.h | 18 ++++++++------ arch/powerpc/include/uapi/asm/kvm.h | 2 ++ arch/powerpc/kernel/asm-offsets.c | 1 + arch/powerpc/kvm/book3s_hv.c | 6 +++++ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 32 +++++++++++++++++++++++-- 7 files changed, 52 insertions(+), 9 deletions(-) diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index a30035dd4c26..5056baf574f4 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt @@ -1838,6 +1838,7 @@ registers, find a list below: PPC | KVM_REG_PPC_LPCR | 64 PPC | KVM_REG_PPC_PPR | 64 PPC | KVM_REG_PPC_ARCH_COMPAT 32 + PPC | KVM_REG_PPC_DABRX | 32 PPC | KVM_REG_PPC_TM_GPR0 | 64 ... PPC | KVM_REG_PPC_TM_GPR31 | 64 diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 81c92d1d7978..d161bc09153b 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -466,6 +466,7 @@ struct kvm_vcpu_arch { ulong uamor; ulong iamr; u32 ctrl; + u32 dabrx; ulong dabr; ulong dawr; ulong dawrx; diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 05ecb072540c..adf644a80a3e 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -229,16 +229,20 @@ #define CIABR_PRIV_SUPER 2 #define CIABR_PRIV_HYPER 3 #define SPRN_DAWRX 0xBC -#define DAWRX_USER (1UL << 0) -#define DAWRX_KERNEL (1UL << 1) -#define DAWRX_HYP (1UL << 2) +#define DAWRX_USER __MASK(0) +#define DAWRX_KERNEL __MASK(1) +#define DAWRX_HYP __MASK(2) +#define DAWRX_WTI __MASK(3) +#define DAWRX_WT __MASK(4) +#define DAWRX_DR __MASK(5) +#define DAWRX_DW __MASK(6) #define SPRN_DABR 0x3F5 /* Data Address Breakpoint Register */ #define SPRN_DABR2 0x13D /* e300 */ #define SPRN_DABRX 0x3F7 /* Data Address Breakpoint Register Extension */ -#define DABRX_USER (1UL << 0) -#define DABRX_KERNEL (1UL << 1) -#define DABRX_HYP (1UL << 2) -#define DABRX_BTI (1UL << 3) +#define DABRX_USER __MASK(0) +#define DABRX_KERNEL __MASK(1) +#define DABRX_HYP __MASK(2) +#define DABRX_BTI __MASK(3) #define DABRX_ALL (DABRX_BTI | DABRX_HYP | DABRX_KERNEL | DABRX_USER) #define SPRN_DAR 0x013 /* Data Address Register */ #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */ diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index a586fb9b77bd..a6665be4f3ab 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -554,6 +554,8 @@ struct kvm_get_htab_header { /* Architecture compatibility level */ #define KVM_REG_PPC_ARCH_COMPAT (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7) +#define KVM_REG_PPC_DABRX (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb8) + /* Transactional Memory checkpointed state: * This is all GPRs, all VSX regs and a subset of SPRs */ diff 
--git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 043900abbbb0..239a857f1141 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -493,6 +493,7 @@ int main(void) DEFINE(VCPU_IAMR, offsetof(struct kvm_vcpu, arch.iamr)); DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl)); DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr)); + DEFINE(VCPU_DABRX, offsetof(struct kvm_vcpu, arch.dabrx)); DEFINE(VCPU_DAWR, offsetof(struct kvm_vcpu, arch.dawr)); DEFINE(VCPU_DAWRX, offsetof(struct kvm_vcpu, arch.dawrx)); DEFINE(VCPU_CIABR, offsetof(struct kvm_vcpu, arch.ciabr)); diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 216049ff7368..eb4eed4a7173 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -812,6 +812,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_DABR: *val = get_reg_val(id, vcpu->arch.dabr); break; + case KVM_REG_PPC_DABRX: + *val = get_reg_val(id, vcpu->arch.dabrx); + break; case KVM_REG_PPC_DSCR: *val = get_reg_val(id, vcpu->arch.dscr); break; @@ -967,6 +970,9 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_DABR: vcpu->arch.dabr = set_reg_val(id, *val); break; + case KVM_REG_PPC_DABRX: + vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; + break; case KVM_REG_PPC_DSCR: vcpu->arch.dscr = set_reg_val(id, *val); break; diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index eae4ab9b9135..56299349e94b 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -585,7 +585,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) BEGIN_FTR_SECTION /* Set partition DABR */ /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */ - li r5,3 + lwz r5,VCPU_DABRX(r4) ld r6,VCPU_DABR(r4) mtspr SPRN_DABRX,r5 mtspr SPRN_DABR,r6 @@ -1763,24 +1763,52 @@ hcall_real_table: .long 0 /* 0x11c */ .long 0 /* 0x120 */ .long .kvmppc_h_bulk_remove - hcall_real_table + .long 0 /* 0x128 */ + .long 0 /* 0x12c */ + .long 0 /* 0x130 */ + .long .kvmppc_h_set_xdabr - hcall_real_table hcall_real_table_end: ignore_hdec: mr r4,r9 b fast_guest_return +_GLOBAL(kvmppc_h_set_xdabr) + andi. r0, r5, DABRX_USER | DABRX_KERNEL + beq 6f + li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI + andc. r0, r5, r0 + beq 3f +6: li r3, H_PARAMETER + blr + _GLOBAL(kvmppc_h_set_dabr) + li r5, DABRX_USER | DABRX_KERNEL +3: BEGIN_FTR_SECTION b 2f END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) std r4,VCPU_DABR(r3) + stw r5, VCPU_DABRX(r3) + mtspr SPRN_DABRX, r5 /* Work around P7 bug where DABR can get corrupted on mtspr */ 1: mtspr SPRN_DABR,r4 mfspr r5, SPRN_DABR cmpd r4, r5 bne 1b isync -2: li r3,0 + li r3,0 + blr + + /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */ +2: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW + rlwimi r5, r4, 1, DAWRX_WT + clrrdi r4, r4, 3 + std r4, VCPU_DAWR(r3) + std r5, VCPU_DAWRX(r3) + mtspr SPRN_DAWR, r4 + mtspr SPRN_DAWRX, r5 + li r3, 0 blr _GLOBAL(kvmppc_h_cede) From d682916a381ac7c8eb965c10ab64bc7cc2f18647 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Wed, 8 Jan 2014 21:25:30 +1100 Subject: [PATCH 38/45] KVM: PPC: Book3S HV: Basic little-endian guest support We create a guest MSR from scratch when delivering exceptions in a few places. Instead of extracting LPCR[ILE] and inserting it into MSR_LE each time, we simply create a new variable intr_msr which contains the entire MSR to use. 
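For reference, the idiom being removed in the diff below and its replacement reduce to the following C (a sketch; the MSR bit positions — SF at bit 63, ME at bit 12, LE at bit 0 — are assumed from the kernel's reg.h rather than shown in this series):

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_SF (1ull << 63)
    #define MSR_ME (1ull << 12)
    #define MSR_LE (1ull << 0)

    /* Old: synthesized at every delivery site. li r11,(MSR_ME << 1)|1
     * followed by rotldi r11,r11,63 rotates the low bit up into MSR_SF
     * and drops the shifted MSR_ME back into place. */
    static uint64_t synthesized_intr_msr(void)
    {
            uint64_t v = (MSR_ME << 1) | 1;

            return (v >> 1) | (v << 63);    /* rotate right by one */
    }

    /* New: built once per vcpu with MSR_LE folded in from LPCR[ILE],
     * then simply loaded (ld r11, VCPU_INTR_MSR(...)) on delivery. */
    static uint64_t intr_msr(bool guest_is_little_endian)
    {
            uint64_t msr = MSR_SF | MSR_ME;

            if (guest_is_little_endian)
                    msr |= MSR_LE;
            return msr;
    }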
For a little-endian guest, userspace needs to set the ILE (interrupt little-endian) bit in the LPCR for each vcpu (or at least one vcpu in each virtual core). [paulus@samba.org - removed H_SET_MODE implementation from original version of the patch, and made kvmppc_set_lpcr update vcpu->arch.intr_msr.] Signed-off-by: Anton Blanchard Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/kvm_host.h | 1 + arch/powerpc/kernel/asm-offsets.c | 1 + arch/powerpc/kvm/book3s_64_mmu_hv.c | 2 +- arch/powerpc/kvm/book3s_hv.c | 22 ++++++++++++++++++++++ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 15 +++++---------- 5 files changed, 30 insertions(+), 11 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index d161bc09153b..7726a3bc8ff0 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -636,6 +636,7 @@ struct kvm_vcpu_arch { spinlock_t tbacct_lock; u64 busy_stolen; u64 busy_preempt; + unsigned long intr_msr; #endif }; diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 239a857f1141..a60a2fdae5bd 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -480,6 +480,7 @@ int main(void) DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar)); DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr)); DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty)); + DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr)); #endif #ifdef CONFIG_PPC_BOOK3S DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id)); diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index efb8aa544876..22cc895333e6 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -262,7 +262,7 @@ int kvmppc_mmu_hv_init(void) static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu) { - kvmppc_set_msr(vcpu, MSR_SF | MSR_ME); + kvmppc_set_msr(vcpu, vcpu->arch.intr_msr); } /* diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index eb4eed4a7173..3195e4f8a2ed 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -787,6 +787,27 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr) u64 mask; spin_lock(&vc->lock); + /* + * If ILE (interrupt little-endian) has changed, update the + * MSR_LE bit in the intr_msr for each vcpu in this vcore. + */ + if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { + struct kvm *kvm = vcpu->kvm; + struct kvm_vcpu *vcpu; + int i; + + mutex_lock(&kvm->lock); + kvm_for_each_vcpu(i, vcpu, kvm) { + if (vcpu->arch.vcore != vc) + continue; + if (new_lpcr & LPCR_ILE) + vcpu->arch.intr_msr |= MSR_LE; + else + vcpu->arch.intr_msr &= ~MSR_LE; + } + mutex_unlock(&kvm->lock); + } + /* * Userspace can only modify DPFD (default prefetch depth), * ILE (interrupt little-endian) and TC (translation control). 
@@ -1155,6 +1176,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, spin_lock_init(&vcpu->arch.vpa_update_lock); spin_lock_init(&vcpu->arch.tbacct_lock); vcpu->arch.busy_preempt = TB_NIL; + vcpu->arch.intr_msr = MSR_SF | MSR_ME; kvmppc_mmu_book3s_hv_init(vcpu); diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 56299349e94b..ecb76357c9d0 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -812,8 +812,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 12: mtspr SPRN_SRR0, r10 mr r10,r0 mtspr SPRN_SRR1, r11 - li r11,(MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ - rotldi r11,r11,63 + ld r11, VCPU_INTR_MSR(r4) 5: /* @@ -1551,8 +1550,7 @@ kvmppc_hdsi: mtspr SPRN_SRR0, r10 mtspr SPRN_SRR1, r11 li r10, BOOK3S_INTERRUPT_DATA_STORAGE - li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ - rotldi r11, r11, 63 + ld r11, VCPU_INTR_MSR(r9) fast_interrupt_c_return: 6: ld r7, VCPU_CTR(r9) lwz r8, VCPU_XER(r9) @@ -1621,8 +1619,7 @@ kvmppc_hisi: 1: mtspr SPRN_SRR0, r10 mtspr SPRN_SRR1, r11 li r10, BOOK3S_INTERRUPT_INST_STORAGE - li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ - rotldi r11, r11, 63 + ld r11, VCPU_INTR_MSR(r9) b fast_interrupt_c_return 3: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */ @@ -1665,8 +1662,7 @@ sc_1_fast_return: mtspr SPRN_SRR0,r10 mtspr SPRN_SRR1,r11 li r10, BOOK3S_INTERRUPT_SYSCALL - li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ - rotldi r11, r11, 63 + ld r11, VCPU_INTR_MSR(r9) mr r4,r9 b fast_guest_return @@ -1994,8 +1990,7 @@ machine_check_realmode: beq mc_cont /* If not, deliver a machine check. SRR0/1 are already set */ li r10, BOOK3S_INTERRUPT_MACHINE_CHECK - li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */ - rotldi r11, r11, 63 + ld r11, VCPU_INTR_MSR(r9) b fast_interrupt_c_return /* From 7b37a1232273912dd57cd72b82fe70407bff7683 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Wed, 8 Jan 2014 21:25:31 +1100 Subject: [PATCH 39/45] powerpc/Kconfig: Make TM select VSX and VMX There are no processors in existence that have TM but no VMX or VSX. So let's make CONFIG_PPC_TRANSACTIONAL_MEM select both CONFIG_VSX and CONFIG_ALTIVEC. This makes the code a lot simpler by removing the need for a bunch of #ifdefs. Signed-off-by: Michael Neuling Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/Kconfig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 4740b0a15fa8..799d758f42c4 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -338,6 +338,8 @@ config PPC_TRANSACTIONAL_MEM bool "Transactional Memory support for POWERPC" depends on PPC_BOOK3S_64 depends on SMP + select ALTIVEC + select VSX default n ---help--- Support user-mode Transactional Memory on POWERPC. From 7b490411c37f7ab7965cbdfe5e3ec28eadb6db5b Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Wed, 8 Jan 2014 21:25:32 +1100 Subject: [PATCH 40/45] KVM: PPC: Book3S HV: Add new state for transactional memory Add new state for transactional memory (TM) to kvm_vcpu_arch. Also add asm-offset bits that are going to be required. This also moves the existing TFHAR, TFIAR and TEXASR SPRs into a CONFIG_PPC_TRANSACTIONAL_MEM section. This requires some code changes to ensure we still compile with CONFIG_PPC_TRANSACTIONAL_MEM=N. Many of the added #ifdefs are removed in a later patch when the bulk of the TM code is added.
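The TFHAR/TFIAR/TEXASR accesses that move under the new #ifdef are bracketed in the entry and exit paths by an mfmsr/rldimi/mtmsrd sequence that turns on MSR[TM] first, since these SPRs are only accessible with the TM facility enabled. In C the whole sequence is a single bit-set (a sketch; MSR_TM_LG = 32 is assumed from the kernel's reg.h, it is not defined anywhere in this series):

    #include <stdint.h>

    #define MSR_TM_LG 32    /* assumed TM bit position, per reg.h */

    /* mfmsr r8; li r0,1; rldimi r8,r0,MSR_TM_LG,63-MSR_TM_LG; mtmsrd r8 */
    static inline uint64_t msr_with_tm_enabled(uint64_t msr)
    {
            return msr | (1ull << MSR_TM_LG);
    }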
Signed-off-by: Michael Neuling Signed-off-by: Paul Mackerras [agraf: fix merge conflict] Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/kvm_host.h | 24 +++++++- arch/powerpc/kernel/asm-offsets.c | 19 ++++++- arch/powerpc/kvm/book3s_hv.c | 4 ++ arch/powerpc/kvm/book3s_hv_rmhandlers.S | 75 ++++++++++++++++++++++++- 4 files changed, 114 insertions(+), 8 deletions(-) diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index 7726a3bc8ff0..1eaea2dea174 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h @@ -475,9 +475,6 @@ struct kvm_vcpu_arch { ulong ppr; ulong pspb; ulong fscr; - ulong tfhar; - ulong tfiar; - ulong texasr; ulong ebbhr; ulong ebbrr; ulong bescr; @@ -526,6 +523,27 @@ struct kvm_vcpu_arch { u64 siar; u64 sdar; u64 sier; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + u64 tfhar; + u64 texasr; + u64 tfiar; + + u32 cr_tm; + u64 lr_tm; + u64 ctr_tm; + u64 amr_tm; + u64 ppr_tm; + u64 dscr_tm; + u64 tar_tm; + + ulong gpr_tm[32]; + + struct thread_fp_state fp_tm; + + struct thread_vr_state vr_tm; + u32 vrsave_tm; /* also USPRG0 */ + +#endif #ifdef CONFIG_KVM_EXIT_TIMING struct mutex exit_timing_lock; diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index a60a2fdae5bd..687f2ebf0cce 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -521,9 +521,6 @@ int main(void) DEFINE(VCPU_PPR, offsetof(struct kvm_vcpu, arch.ppr)); DEFINE(VCPU_FSCR, offsetof(struct kvm_vcpu, arch.fscr)); DEFINE(VCPU_PSPB, offsetof(struct kvm_vcpu, arch.pspb)); - DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar)); - DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar)); - DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr)); DEFINE(VCPU_EBBHR, offsetof(struct kvm_vcpu, arch.ebbhr)); DEFINE(VCPU_EBBRR, offsetof(struct kvm_vcpu, arch.ebbrr)); DEFINE(VCPU_BESCR, offsetof(struct kvm_vcpu, arch.bescr)); @@ -545,6 +542,22 @@ int main(void) DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige)); DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv)); DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb)); +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + DEFINE(VCPU_TFHAR, offsetof(struct kvm_vcpu, arch.tfhar)); + DEFINE(VCPU_TFIAR, offsetof(struct kvm_vcpu, arch.tfiar)); + DEFINE(VCPU_TEXASR, offsetof(struct kvm_vcpu, arch.texasr)); + DEFINE(VCPU_GPR_TM, offsetof(struct kvm_vcpu, arch.gpr_tm)); + DEFINE(VCPU_FPRS_TM, offsetof(struct kvm_vcpu, arch.fp_tm.fpr)); + DEFINE(VCPU_VRS_TM, offsetof(struct kvm_vcpu, arch.vr_tm.vr)); + DEFINE(VCPU_VRSAVE_TM, offsetof(struct kvm_vcpu, arch.vrsave_tm)); + DEFINE(VCPU_CR_TM, offsetof(struct kvm_vcpu, arch.cr_tm)); + DEFINE(VCPU_LR_TM, offsetof(struct kvm_vcpu, arch.lr_tm)); + DEFINE(VCPU_CTR_TM, offsetof(struct kvm_vcpu, arch.ctr_tm)); + DEFINE(VCPU_AMR_TM, offsetof(struct kvm_vcpu, arch.amr_tm)); + DEFINE(VCPU_PPR_TM, offsetof(struct kvm_vcpu, arch.ppr_tm)); + DEFINE(VCPU_DSCR_TM, offsetof(struct kvm_vcpu, arch.dscr_tm)); + DEFINE(VCPU_TAR_TM, offsetof(struct kvm_vcpu, arch.tar_tm)); +#endif #ifdef CONFIG_PPC_BOOK3S_64 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 3195e4f8a2ed..f4a4c5c82fb2 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -875,6 +875,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_IAMR: *val = get_reg_val(id, vcpu->arch.iamr); break; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM case 
KVM_REG_PPC_TFHAR: *val = get_reg_val(id, vcpu->arch.tfhar); break; @@ -884,6 +885,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_TEXASR: *val = get_reg_val(id, vcpu->arch.texasr); break; +#endif case KVM_REG_PPC_FSCR: *val = get_reg_val(id, vcpu->arch.fscr); break; @@ -1033,6 +1035,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_IAMR: vcpu->arch.iamr = set_reg_val(id, *val); break; +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM case KVM_REG_PPC_TFHAR: vcpu->arch.tfhar = set_reg_val(id, *val); break; @@ -1042,6 +1045,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, case KVM_REG_PPC_TEXASR: vcpu->arch.texasr = set_reg_val(id, *val); break; +#endif case KVM_REG_PPC_FSCR: vcpu->arch.fscr = set_reg_val(id, *val); break; diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index ecb76357c9d0..dfa144cb199b 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -701,13 +701,15 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) ld r6, VCPU_VTB(r4) mtspr SPRN_IC, r5 mtspr SPRN_VTB, r6 +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM ld r5, VCPU_TFHAR(r4) ld r6, VCPU_TFIAR(r4) ld r7, VCPU_TEXASR(r4) - ld r8, VCPU_EBBHR(r4) mtspr SPRN_TFHAR, r5 mtspr SPRN_TFIAR, r6 mtspr SPRN_TEXASR, r7 +#endif + ld r8, VCPU_EBBHR(r4) mtspr SPRN_EBBHR, r8 ld r5, VCPU_EBBRR(r4) ld r6, VCPU_BESCR(r4) @@ -1118,13 +1120,15 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) std r5, VCPU_IC(r9) std r6, VCPU_VTB(r9) std r7, VCPU_TAR(r9) +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM mfspr r5, SPRN_TFHAR mfspr r6, SPRN_TFIAR mfspr r7, SPRN_TEXASR - mfspr r8, SPRN_EBBHR std r5, VCPU_TFHAR(r9) std r6, VCPU_TFIAR(r9) std r7, VCPU_TEXASR(r9) +#endif + mfspr r8, SPRN_EBBHR std r8, VCPU_EBBHR(r9) mfspr r5, SPRN_EBBRR mfspr r6, SPRN_BESCR @@ -1497,6 +1501,73 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 1: addi r8,r8,16 .endr + /* Save DEC */ + mfspr r5,SPRN_DEC + mftb r6 + extsw r5,r5 + add r5,r5,r6 + std r5,VCPU_DEC_EXPIRES(r9) + +BEGIN_FTR_SECTION + b 8f +END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) + /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */ + mfmsr r8 + li r0, 1 + rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG + mtmsrd r8 + + /* Save POWER8-specific registers */ + mfspr r5, SPRN_IAMR + mfspr r6, SPRN_PSPB + mfspr r7, SPRN_FSCR + std r5, VCPU_IAMR(r9) + stw r6, VCPU_PSPB(r9) + std r7, VCPU_FSCR(r9) + mfspr r5, SPRN_IC + mfspr r6, SPRN_VTB + mfspr r7, SPRN_TAR + std r5, VCPU_IC(r9) + std r6, VCPU_VTB(r9) + std r7, VCPU_TAR(r9) +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM + mfspr r5, SPRN_TFHAR + mfspr r6, SPRN_TFIAR + mfspr r7, SPRN_TEXASR + std r5, VCPU_TFHAR(r9) + std r6, VCPU_TFIAR(r9) + std r7, VCPU_TEXASR(r9) +#endif + mfspr r8, SPRN_EBBHR + std r8, VCPU_EBBHR(r9) + mfspr r5, SPRN_EBBRR + mfspr r6, SPRN_BESCR + mfspr r7, SPRN_CSIGR + mfspr r8, SPRN_TACR + std r5, VCPU_EBBRR(r9) + std r6, VCPU_BESCR(r9) + std r7, VCPU_CSIGR(r9) + std r8, VCPU_TACR(r9) + mfspr r5, SPRN_TCSCR + mfspr r6, SPRN_ACOP + mfspr r7, SPRN_PID + mfspr r8, SPRN_WORT + std r5, VCPU_TCSCR(r9) + std r6, VCPU_ACOP(r9) + stw r7, VCPU_GUEST_PID(r9) + std r8, VCPU_WORT(r9) +8: + + /* Save and reset AMR and UAMOR before turning on the MMU */ +BEGIN_FTR_SECTION + mfspr r5,SPRN_AMR + mfspr r6,SPRN_UAMOR + std r5,VCPU_AMR(r9) + std r6,VCPU_UAMOR(r9) + li r6,0 + mtspr SPRN_AMR,r6 +END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) + /* Unset guest mode */ li r0, KVM_GUEST_MODE_NONE stb r0, HSTATE_IN_GUEST(r13) From 
b17dfec0606151ba3a51fc27624aef935377e132 Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Wed, 8 Jan 2014 21:25:33 +1100 Subject: [PATCH 41/45] KVM: PPC: Book3S HV: Add software abort codes for transactional memory This adds the software abort code defines for transactional memory (TM). These values are from PAPR. Signed-off-by: Michael Neuling Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/include/uapi/asm/tm.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/powerpc/include/uapi/asm/tm.h b/arch/powerpc/include/uapi/asm/tm.h index 85059a00f560..5d836b7c1176 100644 --- a/arch/powerpc/include/uapi/asm/tm.h +++ b/arch/powerpc/include/uapi/asm/tm.h @@ -6,6 +6,8 @@ * the failure is persistent. PAPR saves 0xff-0xe0 for the hypervisor. */ #define TM_CAUSE_PERSISTENT 0x01 +#define TM_CAUSE_KVM_RESCHED 0xe0 /* From PAPR */ +#define TM_CAUSE_KVM_FAC_UNAV 0xe2 /* From PAPR */ #define TM_CAUSE_RESCHED 0xde #define TM_CAUSE_TLBI 0xdc #define TM_CAUSE_FAC_UNAV 0xda From 4068890931f62752abc3591e7b3736e7537c6dcb Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Wed, 8 Jan 2014 21:25:36 +1100 Subject: [PATCH 42/45] KVM: PPC: Book3S PR: Cope with doorbell interrupts When the PR host is running on a POWER8 machine in POWER8 mode, it will use doorbell interrupts for IPIs. If one of them arrives while we are in the guest, we pop out of the guest with trap number 0xA00, which isn't handled by kvmppc_handle_exit_pr, leading to the following BUG_ON: [ 331.436215] exit_nr=0xa00 | pc=0x1d2c | msr=0x800000000000d032 [ 331.437522] ------------[ cut here ]------------ [ 331.438296] kernel BUG at arch/powerpc/kvm/book3s_pr.c:982! [ 331.439063] Oops: Exception in kernel mode, sig: 5 [#2] [ 331.439819] SMP NR_CPUS=1024 NUMA pSeries [ 331.440552] Modules linked in: tun nf_conntrack_netbios_ns nf_conntrack_broadcast ipt_MASQUERADE ip6t_REJECT xt_conntrack ebtable_nat ebtable_broute bridge stp llc ebtable_filter ebtables ip6table_nat nf_conntrack_ipv6 nf_defrag_ipv6 nf_nat_ipv6 ip6table_mangle ip6table_security ip6table_raw ip6table_filter ip6_tables iptable_nat nf_conntrack_ipv4 nf_defrag_ipv4 nf_nat_ipv4 nf_nat nf_conntrack iptable_mangle iptable_security iptable_raw virtio_net kvm binfmt_misc ibmvscsi scsi_transport_srp scsi_tgt virtio_blk [ 331.447614] CPU: 11 PID: 1296 Comm: qemu-system-ppc Tainted: G D 3.11.7-200.2.fc19.ppc64p7 #1 [ 331.448920] task: c0000003bdc8c000 ti: c0000003bd32c000 task.ti: c0000003bd32c000 [ 331.450088] NIP: d0000000025d6b9c LR: d0000000025d6b98 CTR: c0000000004cfdd0 [ 331.451042] REGS: c0000003bd32f420 TRAP: 0700 Tainted: G D (3.11.7-200.2.fc19.ppc64p7) [ 331.452331] MSR: 800000000282b032 CR: 28004824 XER: 20000000 [ 331.454616] SOFTE: 1 [ 331.455106] CFAR: c000000000848bb8 [ 331.455726] GPR00: d0000000025d6b98 c0000003bd32f6a0 d0000000026017b8 0000000000000032 GPR04: c0000000018627f8 c000000001873208 320d0a3030303030 3030303030643033 GPR08: c000000000c490a8 0000000000000000 0000000000000000 0000000000000002 GPR12: 0000000028004822 c00000000fdc6300 0000000000000000 00000100076ec310 GPR16: 000000002ae343b8 00003ffffd397398 0000000000000000 0000000000000000 GPR20: 00000100076f16f4 00000100076ebe60 0000000000000008 ffffffffffffffff GPR24: 0000000000000000 0000008001041e60 0000000000000000 0000008001040ce8 GPR28: c0000003a2d80000 0000000000000a00 0000000000000001 c0000003a2681810 [ 331.466504] NIP [d0000000025d6b9c] .kvmppc_handle_exit_pr+0x75c/0xa80 [kvm] [ 331.466999] LR [d0000000025d6b98] .kvmppc_handle_exit_pr+0x758/0xa80 [kvm] [ 331.467517] Call 
Trace: [ 331.467909] [c0000003bd32f6a0] [d0000000025d6b98] .kvmppc_handle_exit_pr+0x758/0xa80 [kvm] (unreliable) [ 331.468553] [c0000003bd32f750] [d0000000025d98f0] kvm_start_lightweight+0xb4/0xc4 [kvm] [ 331.469189] [c0000003bd32f920] [d0000000025d7648] .kvmppc_vcpu_run_pr+0xd8/0x270 [kvm] [ 331.469838] [c0000003bd32f9c0] [d0000000025cf748] .kvmppc_vcpu_run+0xc8/0xf0 [kvm] [ 331.470790] [c0000003bd32fa50] [d0000000025cc19c] .kvm_arch_vcpu_ioctl_run+0x5c/0x1b0 [kvm] [ 331.471401] [c0000003bd32fae0] [d0000000025c4888] .kvm_vcpu_ioctl+0x478/0x730 [kvm] [ 331.472026] [c0000003bd32fc90] [c00000000026192c] .do_vfs_ioctl+0x4dc/0x7a0 [ 331.472561] [c0000003bd32fd80] [c000000000261cc4] .SyS_ioctl+0xd4/0xf0 [ 331.473095] [c0000003bd32fe30] [c000000000009ed8] syscall_exit+0x0/0x98 [ 331.473633] Instruction dump: [ 331.473766] 4bfff9b4 2b9d0800 419efc18 60000000 60420000 3d220000 e8bf11a0 e8df12a8 [ 331.474733] 7fa4eb78 e8698660 48015165 e8410028 <0fe00000> 813f00e4 3ba00000 39290001 [ 331.475386] ---[ end trace 49fc47d994c1f8f2 ]--- [ 331.479817] This fixes the problem by making kvmppc_handle_exit_pr() recognize the interrupt. We also need to jump to the doorbell interrupt handler in book3s_segment.S to handle the interrupt on the way out of the guest. Having done that, there's nothing further to be done in kvmppc_handle_exit_pr(). Signed-off-by: Paul Mackerras Signed-off-by: Alexander Graf --- arch/powerpc/include/asm/kvm_asm.h | 1 + arch/powerpc/kvm/book3s_pr.c | 1 + arch/powerpc/kvm/book3s_segment.S | 2 ++ 3 files changed, 4 insertions(+) diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index c3815b1e79e8..8337c33c2eb3 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h @@ -91,6 +91,7 @@ #define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800 #define BOOK3S_INTERRUPT_DECREMENTER 0x900 #define BOOK3S_INTERRUPT_HV_DECREMENTER 0x980 +#define BOOK3S_INTERRUPT_DOORBELL 0xa00 #define BOOK3S_INTERRUPT_SYSCALL 0xc00 #define BOOK3S_INTERRUPT_TRACE 0xd00 #define BOOK3S_INTERRUPT_H_DATA_STORAGE 0xe00 diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index e82fafdaf880..6a5fc7d53de6 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c @@ -827,6 +827,7 @@ int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu, /* We're good on these - the host merely wanted to get our attention */ case BOOK3S_INTERRUPT_DECREMENTER: case BOOK3S_INTERRUPT_HV_DECREMENTER: + case BOOK3S_INTERRUPT_DOORBELL: vcpu->stat.dec_exits++; r = RESUME_GUEST; break; diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S index bc50c97751d3..1e0cc2adfd40 100644 --- a/arch/powerpc/kvm/book3s_segment.S +++ b/arch/powerpc/kvm/book3s_segment.S @@ -361,6 +361,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) beqa BOOK3S_INTERRUPT_DECREMENTER cmpwi r12, BOOK3S_INTERRUPT_PERFMON beqa BOOK3S_INTERRUPT_PERFMON + cmpwi r12, BOOK3S_INTERRUPT_DOORBELL + beqa BOOK3S_INTERRUPT_DOORBELL RFI kvmppc_handler_trampoline_exit_end: From 5f66b62095d028b70a322df15e8f9ffcdbcb474c Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Wed, 29 Jan 2014 18:10:45 +0100 Subject: [PATCH 43/45] kvm: x86: move KVM_CAP_HYPERV_TIME outside #ifdef Self explanatory. 
Reported-by: Radim Krcmar Cc: Vadim Rozenfeld Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 34d0d610aa8a..39c28f09dfd5 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -2616,10 +2616,10 @@ int kvm_dev_ioctl_check_extension(long ext) case KVM_CAP_GET_TSC_KHZ: case KVM_CAP_KVMCLOCK_CTRL: case KVM_CAP_READONLY_MEM: + case KVM_CAP_HYPERV_TIME: #ifdef CONFIG_KVM_DEVICE_ASSIGNMENT case KVM_CAP_ASSIGN_DEV_IRQ: case KVM_CAP_PCI_2_3: - case KVM_CAP_HYPERV_TIME: #endif r = 1; break; From 1c300a40772dae829b91dad634999a6a522c0829 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 27 Jan 2014 14:49:40 +0100 Subject: [PATCH 44/45] x86, kvm: cache the base of the KVM cpuid leaves It is unnecessary to go through hypervisor_cpuid_base every time a leaf is found (which will be every time a feature is requested after the next patch). Fixes: 1085ba7f552d84aa8ac0ae903fa8d0cc2ff9f79d Cc: stable@vger.kernel.org Cc: mtosatti@redhat.com Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_para.h | 22 ++++++---------------- arch/x86/kernel/kvm.c | 27 +++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 16 deletions(-) diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h index 1df115909758..1679cc799b26 100644 --- a/arch/x86/include/asm/kvm_para.h +++ b/arch/x86/include/asm/kvm_para.h @@ -85,28 +85,13 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1, return ret; } -static inline uint32_t kvm_cpuid_base(void) -{ - if (boot_cpu_data.cpuid_level < 0) - return 0; /* So we don't blow up on old processors */ - - if (cpu_has_hypervisor) - return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0); - - return 0; -} - -static inline bool kvm_para_available(void) -{ - return kvm_cpuid_base() != 0; -} - static inline unsigned int kvm_arch_para_features(void) { return cpuid_eax(KVM_CPUID_FEATURES); } #ifdef CONFIG_KVM_GUEST +bool kvm_para_available(void); void __init kvm_guest_init(void); void kvm_async_pf_task_wait(u32 token); void kvm_async_pf_task_wake(u32 token); @@ -126,6 +111,11 @@ static inline void kvm_spinlock_init(void) #define kvm_async_pf_task_wait(T) do {} while(0) #define kvm_async_pf_task_wake(T) do {} while(0) +static inline bool kvm_para_available(void) +{ + return 0; +} + static inline u32 kvm_read_and_reset_pf_reason(void) { return 0; diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 6dd802c6d780..0823003770fc 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -500,6 +500,33 @@ void __init kvm_guest_init(void) #endif } +static noinline uint32_t __kvm_cpuid_base(void) +{ + if (boot_cpu_data.cpuid_level < 0) + return 0; /* So we don't blow up on old processors */ + + if (cpu_has_hypervisor) + return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0); + + return 0; +} + +static inline uint32_t kvm_cpuid_base(void) +{ + static int kvm_cpuid_base = -1; + + if (kvm_cpuid_base == -1) + kvm_cpuid_base = __kvm_cpuid_base(); + + return kvm_cpuid_base; +} + +bool kvm_para_available(void) +{ + return kvm_cpuid_base() != 0; +} +EXPORT_SYMBOL_GPL(kvm_para_available); + static uint32_t __init kvm_detect(void) { return kvm_cpuid_base(); From 77f01bdfa5e55dc19d3eb747181d2730a9bb3ca8 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 27 Jan 2014 14:51:44 +0100 Subject: [PATCH 45/45] x86, kvm: correctly access the KVM_CPUID_FEATURES leaf at 0x40000101 When Hyper-V hypervisor leaves are present, KVM must relocate 
its own leaves at 0x40000100, because Windows does not look for Hyper-V leaves at indices other than 0x40000000. In this case, the KVM features are at 0x40000101, but the old code would always look at 0x40000001. Fix by using kvm_cpuid_base(). This also requires making the function non-inline, since kvm_cpuid_base() is static. Fixes: 1085ba7f552d84aa8ac0ae903fa8d0cc2ff9f79d Cc: stable@vger.kernel.org Cc: mtosatti@redhat.com Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_para.h | 11 ++++++----- arch/x86/kernel/kvm.c | 5 +++++ 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h index 1679cc799b26..c7678e43465b 100644 --- a/arch/x86/include/asm/kvm_para.h +++ b/arch/x86/include/asm/kvm_para.h @@ -85,13 +85,9 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1, return ret; } -static inline unsigned int kvm_arch_para_features(void) -{ - return cpuid_eax(KVM_CPUID_FEATURES); -} - #ifdef CONFIG_KVM_GUEST bool kvm_para_available(void); +unsigned int kvm_arch_para_features(void); void __init kvm_guest_init(void); void kvm_async_pf_task_wait(u32 token); void kvm_async_pf_task_wake(u32 token); @@ -116,6 +112,11 @@ static inline bool kvm_para_available(void) return 0; } +static inline unsigned int kvm_arch_para_features(void) +{ + return 0; +} + static inline u32 kvm_read_and_reset_pf_reason(void) { return 0; diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 0823003770fc..f81cadebcc0e 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -527,6 +527,11 @@ bool kvm_para_available(void) } EXPORT_SYMBOL_GPL(kvm_para_available); +unsigned int kvm_arch_para_features(void) +{ + return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES); +} + static uint32_t __init kvm_detect(void) { return kvm_cpuid_base();