Merge branch 'kvm-ppchv-next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc into kvm-next
commit 7227fc0666
@@ -304,6 +304,11 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 	return vcpu->arch.fault_dar;
 }
 
+static inline bool is_kvmppc_resume_guest(int r)
+{
+	return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
+}
+
 /* Magic register values loaded into r3 and r4 before the 'sc' assembly
  * instruction for the OSI hypercalls */
 #define OSI_SC_MAGIC_R3		0x113724FA
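For context on the helper just added (an aside, not part of the diff): RESUME_GUEST and RESUME_GUEST_NV both mean "re-enter the guest". As far as I recall, the codes are built from flag bits in arch/powerpc/include/asm/kvm_ppc.h, roughly:

	/* Recollection of the flag layout; treat as illustrative, not verbatim. */
	#define RESUME_FLAG_NV   (1 << 0)   /* reload guest nonvolatile state */
	#define RESUME_FLAG_HOST (1 << 1)   /* resume host, not guest */

	#define RESUME_GUEST     0
	#define RESUME_GUEST_NV  RESUME_FLAG_NV
	#define RESUME_HOST      RESUME_FLAG_HOST

Funneling both variants through one helper is what lets later hunks replace the exact "r == RESUME_GUEST" comparisons, which silently mishandled RESUME_GUEST_NV.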
@@ -289,6 +289,18 @@ static inline void note_hpte_modification(struct kvm *kvm,
 	if (atomic_read(&kvm->arch.hpte_mod_interest))
 		rev->guest_rpte |= HPTE_GR_MODIFIED;
 }
+
+/*
+ * Like kvm_memslots(), but for use in real mode when we can't do
+ * any RCU stuff (since the secondary threads are offline from the
+ * kernel's point of view), and we can't print anything.
+ * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
+ */
+static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
+{
+	return rcu_dereference_raw_notrace(kvm->memslots);
+}
+
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #endif /* __ASM_KVM_BOOK3S_64_H__ */
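To make the comment concrete, here is a hypothetical real-mode caller (my sketch; only kvm_memslots_raw(), __gfn_to_memslot() and __gfn_to_hva_memslot() come from the kernel, the wrapper itself is invented):

	/* Hypothetical helper: memslot lookup on a real-mode path, where the
	 * lockdep machinery behind rcu_dereference_check() must not run. */
	static unsigned long realmode_gfn_to_hva(struct kvm *kvm, gfn_t gfn)
	{
		struct kvm_memory_slot *memslot;

		memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
		return memslot ? __gfn_to_hva_memslot(memslot, gfn) : 0;
	}

The three kvm_memslots() -> kvm_memslots_raw() conversions later in this merge (remove_revmap_chain(), kvmppc_do_h_enter(), kvmppc_h_protect()) are exactly this pattern.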
@@ -94,7 +94,7 @@ struct kvmppc_host_state {
 	unsigned long xics_phys;
 	u32 saved_xirr;
 	u64 dabr;
-	u64 host_mmcr[3];
+	u64 host_mmcr[7];	/* MMCR 0,1,A, SIAR, SDAR, MMCR2, SIER */
 	u32 host_pmc[8];
 	u64 host_purr;
 	u64 host_spurr;
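Cross-checking the comment against the HSTATE_MMCR offsets used in the assembly hunks below gives the following slot assignment (inferred, shown as an illustrative comment):

	/* Inferred host_mmcr[] layout; index n lives at HSTATE_MMCR + n*8:
	 *   [0] MMCR0   [1] MMCR1   [2] MMCRA
	 *   [3] SIAR    [4] SDAR
	 *   [5] MMCR2   [6] SIER    (POWER8 / CPU_FTR_ARCH_207S only)
	 */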
@@ -129,6 +129,8 @@ extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 				struct kvm_create_spapr_tce *args);
 extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 			     unsigned long ioba, unsigned long tce);
+extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+			     unsigned long ioba);
 extern struct kvm_rma_info *kvm_alloc_rma(void);
 extern void kvm_release_rma(struct kvm_rma_info *ri);
 extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
@@ -213,6 +213,7 @@
 #define SPRN_ACOP	0x1F	/* Available Coprocessor Register */
 #define SPRN_TFIAR	0x81	/* Transaction Failure Inst Addr   */
 #define SPRN_TEXASR	0x82	/* Transaction EXception & Summary */
+#define   TEXASR_FS	__MASK(63-36)	/* Transaction Failure Summary */
 #define SPRN_TEXASRU	0x83	/* ''	   ''	   ''	 Upper 32  */
 #define SPRN_TFHAR	0x80	/* Transaction Failure Handler Addr */
 #define SPRN_CTRLF	0x088
@@ -7,6 +7,8 @@
 
 #include <uapi/asm/tm.h>
 
+#ifndef __ASSEMBLY__
+
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 extern void do_load_up_transact_fpu(struct thread_struct *thread);
 extern void do_load_up_transact_altivec(struct thread_struct *thread);
@@ -21,3 +23,5 @@ extern void tm_recheckpoint(struct thread_struct *thread,
 extern void tm_abort(uint8_t cause);
 extern void tm_save_sprs(struct thread_struct *thread);
 extern void tm_restore_sprs(struct thread_struct *thread);
+
+#endif /* __ASSEMBLY__ */
@@ -262,7 +262,14 @@ int kvmppc_mmu_hv_init(void)
 
 static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
 {
-	kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
+	unsigned long msr = vcpu->arch.intr_msr;
+
+	/* If transactional, change to suspend mode on IRQ delivery */
+	if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
+		msr |= MSR_TS_S;
+	else
+		msr |= vcpu->arch.shregs.msr & MSR_TS_MASK;
+	kvmppc_set_msr(vcpu, msr);
 }
 
 /*
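A note on the MSR[TS] values involved (standard ISA background, not text from the patch):

	/* MSR[TS] 2-bit encodings (Power ISA 2.07):
	 *   0b00  non-transactional
	 *   0b01  suspended       (MSR_TS_S)
	 *   0b10  transactional   (MSR_TS_T)
	 * Interrupt delivery must convert transactional to suspended and
	 * leave the other two states unchanged, which is what the new
	 * reset_msr code above (and kvmppc_msr_interrupt below) implements.
	 */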
@@ -75,3 +75,31 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 	return H_TOO_HARD;
 }
 EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
+
+long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+		      unsigned long ioba)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvmppc_spapr_tce_table *stt;
+
+	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
+		if (stt->liobn == liobn) {
+			unsigned long idx = ioba >> SPAPR_TCE_SHIFT;
+			struct page *page;
+			u64 *tbl;
+
+			if (ioba >= stt->window_size)
+				return H_PARAMETER;
+
+			page = stt->pages[idx / TCES_PER_PAGE];
+			tbl = (u64 *)page_address(page);
+
+			vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];
+			return H_SUCCESS;
+		}
+	}
+
+	/* Didn't find the liobn, punt it to userspace */
+	return H_TOO_HARD;
+}
+EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);
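A quick worked example of the indexing (constants are assumptions for illustration: SPAPR_TCE_SHIFT = 12, i.e. 4 KiB IOMMU pages, and TCES_PER_PAGE = 512, i.e. 4 KiB kernel pages of u64 entries):

	/* ioba = 0x201000, with the assumed constants above */
	unsigned long idx = 0x201000UL >> 12;			/* idx = 0x201 = 513 */
	u64 *tbl = (u64 *)page_address(stt->pages[513 / 512]);	/* second backing page */
	u64 tce  = tbl[513 % 512];				/* slot 1 of that page */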
@@ -86,7 +86,7 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 
 	/* CPU points to the first thread of the core */
 	if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
-#ifdef CONFIG_KVM_XICS
+#ifdef CONFIG_PPC_ICP_NATIVE
 		int real_cpu = cpu + vcpu->arch.ptid;
 		if (paca[real_cpu].kvm_hstate.xics_phys)
 			xics_wake_cpu(real_cpu);
@@ -879,17 +879,6 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_IAMR:
 		*val = get_reg_val(id, vcpu->arch.iamr);
 		break;
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-	case KVM_REG_PPC_TFHAR:
-		*val = get_reg_val(id, vcpu->arch.tfhar);
-		break;
-	case KVM_REG_PPC_TFIAR:
-		*val = get_reg_val(id, vcpu->arch.tfiar);
-		break;
-	case KVM_REG_PPC_TEXASR:
-		*val = get_reg_val(id, vcpu->arch.texasr);
-		break;
-#endif
 	case KVM_REG_PPC_FSCR:
 		*val = get_reg_val(id, vcpu->arch.fscr);
 		break;
@@ -970,6 +959,69 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_PPR:
 		*val = get_reg_val(id, vcpu->arch.ppr);
 		break;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	case KVM_REG_PPC_TFHAR:
+		*val = get_reg_val(id, vcpu->arch.tfhar);
+		break;
+	case KVM_REG_PPC_TFIAR:
+		*val = get_reg_val(id, vcpu->arch.tfiar);
+		break;
+	case KVM_REG_PPC_TEXASR:
+		*val = get_reg_val(id, vcpu->arch.texasr);
+		break;
+	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
+		i = id - KVM_REG_PPC_TM_GPR0;
+		*val = get_reg_val(id, vcpu->arch.gpr_tm[i]);
+		break;
+	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
+	{
+		int j;
+		i = id - KVM_REG_PPC_TM_VSR0;
+		if (i < 32)
+			for (j = 0; j < TS_FPRWIDTH; j++)
+				val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
+		else {
+			if (cpu_has_feature(CPU_FTR_ALTIVEC))
+				val->vval = vcpu->arch.vr_tm.vr[i-32];
+			else
+				r = -ENXIO;
+		}
+		break;
+	}
+	case KVM_REG_PPC_TM_CR:
+		*val = get_reg_val(id, vcpu->arch.cr_tm);
+		break;
+	case KVM_REG_PPC_TM_LR:
+		*val = get_reg_val(id, vcpu->arch.lr_tm);
+		break;
+	case KVM_REG_PPC_TM_CTR:
+		*val = get_reg_val(id, vcpu->arch.ctr_tm);
+		break;
+	case KVM_REG_PPC_TM_FPSCR:
+		*val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
+		break;
+	case KVM_REG_PPC_TM_AMR:
+		*val = get_reg_val(id, vcpu->arch.amr_tm);
+		break;
+	case KVM_REG_PPC_TM_PPR:
+		*val = get_reg_val(id, vcpu->arch.ppr_tm);
+		break;
+	case KVM_REG_PPC_TM_VRSAVE:
+		*val = get_reg_val(id, vcpu->arch.vrsave_tm);
+		break;
+	case KVM_REG_PPC_TM_VSCR:
+		if (cpu_has_feature(CPU_FTR_ALTIVEC))
+			*val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
+		else
+			r = -ENXIO;
+		break;
+	case KVM_REG_PPC_TM_DSCR:
+		*val = get_reg_val(id, vcpu->arch.dscr_tm);
+		break;
+	case KVM_REG_PPC_TM_TAR:
+		*val = get_reg_val(id, vcpu->arch.tar_tm);
+		break;
+#endif
 	case KVM_REG_PPC_ARCH_COMPAT:
 		*val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
 		break;
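The split in the TM_VSR case mirrors the VSX register file (standard architecture background, not patch text):

	/* VSX register numbering, as relied on above:
	 *   VSR  0..31  overlay the FPRs     -> vcpu->arch.fp_tm.fpr[i]
	 *   VSR 32..63  overlay the VMX VRs  -> vcpu->arch.vr_tm.vr[i - 32]
	 * The upper half exists only with Altivec, hence the
	 * CPU_FTR_ALTIVEC check and the -ENXIO fallback.
	 */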
@@ -1039,17 +1091,6 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_IAMR:
 		vcpu->arch.iamr = set_reg_val(id, *val);
 		break;
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-	case KVM_REG_PPC_TFHAR:
-		vcpu->arch.tfhar = set_reg_val(id, *val);
-		break;
-	case KVM_REG_PPC_TFIAR:
-		vcpu->arch.tfiar = set_reg_val(id, *val);
-		break;
-	case KVM_REG_PPC_TEXASR:
-		vcpu->arch.texasr = set_reg_val(id, *val);
-		break;
-#endif
 	case KVM_REG_PPC_FSCR:
 		vcpu->arch.fscr = set_reg_val(id, *val);
 		break;
@@ -1144,6 +1185,68 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 	case KVM_REG_PPC_PPR:
 		vcpu->arch.ppr = set_reg_val(id, *val);
 		break;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	case KVM_REG_PPC_TFHAR:
+		vcpu->arch.tfhar = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TFIAR:
+		vcpu->arch.tfiar = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TEXASR:
+		vcpu->arch.texasr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
+		i = id - KVM_REG_PPC_TM_GPR0;
+		vcpu->arch.gpr_tm[i] = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
+	{
+		int j;
+		i = id - KVM_REG_PPC_TM_VSR0;
+		if (i < 32)
+			for (j = 0; j < TS_FPRWIDTH; j++)
+				vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
+		else
+			if (cpu_has_feature(CPU_FTR_ALTIVEC))
+				vcpu->arch.vr_tm.vr[i-32] = val->vval;
+			else
+				r = -ENXIO;
+		break;
+	}
+	case KVM_REG_PPC_TM_CR:
+		vcpu->arch.cr_tm = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TM_LR:
+		vcpu->arch.lr_tm = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TM_CTR:
+		vcpu->arch.ctr_tm = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TM_FPSCR:
+		vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TM_AMR:
+		vcpu->arch.amr_tm = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TM_PPR:
+		vcpu->arch.ppr_tm = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TM_VRSAVE:
+		vcpu->arch.vrsave_tm = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TM_VSCR:
+		if (cpu_has_feature(CPU_FTR_ALTIVEC))
+			vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val);
+		else
+			r = - ENXIO;
+		break;
+	case KVM_REG_PPC_TM_DSCR:
+		vcpu->arch.dscr_tm = set_reg_val(id, *val);
+		break;
+	case KVM_REG_PPC_TM_TAR:
+		vcpu->arch.tar_tm = set_reg_val(id, *val);
+		break;
+#endif
 	case KVM_REG_PPC_ARCH_COMPAT:
 		r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
 		break;
@@ -1360,9 +1463,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
 	smp_wmb();
 #if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
 	if (cpu != smp_processor_id()) {
-#ifdef CONFIG_KVM_XICS
 		xics_wake_cpu(cpu);
-#endif
 		if (vcpu->arch.ptid)
 			++vc->n_woken;
 	}
@@ -1530,7 +1631,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 		vcpu->arch.trap = 0;
 
 		if (vcpu->arch.ceded) {
-			if (ret != RESUME_GUEST)
+			if (!is_kvmppc_resume_guest(ret))
 				kvmppc_end_cede(vcpu);
 			else
 				kvmppc_set_timer(vcpu);
@@ -1541,7 +1642,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 	vc->vcore_state = VCORE_INACTIVE;
 	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
 				 arch.run_list) {
-		if (vcpu->arch.ret != RESUME_GUEST) {
+		if (!is_kvmppc_resume_guest(vcpu->arch.ret)) {
 			kvmppc_remove_runnable(vc, vcpu);
 			wake_up(&vcpu->arch.cpu_run);
 		}
@@ -1731,7 +1832,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
 			srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
 		}
-	} while (r == RESUME_GUEST);
+	} while (is_kvmppc_resume_guest(r));
 
  out:
 	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
@@ -2366,7 +2467,7 @@ static int kvmppc_book3s_init_hv(void)
 	 */
 	r = kvmppc_core_check_processor_compat_hv();
 	if (r < 0)
-		return r;
+		return -ENODEV;
 
 	kvm_ops_hv.owner = THIS_MODULE;
 	kvmppc_hv_ops = &kvm_ops_hv;
@@ -71,6 +71,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	mtmsrd	r10,1
 
 	/* Save host PMU registers */
+BEGIN_FTR_SECTION
+	/* Work around P8 PMAE bug */
+	li	r3, -1
+	clrrdi	r3, r3, 10
+	mfspr	r8, SPRN_MMCR2
+	mtspr	SPRN_MMCR2, r3		/* freeze all counters using MMCR2 */
+	isync
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	li	r3, 1
 	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
 	mfspr	r7, SPRN_MMCR0		/* save MMCR0 */
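The mask built here is easy to verify: li r3, -1 sets all 64 bits and clrrdi r3, r3, 10 clears the low 10, so r3 = 0xFFFFFFFFFFFFFC00, which per the comment above sets the counter-freeze controls in MMCR2. Equivalent C (illustrative only):

	/* Equivalent of "li r3, -1; clrrdi r3, r3, 10" */
	unsigned long mmcr2_freeze_all = ~0UL << 10;	/* 0xFFFFFFFFFFFFFC00 */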
@@ -87,9 +95,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	cmpwi	r5, 0
 	beq	31f			/* skip if not */
 	mfspr	r5, SPRN_MMCR1
+	mfspr	r9, SPRN_SIAR
+	mfspr	r10, SPRN_SDAR
 	std	r7, HSTATE_MMCR(r13)
 	std	r5, HSTATE_MMCR + 8(r13)
 	std	r6, HSTATE_MMCR + 16(r13)
+	std	r9, HSTATE_MMCR + 24(r13)
+	std	r10, HSTATE_MMCR + 32(r13)
+BEGIN_FTR_SECTION
+	mfspr	r9, SPRN_SIER
+	std	r8, HSTATE_MMCR + 40(r13)
+	std	r9, HSTATE_MMCR + 48(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	mfspr	r3, SPRN_PMC1
 	mfspr	r5, SPRN_PMC2
 	mfspr	r6, SPRN_PMC3
@@ -110,6 +127,11 @@ BEGIN_FTR_SECTION
 	stw	r10, HSTATE_PMC + 24(r13)
 	stw	r11, HSTATE_PMC + 28(r13)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
+BEGIN_FTR_SECTION
+	mfspr	r9, SPRN_SIER
+	std	r8, HSTATE_MMCR + 40(r13)
+	std	r9, HSTATE_MMCR + 48(r13)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 31:
 
 	/*
@@ -111,7 +111,7 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
 	rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
 	ptel = rev->guest_rpte |= rcbits;
 	gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
-	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
+	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
 	if (!memslot)
 		return;
 
@@ -192,7 +192,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	/* Find the memslot (if any) for this address */
 	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
 	gfn = gpa >> PAGE_SHIFT;
-	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
+	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
 	pa = 0;
 	is_io = ~0ul;
 	rmap = NULL;
@@ -670,7 +670,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
 
 	psize = hpte_page_size(v, r);
 	gfn = ((r & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
-	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
+	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
 	if (memslot) {
 		hva = __gfn_to_hva_memslot(memslot, gfn);
 		pte = lookup_linux_pte_and_update(pgdir, hva,
@@ -28,6 +28,9 @@
 #include <asm/exception-64s.h>
 #include <asm/kvm_book3s_asm.h>
 #include <asm/mmu-hash64.h>
+#include <asm/tm.h>
+
+#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
 
 #ifdef __LITTLE_ENDIAN__
 #error Need to fix lppaca and SLB shadow accesses in little endian mode
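VCPU_GPRS_TM(reg) converts a GPR number into the byte offset of that GPR's checkpointed slot in the vcpu struct, so the TM restore code below can fetch checkpointed GPRs with indexed loads:

	/* Assuming ULONG_SIZE == 8: VCPU_GPRS_TM(3) == 3*8 + VCPU_GPR_TM */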
@@ -106,8 +109,18 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	ld	r3, HSTATE_MMCR(r13)
 	ld	r4, HSTATE_MMCR + 8(r13)
 	ld	r5, HSTATE_MMCR + 16(r13)
+	ld	r6, HSTATE_MMCR + 24(r13)
+	ld	r7, HSTATE_MMCR + 32(r13)
 	mtspr	SPRN_MMCR1, r4
 	mtspr	SPRN_MMCRA, r5
+	mtspr	SPRN_SIAR, r6
+	mtspr	SPRN_SDAR, r7
+BEGIN_FTR_SECTION
+	ld	r8, HSTATE_MMCR + 40(r13)
+	ld	r9, HSTATE_MMCR + 48(r13)
+	mtspr	SPRN_MMCR2, r8
+	mtspr	SPRN_SIER, r9
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	mtspr	SPRN_MMCR0, r3
 	isync
 23:
@@ -597,6 +610,116 @@ BEGIN_FTR_SECTION
 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+	b	skip_tm
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+
+	/* Turn on TM/FP/VSX/VMX so we can restore them. */
+	mfmsr	r5
+	li	r6, MSR_TM >> 32
+	sldi	r6, r6, 32
+	or	r5, r5, r6
+	ori	r5, r5, MSR_FP
+	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
+	mtmsrd	r5
+
+	/*
+	 * The user may change these outside of a transaction, so they must
+	 * always be context switched.
+	 */
+	ld	r5, VCPU_TFHAR(r4)
+	ld	r6, VCPU_TFIAR(r4)
+	ld	r7, VCPU_TEXASR(r4)
+	mtspr	SPRN_TFHAR, r5
+	mtspr	SPRN_TFIAR, r6
+	mtspr	SPRN_TEXASR, r7
+
+	ld	r5, VCPU_MSR(r4)
+	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
+	beq	skip_tm	/* TM not active in guest */
+
+	/* Make sure the failure summary is set, otherwise we'll program check
+	 * when we trechkpt.  It's possible that this might have been not set
+	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
+	 * host.
+	 */
+	oris	r7, r7, (TEXASR_FS)@h
+	mtspr	SPRN_TEXASR, r7
+
+	/*
+	 * We need to load up the checkpointed state for the guest.
+	 * We need to do this early as it will blow away any GPRs, VSRs and
+	 * some SPRs.
+	 */
+
+	mr	r31, r4
+	addi	r3, r31, VCPU_FPRS_TM
+	bl	.load_fp_state
+	addi	r3, r31, VCPU_VRS_TM
+	bl	.load_vr_state
+	mr	r4, r31
+	lwz	r7, VCPU_VRSAVE_TM(r4)
+	mtspr	SPRN_VRSAVE, r7
+
+	ld	r5, VCPU_LR_TM(r4)
+	lwz	r6, VCPU_CR_TM(r4)
+	ld	r7, VCPU_CTR_TM(r4)
+	ld	r8, VCPU_AMR_TM(r4)
+	ld	r9, VCPU_TAR_TM(r4)
+	mtlr	r5
+	mtcr	r6
+	mtctr	r7
+	mtspr	SPRN_AMR, r8
+	mtspr	SPRN_TAR, r9
+
+	/*
+	 * Load up PPR and DSCR values but don't put them in the actual SPRs
+	 * till the last moment to avoid running with userspace PPR and DSCR for
+	 * too long.
+	 */
+	ld	r29, VCPU_DSCR_TM(r4)
+	ld	r30, VCPU_PPR_TM(r4)
+
+	std	r2, PACATMSCRATCH(r13) /* Save TOC */
+
+	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
+	li	r5, 0
+	mtmsrd	r5, 1
+
+	/* Load GPRs r0-r28 */
+	reg = 0
+	.rept	29
+	ld	reg, VCPU_GPRS_TM(reg)(r31)
+	reg = reg + 1
+	.endr
+
+	mtspr	SPRN_DSCR, r29
+	mtspr	SPRN_PPR, r30
+
+	/* Load final GPRs */
+	ld	29, VCPU_GPRS_TM(29)(r31)
+	ld	30, VCPU_GPRS_TM(30)(r31)
+	ld	31, VCPU_GPRS_TM(31)(r31)
+
+	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
+	TRECHKPT
+
+	/* Now let's get back the state we need. */
+	HMT_MEDIUM
+	GET_PACA(r13)
+	ld	r29, HSTATE_DSCR(r13)
+	mtspr	SPRN_DSCR, r29
+	ld	r4, HSTATE_KVM_VCPU(r13)
+	ld	r1, HSTATE_HOST_R1(r13)
+	ld	r2, PACATMSCRATCH(r13)
+
+	/* Set the MSR RI since we have our registers back. */
+	li	r5, MSR_RI
+	mtmsrd	r5, 1
+skip_tm:
+#endif
+
 	/* Load guest PMU registers */
 	/* R4 is live here (vcpu pointer) */
 	li	r3, 1
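One subtlety in the block above, spelled out (my reading, not patch text): trechkpt. takes whatever is currently in the registers as the checkpointed state, so the guest's checkpointed GPRs/FPRs/VRs must be live when TRECHKPT executes. Hence the TOC is parked in PACATMSCRATCH, MSR[RI] is cleared while r1/r13 hold guest values, the .rept loop loads r0-r28 (r29-r31 are deferred because they still carry DSCR, PPR and the vcpu pointer), and everything the host needs is refetched from the PACA afterwards:

	/* Restore ordering, as C-style pseudocode (illustrative only):
	 *   load checkpointed FP/VMX/SPRs/GPRs into the live registers;
	 *   TRECHKPT;          // current contents become the checkpointed set
	 *   GET_PACA(r13);     // r13 held guest data until now
	 *   reload r1, r2, r4 and DSCR from the PACA/scratch save area;
	 */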
@@ -704,14 +827,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	ld	r6, VCPU_VTB(r4)
 	mtspr	SPRN_IC, r5
 	mtspr	SPRN_VTB, r6
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-	ld	r5, VCPU_TFHAR(r4)
-	ld	r6, VCPU_TFIAR(r4)
-	ld	r7, VCPU_TEXASR(r4)
-	mtspr	SPRN_TFHAR, r5
-	mtspr	SPRN_TFIAR, r6
-	mtspr	SPRN_TEXASR, r7
-#endif
 	ld	r8, VCPU_EBBHR(r4)
 	mtspr	SPRN_EBBHR, r8
 	ld	r5, VCPU_EBBRR(r4)
@@ -736,6 +851,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	 * Set the decrementer to the guest decrementer.
 	 */
 	ld	r8,VCPU_DEC_EXPIRES(r4)
+	/* r8 is a host timebase value here, convert to guest TB */
+	ld	r5,HSTATE_KVM_VCORE(r13)
+	ld	r6,VCORE_TB_OFFSET(r5)
+	add	r8,r8,r6
 	mftb	r7
 	subf	r3,r7,r8
 	mtspr	SPRN_DEC,r3
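Written as arithmetic (variable names are mine): the expiry was stored in host-timebase units, the guest timebase is shifted by VCORE_TB_OFFSET, and the mftb here already reads the guest timebase because the offset has been applied to the TB on entry, so:

	/* Illustrative equivalent of "add r8,r8,r6; mftb r7; subf r3,r7,r8" */
	u64 expires_guest_tb = dec_expires_host_tb + vcore_tb_offset;
	u64 dec_ticks        = expires_guest_tb - current_guest_tb;  /* -> SPRN_DEC */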
@@ -817,7 +936,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 12:	mtspr	SPRN_SRR0, r10
 	mr	r10,r0
 	mtspr	SPRN_SRR1, r11
-	ld	r11, VCPU_INTR_MSR(r4)
+	mr	r9, r4
+	bl	kvmppc_msr_interrupt
 5:
 
 /*
@@ -1098,17 +1218,15 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
 	mftb	r6
 	extsw	r5,r5
 	add	r5,r5,r6
+	/* r5 is a guest timebase value here, convert to host TB */
+	ld	r3,HSTATE_KVM_VCORE(r13)
+	ld	r4,VCORE_TB_OFFSET(r3)
+	subf	r5,r4,r5
 	std	r5,VCPU_DEC_EXPIRES(r9)
 
 BEGIN_FTR_SECTION
 	b	8f
 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
-	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
-	mfmsr	r8
-	li	r0, 1
-	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
-	mtmsrd	r8
-
 	/* Save POWER8-specific registers */
 	mfspr	r5, SPRN_IAMR
 	mfspr	r6, SPRN_PSPB
@@ -1122,14 +1240,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 	std	r5, VCPU_IC(r9)
 	std	r6, VCPU_VTB(r9)
 	std	r7, VCPU_TAR(r9)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-	mfspr	r5, SPRN_TFHAR
-	mfspr	r6, SPRN_TFIAR
-	mfspr	r7, SPRN_TEXASR
-	std	r5, VCPU_TFHAR(r9)
-	std	r6, VCPU_TFIAR(r9)
-	std	r7, VCPU_TEXASR(r9)
-#endif
 	mfspr	r8, SPRN_EBBHR
 	std	r8, VCPU_EBBHR(r9)
 	mfspr	r5, SPRN_EBBRR
@@ -1387,7 +1497,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 	ld	r8,VCORE_TB_OFFSET(r5)
 	cmpdi	r8,0
 	beq	17f
-	mftb	r6			/* current host timebase */
+	mftb	r6			/* current guest timebase */
 	subf	r8,r8,r6
 	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
 	mftb	r7			/* check if lower 24 bits overflowed */
@@ -1557,7 +1667,7 @@ kvmppc_hdsi:
 	mtspr	SPRN_SRR0, r10
 	mtspr	SPRN_SRR1, r11
 	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
-	ld	r11, VCPU_INTR_MSR(r9)
+	bl	kvmppc_msr_interrupt
 fast_interrupt_c_return:
 6:	ld	r7, VCPU_CTR(r9)
 	lwz	r8, VCPU_XER(r9)
@@ -1626,7 +1736,7 @@ kvmppc_hisi:
 1:	mtspr	SPRN_SRR0, r10
 	mtspr	SPRN_SRR1, r11
 	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
-	ld	r11, VCPU_INTR_MSR(r9)
+	bl	kvmppc_msr_interrupt
 	b	fast_interrupt_c_return
 
 3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
@@ -1669,7 +1779,7 @@ sc_1_fast_return:
 	mtspr	SPRN_SRR0,r10
 	mtspr	SPRN_SRR1,r11
 	li	r10, BOOK3S_INTERRUPT_SYSCALL
-	ld	r11, VCPU_INTR_MSR(r9)
+	bl	kvmppc_msr_interrupt
 	mr	r4,r9
 	b	fast_guest_return
 
@@ -1691,7 +1801,7 @@ hcall_real_table:
 	.long	0		/* 0x10 - H_CLEAR_MOD */
 	.long	0		/* 0x14 - H_CLEAR_REF */
 	.long	.kvmppc_h_protect - hcall_real_table
-	.long	0		/* 0x1c - H_GET_TCE */
+	.long	.kvmppc_h_get_tce - hcall_real_table
 	.long	.kvmppc_h_put_tce - hcall_real_table
 	.long	0		/* 0x24 - H_SET_SPRG0 */
 	.long	.kvmppc_h_set_dabr - hcall_real_table
@@ -1997,7 +2107,7 @@ machine_check_realmode:
 	beq	mc_cont
 	/* If not, deliver a machine check.  SRR0/1 are already set */
 	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
-	ld	r11, VCPU_INTR_MSR(r9)
+	bl	kvmppc_msr_interrupt
 	b	fast_interrupt_c_return
 
 /*
@@ -2138,8 +2248,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 	mfspr	r6,SPRN_VRSAVE
 	stw	r6,VCPU_VRSAVE(r31)
 	mtlr	r30
-	mtmsrd	r5
-	isync
 	blr
 
 /*
@@ -2186,3 +2294,20 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
  */
 kvmppc_bad_host_intr:
 	b	.
+
+/*
+ * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
+ * from VCPU_INTR_MSR and is modified based on the required TM state changes.
+ *   r11 has the guest MSR value (in/out)
+ *   r9 has a vcpu pointer (in)
+ *   r0 is used as a scratch register
+ */
+kvmppc_msr_interrupt:
+	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
+	cmpwi	r0, 2 /* Check if we are in transactional state..  */
+	ld	r11, VCPU_INTR_MSR(r9)
+	bne	1f
+	/* ... if transactional, change to suspended */
+	li	r0, 1
+1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
+	blr
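A C rendition of this routine may help (my sketch, not code from the patch; it leans on the same MSR_TS_S_LG shift the assembly uses):

	/* Illustrative C equivalent of kvmppc_msr_interrupt (names invented). */
	static unsigned long msr_on_interrupt(struct kvm_vcpu *vcpu,
					      unsigned long old_guest_msr)
	{
		unsigned long ts = (old_guest_msr >> MSR_TS_S_LG) & 3;

		if (ts == 2)	/* transactional (0b10)... */
			ts = 1;	/* ...is delivered as suspended (0b01) */
		return vcpu->arch.intr_msr | (ts << MSR_TS_S_LG);
	}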
@@ -213,8 +213,11 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 	gpa_t args_phys;
 	int rc;
 
-	/* r4 contains the guest physical address of the RTAS args */
-	args_phys = kvmppc_get_gpr(vcpu, 4);
+	/*
+	 * r4 contains the guest physical address of the RTAS args
+	 * Mask off the top 4 bits since this is a guest real address
+	 */
+	args_phys = kvmppc_get_gpr(vcpu, 4) & KVM_PAM;
 
 	rc = kvm_read_guest(vcpu->kvm, args_phys, &args, sizeof(args));
 	if (rc)
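Guest real addresses on Book3S can carry non-address bits in the top nibble, so the value from r4 must be masked before kvm_read_guest() uses it as a guest physical address; KVM_PAM is, if memory serves, 0x0fffffffffffffff. For illustration (example value invented):

	/* Assuming KVM_PAM == 0x0fffffffffffffffUL */
	unsigned long gpr4 = 0x4000000000123456UL;	/* hypothetical guest r4 */
	gpa_t args_phys    = gpr4 & KVM_PAM;		/* 0x0000000000123456 */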