KVM: PPC: Book3S HV: Reuse kvmppc_inject_interrupt for async guest delivery
This consolidates the HV interrupt delivery logic into one place.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
commit 268f4ef995
parent 87a45e07a5
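For context, the parent change (87a45e07a5) introduced an inject_interrupt arch op; this commit makes the HV implementation usable from the guest-entry path as well. A minimal sketch of the expected call path, assuming that op wiring (illustrative, not a copy of the kernel sources):

	/* Sketch under the assumption that the parent commit added an
	 * inject_interrupt op to struct kvmppc_ops: the HV ops table points
	 * the op at the helper this commit exports, and generic Book3S code
	 * dispatches through it. */
	static struct kvmppc_ops kvm_ops_hv = {
		/* ... other ops ... */
		.inject_interrupt = kvmppc_inject_interrupt_hv,
	};

	void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
	{
		vcpu->kvm->arch.kvm_ops->inject_interrupt(vcpu, vec, srr1_flags);
	}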
@@ -32,4 +32,7 @@ extern void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val);
 static inline void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val) {}
 #endif
 
+extern void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr);
+extern void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
+
 #endif
@@ -133,7 +133,6 @@ static inline bool nesting_enabled(struct kvm *kvm)
 /* If set, the threads on each CPU core have to be in the same MMU mode */
 static bool no_mixing_hpt_and_radix;
 
-static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
 /*
@@ -338,39 +337,6 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
-static void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
-{
-	unsigned long msr, pc, new_msr, new_pc;
-
-	msr = kvmppc_get_msr(vcpu);
-	pc = kvmppc_get_pc(vcpu);
-	new_msr = vcpu->arch.intr_msr;
-	new_pc = vec;
-
-	/* If transactional, change to suspend mode on IRQ delivery */
-	if (MSR_TM_TRANSACTIONAL(msr))
-		new_msr |= MSR_TS_S;
-	else
-		new_msr |= msr & MSR_TS_MASK;
-
-	kvmppc_set_srr0(vcpu, pc);
-	kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
-	kvmppc_set_pc(vcpu, new_pc);
-	kvmppc_set_msr(vcpu, new_msr);
-}
-
-static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
-{
-	/*
-	 * Check for illegal transactional state bit combination
-	 * and if we find it, force the TS field to a safe state.
-	 */
-	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
-		msr &= ~MSR_TS_MASK;
-	vcpu->arch.shregs.msr = msr;
-	kvmppc_end_cede(vcpu);
-}
-
 static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
 {
 	vcpu->arch.pvr = pvr;
@@ -2475,15 +2441,6 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
 	vcpu->arch.timer_running = 1;
 }
 
-static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
-{
-	vcpu->arch.ceded = 0;
-	if (vcpu->arch.timer_running) {
-		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
-		vcpu->arch.timer_running = 0;
-	}
-}
-
 extern int __kvmppc_vcore_entry(void);
 
 static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
@@ -755,6 +755,56 @@ void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
 	local_paca->kvm_hstate.kvm_split_mode = NULL;
 }
 
+static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.ceded = 0;
+	if (vcpu->arch.timer_running) {
+		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+		vcpu->arch.timer_running = 0;
+	}
+}
+
+void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
+{
+	/*
+	 * Check for illegal transactional state bit combination
+	 * and if we find it, force the TS field to a safe state.
+	 */
+	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
+		msr &= ~MSR_TS_MASK;
+	vcpu->arch.shregs.msr = msr;
+	kvmppc_end_cede(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);
+
+static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
+{
+	unsigned long msr, pc, new_msr, new_pc;
+
+	msr = kvmppc_get_msr(vcpu);
+	pc = kvmppc_get_pc(vcpu);
+	new_msr = vcpu->arch.intr_msr;
+	new_pc = vec;
+
+	/* If transactional, change to suspend mode on IRQ delivery */
+	if (MSR_TM_TRANSACTIONAL(msr))
+		new_msr |= MSR_TS_S;
+	else
+		new_msr |= msr & MSR_TS_MASK;
+
+	kvmppc_set_srr0(vcpu, pc);
+	kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
+	kvmppc_set_pc(vcpu, new_pc);
+	vcpu->arch.shregs.msr = new_msr;
+}
+
+void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
+{
+	inject_interrupt(vcpu, vec, srr1_flags);
+	kvmppc_end_cede(vcpu);
+}
+EXPORT_SYMBOL_GPL(kvmppc_inject_interrupt_hv);
+
 /*
  * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
  * Can we inject a Decrementer or a External interrupt?
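A note on the split above: inject_interrupt() writes vcpu->arch.shregs.msr directly rather than calling kvmppc_set_msr(), whose HV implementation would also end cede; that lets the guest-entry path below reuse the delivery logic without the cede side effect, while the exported kvmppc_inject_interrupt_hv() ends cede explicitly for synchronous injection. A hedged sketch of a hypothetical caller (the wrapper function is invented for illustration; the vector constant is the standard Book3S one):

	/* Hypothetical caller, not part of this commit: deliver a system
	 * reset to the guest through the consolidated HV helper, which
	 * saves PC/MSR into SRR0/SRR1, redirects to the vector with
	 * intr_msr, and forces the vCPU out of cede. */
	static void example_deliver_system_reset(struct kvm_vcpu *vcpu)
	{
		kvmppc_inject_interrupt_hv(vcpu, BOOK3S_INTERRUPT_SYSTEM_RESET, 0);
	}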
@@ -762,7 +812,6 @@ void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
 void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
 {
 	int ext;
-	unsigned long vec = 0;
 	unsigned long lpcr;
 
 	/* Insert EXTERNAL bit into LPCR at the MER bit position */
@@ -774,26 +823,16 @@ void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
 
 	if (vcpu->arch.shregs.msr & MSR_EE) {
 		if (ext) {
-			vec = BOOK3S_INTERRUPT_EXTERNAL;
+			inject_interrupt(vcpu, BOOK3S_INTERRUPT_EXTERNAL, 0);
 		} else {
 			long int dec = mfspr(SPRN_DEC);
 			if (!(lpcr & LPCR_LD))
 				dec = (int) dec;
 			if (dec < 0)
-				vec = BOOK3S_INTERRUPT_DECREMENTER;
+				inject_interrupt(vcpu,
+					BOOK3S_INTERRUPT_DECREMENTER, 0);
 		}
 	}
-	if (vec) {
-		unsigned long msr, old_msr = vcpu->arch.shregs.msr;
-
-		kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
-		kvmppc_set_srr1(vcpu, old_msr);
-		kvmppc_set_pc(vcpu, vec);
-		msr = vcpu->arch.intr_msr;
-		if (MSR_TM_ACTIVE(old_msr))
-			msr |= MSR_TS_S;
-		vcpu->arch.shregs.msr = msr;
-	}
 
 	if (vcpu->arch.doorbell_request) {
 		mtspr(SPRN_DPDES, 1);
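One subtlety in the hunk above is the "dec = (int) dec" step: with the large decrementer disabled (LPCR_LD clear), only the low 32 bits of SPRN_DEC are architected, so the 64-bit read must be sign-extended before the expiry test. A standalone illustration of that arithmetic (ordinary userspace C assuming 64-bit long, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		long dec = 0xffffffffL;		/* raw 64-bit read; low 32 bits == -1 */
		int large_decrementer = 0;	/* pretend LPCR_LD is clear */

		if (!large_decrementer)
			dec = (int) dec;	/* sign-extend the 32-bit value */

		/* prints "dec = -1 -> inject decrementer" */
		printf("dec = %ld -> %s\n", dec,
		       dec < 0 ? "inject decrementer" : "still counting");
		return 0;
	}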