ARM: KVM: Cleanup exception injection
David Binderman reported that the exception injection code had a couple of unused variables lingering around. Upon examination, it looked like this code could do with an anticipated spring cleaning, which amounts to deduplicating the CPSR/SPSR update and making it look a bit more like the architecture spec. The spurious variables are removed in the process.

Reported-by: David Binderman <dcb314@hotmail.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
This commit is contained in: commit e078ef8151 (parent 3ffa75cd18)
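The diff below adds a kvm_update_psr() helper that both injection paths now call. As a rough, standalone sketch of the fall-through masking that helper performs (PSR bit values and mode numbers follow the AArch32 encoding; the vcpu accessors and SCTLR lookup are replaced with plain parameters, so this is an illustration, not the kernel code):

/* Standalone illustration only -- not the kernel implementation. */
#include <stdio.h>

#define MODE_MASK   0x0000001f
#define FIQ_MODE    0x00000011
#define IRQ_MODE    0x00000012
#define ABT_MODE    0x00000017
#define UND_MODE    0x0000001b

#define PSR_T_BIT   0x00000020
#define PSR_F_BIT   0x00000040
#define PSR_I_BIT   0x00000080
#define PSR_A_BIT   0x00000100
#define PSR_E_BIT   0x00000200
#define PSR_J_BIT   0x01000000
#define PSR_IT_MASK 0x0600fc00

/*
 * Mirror of the fall-through logic: entering FIQ masks F, A and I;
 * entering ABT or IRQ masks A and I; every other target mode masks
 * only I. IT/J/E/T are cleared, then T and E follow SCTLR.TE/EE.
 */
static unsigned long update_psr(unsigned long cpsr, unsigned long mode,
				int sctlr_te, int sctlr_ee)
{
	unsigned long new = (cpsr & ~MODE_MASK) | mode;

	switch (mode) {
	case FIQ_MODE:
		new |= PSR_F_BIT;
		/* fall through */
	case ABT_MODE:
	case IRQ_MODE:
		new |= PSR_A_BIT;
		/* fall through */
	default:
		new |= PSR_I_BIT;
	}

	new &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
	if (sctlr_te)
		new |= PSR_T_BIT;
	if (sctlr_ee)
		new |= PSR_E_BIT;

	return new;
}

int main(void)
{
	unsigned long cpsr = 0x600f0030;	/* arbitrary guest CPSR */

	printf("UND: %08lx\n", update_psr(cpsr, UND_MODE, 0, 0)); /* I set     */
	printf("ABT: %08lx\n", update_psr(cpsr, ABT_MODE, 0, 0)); /* A+I set   */
	printf("FIQ: %08lx\n", update_psr(cpsr, FIQ_MODE, 1, 0)); /* F+A+I, T  */
	return 0;
}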
@@ -275,6 +275,40 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)
 		return vbar;
 }
 
+/*
+ * Switch to an exception mode, updating both CPSR and SPSR. Follow
+ * the logic described in AArch32.EnterMode() from the ARMv8 ARM.
+ */
+static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode)
+{
+	unsigned long cpsr = *vcpu_cpsr(vcpu);
+	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
+
+	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode;
+
+	switch (mode) {
+	case FIQ_MODE:
+		*vcpu_cpsr(vcpu) |= PSR_F_BIT;
+		/* Fall through */
+	case ABT_MODE:
+	case IRQ_MODE:
+		*vcpu_cpsr(vcpu) |= PSR_A_BIT;
+		/* Fall through */
+	default:
+		*vcpu_cpsr(vcpu) |= PSR_I_BIT;
+	}
+
+	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
+
+	if (sctlr & SCTLR_TE)
+		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
+	if (sctlr & SCTLR_EE)
+		*vcpu_cpsr(vcpu) |= PSR_E_BIT;
+
+	/* Note: These now point to the mode banked copies */
+	*vcpu_spsr(vcpu) = cpsr;
+}
+
 /**
  * kvm_inject_undefined - inject an undefined exception into the guest
  * @vcpu: The VCPU to receive the undefined exception
@@ -286,29 +320,13 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)
  */
 void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 {
-	unsigned long new_lr_value;
-	unsigned long new_spsr_value;
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
-	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
 	bool is_thumb = (cpsr & PSR_T_BIT);
 	u32 vect_offset = 4;
 	u32 return_offset = (is_thumb) ? 2 : 4;
 
-	new_spsr_value = cpsr;
-	new_lr_value = *vcpu_pc(vcpu) - return_offset;
-
-	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE;
-	*vcpu_cpsr(vcpu) |= PSR_I_BIT;
-	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
-
-	if (sctlr & SCTLR_TE)
-		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
-	if (sctlr & SCTLR_EE)
-		*vcpu_cpsr(vcpu) |= PSR_E_BIT;
-
-	/* Note: These now point to UND banked copies */
-	*vcpu_spsr(vcpu) = cpsr;
-	*vcpu_reg(vcpu, 14) = new_lr_value;
+	kvm_update_psr(vcpu, UND_MODE);
+	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;
 
 	/* Branch to exception vector */
 	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
@@ -320,30 +338,14 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
  */
 static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
 {
-	unsigned long new_lr_value;
-	unsigned long new_spsr_value;
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
-	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
 	bool is_thumb = (cpsr & PSR_T_BIT);
 	u32 vect_offset;
 	u32 return_offset = (is_thumb) ? 4 : 0;
 	bool is_lpae;
 
-	new_spsr_value = cpsr;
-	new_lr_value = *vcpu_pc(vcpu) + return_offset;
-
-	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE;
-	*vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT;
-	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
-
-	if (sctlr & SCTLR_TE)
-		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
-	if (sctlr & SCTLR_EE)
-		*vcpu_cpsr(vcpu) |= PSR_E_BIT;
-
-	/* Note: These now point to ABT banked copies */
-	*vcpu_spsr(vcpu) = cpsr;
-	*vcpu_reg(vcpu, 14) = new_lr_value;
+	kvm_update_psr(vcpu, ABT_MODE);
+	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
 
 	if (is_pabt)
 		vect_offset = 12;
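On the "/* Note: These now point to the mode banked copies */" comment above, the ordering matters: the guest CPSR is read before the mode field is rewritten, and only then stored through *vcpu_spsr(), which by that point resolves to the new mode's banked SPSR. A minimal toy model of that banking, purely illustrative (toy_vcpu, toy_spsr() and the per-mode array are inventions of this sketch, not kernel structures):

#include <stdio.h>

#define MODE_MASK 0x1f
#define SVC_MODE  0x13
#define UND_MODE  0x1b

/* Toy vcpu: one CPSR plus a banked SPSR per mode number. */
struct toy_vcpu {
	unsigned long cpsr;
	unsigned long spsr[32];
};

/* Resolves to the banked SPSR of whatever mode the CPSR currently holds,
 * analogous to what vcpu_spsr() does in the kernel. */
static unsigned long *toy_spsr(struct toy_vcpu *v)
{
	return &v->spsr[v->cpsr & MODE_MASK];
}

int main(void)
{
	struct toy_vcpu v = { .cpsr = SVC_MODE };

	unsigned long old_cpsr = v.cpsr;           /* 1. capture first      */
	v.cpsr = (v.cpsr & ~MODE_MASK) | UND_MODE; /* 2. switch mode        */
	*toy_spsr(&v) = old_cpsr;                  /* 3. lands in SPSR_und  */

	printf("SPSR_und = %08lx\n", v.spsr[UND_MODE]);
	return 0;
}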