/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>
#include <linux/jump_label.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/fpsimd.h>
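
/*
 * Many helpers below come in a VHE and a non-VHE flavour, selected
 * once at boot via hyp_alternate_select() keyed on
 * ARM64_HAS_VIRT_HOST_EXTN. Conceptually (a simplified sketch only,
 * not the actual macro in asm/kvm_hyp.h) the construct behaves like:
 *
 *	static typeof(__fpsimd_enabled_nvhe) *__fpsimd_is_enabled(void)
 *	{
 *		return has_vhe() ? __fpsimd_enabled_vhe
 *				 : __fpsimd_enabled_nvhe;
 *	}
 *
 * except that the choice is patched in by the alternatives framework
 * instead of being tested at runtime, hence the f()() call sites.
 */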

static bool __hyp_text __fpsimd_enabled_nvhe(void)
{
	return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
}

static bool __hyp_text __fpsimd_enabled_vhe(void)
{
	return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN);
}

static hyp_alternate_select(__fpsimd_is_enabled,
			    __fpsimd_enabled_nvhe, __fpsimd_enabled_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

bool __hyp_text __fpsimd_enabled(void)
{
	return __fpsimd_is_enabled()();
}
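
/*
 * Trap activation differs by mode: with VHE, CPACR_EL1 accesses made
 * at EL2 reach CPTR_EL2's functionality, so the traps are programmed
 * through the CPACR_EL1 name; without VHE they are programmed
 * directly through CPTR_EL2.
 */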

static void __hyp_text __activate_traps_vhe(void)
{
	u64 val;

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~CPACR_EL1_FPEN;
	write_sysreg(val, cpacr_el1);

	write_sysreg(__kvm_hyp_vector, vbar_el1);
}

static void __hyp_text __activate_traps_nvhe(void)
{
	u64 val;

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
	write_sysreg(val, cptr_el2);
}

static hyp_alternate_select(__activate_traps_arch,
			    __activate_traps_nvhe, __activate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	val = vcpu->arch.hcr_el2;
	if (!(val & HCR_RW) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
	write_sysreg(val, hcr_el2);

	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);
	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	write_sysreg(0, pmselr_el0);
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
	__activate_traps_arch()();
}
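
/*
 * The deactivation helpers mirror the activation ones: they restore
 * the host's trap configuration on the way out of the world switch.
 */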

static void __hyp_text __deactivate_traps_vhe(void)
{
	extern char vectors[];	/* kernel exception vectors */
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	mdcr_el2 &= MDCR_EL2_HPMN_MASK |
		    MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
		    MDCR_EL2_TPMS;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	write_sysreg(CPACR_EL1_FPEN, cpacr_el1);
	write_sysreg(vectors, vbar_el1);
}

static void __hyp_text __deactivate_traps_nvhe(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	mdcr_el2 &= MDCR_EL2_HPMN_MASK;
	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_RW, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static hyp_alternate_select(__deactivate_traps_arch,
			    __deactivate_traps_nvhe, __deactivate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE)
		vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);

	__deactivate_traps_arch()();
	write_sysreg(0, hstr_el2);
	write_sysreg(0, pmuserenr_el0);
}
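
/*
 * VTTBR_EL2 carries both the stage-2 page table base and the VMID,
 * so installing the guest's value is all it takes to switch stage-2
 * translation over; writing 0 parks it again for the host.
 */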

static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
}

static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}
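
/*
 * The GIC CPU interface flavour is fixed at boot, so a static key
 * routes us to either the GICv3 sysreg save/restore or the GICv2
 * MMIO one. Setting HCR_INT_OVERRIDE (IMO/FMO) while the guest runs
 * routes physical IRQs/FIQs to EL2 and enables their virtual
 * equivalents; it is dropped again on the save path.
 */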

static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_save_state(vcpu);
	else
		__vgic_v2_save_state(vcpu);

	write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
}

static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
	u64 val;

	val = read_sysreg(hcr_el2);
	val |= HCR_INT_OVERRIDE;
	val |= vcpu->arch.irq_lines;
	write_sysreg(val, hcr_el2);

	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_restore_state(vcpu);
	else
		__vgic_v2_restore_state(vcpu);
}
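
/*
 * A branch-free way to test for ARM64_WORKAROUND_834220 from hyp:
 * the alternatives framework patches __check_arm_834220() at boot to
 * return one of the two constant functions below.
 */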

static bool __hyp_text __true_value(void)
{
	return true;
}

static bool __hyp_text __false_value(void)
{
	return false;
}

static hyp_alternate_select(__check_arm_834220,
			    __false_value, __true_value,
			    ARM64_WORKAROUND_834220);

static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
	asm volatile("at s1e1r, %0" : : "r" (far));
	isb();

	tmp = read_sysreg(par_el1);
	write_sysreg(par, par_el1);

	if (unlikely(tmp & 1))
		return false; /* Translation failed, back to guest */

	/*
	 * Convert PAR to HPFAR format: PAR_EL1 holds the output
	 * address in bits [47:12], while HPFAR_EL2 wants those same
	 * address bits placed in FIPA, i.e. bits [39:4].
	 */
	*hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
	return true;
}
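
/*
 * Snapshot the fault syndrome and faulting addresses into
 * vcpu->arch.fault so that the exit handler can consume them once
 * we are back on the host side of the world switch.
 */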

static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u64 esr = read_sysreg_el2(esr);
	u8 ec = ESR_ELx_EC(esr);
	u64 hpfar, far;

	vcpu->arch.fault.esr_el2 = esr;

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(far);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases is true:
	 * 1. The fault was due to a permission fault
	 * 2. The processor carries erratum 834220
	 *
	 * Therefore, for all non-S1PTW faults where we either have a
	 * permission fault or the errata workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}
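
/*
 * To skip a trapped instruction from hyp we need the guest PC (and,
 * for a 32-bit guest, the CPSR/IT state) to be current: the sysreg
 * context hasn't been saved yet at this point, so we work on
 * ELR_EL2/SPSR_EL2 directly.
 */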
static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(elr);

	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
		kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
	} else {
		*vcpu_pc(vcpu) += 4;
	}

	write_sysreg_el2(*vcpu_pc(vcpu), elr);
}
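
/*
 * The world switch proper: save host state, install the guest's
 * traps, stage-2 and vgic/timer context, enter the guest, and undo
 * it all in reverse on the way out. Ordering constraints are noted
 * inline.
 */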
int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool fp_enabled;
	u64 exit_code;

	vcpu = kern_hyp_va(vcpu);
	write_sysreg(vcpu, tpidr_el2);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	guest_ctxt = &vcpu->arch.ctxt;

	__sysreg_save_host_state(host_ctxt);
	__debug_cond_save_host_state(vcpu);

	__activate_traps(vcpu);
	__activate_vm(vcpu);

	__vgic_restore_state(vcpu);
	__timer_restore_state(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_guest_state(guest_ctxt);
	__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);

	/* Jump in the fire! */
again:
	exit_code = __guest_enter(vcpu, host_ctxt);
	/* And we're baaack! */

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
		goto again;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
	    exit_code == ARM_EXCEPTION_TRAP) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_dabt_isextabt(vcpu) &&
			!kvm_vcpu_dabt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1) {
				/* Access emulated in hyp: retry the guest */
				__skip_instr(vcpu);
				goto again;
			}

			if (ret == -1) {
				/* Promote an illegal access to an SError */
				__skip_instr(vcpu);
				exit_code = ARM_EXCEPTION_EL1_SERROR;
			}

			/* 0 falls through to be handled out of EL2 */
		}
	}

	/*
	 * fp_enabled tells us whether the guest gained access to the
	 * FP/SIMD registers (the access trap is disabled on first
	 * use); only then does the hardware hold live guest FP state
	 * that must be saved, and the host's state restored.
	 */
	fp_enabled = __fpsimd_enabled();

	__sysreg_save_guest_state(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_save_state(vcpu);
	__vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_host_state(host_ctxt);

	if (fp_enabled) {
		__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
		__fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
	}

	__debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_cond_restore_host_state(vcpu);

	return exit_code;
}
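
/*
 * Panic plumbing. A non-VHE hyp cannot simply call panic() (it runs
 * in a different translation regime from the kernel), so it goes
 * through __hyp_do_panic() to get back to the host; with VHE the
 * kernel already runs at EL2 and a plain panic() does the job.
 */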

static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
{
	unsigned long str_va;

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * reference.
	 */
	asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));

	__hyp_do_panic(str_va,
		       spsr, elr,
		       read_sysreg(esr_el2), read_sysreg_el2(far),
		       read_sysreg(hpfar_el2), par,
		       (void *)read_sysreg(tpidr_el2));
}

static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par)
{
	panic(__hyp_panic_string,
	      spsr, elr,
	      read_sysreg_el2(esr), read_sysreg_el2(far),
	      read_sysreg(hpfar_el2), par,
	      (void *)read_sysreg(tpidr_el2));
}

static hyp_alternate_select(__hyp_call_panic,
			    __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

void __hyp_text __noreturn __hyp_panic(void)
{
	u64 spsr = read_sysreg_el2(spsr);
	u64 elr = read_sysreg_el2(elr);
	u64 par = read_sysreg(par_el1);

	if (read_sysreg(vttbr_el2)) {
		struct kvm_vcpu *vcpu;
		struct kvm_cpu_context *host_ctxt;

		/* A guest was running: restore enough host state to panic */
		vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
		__timer_save_state(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_host_state(host_ctxt);
	}

	/* Call panic for real */
	__hyp_call_panic()(spsr, elr, par);

	unreachable();
}