commit 2d0e63e030

We can finally get rid of all calls to the VGICv3 save/restore functions when the AP lists are empty on VHE systems. This requires carefully factoring out trap configuration from saving and restoring state, and carefully choosing what to do on the VHE and non-VHE paths.

One of the challenges is that we cannot save/restore the VMCR lazily, because we can only write the VMCR while ICC_SRE_EL1.SRE is cleared when emulating a GICv2-on-GICv3; otherwise all Group-0 interrupts end up being delivered as FIQ.

To solve this problem, and still keep the fast path of exiting a VM with no pending interrupts fast (which also improves the latency of delivering virtual interrupts backed by physical interrupts), we orchestrate a dance: on VHE systems (which can run the host with ICC_SRE_EL1.SRE cleared) the activate/deactivate of the traps is done only in vgic load/put, while on non-VHE systems the configuration is redone on every round-trip.

Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
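The trap helpers this change hinges on are declared in the header below (__vgic_v3_activate_traps(), __vgic_v3_deactivate_traps(), and the APR/state save/restore functions). The following is a minimal C sketch of the load/put versus round-trip split described above; the example_* wrapper functions and their placement are illustrative only and do not correspond to the exact upstream call sites:

#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>

/*
 * VHE: the host can keep ICC_SRE_EL1.SRE cleared (a prerequisite for
 * writing the VMCR when emulating GICv2-on-GICv3), so trap
 * configuration is hoisted out of the world-switch and done once per
 * vcpu load/put.
 */
static void example_vgic_v3_load_vhe(struct kvm_vcpu *vcpu)
{
	__vgic_v3_restore_aprs(vcpu);
	__vgic_v3_activate_traps(vcpu);
}

static void example_vgic_v3_put_vhe(struct kvm_vcpu *vcpu)
{
	__vgic_v3_save_aprs(vcpu);
	__vgic_v3_deactivate_traps(vcpu);
}

/*
 * non-VHE: the host runs with SRE set, so trap configuration has to be
 * redone on every round-trip, from the hyp world-switch path.
 */
static void example_vgic_v3_enter_nvhe(struct kvm_vcpu *vcpu)
{
	__vgic_v3_activate_traps(vcpu);
	__vgic_v3_restore_state(vcpu);
}

static void example_vgic_v3_exit_nvhe(struct kvm_vcpu *vcpu)
{
	__vgic_v3_save_state(vcpu);
	__vgic_v3_deactivate_traps(vcpu);
}

With the trap configuration moved out of the VHE world-switch, the exit path only needs to touch the GIC state at all when the AP lists are non-empty, which is what keeps the no-pending-interrupt exit cheap.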
133 lines
4.8 KiB
C
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM_KVM_HYP_H__
#define __ARM_KVM_HYP_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/cp15.h>
#include <asm/vfp.h>

#define __hyp_text __section(.hyp.text) notrace

#define __ACCESS_VFP(CRn)					\
	"mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32

#define write_special(v, r)					\
	asm volatile("msr " __stringify(r) ", %0" : : "r" (v))
#define read_special(r) ({					\
	u32 __val;						\
	asm volatile("mrs %0, " __stringify(r) : "=r" (__val));	\
	__val;							\
})

#define TTBR0		__ACCESS_CP15_64(0, c2)
#define TTBR1		__ACCESS_CP15_64(1, c2)
#define VTTBR		__ACCESS_CP15_64(6, c2)
#define PAR		__ACCESS_CP15_64(0, c7)
#define CNTV_CVAL	__ACCESS_CP15_64(3, c14)
#define CNTVOFF		__ACCESS_CP15_64(4, c14)

#define MIDR		__ACCESS_CP15(c0, 0, c0, 0)
#define CSSELR		__ACCESS_CP15(c0, 2, c0, 0)
#define VPIDR		__ACCESS_CP15(c0, 4, c0, 0)
#define VMPIDR		__ACCESS_CP15(c0, 4, c0, 5)
#define SCTLR		__ACCESS_CP15(c1, 0, c0, 0)
#define CPACR		__ACCESS_CP15(c1, 0, c0, 2)
#define HCR		__ACCESS_CP15(c1, 4, c1, 0)
#define HDCR		__ACCESS_CP15(c1, 4, c1, 1)
#define HCPTR		__ACCESS_CP15(c1, 4, c1, 2)
#define HSTR		__ACCESS_CP15(c1, 4, c1, 3)
#define TTBCR		__ACCESS_CP15(c2, 0, c0, 2)
#define HTCR		__ACCESS_CP15(c2, 4, c0, 2)
#define VTCR		__ACCESS_CP15(c2, 4, c1, 2)
#define DACR		__ACCESS_CP15(c3, 0, c0, 0)
#define DFSR		__ACCESS_CP15(c5, 0, c0, 0)
#define IFSR		__ACCESS_CP15(c5, 0, c0, 1)
#define ADFSR		__ACCESS_CP15(c5, 0, c1, 0)
#define AIFSR		__ACCESS_CP15(c5, 0, c1, 1)
#define HSR		__ACCESS_CP15(c5, 4, c2, 0)
#define DFAR		__ACCESS_CP15(c6, 0, c0, 0)
#define IFAR		__ACCESS_CP15(c6, 0, c0, 2)
#define HDFAR		__ACCESS_CP15(c6, 4, c0, 0)
#define HIFAR		__ACCESS_CP15(c6, 4, c0, 2)
#define HPFAR		__ACCESS_CP15(c6, 4, c0, 4)
#define ICIALLUIS	__ACCESS_CP15(c7, 0, c1, 0)
#define BPIALLIS	__ACCESS_CP15(c7, 0, c1, 6)
#define ICIMVAU		__ACCESS_CP15(c7, 0, c5, 1)
#define ATS1CPR		__ACCESS_CP15(c7, 0, c8, 0)
#define TLBIALLIS	__ACCESS_CP15(c8, 0, c3, 0)
#define TLBIALL		__ACCESS_CP15(c8, 0, c7, 0)
#define TLBIALLNSNHIS	__ACCESS_CP15(c8, 4, c3, 4)
#define PRRR		__ACCESS_CP15(c10, 0, c2, 0)
#define NMRR		__ACCESS_CP15(c10, 0, c2, 1)
#define AMAIR0		__ACCESS_CP15(c10, 0, c3, 0)
#define AMAIR1		__ACCESS_CP15(c10, 0, c3, 1)
#define VBAR		__ACCESS_CP15(c12, 0, c0, 0)
#define CID		__ACCESS_CP15(c13, 0, c0, 1)
#define TID_URW		__ACCESS_CP15(c13, 0, c0, 2)
#define TID_URO		__ACCESS_CP15(c13, 0, c0, 3)
#define TID_PRIV	__ACCESS_CP15(c13, 0, c0, 4)
#define HTPIDR		__ACCESS_CP15(c13, 4, c0, 2)
#define CNTKCTL		__ACCESS_CP15(c14, 0, c1, 0)
#define CNTV_CTL	__ACCESS_CP15(c14, 0, c3, 1)
#define CNTHCTL		__ACCESS_CP15(c14, 4, c1, 0)

#define VFP_FPEXC	__ACCESS_VFP(FPEXC)

/* AArch64 compatibility macros, only for the timer so far */
#define read_sysreg_el0(r)		read_sysreg(r##_el0)
#define write_sysreg_el0(v, r)		write_sysreg(v, r##_el0)

#define cntv_ctl_el0			CNTV_CTL
#define cntv_cval_el0			CNTV_CVAL
#define cntvoff_el2			CNTVOFF
#define cnthctl_el2			CNTHCTL

void __timer_enable_traps(struct kvm_vcpu *vcpu);
void __timer_disable_traps(struct kvm_vcpu *vcpu);

void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);

void __sysreg_save_state(struct kvm_cpu_context *ctxt);
void __sysreg_restore_state(struct kvm_cpu_context *ctxt);

void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu);
void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu);
void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);

asmlinkage void __vfp_save_state(struct vfp_hard_struct *vfp);
asmlinkage void __vfp_restore_state(struct vfp_hard_struct *vfp);
static inline bool __vfp_enabled(void)
{
	return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10)));
}

void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt);
void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt);

asmlinkage int __guest_enter(struct kvm_vcpu *vcpu,
			     struct kvm_cpu_context *host);
asmlinkage int __hyp_do_panic(const char *, int, u32);

#endif /* __ARM_KVM_HYP_H__ */
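For reference, the AArch64 compatibility macros above exist so that timer code shared with arm64 can be compiled unchanged on 32-bit ARM. A small sketch of the expansion chain; the helper function name is hypothetical:

#include <asm/kvm_hyp.h>

/*
 * read_sysreg_el0(cntv_ctl) expands to read_sysreg(cntv_ctl_el0);
 * cntv_ctl_el0 is defined above as CNTV_CTL, which carries the
 * __ACCESS_CP15(c14, 0, c3, 1) encoding, i.e. an
 * "mrc p15, 0, <Rt>, c14, c3, 1" read of the virtual timer control
 * register.
 */
static inline u32 example_read_cntv_ctl(void)
{
	return read_sysreg_el0(cntv_ctl);
}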