GIC updates for Linux 4.4

Merge tag 'gic-4.4' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into irq/core

GIC updates for Linux 4.4 from Marc Zyngier:

- Enable basic GICv3 support on 32bit ARM (mostly for running VMs with
  more than 8 virtual CPUs)
- arm64 changes to deal with firmware limitations that force a GICv3
  to be used as a GICv2
- A GICv2m erratum workaround on Applied Micro X-Gene2
commit fcf8ab690e
@@ -173,13 +173,22 @@ Before jumping into the kernel, the following conditions must be met:
  the kernel image will be entered must be initialised by software at a
  higher exception level to prevent execution in an UNKNOWN state.

  For systems with a GICv3 interrupt controller:
  For systems with a GICv3 interrupt controller to be used in v3 mode:
  - If EL3 is present:
    ICC_SRE_EL3.Enable (bit 3) must be initialised to 0b1.
    ICC_SRE_EL3.SRE (bit 0) must be initialised to 0b1.
  - If the kernel is entered at EL1:
    ICC_SRE_EL2.Enable (bit 3) must be initialised to 0b1.
    ICC_SRE_EL2.SRE (bit 0) must be initialised to 0b1.
  - The DT or ACPI tables must describe a GICv3 interrupt controller.

  For systems with a GICv3 interrupt controller to be used in
  compatibility (v2) mode:
  - If EL3 is present:
    ICC_SRE_EL3.SRE (bit 0) must be initialised to 0b0.
  - If the kernel is entered at EL1:
    ICC_SRE_EL2.SRE (bit 0) must be initialised to 0b0.
  - The DT or ACPI tables must describe a GICv2 interrupt controller.

  The requirements described above for CPU mode, caches, MMUs, architected
  timers, coherency and system registers apply to all CPUs. All CPUs must

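The v3-mode requirements above are a boot-protocol contract, not kernel code. As an illustration only, a hedged sketch of how EL3 firmware (for example a boot monitor, not part of this series) might satisfy them could look like the following; the function name is hypothetical, and S3_6_C12_C12_5 is the architectural encoding of ICC_SRE_EL3.

/* Hypothetical EL3 firmware fragment (illustration only): program
 * ICC_SRE_EL3 so lower exception levels can use the GICv3 in v3 mode,
 * as the booting document above requires. */
#define ICC_SRE_EL3_SRE		(1UL << 0)	/* bit 0: system register enable */
#define ICC_SRE_EL3_ENABLE	(1UL << 3)	/* bit 3: allow EL2/EL1 to set their SRE */

static inline void el3_enable_gicv3_sysregs(void)
{
	unsigned long sre;

	/* S3_6_C12_C12_5 == ICC_SRE_EL3 */
	asm volatile("mrs %0, S3_6_C12_C12_5" : "=r" (sre));
	sre |= ICC_SRE_EL3_SRE | ICC_SRE_EL3_ENABLE;
	asm volatile("msr S3_6_C12_C12_5, %0" : : "r" (sre));
	asm volatile("isb");
}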
@@ -819,6 +819,7 @@ config ARCH_VIRT
	bool "Dummy Virtual Machine" if ARCH_MULTI_V7
	select ARM_AMBA
	select ARM_GIC
	select ARM_GIC_V3
	select ARM_PSCI
	select HAVE_ARM_ARCH_TIMER

arch/arm/include/asm/arch_gicv3.h (new file, 188 lines)
@@ -0,0 +1,188 @@
/*
 * arch/arm/include/asm/arch_gicv3.h
 *
 * Copyright (C) 2015 ARM Ltd.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ARCH_GICV3_H
#define __ASM_ARCH_GICV3_H

#ifndef __ASSEMBLY__

#include <linux/io.h>

#define __ACCESS_CP15(CRn, Op1, CRm, Op2) p15, Op1, %0, CRn, CRm, Op2
#define __ACCESS_CP15_64(Op1, CRm) p15, Op1, %Q0, %R0, CRm

#define ICC_EOIR1 __ACCESS_CP15(c12, 0, c12, 1)
#define ICC_DIR __ACCESS_CP15(c12, 0, c11, 1)
#define ICC_IAR1 __ACCESS_CP15(c12, 0, c12, 0)
#define ICC_SGI1R __ACCESS_CP15_64(0, c12)
#define ICC_PMR __ACCESS_CP15(c4, 0, c6, 0)
#define ICC_CTLR __ACCESS_CP15(c12, 0, c12, 4)
#define ICC_SRE __ACCESS_CP15(c12, 0, c12, 5)
#define ICC_IGRPEN1 __ACCESS_CP15(c12, 0, c12, 7)

#define ICC_HSRE __ACCESS_CP15(c12, 4, c9, 5)

#define ICH_VSEIR __ACCESS_CP15(c12, 4, c9, 4)
#define ICH_HCR __ACCESS_CP15(c12, 4, c11, 0)
#define ICH_VTR __ACCESS_CP15(c12, 4, c11, 1)
#define ICH_MISR __ACCESS_CP15(c12, 4, c11, 2)
#define ICH_EISR __ACCESS_CP15(c12, 4, c11, 3)
#define ICH_ELSR __ACCESS_CP15(c12, 4, c11, 5)
#define ICH_VMCR __ACCESS_CP15(c12, 4, c11, 7)

#define __LR0(x) __ACCESS_CP15(c12, 4, c12, x)
#define __LR8(x) __ACCESS_CP15(c12, 4, c13, x)

#define ICH_LR0 __LR0(0)
#define ICH_LR1 __LR0(1)
#define ICH_LR2 __LR0(2)
#define ICH_LR3 __LR0(3)
#define ICH_LR4 __LR0(4)
#define ICH_LR5 __LR0(5)
#define ICH_LR6 __LR0(6)
#define ICH_LR7 __LR0(7)
#define ICH_LR8 __LR8(0)
#define ICH_LR9 __LR8(1)
#define ICH_LR10 __LR8(2)
#define ICH_LR11 __LR8(3)
#define ICH_LR12 __LR8(4)
#define ICH_LR13 __LR8(5)
#define ICH_LR14 __LR8(6)
#define ICH_LR15 __LR8(7)

/* LR top half */
#define __LRC0(x) __ACCESS_CP15(c12, 4, c14, x)
#define __LRC8(x) __ACCESS_CP15(c12, 4, c15, x)

#define ICH_LRC0 __LRC0(0)
#define ICH_LRC1 __LRC0(1)
#define ICH_LRC2 __LRC0(2)
#define ICH_LRC3 __LRC0(3)
#define ICH_LRC4 __LRC0(4)
#define ICH_LRC5 __LRC0(5)
#define ICH_LRC6 __LRC0(6)
#define ICH_LRC7 __LRC0(7)
#define ICH_LRC8 __LRC8(0)
#define ICH_LRC9 __LRC8(1)
#define ICH_LRC10 __LRC8(2)
#define ICH_LRC11 __LRC8(3)
#define ICH_LRC12 __LRC8(4)
#define ICH_LRC13 __LRC8(5)
#define ICH_LRC14 __LRC8(6)
#define ICH_LRC15 __LRC8(7)

#define __AP0Rx(x) __ACCESS_CP15(c12, 4, c8, x)
#define ICH_AP0R0 __AP0Rx(0)
#define ICH_AP0R1 __AP0Rx(1)
#define ICH_AP0R2 __AP0Rx(2)
#define ICH_AP0R3 __AP0Rx(3)

#define __AP1Rx(x) __ACCESS_CP15(c12, 4, c9, x)
#define ICH_AP1R0 __AP1Rx(0)
#define ICH_AP1R1 __AP1Rx(1)
#define ICH_AP1R2 __AP1Rx(2)
#define ICH_AP1R3 __AP1Rx(3)

/* Low-level accessors */

static inline void gic_write_eoir(u32 irq)
{
	asm volatile("mcr " __stringify(ICC_EOIR1) : : "r" (irq));
	isb();
}

static inline void gic_write_dir(u32 val)
{
	asm volatile("mcr " __stringify(ICC_DIR) : : "r" (val));
	isb();
}

static inline u32 gic_read_iar(void)
{
	u32 irqstat;

	asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat));
	return irqstat;
}

static inline void gic_write_pmr(u32 val)
{
	asm volatile("mcr " __stringify(ICC_PMR) : : "r" (val));
}

static inline void gic_write_ctlr(u32 val)
{
	asm volatile("mcr " __stringify(ICC_CTLR) : : "r" (val));
	isb();
}

static inline void gic_write_grpen1(u32 val)
{
	asm volatile("mcr " __stringify(ICC_IGRPEN1) : : "r" (val));
	isb();
}

static inline void gic_write_sgi1r(u64 val)
{
	asm volatile("mcrr " __stringify(ICC_SGI1R) : : "r" (val));
}

static inline u32 gic_read_sre(void)
{
	u32 val;

	asm volatile("mrc " __stringify(ICC_SRE) : "=r" (val));
	return val;
}

static inline void gic_write_sre(u32 val)
{
	asm volatile("mcr " __stringify(ICC_SRE) : : "r" (val));
	isb();
}

/*
 * Even in 32bit systems that use LPAE, there is no guarantee that the I/O
 * interface provides true 64bit atomic accesses, so using strd/ldrd doesn't
 * make much sense.
 * Moreover, 64bit I/O emulation is extremely difficult to implement on
 * AArch32, since the syndrome register doesn't provide any information for
 * them.
 * Consequently, the following IO helpers use 32bit accesses.
 *
 * There are only two registers that need 64bit accesses in this driver:
 * - GICD_IROUTERn, contain the affinity values associated to each interrupt.
 *   The upper-word (aff3) will always be 0, so there is no need for a lock.
 * - GICR_TYPER is an ID register and doesn't need atomicity.
 */
static inline void gic_write_irouter(u64 val, volatile void __iomem *addr)
{
	writel_relaxed((u32)val, addr);
	writel_relaxed((u32)(val >> 32), addr + 4);
}

static inline u64 gic_read_typer(const volatile void __iomem *addr)
{
	u64 val;

	val = readl_relaxed(addr);
	val |= (u64)readl_relaxed(addr + 4) << 32;
	return val;
}

#endif /* !__ASSEMBLY__ */
#endif /* !__ASM_ARCH_GICV3_H */
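As a usage illustration (the helper name is made up; it assumes an ioremapped redistributor base and the GICR_TYPER offset from <linux/irqchip/arm-gic-v3.h>), common driver code can now read a redistributor's affinity the same way on arm and arm64:

/* Illustration only: on 32bit ARM, gic_read_typer() above becomes two
 * readl_relaxed() calls; on arm64 it is a single readq_relaxed(). */
static inline u32 example_redist_affinity(void __iomem *rdist_base)
{
	u64 typer = gic_read_typer(rdist_base + GICR_TYPER);

	return typer >> 32;	/* Aff3.Aff2.Aff1.Aff0 of this redistributor */
}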
arch/arm64/include/asm/arch_gicv3.h (new file, 170 lines)
@@ -0,0 +1,170 @@
/*
 * arch/arm64/include/asm/arch_gicv3.h
 *
 * Copyright (C) 2015 ARM Ltd.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ARCH_GICV3_H
#define __ASM_ARCH_GICV3_H

#include <asm/sysreg.h>

#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
#define ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
#define ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4)
#define ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5)
#define ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)

#define ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5)

/*
 * System register definitions
 */
#define ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4)
#define ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0)
#define ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1)
#define ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2)
#define ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
#define ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5)
#define ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7)

#define __LR0_EL2(x) sys_reg(3, 4, 12, 12, x)
#define __LR8_EL2(x) sys_reg(3, 4, 12, 13, x)

#define ICH_LR0_EL2 __LR0_EL2(0)
#define ICH_LR1_EL2 __LR0_EL2(1)
#define ICH_LR2_EL2 __LR0_EL2(2)
#define ICH_LR3_EL2 __LR0_EL2(3)
#define ICH_LR4_EL2 __LR0_EL2(4)
#define ICH_LR5_EL2 __LR0_EL2(5)
#define ICH_LR6_EL2 __LR0_EL2(6)
#define ICH_LR7_EL2 __LR0_EL2(7)
#define ICH_LR8_EL2 __LR8_EL2(0)
#define ICH_LR9_EL2 __LR8_EL2(1)
#define ICH_LR10_EL2 __LR8_EL2(2)
#define ICH_LR11_EL2 __LR8_EL2(3)
#define ICH_LR12_EL2 __LR8_EL2(4)
#define ICH_LR13_EL2 __LR8_EL2(5)
#define ICH_LR14_EL2 __LR8_EL2(6)
#define ICH_LR15_EL2 __LR8_EL2(7)

#define __AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x)
#define ICH_AP0R0_EL2 __AP0Rx_EL2(0)
#define ICH_AP0R1_EL2 __AP0Rx_EL2(1)
#define ICH_AP0R2_EL2 __AP0Rx_EL2(2)
#define ICH_AP0R3_EL2 __AP0Rx_EL2(3)

#define __AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x)
#define ICH_AP1R0_EL2 __AP1Rx_EL2(0)
#define ICH_AP1R1_EL2 __AP1Rx_EL2(1)
#define ICH_AP1R2_EL2 __AP1Rx_EL2(2)
#define ICH_AP1R3_EL2 __AP1Rx_EL2(3)

#ifndef __ASSEMBLY__

#include <linux/stringify.h>

/*
 * Low-level accessors
 *
 * These system registers are 32 bits, but we make sure that the compiler
 * sets the GP register's most significant bits to 0 with an explicit cast.
 */

static inline void gic_write_eoir(u32 irq)
{
	asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" ((u64)irq));
	isb();
}

static inline void gic_write_dir(u32 irq)
{
	asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" ((u64)irq));
	isb();
}

static inline u64 gic_read_iar_common(void)
{
	u64 irqstat;

	asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
	return irqstat;
}

/*
 * Cavium ThunderX erratum 23154
 *
 * The gicv3 of ThunderX requires a modified version for reading the
 * IAR status to ensure data synchronization (access to icc_iar1_el1
 * is not sync'ed before and after).
 */
static inline u64 gic_read_iar_cavium_thunderx(void)
{
	u64 irqstat;

	asm volatile(
		"nop;nop;nop;nop\n\t"
		"nop;nop;nop;nop\n\t"
		"mrs_s %0, " __stringify(ICC_IAR1_EL1) "\n\t"
		"nop;nop;nop;nop"
		: "=r" (irqstat));
	mb();

	return irqstat;
}

static inline void gic_write_pmr(u32 val)
{
	asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" ((u64)val));
}

static inline void gic_write_ctlr(u32 val)
{
	asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" ((u64)val));
	isb();
}

static inline void gic_write_grpen1(u32 val)
{
	asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" ((u64)val));
	isb();
}

static inline void gic_write_sgi1r(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
}

static inline u32 gic_read_sre(void)
{
	u64 val;

	asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
	return val;
}

static inline void gic_write_sre(u32 val)
{
	asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" ((u64)val));
	isb();
}

#define gic_read_typer(c) readq_relaxed(c)
#define gic_write_irouter(v, c) writeq_relaxed(v, c)

#endif /* __ASSEMBLY__ */
#endif /* __ASM_ARCH_GICV3_H */
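For context, a simplified acknowledge/EOI sequence built on these accessors might look like the sketch below. The real flow handler is gic_handle_irq() in drivers/irqchip/irq-gic-v3.c, ICC_IAR1_EL1_SPURIOUS comes from <linux/irqchip/arm-gic-v3.h>, and the function name and the bare spurious check are simplifications.

/* Illustration only: acknowledge one interrupt and signal end-of-interrupt. */
static void example_ack_and_eoi(void)
{
	u64 irqnr = gic_read_iar_common();	/* reads ICC_IAR1_EL1 */

	if ((irqnr & ICC_IAR1_EL1_SPURIOUS) == ICC_IAR1_EL1_SPURIOUS)
		return;				/* 1023: nothing pending */

	/* ... dispatch irqnr to its handler here ... */

	gic_write_eoir((u32)irqnr);		/* writes ICC_EOIR1_EL1 + isb */
}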
@@ -23,6 +23,8 @@
#include <asm/cpufeature.h>
#include <asm/processor.h>

#include <linux/irqchip/arm-gic-v3.h>

static bool
feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
{
@@ -45,11 +47,26 @@ __ID_FEAT_CHK(id_aa64pfr0);
__ID_FEAT_CHK(id_aa64mmfr1);
__ID_FEAT_CHK(id_aa64isar0);

static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry)
{
	bool has_sre;

	if (!has_id_aa64pfr0_feature(entry))
		return false;

	has_sre = gic_enable_sre();
	if (!has_sre)
		pr_warn_once("%s present but disabled by higher exception level\n",
			     entry->desc);

	return has_sre;
}

static const struct arm64_cpu_capabilities arm64_features[] = {
	{
		.desc = "GIC system register CPU interface",
		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
		.matches = has_id_aa64pfr0_feature,
		.matches = has_useable_gicv3_cpuif,
		.field_pos = 24,
		.min_field_value = 1,
	},

@@ -498,6 +498,8 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	mrs_s	x0, ICC_SRE_EL2			// Read SRE back,
	tbz	x0, #0, 3f			// and check that it sticks
	msr_s	ICH_HCR_EL2, xzr		// Reset ICC_HCR_EL2 to defaults

3:
@@ -16,6 +16,9 @@ menuconfig VIRTUALIZATION

if VIRTUALIZATION

config KVM_ARM_VGIC_V3
	bool

config KVM
	bool "Kernel-based Virtual Machine (KVM) support"
	depends on OF
@@ -31,6 +34,7 @@ config KVM
	select KVM_VFIO
	select HAVE_KVM_EVENTFD
	select HAVE_KVM_IRQFD
	select KVM_ARM_VGIC_V3
	---help---
	  Support hosting virtualized guest machines.

@@ -37,12 +37,19 @@
#define V2M_MSI_SETSPI_NS 0x040
#define V2M_MIN_SPI 32
#define V2M_MAX_SPI 1019
#define V2M_MSI_IIDR 0xFCC

#define V2M_MSI_TYPER_BASE_SPI(x) \
	(((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)

#define V2M_MSI_TYPER_NUM_SPI(x) ((x) & V2M_MSI_TYPER_NUM_MASK)

/* APM X-Gene with GICv2m MSI_IIDR register value */
#define XGENE_GICV2M_MSI_IIDR 0x06000170

/* List of flags for specific v2m implementation */
#define GICV2M_NEEDS_SPI_OFFSET 0x00000001

struct v2m_data {
	spinlock_t msi_cnt_lock;
	struct resource res;	/* GICv2m resource */
@@ -50,6 +57,7 @@ struct v2m_data {
	u32 spi_start;		/* The SPI number that MSIs start */
	u32 nr_spis;		/* The number of SPIs for MSIs */
	unsigned long *bm;	/* MSI vector bitmap */
	u32 flags;		/* v2m flags for specific implementation */
};

static void gicv2m_mask_msi_irq(struct irq_data *d)
@@ -98,6 +106,9 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
	msg->address_hi = upper_32_bits(addr);
	msg->address_lo = lower_32_bits(addr);
	msg->data = data->hwirq;

	if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
		msg->data -= v2m->spi_start;
}

static struct irq_chip gicv2m_irq_chip = {
@@ -266,6 +277,17 @@ static int __init gicv2m_init_one(struct device_node *node,
		goto err_iounmap;
	}

	/*
	 * APM X-Gene GICv2m implementation has an erratum where
	 * the MSI data needs to be the offset from the spi_start
	 * in order to trigger the correct MSI interrupt. This is
	 * different from the standard GICv2m implementation where
	 * the MSI data is the absolute value within the range from
	 * spi_start to (spi_start + num_spis).
	 */
	if (readl_relaxed(v2m->base + V2M_MSI_IIDR) == XGENE_GICV2M_MSI_IIDR)
		v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;

	v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis),
			  GFP_KERNEL);
	if (!v2m->bm) {
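To make the erratum concrete, here is a small illustration with made-up numbers (spi_start = 64, an MSI routed to SPI 70): a spec-compliant GICv2m expects the absolute SPI number in the MSI data, while the X-Gene variant flagged by GICV2M_NEEDS_SPI_OFFSET expects the offset from spi_start.

/* Illustration only (hypothetical helper, not in the driver): the MSI data
 * value that gicv2m_compose_msi_msg() ends up programming. */
static u32 example_msi_data(u32 hwirq, u32 spi_start, bool needs_spi_offset)
{
	return needs_spi_offset ? hwirq - spi_start : hwirq;
}

/* example_msi_data(70, 64, false) == 70  (standard GICv2m)
 * example_msi_data(70, 64, true)  == 6   (APM X-Gene)      */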
@@ -108,37 +108,7 @@ static void gic_redist_wait_for_rwp(void)
	gic_do_wait_for_rwp(gic_data_rdist_rd_base());
}

/* Low level accessors */
static u64 gic_read_iar_common(void)
{
	u64 irqstat;

	asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
	return irqstat;
}

/*
 * Cavium ThunderX erratum 23154
 *
 * The gicv3 of ThunderX requires a modified version for reading the
 * IAR status to ensure data synchronization (access to icc_iar1_el1
 * is not sync'ed before and after).
 */
static u64 gic_read_iar_cavium_thunderx(void)
{
	u64 irqstat;

	asm volatile(
		"nop;nop;nop;nop\n\t"
		"nop;nop;nop;nop\n\t"
		"mrs_s %0, " __stringify(ICC_IAR1_EL1) "\n\t"
		"nop;nop;nop;nop"
		: "=r" (irqstat));
	mb();

	return irqstat;
}

#ifdef CONFIG_ARM64
static DEFINE_STATIC_KEY_FALSE(is_cavium_thunderx);

static u64 __maybe_unused gic_read_iar(void)
@@ -148,49 +118,7 @@ static u64 __maybe_unused gic_read_iar(void)
	else
		return gic_read_iar_common();
}

static void __maybe_unused gic_write_pmr(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
}

static void __maybe_unused gic_write_ctlr(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
	isb();
}

static void __maybe_unused gic_write_grpen1(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
	isb();
}

static void __maybe_unused gic_write_sgi1r(u64 val)
{
	asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
}

static void gic_enable_sre(void)
{
	u64 val;

	asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
	val |= ICC_SRE_EL1_SRE;
	asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val));
	isb();

	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
	if (!(val & ICC_SRE_EL1_SRE))
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
}
#endif

static void gic_enable_redist(bool enable)
{
@@ -391,11 +319,11 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
	return 0;
}

static u64 gic_mpidr_to_affinity(u64 mpidr)
static u64 gic_mpidr_to_affinity(unsigned long mpidr)
{
	u64 aff;

	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
	aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));
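The (u64) cast added above matters because cpu_logical_map() now hands the function an unsigned long, which is 32 bits on ARM; a standalone sketch with hypothetical values shows why the widening has to happen before the shift.

/* Illustration only: shifting a 32-bit unsigned long by 32 is undefined;
 * widening to u64 first keeps Aff3 in bits [39:32] of the affinity value. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned long aff3 = 0x01;		/* stand-in for MPIDR_AFFINITY_LEVEL(mpidr, 3) */
	uint64_t aff = (uint64_t)aff3 << 32;	/* well-defined on 32bit and 64bit */

	printf("0x%016llx\n", (unsigned long long)aff);	/* 0x0000000100000000 */
	return 0;
}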
@@ -405,7 +333,7 @@ static u64 gic_mpidr_to_affinity(u64 mpidr)

static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u64 irqnr;
	u32 irqnr;

	do {
		irqnr = gic_read_iar();
@@ -464,12 +392,12 @@ static void __init gic_dist_init(void)
	 */
	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
	for (i = 32; i < gic_data.irq_nr; i++)
		writeq_relaxed(affinity, base + GICD_IROUTER + i * 8);
		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
}

static int gic_populate_rdist(void)
{
	u64 mpidr = cpu_logical_map(smp_processor_id());
	unsigned long mpidr = cpu_logical_map(smp_processor_id());
	u64 typer;
	u32 aff;
	int i;
@@ -495,15 +423,14 @@ static int gic_populate_rdist(void)
	}

	do {
		typer = readq_relaxed(ptr + GICR_TYPER);
		typer = gic_read_typer(ptr + GICR_TYPER);
		if ((typer >> 32) == aff) {
			u64 offset = ptr - gic_data.redist_regions[i].redist_base;
			gic_data_rdist_rd_base() = ptr;
			gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
			pr_info("CPU%d: found redistributor %llx region %d:%pa\n",
				smp_processor_id(),
				(unsigned long long)mpidr,
				i, &gic_data_rdist()->phys_base);
			pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
				smp_processor_id(), mpidr, i,
				&gic_data_rdist()->phys_base);
			return 0;
		}

@@ -518,15 +445,22 @@ static int gic_populate_rdist(void)
	}

	/* We couldn't even deal with ourselves... */
	WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n",
	     smp_processor_id(), (unsigned long long)mpidr);
	WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
	     smp_processor_id(), mpidr);
	return -ENODEV;
}

static void gic_cpu_sys_reg_init(void)
{
	/* Enable system registers */
	gic_enable_sre();
	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	if (!gic_enable_sre())
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");

	/* Set priority mask register */
	gic_write_pmr(DEFAULT_PMR_VALUE);
@@ -589,10 +523,10 @@ static struct notifier_block gic_cpu_notifier = {
};

static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
				   u64 cluster_id)
				   unsigned long cluster_id)
{
	int cpu = *base_cpu;
	u64 mpidr = cpu_logical_map(cpu);
	unsigned long mpidr = cpu_logical_map(cpu);
	u16 tlist = 0;

	while (cpu < nr_cpu_ids) {
@@ -653,7 +587,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
	smp_wmb();

	for_each_cpu(cpu, mask) {
		u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
		unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL;
		u16 tlist;

		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
@@ -689,7 +623,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
	reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));

	writeq_relaxed(val, reg);
	gic_write_irouter(val, reg);

	/*
	 * If the interrupt was enabled, enabled it again. Otherwise,
@@ -870,8 +804,10 @@ static const struct irq_domain_ops gic_irq_domain_ops = {

static void gicv3_enable_quirks(void)
{
#ifdef CONFIG_ARM64
	if (cpus_have_cap(ARM64_WORKAROUND_CAVIUM_23154))
		static_branch_enable(&is_cavium_thunderx);
#endif
}

static int __init gic_of_init(struct device_node *node, struct device_node *parent)
@@ -51,6 +51,19 @@

#include "irq-gic-common.h"

#ifdef CONFIG_ARM64
#include <asm/cpufeature.h>

static void gic_check_cpu_features(void)
{
	WARN_TAINT_ONCE(cpus_have_cap(ARM64_HAS_SYSREG_GIC_CPUIF),
			TAINT_CPU_OUT_OF_SPEC,
			"GICv3 system registers enabled, broken firmware!\n");
}
#else
#define gic_check_cpu_features() do { } while(0)
#endif

union gic_base {
	void __iomem *common_base;
	void __percpu * __iomem *percpu_base;
@@ -987,6 +1000,8 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,

	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic_check_cpu_features();

	gic = &gic_data[gic_nr];
#ifdef CONFIG_GIC_NON_BANKED
	if (percpu_offset) { /* Frankein-GIC without banked registers... */
@@ -282,7 +282,7 @@
};

struct vgic_v3_cpu_if {
#ifdef CONFIG_ARM_GIC_V3
#ifdef CONFIG_KVM_ARM_VGIC_V3
	u32		vgic_hcr;
	u32		vgic_vmcr;
	u32		vgic_sre;	/* Restored only, change ignored */
@@ -364,7 +364,7 @@ void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active);
int vgic_v2_probe(struct device_node *vgic_node,
		  const struct vgic_ops **ops,
		  const struct vgic_params **params);
#ifdef CONFIG_ARM_GIC_V3
#ifdef CONFIG_KVM_ARM_VGIC_V3
int vgic_v3_probe(struct device_node *vgic_node,
		  const struct vgic_ops **ops,
		  const struct vgic_params **params);
@@ -18,8 +18,6 @@
#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H
#define __LINUX_IRQCHIP_ARM_GIC_V3_H

#include <asm/sysreg.h>

/*
 * Distributor registers. We assume we're running non-secure, with ARE
 * being set. Secure-only and non-ARE registers are not described.
@@ -267,16 +265,16 @@
/*
 * Hypervisor interface registers (SRE only)
 */
#define ICH_LR_VIRTUAL_ID_MASK ((1UL << 32) - 1)
#define ICH_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1)

#define ICH_LR_EOI (1UL << 41)
#define ICH_LR_GROUP (1UL << 60)
#define ICH_LR_HW (1UL << 61)
#define ICH_LR_STATE (3UL << 62)
#define ICH_LR_PENDING_BIT (1UL << 62)
#define ICH_LR_ACTIVE_BIT (1UL << 63)
#define ICH_LR_EOI (1ULL << 41)
#define ICH_LR_GROUP (1ULL << 60)
#define ICH_LR_HW (1ULL << 61)
#define ICH_LR_STATE (3ULL << 62)
#define ICH_LR_PENDING_BIT (1ULL << 62)
#define ICH_LR_ACTIVE_BIT (1ULL << 63)
#define ICH_LR_PHYS_ID_SHIFT 32
#define ICH_LR_PHYS_ID_MASK (0x3ffUL << ICH_LR_PHYS_ID_SHIFT)
#define ICH_LR_PHYS_ID_MASK (0x3ffULL << ICH_LR_PHYS_ID_SHIFT)

#define ICH_MISR_EOI (1 << 0)
#define ICH_MISR_U (1 << 1)
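The UL to ULL switch above keeps these masks correct when the header is built for 32bit ARM, where unsigned long is only 32 bits wide; a small standalone illustration (hypothetical userspace program, not part of the patch) of the difference:

/* Illustration only: 1ULL guarantees a 64-bit constant, so bit positions
 * such as ICH_LR_PENDING_BIT (bit 62) survive on a 32bit build. */
#include <stdio.h>

int main(void)
{
	unsigned long long pending = 1ULL << 62;

	printf("0x%016llx\n", pending);		/* 0x4000000000000000 */
	return 0;
}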
@@ -293,19 +291,8 @@
#define ICH_VMCR_PMR_SHIFT 24
#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT)

#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
#define ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
#define ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4)
#define ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5)
#define ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)

#define ICC_IAR1_EL1_SPURIOUS 0x3ff

#define ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5)

#define ICC_SRE_EL2_SRE (1 << 0)
#define ICC_SRE_EL2_ENABLE (1 << 3)

@@ -321,54 +308,10 @@
#define ICC_SGI1R_AFFINITY_3_SHIFT 48
#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT)

/*
 * System register definitions
 */
#define ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4)
#define ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0)
#define ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1)
#define ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2)
#define ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
#define ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5)
#define ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7)

#define __LR0_EL2(x) sys_reg(3, 4, 12, 12, x)
#define __LR8_EL2(x) sys_reg(3, 4, 12, 13, x)

#define ICH_LR0_EL2 __LR0_EL2(0)
#define ICH_LR1_EL2 __LR0_EL2(1)
#define ICH_LR2_EL2 __LR0_EL2(2)
#define ICH_LR3_EL2 __LR0_EL2(3)
#define ICH_LR4_EL2 __LR0_EL2(4)
#define ICH_LR5_EL2 __LR0_EL2(5)
#define ICH_LR6_EL2 __LR0_EL2(6)
#define ICH_LR7_EL2 __LR0_EL2(7)
#define ICH_LR8_EL2 __LR8_EL2(0)
#define ICH_LR9_EL2 __LR8_EL2(1)
#define ICH_LR10_EL2 __LR8_EL2(2)
#define ICH_LR11_EL2 __LR8_EL2(3)
#define ICH_LR12_EL2 __LR8_EL2(4)
#define ICH_LR13_EL2 __LR8_EL2(5)
#define ICH_LR14_EL2 __LR8_EL2(6)
#define ICH_LR15_EL2 __LR8_EL2(7)

#define __AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x)
#define ICH_AP0R0_EL2 __AP0Rx_EL2(0)
#define ICH_AP0R1_EL2 __AP0Rx_EL2(1)
#define ICH_AP0R2_EL2 __AP0Rx_EL2(2)
#define ICH_AP0R3_EL2 __AP0Rx_EL2(3)

#define __AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x)
#define ICH_AP1R0_EL2 __AP1Rx_EL2(0)
#define ICH_AP1R1_EL2 __AP1Rx_EL2(1)
#define ICH_AP1R2_EL2 __AP1Rx_EL2(2)
#define ICH_AP1R3_EL2 __AP1Rx_EL2(3)
#include <asm/arch_gicv3.h>

#ifndef __ASSEMBLY__

#include <linux/stringify.h>
#include <asm/msi.h>

/*
 * We need a value to serve as a irq-type for LPIs. Choose one that will
 * hopefully pique the interest of the reviewer.
@@ -386,23 +329,26 @@ struct rdists {
	u64 flags;
};

static inline void gic_write_eoir(u64 irq)
{
	asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
	isb();
}

static inline void gic_write_dir(u64 irq)
{
	asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" (irq));
	isb();
}

struct irq_domain;
int its_cpu_init(void);
int its_init(struct device_node *node, struct rdists *rdists,
	     struct irq_domain *domain);

static inline bool gic_enable_sre(void)
{
	u32 val;

	val = gic_read_sre();
	if (val & ICC_SRE_EL1_SRE)
		return true;

	val |= ICC_SRE_EL1_SRE;
	gic_write_sre(val);
	val = gic_read_sre();

	return !!(val & ICC_SRE_EL1_SRE);
}

#endif

#endif
@@ -2122,7 +2122,7 @@ static int init_vgic_model(struct kvm *kvm, int type)
	case KVM_DEV_TYPE_ARM_VGIC_V2:
		vgic_v2_init_emulation(kvm);
		break;
#ifdef CONFIG_ARM_GIC_V3
#ifdef CONFIG_KVM_ARM_VGIC_V3
	case KVM_DEV_TYPE_ARM_VGIC_V3:
		vgic_v3_init_emulation(kvm);
		break;
@@ -2284,7 +2284,7 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
		block_size = KVM_VGIC_V2_CPU_SIZE;
		alignment = SZ_4K;
		break;
#ifdef CONFIG_ARM_GIC_V3
#ifdef CONFIG_KVM_ARM_VGIC_V3
	case KVM_VGIC_V3_ADDR_TYPE_DIST:
		type_needed = KVM_DEV_TYPE_ARM_VGIC_V3;
		addr_ptr = &vgic->vgic_dist_base;