Merge branch 'for-linus-2' of git://git.linaro.org/people/rmk/linux-arm

Pull ARM updates (part two) from Russell King:
 - breakpoint and perf updates from Will Deacon.
 - hypervisor boot mode updates from Will.
 - support for Power State Coordination Interface via the Hypervisor
 - core ARM support for KVM

* 'for-linus-2' of git://git.linaro.org/people/rmk/linux-arm: (32 commits)
  KVM: ARM: Add maintainer entry for KVM/ARM
  KVM: ARM: Power State Coordination Interface implementation
  KVM: ARM: Handle I/O aborts
  KVM: ARM: Handle guest faults in KVM
  KVM: ARM: VFP userspace interface
  KVM: ARM: Demux CCSIDR in the userspace API
  KVM: ARM: User space API for getting/setting co-proc registers
  KVM: ARM: Emulation framework and CP15 emulation
  KVM: ARM: World-switch implementation
  KVM: ARM: Inject IRQs and FIQs from userspace
  KVM: ARM: Memory virtualization setup
  KVM: ARM: Hypervisor initialization
  KVM: ARM: Initial skeleton to compile KVM support
  ARM: Section based HYP idmap
  ARM: Add page table and page defines needed by KVM
  ARM: perf: simplify __hw_perf_event_init err handling
  ARM: perf: remove unnecessary checks for idx < 0
  ARM: perf: handle armpmu_register failing
  ARM: perf: don't pretend to support counting of L1I writes
  ARM: perf: remove redundant NULL check on cpu_pmu
  ...
This commit is contained in: commit 6db167dfc0

Documentation/devicetree/bindings/arm/psci.txt (new file, 55 lines)
@@ -0,0 +1,55 @@
* Power State Coordination Interface (PSCI)

Firmware implementing the PSCI functions described in ARM document number
ARM DEN 0022A ("Power State Coordination Interface System Software on ARM
processors") can be used by Linux to initiate various CPU-centric power
operations.

Issue A of the specification describes functions for CPU suspend, hotplug
and migration of secure software.

Functions are invoked by trapping to the privilege level of the PSCI
firmware (specified as part of the binding below) and passing arguments
in a manner similar to that specified by AAPCS:

	 r0		=> 32-bit Function ID / return value
	{r1 - r3}	=> Parameters

Note that the immediate field of the trapping instruction must be set
to #0.


Main node required properties:

 - compatible   : Must be "arm,psci"

 - method       : The method of calling the PSCI firmware. Permitted
                  values are:

                  "smc" : SMC #0, with the register assignments specified
                          in this binding.

                  "hvc" : HVC #0, with the register assignments specified
                          in this binding.

Main node optional properties:

 - cpu_suspend  : Function ID for CPU_SUSPEND operation

 - cpu_off      : Function ID for CPU_OFF operation

 - cpu_on       : Function ID for CPU_ON operation

 - migrate      : Function ID for MIGRATE operation


Example:

	psci {
		compatible	= "arm,psci";
		method		= "smc";
		cpu_suspend	= <0x95c10000>;
		cpu_off		= <0x95c10001>;
		cpu_on		= <0x95c10002>;
		migrate		= <0x95c10003>;
	};
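The calling convention above maps naturally onto a small C helper. The sketch below is an illustration only (the helper name is hypothetical; the kernel's real implementation lives in arch/arm/kernel/psci.c, added by this series): function ID in r0, up to three arguments in r1-r3, return value read back from r0, SMC immediate #0.

	/* Sketch of the "smc" method described above; assumes kernel
	 * context (u32 from <linux/types.h>). Illustration only. */
	static u32 invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1, u32 arg2)
	{
		register u32 r0 asm("r0") = function_id;
		register u32 r1 asm("r1") = arg0;
		register u32 r2 asm("r2") = arg1;
		register u32 r3 asm("r3") = arg2;

		asm volatile("smc	#0"	/* immediate must be #0, as noted above */
			     : "+r" (r0)
			     : "r" (r1), "r" (r2), "r" (r3)
			     : "memory");

		return r0;	/* 32-bit return value comes back in r0 */
	}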
Documentation/virtual/kvm/api.txt:

@@ -293,7 +293,7 @@ kvm_run' (see below).

4.11 KVM_GET_REGS

Capability: basic
-Architectures: all
+Architectures: all except ARM
Type: vcpu ioctl
Parameters: struct kvm_regs (out)
Returns: 0 on success, -1 on error
@@ -314,7 +314,7 @@ struct kvm_regs {

4.12 KVM_SET_REGS

Capability: basic
-Architectures: all
+Architectures: all except ARM
Type: vcpu ioctl
Parameters: struct kvm_regs (in)
Returns: 0 on success, -1 on error
@@ -600,7 +600,7 @@ struct kvm_fpu {

4.24 KVM_CREATE_IRQCHIP

Capability: KVM_CAP_IRQCHIP
-Architectures: x86, ia64
+Architectures: x86, ia64, ARM
Type: vm ioctl
Parameters: none
Returns: 0 on success, -1 on error
@@ -608,21 +608,39 @@ Returns: 0 on success, -1 on error

Creates an interrupt controller model in the kernel. On x86, creates a virtual
ioapic, a virtual PIC (two PICs, nested), and sets up future vcpus to have a
local APIC. IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23
-only go to the IOAPIC. On ia64, a IOSAPIC is created.
+only go to the IOAPIC. On ia64, a IOSAPIC is created. On ARM, a GIC is
+created.


4.25 KVM_IRQ_LINE

Capability: KVM_CAP_IRQCHIP
-Architectures: x86, ia64
+Architectures: x86, ia64, arm
Type: vm ioctl
Parameters: struct kvm_irq_level
Returns: 0 on success, -1 on error

Sets the level of a GSI input to the interrupt controller model in the kernel.
-Requires that an interrupt controller model has been previously created with
-KVM_CREATE_IRQCHIP. Note that edge-triggered interrupts require the level
-to be set to 1 and then back to 0.
+On some architectures it is required that an interrupt controller model has
+been previously created with KVM_CREATE_IRQCHIP. Note that edge-triggered
+interrupts require the level to be set to 1 and then back to 0.
+
+ARM can signal an interrupt either at the CPU level, or at the in-kernel irqchip
+(GIC), and for in-kernel irqchip can tell the GIC to use PPIs designated for
+specific cpus. The irq field is interpreted like this:
+
+  bits:  | 31 ... 24 | 23  ... 16 | 15    ...    0 |
+  field: | irq_type  | vcpu_index |     irq_id     |
+
+The irq_type field has the following values:
+- irq_type[0]: out-of-kernel GIC: irq_id 0 is IRQ, irq_id 1 is FIQ
+- irq_type[1]: in-kernel GIC: SPI, irq_id between 32 and 1019 (incl.)
+               (the vcpu_index field is ignored)
+- irq_type[2]: in-kernel GIC: PPI, irq_id between 16 and 31 (incl.)
+
+(The irq_id field thus corresponds nicely to the IRQ ID in the ARM GIC specs)
+
+In both cases, level is used to raise/lower the line.
struct kvm_irq_level {
	union {
@@ -1775,6 +1793,27 @@ registers, find a list below:
  PPC   | KVM_REG_PPC_VPA_DTL   | 128
  PPC   | KVM_REG_PPC_EPCR      | 32

+ARM registers are mapped using the lower 32 bits.  The upper 16 of that
+is the register group type, or coprocessor number:
+
+ARM core registers have the following id bit patterns:
+  0x4002 0000 0010 <index into the kvm_regs struct:16>
+
+ARM 32-bit CP15 registers have the following id bit patterns:
+  0x4002 0000 000F <zero:1> <crn:4> <crm:4> <opc1:4> <opc2:3>
+
+ARM 64-bit CP15 registers have the following id bit patterns:
+  0x4003 0000 000F <zero:1> <zero:4> <crm:4> <opc1:4> <zero:3>
+
+ARM CCSIDR registers are demultiplexed by CSSELR value:
+  0x4002 0000 0011 00 <csselr:8>
+
+ARM 32-bit VFP control registers have the following id bit patterns:
+  0x4002 0000 0012 1 <regno:12>
+
+ARM 64-bit FP registers have the following id bit patterns:
+  0x4002 0000 0012 0 <regno:12>
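For example, userspace can build the 64-bit ONE_REG id of a core register from these patterns. The sketch below (an illustrative helper, not part of the commit) combines the generic KVM_REG_ARM and KVM_REG_SIZE_U32 constants from <linux/kvm.h> with the KVM_REG_ARM_CORE group added by this commit:

	#include <stdint.h>
	#include <linux/kvm.h>

	/* id = arch | size | coprocessor/group | index into struct kvm_regs */
	static uint64_t arm_core_reg_id(uint16_t index)
	{
		return KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE | index;
	}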
4.69 KVM_GET_ONE_REG

Capability: KVM_CAP_ONE_REG

@@ -2127,6 +2166,50 @@ written, then `n_invalid' invalid entries, invalidating any previously
valid entries found.

+
+4.77 KVM_ARM_VCPU_INIT
+
+Capability: basic
+Architectures: arm
+Type: vcpu ioctl
+Parameters: struct kvm_vcpu_init (in)
+Returns: 0 on success; -1 on error
+Errors:
+  EINVAL:    the target is unknown, or the combination of features is invalid.
+  ENOENT:    a features bit specified is unknown.
+
+This tells KVM what type of CPU to present to the guest, and what
+optional features it should have.  This will cause a reset of the cpu
+registers to their initial values.  If this is not called, KVM_RUN will
+return ENOEXEC for that vcpu.
+
+Note that because some registers reflect machine topology, all vcpus
+should be created before this ioctl is invoked.
+
+Possible features:
+	- KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state.
+	  Depends on KVM_CAP_ARM_PSCI.
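A minimal userspace call sequence for this ioctl might look like the sketch below (vcpu_fd is assumed to come from a prior KVM_CREATE_VCPU; illustration only, not part of the commit):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int init_vcpu(int vcpu_fd)
	{
		struct kvm_vcpu_init init;

		memset(&init, 0, sizeof(init));
		init.target = KVM_ARM_TARGET_CORTEX_A15; /* the one target this commit defines */
		/* features[] left zeroed; set bit KVM_ARM_VCPU_POWER_OFF to start powered off */
		return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init); /* 0 on success */
	}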
+
+4.78 KVM_GET_REG_LIST
+
+Capability: basic
+Architectures: arm
+Type: vcpu ioctl
+Parameters: struct kvm_reg_list (in/out)
+Returns: 0 on success; -1 on error
+Errors:
+  E2BIG:     the reg index list is too big to fit in the array specified by
+             the user (the number required will be written into n).
+
+struct kvm_reg_list {
+	__u64 n; /* number of registers in reg[] */
+	__u64 reg[0];
+};
+
+This ioctl returns the guest registers that are supported for the
+KVM_GET_ONE_REG/KVM_SET_ONE_REG calls.
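The E2BIG behaviour above implies the usual two-call pattern: probe with n = 0 to learn the required count, then retry with a properly sized buffer. A hedged sketch, not part of the commit:

	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static struct kvm_reg_list *fetch_reg_list(int vcpu_fd)
	{
		struct kvm_reg_list probe = { .n = 0 };
		struct kvm_reg_list *list;

		ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe); /* fails with E2BIG, writes count into n */

		list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
		if (!list)
			return NULL;
		list->n = probe.n;
		if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
			free(list);
			return NULL;
		}
		return list;
	}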

5. The kvm_run structure
------------------------
MAINTAINERS:

@@ -4488,6 +4488,15 @@ F:	arch/s390/include/asm/kvm*
F:	arch/s390/kvm/
F:	drivers/s390/kvm/

+KERNEL VIRTUAL MACHINE (KVM) FOR ARM
+M:	Christoffer Dall <cdall@cs.columbia.edu>
+L:	kvmarm@lists.cs.columbia.edu
+W:	http://systems.cs.columbia.edu/projects/kvm-arm
+S:	Maintained
+F:	arch/arm/include/uapi/asm/kvm*
+F:	arch/arm/include/asm/kvm*
+F:	arch/arm/kvm/
+
KEXEC
M:	Eric Biederman <ebiederm@xmission.com>
W:	http://kernel.org/pub/linux/utils/kernel/kexec/
arch/arm/Kconfig:

@@ -1619,6 +1619,16 @@ config HOTPLUG_CPU
	  Say Y here to experiment with turning CPUs off and on.  CPUs
	  can be controlled through /sys/devices/system/cpu.

+config ARM_PSCI
+	bool "Support for the ARM Power State Coordination Interface (PSCI)"
+	depends on CPU_V7
+	help
+	  Say Y here if you want Linux to communicate with system firmware
+	  implementing the PSCI specification for CPU-centric power
+	  management operations described in ARM document number ARM DEN
+	  0022A ("Power State Coordination Interface System Software on
+	  ARM processors").
+
config LOCAL_TIMERS
	bool "Use local timer interrupts"
	depends on SMP

@@ -2324,3 +2334,5 @@ source "security/Kconfig"
source "crypto/Kconfig"

source "lib/Kconfig"
+
+source "arch/arm/kvm/Kconfig"
arch/arm/Makefile:

@@ -252,6 +252,7 @@ core-$(CONFIG_FPE_NWFPE)	+= arch/arm/nwfpe/
core-$(CONFIG_FPE_FASTFPE)	+= $(FASTFPE_OBJ)
core-$(CONFIG_VFP)		+= arch/arm/vfp/
core-$(CONFIG_XEN)		+= arch/arm/xen/
+core-$(CONFIG_KVM_ARM_HOST)	+= arch/arm/kvm/

# If we have a machine-specific directory, then include it in the build.
core-y				+= arch/arm/kernel/ arch/arm/mm/ arch/arm/common/
arch/arm/include/asm/assembler.h:

@@ -246,18 +246,14 @@
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * you cannot return to the original mode.
- *
- * Beware, it also clobers LR.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6
	mrs	\reg , cpsr
-	mov	lr , \reg
-	and	lr , lr , #MODE_MASK
-	cmp	lr , #HYP_MODE
-	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT
+	eor	\reg, \reg, #HYP_MODE
+	tst	\reg, #MODE_MASK
	bic	\reg , \reg , #MODE_MASK
-	orr	\reg , \reg , #SVC_MODE
+	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
	bne	1f
	orr	\reg, \reg, #PSR_A_BIT
arch/arm/include/asm/cputype.h:

@@ -64,6 +64,24 @@ extern unsigned int processor_id;
#define read_cpuid_ext(reg) 0
#endif

+#define ARM_CPU_IMP_ARM			0x41
+#define ARM_CPU_IMP_INTEL		0x69
+
+#define ARM_CPU_PART_ARM1136		0xB360
+#define ARM_CPU_PART_ARM1156		0xB560
+#define ARM_CPU_PART_ARM1176		0xB760
+#define ARM_CPU_PART_ARM11MPCORE	0xB020
+#define ARM_CPU_PART_CORTEX_A8		0xC080
+#define ARM_CPU_PART_CORTEX_A9		0xC090
+#define ARM_CPU_PART_CORTEX_A5		0xC050
+#define ARM_CPU_PART_CORTEX_A15		0xC0F0
+#define ARM_CPU_PART_CORTEX_A7		0xC070
+
+#define ARM_CPU_XSCALE_ARCH_MASK	0xe000
+#define ARM_CPU_XSCALE_ARCH_V1		0x2000
+#define ARM_CPU_XSCALE_ARCH_V2		0x4000
+#define ARM_CPU_XSCALE_ARCH_V3		0x6000
+
/*
 * The CPU ID never changes at run time, so we might as well tell the
 * compiler that it's constant.  Use this function to read the CPU ID

@@ -74,6 +92,21 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)
	return read_cpuid(CPUID_ID);
}

+static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
+{
+	return (read_cpuid_id() & 0xFF000000) >> 24;
+}
+
+static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
+{
+	return read_cpuid_id() & 0xFFF0;
+}
+
+static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void)
+{
+	return read_cpuid_part_number() & ARM_CPU_XSCALE_ARCH_MASK;
+}
+
static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
{
	return read_cpuid(CPUID_CACHETYPE);
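The new helpers decompose MIDR the way the rest of the series consumes it; for instance, code elsewhere in the series can test for a Cortex-A15 roughly like this sketch (illustration only, not a hunk from this commit):

	#include <linux/types.h>
	#include <asm/cputype.h>

	static bool cpu_is_cortex_a15(void)
	{
		return read_cpuid_implementor() == ARM_CPU_IMP_ARM &&
		       read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A15;
	}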
arch/arm/include/asm/cti.h:

@@ -2,6 +2,7 @@
#define __ASMARM_CTI_H

#include	<asm/io.h>
+#include	<asm/hardware/coresight.h>

/* The registers' definition is from section 3.2 of
 * Embedded Cross Trigger Revision: r0p0

@@ -35,11 +36,6 @@
#define LOCKACCESS		0xFB0
#define LOCKSTATUS		0xFB4

-/* write this value to LOCKACCESS will unlock the module, and
- * other value will lock the module
- */
-#define LOCKCODE		0xC5ACCE55
-
/**
 * struct cti - cross trigger interface struct
 * @base: mapped virtual address for the cti base

@@ -146,7 +142,7 @@ static inline void cti_irq_ack(struct cti *cti)
 */
static inline void cti_unlock(struct cti *cti)
{
-	__raw_writel(LOCKCODE, cti->base + LOCKACCESS);
+	__raw_writel(CS_LAR_KEY, cti->base + LOCKACCESS);
}

/**

@@ -158,6 +154,6 @@ static inline void cti_unlock(struct cti *cti)
 */
static inline void cti_lock(struct cti *cti)
{
-	__raw_writel(~LOCKCODE, cti->base + LOCKACCESS);
+	__raw_writel(~CS_LAR_KEY, cti->base + LOCKACCESS);
}
#endif
arch/arm/include/asm/hardware/coresight.h:

@@ -36,7 +36,7 @@
/* CoreSight Component Registers */
#define CSCR_CLASS	0xff4

-#define UNLOCK_MAGIC	0xc5acce55
+#define CS_LAR_KEY	0xc5acce55

/* ETM control register, "ETM Architecture", 3.3.1 */
#define ETMR_CTRL	0

@@ -147,11 +147,11 @@

#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
#define etm_unlock(t) \
-	do { etm_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+	do { etm_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)

#define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
#define etb_unlock(t) \
-	do { etb_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0)
+	do { etb_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)

#endif /* __ASM_HARDWARE_CORESIGHT_H */
|
@ -85,6 +85,9 @@ static inline void decode_ctrl_reg(u32 reg,
|
||||
#define ARM_DSCR_HDBGEN (1 << 14)
|
||||
#define ARM_DSCR_MDBGEN (1 << 15)
|
||||
|
||||
/* OSLSR os lock model bits */
|
||||
#define ARM_OSLSR_OSLM0 (1 << 0)
|
||||
|
||||
/* opcode2 numbers for the co-processor instructions. */
|
||||
#define ARM_OP2_BVR 4
|
||||
#define ARM_OP2_BCR 5
|
||||
|
arch/arm/include/asm/idmap.h:

@@ -8,6 +8,7 @@
#define __idmap __section(.idmap.text) noinline notrace

extern pgd_t *idmap_pgd;
+extern pgd_t *hyp_pgd;

void setup_mm_for_reboot(void);
arch/arm/include/asm/kvm_arm.h (new file, 214 lines)

@@ -0,0 +1,214 @@
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __ARM_KVM_ARM_H__
#define __ARM_KVM_ARM_H__

#include <linux/types.h>

/* Hyp Configuration Register (HCR) bits */
#define HCR_TGE		(1 << 27)
#define HCR_TVM		(1 << 26)
#define HCR_TTLB	(1 << 25)
#define HCR_TPU		(1 << 24)
#define HCR_TPC		(1 << 23)
#define HCR_TSW		(1 << 22)
#define HCR_TAC		(1 << 21)
#define HCR_TIDCP	(1 << 20)
#define HCR_TSC		(1 << 19)
#define HCR_TID3	(1 << 18)
#define HCR_TID2	(1 << 17)
#define HCR_TID1	(1 << 16)
#define HCR_TID0	(1 << 15)
#define HCR_TWE		(1 << 14)
#define HCR_TWI		(1 << 13)
#define HCR_DC		(1 << 12)
#define HCR_BSU		(3 << 10)
#define HCR_BSU_IS	(1 << 10)
#define HCR_FB		(1 << 9)
#define HCR_VA		(1 << 8)
#define HCR_VI		(1 << 7)
#define HCR_VF		(1 << 6)
#define HCR_AMO		(1 << 5)
#define HCR_IMO		(1 << 4)
#define HCR_FMO		(1 << 3)
#define HCR_PTW		(1 << 2)
#define HCR_SWIO	(1 << 1)
#define HCR_VM		1

/*
 * The bits we set in HCR:
 * TAC:		Trap ACTLR
 * TSC:		Trap SMC
 * TSW:		Trap cache operations by set/way
 * TWI:		Trap WFI
 * TIDCP:	Trap L2CTLR/L2ECTLR
 * BSU_IS:	Upgrade barriers to the inner shareable domain
 * FB:		Force broadcast of all maintainance operations
 * AMO:		Override CPSR.A and enable signaling with VA
 * IMO:		Override CPSR.I and enable signaling with VI
 * FMO:		Override CPSR.F and enable signaling with VF
 * SWIO:	Turn set/way invalidates into set/way clean+invalidate
 */
#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
			HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
			HCR_SWIO | HCR_TIDCP)
#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)

/* System Control Register (SCTLR) bits */
#define SCTLR_TE	(1 << 30)
#define SCTLR_EE	(1 << 25)
#define SCTLR_V		(1 << 13)

/* Hyp System Control Register (HSCTLR) bits */
#define HSCTLR_TE	(1 << 30)
#define HSCTLR_EE	(1 << 25)
#define HSCTLR_FI	(1 << 21)
#define HSCTLR_WXN	(1 << 19)
#define HSCTLR_I	(1 << 12)
#define HSCTLR_C	(1 << 2)
#define HSCTLR_A	(1 << 1)
#define HSCTLR_M	1
#define HSCTLR_MASK	(HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \
			 HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE)

/* TTBCR and HTCR Registers bits */
#define TTBCR_EAE	(1 << 31)
#define TTBCR_IMP	(1 << 30)
#define TTBCR_SH1	(3 << 28)
#define TTBCR_ORGN1	(3 << 26)
#define TTBCR_IRGN1	(3 << 24)
#define TTBCR_EPD1	(1 << 23)
#define TTBCR_A1	(1 << 22)
#define TTBCR_T1SZ	(3 << 16)
#define TTBCR_SH0	(3 << 12)
#define TTBCR_ORGN0	(3 << 10)
#define TTBCR_IRGN0	(3 << 8)
#define TTBCR_EPD0	(1 << 7)
#define TTBCR_T0SZ	3
#define HTCR_MASK	(TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)

/* Hyp System Trap Register */
#define HSTR_T(x)	(1 << x)
#define HSTR_TTEE	(1 << 16)
#define HSTR_TJDBX	(1 << 17)

/* Hyp Coprocessor Trap Register */
#define HCPTR_TCP(x)	(1 << x)
#define HCPTR_TCP_MASK	(0x3fff)
#define HCPTR_TASE	(1 << 15)
#define HCPTR_TTA	(1 << 20)
#define HCPTR_TCPAC	(1 << 31)

/* Hyp Debug Configuration Register bits */
#define HDCR_TDRA	(1 << 11)
#define HDCR_TDOSA	(1 << 10)
#define HDCR_TDA	(1 << 9)
#define HDCR_TDE	(1 << 8)
#define HDCR_HPME	(1 << 7)
#define HDCR_TPM	(1 << 6)
#define HDCR_TPMCR	(1 << 5)
#define HDCR_HPMN_MASK	(0x1F)

/*
 * The architecture supports 40-bit IPA as input to the 2nd stage translations
 * and PTRS_PER_S2_PGD becomes 1024, because each entry covers 1GB of address
 * space.
 */
#define KVM_PHYS_SHIFT	(40)
#define KVM_PHYS_SIZE	(1ULL << KVM_PHYS_SHIFT)
#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1ULL)
#define PTRS_PER_S2_PGD	(1ULL << (KVM_PHYS_SHIFT - 30))
#define S2_PGD_ORDER	get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
#define S2_PGD_SIZE	(1 << S2_PGD_ORDER)

/* Virtualization Translation Control Register (VTCR) bits */
#define VTCR_SH0	(3 << 12)
#define VTCR_ORGN0	(3 << 10)
#define VTCR_IRGN0	(3 << 8)
#define VTCR_SL0	(3 << 6)
#define VTCR_S		(1 << 4)
#define VTCR_T0SZ	(0xf)
#define VTCR_MASK	(VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | \
			 VTCR_S | VTCR_T0SZ)
#define VTCR_HTCR_SH	(VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0)
#define VTCR_SL_L2	(0 << 6)	/* Starting-level: 2 */
#define VTCR_SL_L1	(1 << 6)	/* Starting-level: 1 */
#define KVM_VTCR_SL0	VTCR_SL_L1
/* stage-2 input address range defined as 2^(32-T0SZ) */
#define KVM_T0SZ	(32 - KVM_PHYS_SHIFT)
#define KVM_VTCR_T0SZ	(KVM_T0SZ & VTCR_T0SZ)
#define KVM_VTCR_S	((KVM_VTCR_T0SZ << 1) & VTCR_S)

/* Virtualization Translation Table Base Register (VTTBR) bits */
#if KVM_VTCR_SL0 == VTCR_SL_L2	/* see ARM DDI 0406C: B4-1720 */
#define VTTBR_X		(14 - KVM_T0SZ)
#else
#define VTTBR_X		(5 - KVM_T0SZ)
#endif
#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
#define VTTBR_BADDR_MASK  (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
#define VTTBR_VMID_SHIFT  (48LLU)
#define VTTBR_VMID_MASK	  (0xffLLU << VTTBR_VMID_SHIFT)

/* Hyp Syndrome Register (HSR) bits */
#define HSR_EC_SHIFT	(26)
#define HSR_EC		(0x3fU << HSR_EC_SHIFT)
#define HSR_IL		(1U << 25)
#define HSR_ISS		(HSR_IL - 1)
#define HSR_ISV_SHIFT	(24)
#define HSR_ISV		(1U << HSR_ISV_SHIFT)
#define HSR_SRT_SHIFT	(16)
#define HSR_SRT_MASK	(0xf << HSR_SRT_SHIFT)
#define HSR_FSC		(0x3f)
#define HSR_FSC_TYPE	(0x3c)
#define HSR_SSE		(1 << 21)
#define HSR_WNR		(1 << 6)
#define HSR_CV_SHIFT	(24)
#define HSR_CV		(1U << HSR_CV_SHIFT)
#define HSR_COND_SHIFT	(20)
#define HSR_COND	(0xfU << HSR_COND_SHIFT)

#define FSC_FAULT	(0x04)
#define FSC_PERM	(0x0c)

/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
#define HPFAR_MASK	(~0xf)

#define HSR_EC_UNKNOWN	(0x00)
#define HSR_EC_WFI	(0x01)
#define HSR_EC_CP15_32	(0x03)
#define HSR_EC_CP15_64	(0x04)
#define HSR_EC_CP14_MR	(0x05)
#define HSR_EC_CP14_LS	(0x06)
#define HSR_EC_CP_0_13	(0x07)
#define HSR_EC_CP10_ID	(0x08)
#define HSR_EC_JAZELLE	(0x09)
#define HSR_EC_BXJ	(0x0A)
#define HSR_EC_CP14_64	(0x0C)
#define HSR_EC_SVC_HYP	(0x11)
#define HSR_EC_HVC	(0x12)
#define HSR_EC_SMC	(0x13)
#define HSR_EC_IABT	(0x20)
#define HSR_EC_IABT_HYP	(0x21)
#define HSR_EC_DABT	(0x24)
#define HSR_EC_DABT_HYP	(0x25)

#define HSR_HVC_IMM_MASK	((1UL << 16) - 1)

#endif /* __ARM_KVM_ARM_H__ */
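The HSR masks above are meant to be combined when decoding a trap. A hedged sketch of classifying a data abort as a write (illustration only, not a hunk from this commit):

	/* Extract the exception class, then check the write/not-read bit.
	 * WNR is only meaningful when the syndrome is valid (HSR_ISV set). */
	static inline bool hsr_is_write_dabt(u32 hsr)
	{
		u32 ec = (hsr & HSR_EC) >> HSR_EC_SHIFT;

		if (ec != HSR_EC_DABT)
			return false;
		return (hsr & HSR_ISV) && (hsr & HSR_WNR);
	}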
arch/arm/include/asm/kvm_asm.h (new file, 82 lines)

@@ -0,0 +1,82 @@
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

/* 0 is reserved as an invalid value. */
#define c0_MPIDR	1	/* MultiProcessor ID Register */
#define c0_CSSELR	2	/* Cache Size Selection Register */
#define c1_SCTLR	3	/* System Control Register */
#define c1_ACTLR	4	/* Auxilliary Control Register */
#define c1_CPACR	5	/* Coprocessor Access Control */
#define c2_TTBR0	6	/* Translation Table Base Register 0 */
#define c2_TTBR0_high	7	/* TTBR0 top 32 bits */
#define c2_TTBR1	8	/* Translation Table Base Register 1 */
#define c2_TTBR1_high	9	/* TTBR1 top 32 bits */
#define c2_TTBCR	10	/* Translation Table Base Control R. */
#define c3_DACR		11	/* Domain Access Control Register */
#define c5_DFSR		12	/* Data Fault Status Register */
#define c5_IFSR		13	/* Instruction Fault Status Register */
#define c5_ADFSR	14	/* Auxilary Data Fault Status R */
#define c5_AIFSR	15	/* Auxilary Instrunction Fault Status R */
#define c6_DFAR		16	/* Data Fault Address Register */
#define c6_IFAR		17	/* Instruction Fault Address Register */
#define c9_L2CTLR	18	/* Cortex A15 L2 Control Register */
#define c10_PRRR	19	/* Primary Region Remap Register */
#define c10_NMRR	20	/* Normal Memory Remap Register */
#define c12_VBAR	21	/* Vector Base Address Register */
#define c13_CID		22	/* Context ID Register */
#define c13_TID_URW	23	/* Thread ID, User R/W */
#define c13_TID_URO	24	/* Thread ID, User R/O */
#define c13_TID_PRIV	25	/* Thread ID, Privileged */
#define NR_CP15_REGS	26	/* Number of regs (incl. invalid) */

#define ARM_EXCEPTION_RESET	  0
#define ARM_EXCEPTION_UNDEFINED   1
#define ARM_EXCEPTION_SOFTWARE    2
#define ARM_EXCEPTION_PREF_ABORT  3
#define ARM_EXCEPTION_DATA_ABORT  4
#define ARM_EXCEPTION_IRQ	  5
#define ARM_EXCEPTION_FIQ	  6
#define ARM_EXCEPTION_HVC	  7

#ifndef __ASSEMBLY__
struct kvm;
struct kvm_vcpu;

extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];

extern char __kvm_hyp_exit[];
extern char __kvm_hyp_exit_end[];

extern char __kvm_hyp_vector[];

extern char __kvm_hyp_code_start[];
extern char __kvm_hyp_code_end[];

extern void __kvm_tlb_flush_vmid(struct kvm *kvm);

extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
#endif

#endif /* __ARM_KVM_ASM_H__ */
arch/arm/include/asm/kvm_coproc.h (new file, 47 lines)

@@ -0,0 +1,47 @@
/*
 * Copyright (C) 2012 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __ARM_KVM_COPROC_H__
#define __ARM_KVM_COPROC_H__
#include <linux/kvm_host.h>

void kvm_reset_coprocs(struct kvm_vcpu *vcpu);

struct kvm_coproc_target_table {
	unsigned target;
	const struct coproc_reg *table;
	size_t num;
};
void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);

int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);

unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices);
void kvm_coproc_table_init(void);

struct kvm_one_reg;
int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
#endif /* __ARM_KVM_COPROC_H__ */
arch/arm/include/asm/kvm_emulate.h (new file, 72 lines)

@@ -0,0 +1,72 @@
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __ARM_KVM_EMULATE_H__
#define __ARM_KVM_EMULATE_H__

#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>

u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
u32 *vcpu_spsr(struct kvm_vcpu *vcpu);

int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run);
void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);

static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
{
	return 1;
}

static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu)
{
	return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc;
}

static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu)
{
	return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_T_BIT;
}

static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
	return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
}

static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
	return cpsr_mode > USR_MODE;
}

static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)
{
	return reg == 15;
}

#endif /* __ARM_KVM_EMULATE_H__ */
arch/arm/include/asm/kvm_host.h (new file, 161 lines)

@@ -0,0 +1,161 @@
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __ARM_KVM_HOST_H__
#define __ARM_KVM_HOST_H__

#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/fpstate.h>

#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
#define KVM_MEMORY_SLOTS 32
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
#define KVM_HAVE_ONE_REG

#define KVM_VCPU_MAX_FEATURES 1

/* We don't currently support large pages. */
#define KVM_HPAGE_GFN_SHIFT(x)	0
#define KVM_NR_PAGE_SIZES	1
#define KVM_PAGES_PER_HPAGE(x)	(1UL<<31)

struct kvm_vcpu;
u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
int kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_reset_coprocs(struct kvm_vcpu *vcpu);

struct kvm_arch {
	/* VTTBR value associated with below pgd and vmid */
	u64    vttbr;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */

	/* The VMID generation used for the virt. memory system */
	u64    vmid_gen;
	u32    vmid;

	/* Stage-2 page table */
	pgd_t *pgd;
};

#define KVM_NR_MEM_OBJS     40

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
	int nobjs;
	void *objects[KVM_NR_MEM_OBJS];
};

struct kvm_vcpu_arch {
	struct kvm_regs regs;

	int target; /* Processor target */
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* System control coprocessor (cp15) */
	u32 cp15[NR_CP15_REGS];

	/* The CPU type we expose to the VM */
	u32 midr;

	/* Exception Information */
	u32 hsr;		/* Hyp Syndrome Register */
	u32 hxfar;		/* Hyp Data/Inst Fault Address Register */
	u32 hpfar;		/* Hyp IPA Fault Address Register */

	/* Floating point registers (VFP and Advanced SIMD/NEON) */
	struct vfp_hard_struct vfp_guest;
	struct vfp_hard_struct *vfp_host;

	/*
	 * Anything that is not used directly from assembly code goes
	 * here.
	 */
	/* dcache set/way operation pending */
	int last_pcpu;
	cpumask_t require_dcache_flush;

	/* Don't run the guest on this vcpu */
	bool pause;

	/* IO related fields */
	struct kvm_decode mmio_decode;

	/* Interrupt related fields */
	u32 irq_lines;		/* IRQ and FIQ levels */

	/* Hyp exception information */
	u32 hyp_pc;		/* PC when exception was taken from Hyp mode */

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Detect first run of a vcpu */
	bool has_run_once;
};

struct kvm_vm_stat {
	u32 remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u32 halt_wakeup;
};

struct kvm_vcpu_init;
int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
			const struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
struct kvm_one_reg;
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
u64 kvm_call_hyp(void *hypfn, ...);
void force_vm_exit(const cpumask_t *mask);

#define KVM_ARCH_WANT_MMU_NOTIFIER
struct kvm;
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end);
void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);

unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);

/* We do not have shadow page tables, hence the empty hooks */
static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	return 0;
}

static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return 0;
}
#endif /* __ARM_KVM_HOST_H__ */
arch/arm/include/asm/kvm_mmio.h (new file, 56 lines)

@@ -0,0 +1,56 @@
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __ARM_KVM_MMIO_H__
#define __ARM_KVM_MMIO_H__

#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>

struct kvm_decode {
	unsigned long rt;
	bool sign_extend;
};

/*
 * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
 * which is an anonymous type. Use our own type instead.
 */
struct kvm_exit_mmio {
	phys_addr_t	phys_addr;
	u8		data[8];
	u32		len;
	bool		is_write;
};

static inline void kvm_prepare_mmio(struct kvm_run *run,
				    struct kvm_exit_mmio *mmio)
{
	run->mmio.phys_addr	= mmio->phys_addr;
	run->mmio.len		= mmio->len;
	run->mmio.is_write	= mmio->is_write;
	memcpy(run->mmio.data, mmio->data, mmio->len);
	run->exit_reason	= KVM_EXIT_MMIO;
}

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
		 phys_addr_t fault_ipa);

#endif	/* __ARM_KVM_MMIO_H__ */
arch/arm/include/asm/kvm_mmu.h (new file, 50 lines)

@@ -0,0 +1,50 @@
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __ARM_KVM_MMU_H__
#define __ARM_KVM_MMU_H__

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_hyp_pmds(void);

int kvm_alloc_stage2_pgd(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm *kvm);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

static inline bool kvm_is_write_fault(unsigned long hsr)
{
	unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
	if (hsr_ec == HSR_EC_IABT)
		return false;
	else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR))
		return false;
	else
		return true;
}

#endif /* __ARM_KVM_MMU_H__ */
arch/arm/include/asm/kvm_psci.h (new file, 23 lines)

@@ -0,0 +1,23 @@
/*
 * Copyright (C) 2012 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM_KVM_PSCI_H__
#define __ARM_KVM_PSCI_H__

bool kvm_psci_call(struct kvm_vcpu *vcpu);

#endif /* __ARM_KVM_PSCI_H__ */
arch/arm/include/asm/opcodes-sec.h (new file, 24 lines)

@@ -0,0 +1,24 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2012 ARM Limited
 */

#ifndef __ASM_ARM_OPCODES_SEC_H
#define __ASM_ARM_OPCODES_SEC_H

#include <asm/opcodes.h>

#define __SMC(imm4) __inst_arm_thumb32(					\
	0xE1600070 | (((imm4) & 0xF) << 0),				\
	0xF7F08000 | (((imm4) & 0xF) << 16)				\
)

#endif /* __ASM_ARM_OPCODES_SEC_H */
arch/arm/include/asm/opcodes.h:

@@ -10,6 +10,7 @@
#define __ASM_ARM_OPCODES_H

#ifndef __ASSEMBLY__
+#include <linux/linkage.h>
extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);
#endif
arch/arm/include/asm/pgtable-3level-hwdef.h:

@@ -32,6 +32,9 @@
#define PMD_TYPE_SECT		(_AT(pmdval_t, 1) << 0)
#define PMD_BIT4		(_AT(pmdval_t, 0))
#define PMD_DOMAIN(x)		(_AT(pmdval_t, 0))
+#define PMD_APTABLE_SHIFT	(61)
+#define PMD_APTABLE		(_AT(pgdval_t, 3) << PGD_APTABLE_SHIFT)
+#define PMD_PXNTABLE		(_AT(pgdval_t, 1) << 59)

/*
 * - section

@@ -41,9 +44,11 @@
#define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)
#define PMD_SECT_AF		(_AT(pmdval_t, 1) << 10)
#define PMD_SECT_nG		(_AT(pmdval_t, 1) << 11)
+#define PMD_SECT_PXN		(_AT(pmdval_t, 1) << 53)
#define PMD_SECT_XN		(_AT(pmdval_t, 1) << 54)
#define PMD_SECT_AP_WRITE	(_AT(pmdval_t, 0))
#define PMD_SECT_AP_READ	(_AT(pmdval_t, 0))
+#define PMD_SECT_AP1		(_AT(pmdval_t, 1) << 6)
#define PMD_SECT_TEX(x)		(_AT(pmdval_t, 0))

/*
arch/arm/include/asm/pgtable-3level.h:

@@ -104,11 +104,29 @@
 */
#define L_PGD_SWAPPER		(_AT(pgdval_t, 1) << 55)	/* swapper_pg_dir entry */

+/*
+ * 2nd stage PTE definitions for LPAE.
+ */
+#define L_PTE_S2_MT_UNCACHED	 (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */
+#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
+#define L_PTE_S2_MT_WRITEBACK	 (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
+#define L_PTE_S2_RDONLY		 (_AT(pteval_t, 1) << 6)   /* HAP[1] */
+#define L_PTE_S2_RDWR		 (_AT(pteval_t, 2) << 6)   /* HAP[2:1] */
+
+/*
+ * Hyp-mode PL2 PTE definitions for LPAE.
+ */
+#define L_PTE_HYP		L_PTE_USER
+
#ifndef __ASSEMBLY__

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & 2))
#define pud_present(pud)	(pud_val(pud))
+#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
+						 PMD_TYPE_TABLE)
+#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
+						 PMD_TYPE_SECT)

#define pud_clear(pudp)			\
	do {				\
arch/arm/include/asm/pgtable.h:

@@ -70,6 +70,9 @@ extern void __pgd_error(const char *file, int line, pgd_t);

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;
+extern pgprot_t		pgprot_hyp_device;
+extern pgprot_t		pgprot_s2;
+extern pgprot_t		pgprot_s2_device;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

@@ -82,6 +85,10 @@ extern pgprot_t pgprot_kernel;
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC	pgprot_kernel
+#define PAGE_HYP		_MOD_PROT(pgprot_kernel, L_PTE_HYP)
+#define PAGE_HYP_DEVICE		_MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
+#define PAGE_S2			_MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
+#define PAGE_S2_DEVICE		_MOD_PROT(pgprot_s2_device, L_PTE_USER | L_PTE_S2_RDONLY)

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
arch/arm/include/asm/psci.h (new file, 36 lines)

@@ -0,0 +1,36 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2012 ARM Limited
 */

#ifndef __ASM_ARM_PSCI_H
#define __ASM_ARM_PSCI_H

#define PSCI_POWER_STATE_TYPE_STANDBY		0
#define PSCI_POWER_STATE_TYPE_POWER_DOWN	1

struct psci_power_state {
	u16	id;
	u8	type;
	u8	affinity_level;
};

struct psci_operations {
	int (*cpu_suspend)(struct psci_power_state state,
			   unsigned long entry_point);
	int (*cpu_off)(struct psci_power_state state);
	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
	int (*migrate)(unsigned long cpuid);
};

extern struct psci_operations psci_ops;

#endif /* __ASM_ARM_PSCI_H */
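Platform SMP code consumes this table through the psci_ops hooks; a hedged sketch of bringing up a secondary CPU (the helper name is hypothetical, not part of the commit):

	#include <linux/errno.h>
	#include <asm/psci.h>

	static int boot_secondary_via_psci(unsigned int cpu, unsigned long entry_point)
	{
		if (psci_ops.cpu_on)	/* hook is NULL if firmware lacks CPU_ON */
			return psci_ops.cpu_on(cpu, entry_point);
		return -ENODEV;
	}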
arch/arm/include/asm/virt.h:

@@ -24,9 +24,9 @@
/*
 * Flag indicating that the kernel was not entered in the same mode on every
 * CPU.  The zImage loader stashes this value in an SPSR, so we need an
- * architecturally defined flag bit here (the N flag, as it happens)
+ * architecturally defined flag bit here.
 */
-#define BOOT_CPU_MODE_MISMATCH	(1<<31)
+#define BOOT_CPU_MODE_MISMATCH	PSR_N_BIT

#ifndef __ASSEMBLY__
arch/arm/include/uapi/asm/kvm.h (new file, 164 lines)

@@ -0,0 +1,164 @@
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __ARM_KVM_H__
#define __ARM_KVM_H__

#include <linux/types.h>
#include <asm/ptrace.h>

#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_IRQ_LINE

#define KVM_REG_SIZE(id)						\
	(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

/* Valid for svc_regs, abt_regs, und_regs, irq_regs in struct kvm_regs */
#define KVM_ARM_SVC_sp		svc_regs[0]
#define KVM_ARM_SVC_lr		svc_regs[1]
#define KVM_ARM_SVC_spsr	svc_regs[2]
#define KVM_ARM_ABT_sp		abt_regs[0]
#define KVM_ARM_ABT_lr		abt_regs[1]
#define KVM_ARM_ABT_spsr	abt_regs[2]
#define KVM_ARM_UND_sp		und_regs[0]
#define KVM_ARM_UND_lr		und_regs[1]
#define KVM_ARM_UND_spsr	und_regs[2]
#define KVM_ARM_IRQ_sp		irq_regs[0]
#define KVM_ARM_IRQ_lr		irq_regs[1]
#define KVM_ARM_IRQ_spsr	irq_regs[2]

/* Valid only for fiq_regs in struct kvm_regs */
#define KVM_ARM_FIQ_r8		fiq_regs[0]
#define KVM_ARM_FIQ_r9		fiq_regs[1]
#define KVM_ARM_FIQ_r10		fiq_regs[2]
#define KVM_ARM_FIQ_fp		fiq_regs[3]
#define KVM_ARM_FIQ_ip		fiq_regs[4]
#define KVM_ARM_FIQ_sp		fiq_regs[5]
#define KVM_ARM_FIQ_lr		fiq_regs[6]
#define KVM_ARM_FIQ_spsr	fiq_regs[7]

struct kvm_regs {
	struct pt_regs usr_regs;/* R0_usr - R14_usr, PC, CPSR */
	__u32 svc_regs[3];	/* SP_svc, LR_svc, SPSR_svc */
	__u32 abt_regs[3];	/* SP_abt, LR_abt, SPSR_abt */
	__u32 und_regs[3];	/* SP_und, LR_und, SPSR_und */
	__u32 irq_regs[3];	/* SP_irq, LR_irq, SPSR_irq */
	__u32 fiq_regs[8];	/* R8_fiq - R14_fiq, SPSR_fiq */
};

/* Supported Processor Types */
#define KVM_ARM_TARGET_CORTEX_A15	0
#define KVM_ARM_NUM_TARGETS		1

#define KVM_ARM_VCPU_POWER_OFF		0 /* CPU is started in OFF state */

struct kvm_vcpu_init {
	__u32 target;
	__u32 features[7];
};

struct kvm_sregs {
};

struct kvm_fpu {
};

struct kvm_guest_debug_arch {
};

struct kvm_debug_exit_arch {
};

struct kvm_sync_regs {
};

struct kvm_arch_memory_slot {
};

/* If you need to interpret the index values, here is the key: */
#define KVM_REG_ARM_COPROC_MASK		0x000000000FFF0000
#define KVM_REG_ARM_COPROC_SHIFT	16
#define KVM_REG_ARM_32_OPC2_MASK	0x0000000000000007
#define KVM_REG_ARM_32_OPC2_SHIFT	0
#define KVM_REG_ARM_OPC1_MASK		0x0000000000000078
#define KVM_REG_ARM_OPC1_SHIFT		3
#define KVM_REG_ARM_CRM_MASK		0x0000000000000780
#define KVM_REG_ARM_CRM_SHIFT		7
#define KVM_REG_ARM_32_CRN_MASK		0x0000000000007800
#define KVM_REG_ARM_32_CRN_SHIFT	11

/* Normal registers are mapped as coprocessor 16. */
#define KVM_REG_ARM_CORE		(0x0010 << KVM_REG_ARM_COPROC_SHIFT)
#define KVM_REG_ARM_CORE_REG(name)	(offsetof(struct kvm_regs, name) / 4)

/* Some registers need more space to represent values. */
#define KVM_REG_ARM_DEMUX		(0x0011 << KVM_REG_ARM_COPROC_SHIFT)
#define KVM_REG_ARM_DEMUX_ID_MASK	0x000000000000FF00
#define KVM_REG_ARM_DEMUX_ID_SHIFT	8
#define KVM_REG_ARM_DEMUX_ID_CCSIDR	(0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT)
#define KVM_REG_ARM_DEMUX_VAL_MASK	0x00000000000000FF
#define KVM_REG_ARM_DEMUX_VAL_SHIFT	0

/* VFP registers: we could overload CP10 like ARM does, but that's ugly. */
#define KVM_REG_ARM_VFP			(0x0012 << KVM_REG_ARM_COPROC_SHIFT)
#define KVM_REG_ARM_VFP_MASK		0x000000000000FFFF
#define KVM_REG_ARM_VFP_BASE_REG	0x0
#define KVM_REG_ARM_VFP_FPSID		0x1000
#define KVM_REG_ARM_VFP_FPSCR		0x1001
#define KVM_REG_ARM_VFP_MVFR1		0x1006
#define KVM_REG_ARM_VFP_MVFR0		0x1007
#define KVM_REG_ARM_VFP_FPEXC		0x1008
#define KVM_REG_ARM_VFP_FPINST		0x1009
#define KVM_REG_ARM_VFP_FPINST2		0x100A


/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT		24
#define KVM_ARM_IRQ_TYPE_MASK		0xff
#define KVM_ARM_IRQ_VCPU_SHIFT		16
#define KVM_ARM_IRQ_VCPU_MASK		0xff
#define KVM_ARM_IRQ_NUM_SHIFT		0
#define KVM_ARM_IRQ_NUM_MASK		0xffff

/* irq_type field */
#define KVM_ARM_IRQ_TYPE_CPU		0
#define KVM_ARM_IRQ_TYPE_SPI		1
#define KVM_ARM_IRQ_TYPE_PPI		2

/* out-of-kernel GIC cpu interrupt injection irq_number field */
#define KVM_ARM_IRQ_CPU_IRQ		0
#define KVM_ARM_IRQ_CPU_FIQ		1

/* Highest supported SPI, from VGIC_NR_IRQS */
#define KVM_ARM_IRQ_GIC_MAX		127

/* PSCI interface */
#define KVM_PSCI_FN_BASE		0x95c1ba5e
#define KVM_PSCI_FN(n)			(KVM_PSCI_FN_BASE + (n))

#define KVM_PSCI_FN_CPU_SUSPEND		KVM_PSCI_FN(0)
#define KVM_PSCI_FN_CPU_OFF		KVM_PSCI_FN(1)
#define KVM_PSCI_FN_CPU_ON		KVM_PSCI_FN(2)
#define KVM_PSCI_FN_MIGRATE		KVM_PSCI_FN(3)

#define KVM_PSCI_RET_SUCCESS		0
#define KVM_PSCI_RET_NI			((unsigned long)-1)
#define KVM_PSCI_RET_INVAL		((unsigned long)-2)
#define KVM_PSCI_RET_DENIED		((unsigned long)-3)

#endif /* __ARM_KVM_H__ */
arch/arm/kernel/Makefile:

@@ -82,5 +82,6 @@ obj-$(CONFIG_DEBUG_LL)	+= debug.o
obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o

obj-$(CONFIG_ARM_VIRT_EXT)	+= hyp-stub.o
+obj-$(CONFIG_ARM_PSCI)		+= psci.o

extra-y := $(head-y) vmlinux.lds
@ -13,6 +13,9 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#ifdef CONFIG_KVM_ARM_HOST
#include <linux/kvm_host.h>
#endif
#include <asm/cacheflush.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
@ -146,5 +149,27 @@ int main(void)
	DEFINE(DMA_BIDIRECTIONAL,	DMA_BIDIRECTIONAL);
	DEFINE(DMA_TO_DEVICE,		DMA_TO_DEVICE);
	DEFINE(DMA_FROM_DEVICE,		DMA_FROM_DEVICE);
#ifdef CONFIG_KVM_ARM_HOST
	DEFINE(VCPU_KVM,		offsetof(struct kvm_vcpu, kvm));
	DEFINE(VCPU_MIDR,		offsetof(struct kvm_vcpu, arch.midr));
	DEFINE(VCPU_CP15,		offsetof(struct kvm_vcpu, arch.cp15));
	DEFINE(VCPU_VFP_GUEST,		offsetof(struct kvm_vcpu, arch.vfp_guest));
	DEFINE(VCPU_VFP_HOST,		offsetof(struct kvm_vcpu, arch.vfp_host));
	DEFINE(VCPU_REGS,		offsetof(struct kvm_vcpu, arch.regs));
	DEFINE(VCPU_USR_REGS,		offsetof(struct kvm_vcpu, arch.regs.usr_regs));
	DEFINE(VCPU_SVC_REGS,		offsetof(struct kvm_vcpu, arch.regs.svc_regs));
	DEFINE(VCPU_ABT_REGS,		offsetof(struct kvm_vcpu, arch.regs.abt_regs));
	DEFINE(VCPU_UND_REGS,		offsetof(struct kvm_vcpu, arch.regs.und_regs));
	DEFINE(VCPU_IRQ_REGS,		offsetof(struct kvm_vcpu, arch.regs.irq_regs));
	DEFINE(VCPU_FIQ_REGS,		offsetof(struct kvm_vcpu, arch.regs.fiq_regs));
	DEFINE(VCPU_PC,			offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
	DEFINE(VCPU_CPSR,		offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
	DEFINE(VCPU_IRQ_LINES,		offsetof(struct kvm_vcpu, arch.irq_lines));
	DEFINE(VCPU_HSR,		offsetof(struct kvm_vcpu, arch.hsr));
	DEFINE(VCPU_HxFAR,		offsetof(struct kvm_vcpu, arch.hxfar));
	DEFINE(VCPU_HPFAR,		offsetof(struct kvm_vcpu, arch.hpfar));
	DEFINE(VCPU_HYP_PC,		offsetof(struct kvm_vcpu, arch.hyp_pc));
	DEFINE(KVM_VTTBR,		offsetof(struct kvm, arch.vttbr));
#endif
	return 0;
}
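
The DEFINE() entries above exist so that assembly (for example the world-switch code) can index into C structures by name: asm-offsets.c is compiled and the build turns each value into a constant in the generated asm/asm-offsets.h. A toy, self-contained sketch of the same mechanism, using made-up types rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the real kvm_vcpu layout; these names are hypothetical. */
struct toy_regs { unsigned long uregs[18]; };
struct toy_vcpu_arch { struct toy_regs regs; unsigned long irq_lines; };

int main(void)
{
	/* What DEFINE(VCPU_IRQ_LINES, offsetof(...)) boils down to. */
	printf("#define TOY_VCPU_IRQ_LINES %zu\n",
	       offsetof(struct toy_vcpu_arch, irq_lines));
	return 0;
}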
@ -28,6 +28,7 @@
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/smp.h>
#include <linux/cpu_pm.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
@ -35,6 +36,7 @@
#include <asm/hw_breakpoint.h>
#include <asm/kdebug.h>
#include <asm/traps.h>
#include <asm/hardware/coresight.h>

/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
@ -49,6 +51,9 @@ static int core_num_wrps;
/* Debug architecture version. */
static u8 debug_arch;

/* Does debug architecture support OS Save and Restore? */
static bool has_ossr;

/* Maximum supported watchpoint length. */
static u8 max_watchpoint_len;

@ -903,6 +908,23 @@ static struct undef_hook debug_reg_hook = {
	.fn	= debug_reg_trap,
};

/* Does this core support OS Save and Restore? */
static bool core_has_os_save_restore(void)
{
	u32 oslsr;

	switch (get_debug_arch()) {
	case ARM_DEBUG_ARCH_V7_1:
		return true;
	case ARM_DEBUG_ARCH_V7_ECP14:
		ARM_DBG_READ(c1, c1, 4, oslsr);
		if (oslsr & ARM_OSLSR_OSLM0)
			return true;
	default:
		return false;
	}
}

static void reset_ctrl_regs(void *unused)
{
	int i, raw_num_brps, err = 0, cpu = smp_processor_id();
@ -930,11 +952,7 @@ static void reset_ctrl_regs(void *unused)
		if ((val & 0x1) == 0)
			err = -EPERM;

		/*
		 * Check whether we implement OS save and restore.
		 */
		ARM_DBG_READ(c1, c1, 4, val);
		if ((val & 0x9) == 0)
		if (!has_ossr)
			goto clear_vcr;
		break;
	case ARM_DEBUG_ARCH_V7_1:
@ -955,9 +973,9 @@ static void reset_ctrl_regs(void *unused)

	/*
	 * Unconditionally clear the OS lock by writing a value
	 * other than 0xC5ACCE55 to the access register.
	 * other than CS_LAR_KEY to the access register.
	 */
	ARM_DBG_WRITE(c1, c0, 4, 0);
	ARM_DBG_WRITE(c1, c0, 4, ~CS_LAR_KEY);
	isb();

	/*
@ -1015,6 +1033,30 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
	.notifier_call = dbg_reset_notify,
};

#ifdef CONFIG_CPU_PM
static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
			     void *v)
{
	if (action == CPU_PM_EXIT)
		reset_ctrl_regs(NULL);

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = {
	.notifier_call = dbg_cpu_pm_notify,
};

static void __init pm_init(void)
{
	cpu_pm_register_notifier(&dbg_cpu_pm_nb);
}
#else
static inline void pm_init(void)
{
}
#endif

static int __init arch_hw_breakpoint_init(void)
{
	debug_arch = get_debug_arch();
@ -1024,6 +1066,8 @@ static int __init arch_hw_breakpoint_init(void)
		return 0;
	}

	has_ossr = core_has_os_save_restore();

	/* Determine how many BRPs/WRPs are available. */
	core_num_brps = get_num_brps();
	core_num_wrps = get_num_wrps();
@ -1062,8 +1106,9 @@ static int __init arch_hw_breakpoint_init(void)
	hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
			TRAP_HWBKPT, "breakpoint debug exception");

	/* Register hotplug notifier. */
	/* Register hotplug and PM notifiers. */
	register_cpu_notifier(&dbg_reset_nb);
	pm_init();
	return 0;
}
arch_initcall(arch_hw_breakpoint_init);
@ -149,12 +149,6 @@ u64 armpmu_event_update(struct perf_event *event)
static void
armpmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	armpmu_event_update(event);
}

@ -207,8 +201,6 @@ armpmu_del(struct perf_event *event, int flags)
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0);

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);
@ -358,7 +350,7 @@ __hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping, err;
	int mapping;

	mapping = armpmu->map_event(event);

@ -407,14 +399,12 @@ __hw_perf_event_init(struct perf_event *event)
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event) {
		err = validate_group(event);
		if (err)
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return err;
	return 0;
}

static int armpmu_event_init(struct perf_event *event)
@ -147,7 +147,7 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
	cpu_pmu->free_irq	= cpu_pmu_free_irq;

	/* Ensure the PMU has sane values out of reset. */
	if (cpu_pmu && cpu_pmu->reset)
	if (cpu_pmu->reset)
		on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
}

@ -201,48 +201,46 @@ static struct platform_device_id cpu_pmu_plat_device_ids[] = {
static int probe_current_pmu(struct arm_pmu *pmu)
{
	int cpu = get_cpu();
	unsigned long cpuid = read_cpuid_id();
	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
	unsigned long part_number = (cpuid & 0xFFF0);
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();
	int ret = -ENODEV;

	pr_info("probing PMU on CPU %d\n", cpu);

	/* ARM Ltd CPUs. */
	if (0x41 == implementor) {
	if (implementor == ARM_CPU_IMP_ARM) {
		switch (part_number) {
		case 0xB360:	/* ARM1136 */
		case 0xB560:	/* ARM1156 */
		case 0xB760:	/* ARM1176 */
		case ARM_CPU_PART_ARM1136:
		case ARM_CPU_PART_ARM1156:
		case ARM_CPU_PART_ARM1176:
			ret = armv6pmu_init(pmu);
			break;
		case 0xB020:	/* ARM11mpcore */
		case ARM_CPU_PART_ARM11MPCORE:
			ret = armv6mpcore_pmu_init(pmu);
			break;
		case 0xC080:	/* Cortex-A8 */
		case ARM_CPU_PART_CORTEX_A8:
			ret = armv7_a8_pmu_init(pmu);
			break;
		case 0xC090:	/* Cortex-A9 */
		case ARM_CPU_PART_CORTEX_A9:
			ret = armv7_a9_pmu_init(pmu);
			break;
		case 0xC050:	/* Cortex-A5 */
		case ARM_CPU_PART_CORTEX_A5:
			ret = armv7_a5_pmu_init(pmu);
			break;
		case 0xC0F0:	/* Cortex-A15 */
		case ARM_CPU_PART_CORTEX_A15:
			ret = armv7_a15_pmu_init(pmu);
			break;
		case 0xC070:	/* Cortex-A7 */
		case ARM_CPU_PART_CORTEX_A7:
			ret = armv7_a7_pmu_init(pmu);
			break;
		}
	/* Intel CPUs [xscale]. */
	} else if (0x69 == implementor) {
		part_number = (cpuid >> 13) & 0x7;
		switch (part_number) {
		case 1:
	} else if (implementor == ARM_CPU_IMP_INTEL) {
		switch (xscale_cpu_arch_version()) {
		case ARM_CPU_XSCALE_ARCH_V1:
			ret = xscale1pmu_init(pmu);
			break;
		case 2:
		case ARM_CPU_XSCALE_ARCH_V2:
			ret = xscale2pmu_init(pmu);
			break;
		}
@ -279,17 +277,22 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
	}

	if (ret) {
		pr_info("failed to register PMU devices!");
		kfree(pmu);
		return ret;
		pr_info("failed to probe PMU!");
		goto out_free;
	}

	cpu_pmu = pmu;
	cpu_pmu->plat_device = pdev;
	cpu_pmu_init(cpu_pmu);
	armpmu_register(cpu_pmu, PERF_TYPE_RAW);
	ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW);

	return 0;
	if (!ret)
		return 0;

out_free:
	pr_info("failed to register PMU devices!");
	kfree(pmu);
	return ret;
}

static struct platform_driver cpu_pmu_driver = {
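
The switch above from open-coded masks to read_cpuid_implementor()/read_cpuid_part_number() does not change the decoding, only who owns it. A standalone sketch of the MIDR fields involved (layout per the ARM ARM; the helper names here are made up):

#include <stdint.h>

static inline uint32_t midr_implementor(uint32_t midr)
{
	return (midr >> 24) & 0xff;	/* 0x41 ('A') for ARM Ltd, 0x69 for Intel */
}

static inline uint32_t midr_part_number(uint32_t midr)
{
	return midr & 0xfff0;		/* kept unshifted, e.g. 0xc0f0 for Cortex-A15 */
}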
@ -106,7 +106,7 @@ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)]	= ARMV6_PERFCTR_ICACHE_MISS,
		[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
@ -259,7 +259,7 @@ static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)]	= ARMV6MPCORE_PERFCTR_ICACHE_MISS,
		[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
@ -157,8 +157,8 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
		[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
		[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
		[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
@ -282,7 +282,7 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
		[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
@ -399,8 +399,8 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
		[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
		[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
		[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
	},
	/*
	 * The prefetch counters don't differentiate between the I
@ -527,8 +527,8 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
		[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
		[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
		[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
@ -651,8 +651,8 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
		[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS,
		[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,
		[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
@ -83,7 +83,7 @@ static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
		[C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,
		[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
211
arch/arm/kernel/psci.c
Normal file
@ -0,0 +1,211 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt) "psci: " fmt

#include <linux/init.h>
#include <linux/of.h>

#include <asm/compiler.h>
#include <asm/errno.h>
#include <asm/opcodes-sec.h>
#include <asm/opcodes-virt.h>
#include <asm/psci.h>

struct psci_operations psci_ops;

static int (*invoke_psci_fn)(u32, u32, u32, u32);

enum psci_function {
	PSCI_FN_CPU_SUSPEND,
	PSCI_FN_CPU_ON,
	PSCI_FN_CPU_OFF,
	PSCI_FN_MIGRATE,
	PSCI_FN_MAX,
};

static u32 psci_function_id[PSCI_FN_MAX];

#define PSCI_RET_SUCCESS	0
#define PSCI_RET_EOPNOTSUPP	-1
#define PSCI_RET_EINVAL		-2
#define PSCI_RET_EPERM		-3

static int psci_to_linux_errno(int errno)
{
	switch (errno) {
	case PSCI_RET_SUCCESS:
		return 0;
	case PSCI_RET_EOPNOTSUPP:
		return -EOPNOTSUPP;
	case PSCI_RET_EINVAL:
		return -EINVAL;
	case PSCI_RET_EPERM:
		return -EPERM;
	};

	return -EINVAL;
}

#define PSCI_POWER_STATE_ID_MASK	0xffff
#define PSCI_POWER_STATE_ID_SHIFT	0
#define PSCI_POWER_STATE_TYPE_MASK	0x1
#define PSCI_POWER_STATE_TYPE_SHIFT	16
#define PSCI_POWER_STATE_AFFL_MASK	0x3
#define PSCI_POWER_STATE_AFFL_SHIFT	24

static u32 psci_power_state_pack(struct psci_power_state state)
{
	return ((state.id & PSCI_POWER_STATE_ID_MASK)
			<< PSCI_POWER_STATE_ID_SHIFT) |
		((state.type & PSCI_POWER_STATE_TYPE_MASK)
			<< PSCI_POWER_STATE_TYPE_SHIFT) |
		((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK)
			<< PSCI_POWER_STATE_AFFL_SHIFT);
}

/*
 * The following two functions are invoked via the invoke_psci_fn pointer
 * and will not be inlined, allowing us to piggyback on the AAPCS.
 */
static noinline int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1,
					 u32 arg2)
{
	asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r1")
			__asmeq("%2", "r2")
			__asmeq("%3", "r3")
			__HVC(0)
		: "+r" (function_id)
		: "r" (arg0), "r" (arg1), "r" (arg2));

	return function_id;
}

static noinline int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1,
					 u32 arg2)
{
	asm volatile(
			__asmeq("%0", "r0")
			__asmeq("%1", "r1")
			__asmeq("%2", "r2")
			__asmeq("%3", "r3")
			__SMC(0)
		: "+r" (function_id)
		: "r" (arg0), "r" (arg1), "r" (arg2));

	return function_id;
}

static int psci_cpu_suspend(struct psci_power_state state,
			    unsigned long entry_point)
{
	int err;
	u32 fn, power_state;

	fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
	power_state = psci_power_state_pack(state);
	err = invoke_psci_fn(fn, power_state, entry_point, 0);
	return psci_to_linux_errno(err);
}

static int psci_cpu_off(struct psci_power_state state)
{
	int err;
	u32 fn, power_state;

	fn = psci_function_id[PSCI_FN_CPU_OFF];
	power_state = psci_power_state_pack(state);
	err = invoke_psci_fn(fn, power_state, 0, 0);
	return psci_to_linux_errno(err);
}

static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
{
	int err;
	u32 fn;

	fn = psci_function_id[PSCI_FN_CPU_ON];
	err = invoke_psci_fn(fn, cpuid, entry_point, 0);
	return psci_to_linux_errno(err);
}

static int psci_migrate(unsigned long cpuid)
{
	int err;
	u32 fn;

	fn = psci_function_id[PSCI_FN_MIGRATE];
	err = invoke_psci_fn(fn, cpuid, 0, 0);
	return psci_to_linux_errno(err);
}

static const struct of_device_id psci_of_match[] __initconst = {
	{ .compatible = "arm,psci", },
	{},
};

static int __init psci_init(void)
{
	struct device_node *np;
	const char *method;
	u32 id;

	np = of_find_matching_node(NULL, psci_of_match);
	if (!np)
		return 0;

	pr_info("probing function IDs from device-tree\n");

	if (of_property_read_string(np, "method", &method)) {
		pr_warning("missing \"method\" property\n");
		goto out_put_node;
	}

	if (!strcmp("hvc", method)) {
		invoke_psci_fn = __invoke_psci_fn_hvc;
	} else if (!strcmp("smc", method)) {
		invoke_psci_fn = __invoke_psci_fn_smc;
	} else {
		pr_warning("invalid \"method\" property: %s\n", method);
		goto out_put_node;
	}

	if (!of_property_read_u32(np, "cpu_suspend", &id)) {
		psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
		psci_ops.cpu_suspend = psci_cpu_suspend;
	}

	if (!of_property_read_u32(np, "cpu_off", &id)) {
		psci_function_id[PSCI_FN_CPU_OFF] = id;
		psci_ops.cpu_off = psci_cpu_off;
	}

	if (!of_property_read_u32(np, "cpu_on", &id)) {
		psci_function_id[PSCI_FN_CPU_ON] = id;
		psci_ops.cpu_on = psci_cpu_on;
	}

	if (!of_property_read_u32(np, "migrate", &id)) {
		psci_function_id[PSCI_FN_MIGRATE] = id;
		psci_ops.migrate = psci_migrate;
	}

out_put_node:
	of_node_put(np);
	return 0;
}
early_initcall(psci_init);
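
A worked example of psci_power_state_pack() under the bit layout defined above (the field widths are assumed from asm/psci.h; the toy names are illustrative): a power-down request (type = 1) at affinity level 1 with state id 0 packs to 0x01010000.

#include <assert.h>
#include <stdint.h>

struct toy_power_state { uint16_t id; uint8_t type; uint8_t affinity_level; };

static uint32_t toy_pack(struct toy_power_state s)
{
	return ((uint32_t)(s.id & 0xffff) << 0) |
	       ((uint32_t)(s.type & 0x1) << 16) |
	       ((uint32_t)(s.affinity_level & 0x3) << 24);
}

int main(void)
{
	struct toy_power_state s = { .id = 0, .type = 1, .affinity_level = 1 };
	assert(toy_pack(s) == 0x01010000);
	return 0;
}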
@ -19,7 +19,11 @@
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__idmap_text_start) = .;			\
		*(.idmap.text)						\
		VMLINUX_SYMBOL(__idmap_text_end) = .;
		VMLINUX_SYMBOL(__idmap_text_end) = .;			\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;		\
		*(.hyp.idmap.text)					\
		VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;

#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
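
The new __hyp_idmap_text_* symbols bracket the HYP init trampoline so that it can be identity-mapped across the MMU switch. A hedged sketch of how C code might size that region (the symbol names match the linker script above; the helper itself is illustrative, not from this series):

extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

/* Illustrative: the byte range that must be identity-mapped for HYP init. */
static inline unsigned long hyp_idmap_text_size(void)
{
	return (unsigned long)(__hyp_idmap_text_end - __hyp_idmap_text_start);
}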
56
arch/arm/kvm/Kconfig
Normal file
@ -0,0 +1,56 @@
#
# KVM configuration
#

source "virt/kvm/Kconfig"

menuconfig VIRTUALIZATION
	bool "Virtualization"
	---help---
	  Say Y here to get to see options for using your Linux host to run
	  other operating systems inside virtual machines (guests).
	  This option alone does not add any kernel code.

	  If you say N, all options in this submenu will be skipped and
	  disabled.

if VIRTUALIZATION

config KVM
	bool "Kernel-based Virtual Machine (KVM) support"
	select PREEMPT_NOTIFIERS
	select ANON_INODES
	select KVM_MMIO
	select KVM_ARM_HOST
	depends on ARM_VIRT_EXT && ARM_LPAE
	---help---
	  Support hosting virtualized guest machines. You will also
	  need to select one or more of the processor modules below.

	  This module provides access to the hardware capabilities through
	  a character device node named /dev/kvm.

	  If unsure, say N.

config KVM_ARM_HOST
	bool "KVM host support for ARM cpus."
	depends on KVM
	depends on MMU
	select	MMU_NOTIFIER
	---help---
	  Provides host support for ARM processors.

config KVM_ARM_MAX_VCPUS
	int "Maximum number of supported virtual CPUs per VM"
	depends on KVM_ARM_HOST
	default 4
	help
	  Static number of max supported virtual CPUs per VM.

	  If you choose a high number, the vcpu structures will be quite
	  large, so only choose a reasonable number that you expect to
	  actually use.

source drivers/virtio/Kconfig

endif # VIRTUALIZATION
21
arch/arm/kvm/Makefile
Normal file
@ -0,0 +1,21 @@
#
# Makefile for Kernel-based Virtual Machine module
#

plus_virt := $(call as-instr,.arch_extension virt,+virt)
ifeq ($(plus_virt),+virt)
	plus_virt_def := -DREQUIRES_VIRT=1
endif

ccflags-y += -Ivirt/kvm -Iarch/arm/kvm
CFLAGS_arm.o := -I. $(plus_virt_def)
CFLAGS_mmu.o := -I.

AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)

kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)

obj-y += kvm-arm.o init.o interrupts.o
obj-y += arm.o guest.o mmu.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o mmio.o psci.o
1015
arch/arm/kvm/arm.c
Normal file
File diff suppressed because it is too large
1046
arch/arm/kvm/coproc.c
Normal file
File diff suppressed because it is too large
153
arch/arm/kvm/coproc.h
Normal file
@ -0,0 +1,153 @@
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef __ARM_KVM_COPROC_LOCAL_H__
#define __ARM_KVM_COPROC_LOCAL_H__

struct coproc_params {
	unsigned long CRn;
	unsigned long CRm;
	unsigned long Op1;
	unsigned long Op2;
	unsigned long Rt1;
	unsigned long Rt2;
	bool is_64bit;
	bool is_write;
};

struct coproc_reg {
	/* MRC/MCR/MRRC/MCRR instruction which accesses it. */
	unsigned long CRn;
	unsigned long CRm;
	unsigned long Op1;
	unsigned long Op2;

	bool is_64;

	/* Trapped access from guest, if non-NULL. */
	bool (*access)(struct kvm_vcpu *,
		       const struct coproc_params *,
		       const struct coproc_reg *);

	/* Initialization for vcpu. */
	void (*reset)(struct kvm_vcpu *, const struct coproc_reg *);

	/* Index into vcpu->arch.cp15[], or 0 if we don't need to save it. */
	unsigned long reg;

	/* Value (usually reset value) */
	u64 val;
};

static inline void print_cp_instr(const struct coproc_params *p)
{
	/* Look, we even formatted it for you to paste into the table! */
	if (p->is_64bit) {
		kvm_pr_unimpl(" { CRm(%2lu), Op1(%2lu), is64, func_%s },\n",
			      p->CRm, p->Op1, p->is_write ? "write" : "read");
	} else {
		kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32,"
			      " func_%s },\n",
			      p->CRn, p->CRm, p->Op1, p->Op2,
			      p->is_write ? "write" : "read");
	}
}

static inline bool ignore_write(struct kvm_vcpu *vcpu,
				const struct coproc_params *p)
{
	return true;
}

static inline bool read_zero(struct kvm_vcpu *vcpu,
			     const struct coproc_params *p)
{
	*vcpu_reg(vcpu, p->Rt1) = 0;
	return true;
}

static inline bool write_to_read_only(struct kvm_vcpu *vcpu,
				      const struct coproc_params *params)
{
	kvm_debug("CP15 write to read-only register at: %08x\n",
		  *vcpu_pc(vcpu));
	print_cp_instr(params);
	return false;
}

static inline bool read_from_write_only(struct kvm_vcpu *vcpu,
					const struct coproc_params *params)
{
	kvm_debug("CP15 read to write-only register at: %08x\n",
		  *vcpu_pc(vcpu));
	print_cp_instr(params);
	return false;
}

/* Reset functions */
static inline void reset_unknown(struct kvm_vcpu *vcpu,
				 const struct coproc_reg *r)
{
	BUG_ON(!r->reg);
	BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15));
	vcpu->arch.cp15[r->reg] = 0xdecafbad;
}

static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	BUG_ON(!r->reg);
	BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15));
	vcpu->arch.cp15[r->reg] = r->val;
}

static inline void reset_unknown64(struct kvm_vcpu *vcpu,
				   const struct coproc_reg *r)
{
	BUG_ON(!r->reg);
	BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.cp15));

	vcpu->arch.cp15[r->reg] = 0xdecafbad;
	vcpu->arch.cp15[r->reg+1] = 0xd0c0ffee;
}

static inline int cmp_reg(const struct coproc_reg *i1,
			  const struct coproc_reg *i2)
{
	BUG_ON(i1 == i2);
	if (!i1)
		return 1;
	else if (!i2)
		return -1;
	if (i1->CRn != i2->CRn)
		return i1->CRn - i2->CRn;
	if (i1->CRm != i2->CRm)
		return i1->CRm - i2->CRm;
	if (i1->Op1 != i2->Op1)
		return i1->Op1 - i2->Op1;
	return i1->Op2 - i2->Op2;
}


#define CRn(_x)		.CRn = _x
#define CRm(_x)		.CRm = _x
#define Op1(_x)		.Op1 = _x
#define Op2(_x)		.Op2 = _x
#define is64		.is_64 = true
#define is32		.is_64 = false

#endif /* __ARM_KVM_COPROC_LOCAL_H__ */
162
arch/arm/kvm/coproc_a15.c
Normal file
@ -0,0 +1,162 @@
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/kvm_host.h>
#include <asm/cputype.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <linux/init.h>

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	/*
	 * Compute guest MPIDR:
	 * (Even if we present only one VCPU to the guest on an SMP
	 * host we don't set the U bit in the MPIDR, or vice versa, as
	 * revealing the underlying hardware properties is likely to
	 * be the best choice).
	 */
	vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & ~MPIDR_LEVEL_MASK)
		| (vcpu->vcpu_id & MPIDR_LEVEL_MASK);
}

#include "coproc.h"

/* A15 TRM 4.3.28: RO WI */
static bool access_actlr(struct kvm_vcpu *vcpu,
			 const struct coproc_params *p,
			 const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR];
	return true;
}

/* A15 TRM 4.3.60: R/O. */
static bool access_cbar(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p);
	return read_zero(vcpu, p);
}

/* A15 TRM 4.3.48: R/O WI. */
static bool access_l2ctlr(struct kvm_vcpu *vcpu,
			  const struct coproc_params *p,
			  const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR];
	return true;
}

static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 l2ctlr, ncores;

	asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
	l2ctlr &= ~(3 << 24);
	ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
	l2ctlr |= (ncores & 3) << 24;

	vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
}

static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 actlr;

	/* ACTLR contains SMP bit: make sure you create all cpus first! */
	asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
	/* Make the SMP bit consistent with the guest configuration */
	if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
		actlr |= 1U << 6;
	else
		actlr &= ~(1U << 6);

	vcpu->arch.cp15[c1_ACTLR] = actlr;
}

/* A15 TRM 4.3.49: R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored). */
static bool access_l2ectlr(struct kvm_vcpu *vcpu,
			   const struct coproc_params *p,
			   const struct coproc_reg *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = 0;
	return true;
}

/*
 * A15-specific CP15 registers.
 * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
 */
static const struct coproc_reg a15_regs[] = {
	/* MPIDR: we use VMPIDR for guest access. */
	{ CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
			NULL, reset_mpidr, c0_MPIDR },

	/* SCTLR: swapped by interrupt.S. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
			NULL, reset_val, c1_SCTLR, 0x00C50078 },
	/* ACTLR: trapped by HCR.TAC bit. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
			access_actlr, reset_actlr, c1_ACTLR },
	/* CPACR: swapped by interrupt.S. */
	{ CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
			NULL, reset_val, c1_CPACR, 0x00000000 },

	/*
	 * L2CTLR access (guest wants to know #CPUs).
	 */
	{ CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
			access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
	{ CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},

	/* The Configuration Base Address Register. */
	{ CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
};

static struct kvm_coproc_target_table a15_target_table = {
	.target = KVM_ARM_TARGET_CORTEX_A15,
	.table = a15_regs,
	.num = ARRAY_SIZE(a15_regs),
};

static int __init coproc_a15_init(void)
{
	unsigned int i;

	for (i = 1; i < ARRAY_SIZE(a15_regs); i++)
		BUG_ON(cmp_reg(&a15_regs[i-1],
			       &a15_regs[i]) >= 0);

	kvm_register_target_coproc_table(&a15_target_table);
	return 0;
}
late_initcall(coproc_a15_init);
373
arch/arm/kvm/emulate.c
Normal file
@ -0,0 +1,373 @@
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>

#include "trace.h"

#define VCPU_NR_MODES		6
#define VCPU_REG_OFFSET_USR	0
#define VCPU_REG_OFFSET_FIQ	1
#define VCPU_REG_OFFSET_IRQ	2
#define VCPU_REG_OFFSET_SVC	3
#define VCPU_REG_OFFSET_ABT	4
#define VCPU_REG_OFFSET_UND	5
#define REG_OFFSET(_reg) \
	(offsetof(struct kvm_regs, _reg) / sizeof(u32))

#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num])

static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
	/* USR/SYS Registers */
	[VCPU_REG_OFFSET_USR] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
	},

	/* FIQ Registers */
	[VCPU_REG_OFFSET_FIQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7),
		REG_OFFSET(fiq_regs[0]), /* r8 */
		REG_OFFSET(fiq_regs[1]), /* r9 */
		REG_OFFSET(fiq_regs[2]), /* r10 */
		REG_OFFSET(fiq_regs[3]), /* r11 */
		REG_OFFSET(fiq_regs[4]), /* r12 */
		REG_OFFSET(fiq_regs[5]), /* r13 */
		REG_OFFSET(fiq_regs[6]), /* r14 */
	},

	/* IRQ Registers */
	[VCPU_REG_OFFSET_IRQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(irq_regs[0]), /* r13 */
		REG_OFFSET(irq_regs[1]), /* r14 */
	},

	/* SVC Registers */
	[VCPU_REG_OFFSET_SVC] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(svc_regs[0]), /* r13 */
		REG_OFFSET(svc_regs[1]), /* r14 */
	},

	/* ABT Registers */
	[VCPU_REG_OFFSET_ABT] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(abt_regs[0]), /* r13 */
		REG_OFFSET(abt_regs[1]), /* r14 */
	},

	/* UND Registers */
	[VCPU_REG_OFFSET_UND] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(und_regs[0]), /* r13 */
		REG_OFFSET(und_regs[1]), /* r14 */
	},
};

/*
 * Return a pointer to the register number valid in the current mode of
 * the virtual CPU.
 */
u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
{
	u32 *reg_array = (u32 *)&vcpu->arch.regs;
	u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;

	switch (mode) {
	case USR_MODE...SVC_MODE:
		mode &= ~MODE32_BIT; /* 0 ... 3 */
		break;

	case ABT_MODE:
		mode = VCPU_REG_OFFSET_ABT;
		break;

	case UND_MODE:
		mode = VCPU_REG_OFFSET_UND;
		break;

	case SYSTEM_MODE:
		mode = VCPU_REG_OFFSET_USR;
		break;

	default:
		BUG();
	}

	return reg_array + vcpu_reg_offsets[mode][reg_num];
}

/*
 * Return the SPSR for the current mode of the virtual CPU.
 */
u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
{
	u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
	switch (mode) {
	case SVC_MODE:
		return &vcpu->arch.regs.KVM_ARM_SVC_spsr;
	case ABT_MODE:
		return &vcpu->arch.regs.KVM_ARM_ABT_spsr;
	case UND_MODE:
		return &vcpu->arch.regs.KVM_ARM_UND_spsr;
	case IRQ_MODE:
		return &vcpu->arch.regs.KVM_ARM_IRQ_spsr;
	case FIQ_MODE:
		return &vcpu->arch.regs.KVM_ARM_FIQ_spsr;
	default:
		BUG();
	}
}

/**
 * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
 * @vcpu:	the vcpu pointer
 * @run:	the kvm_run structure pointer
 *
 * Simply sets the wait_for_interrupts flag on the vcpu structure, which will
 * halt execution of world-switches and schedule other host processes until
 * there is an incoming IRQ or FIQ to the VM.
 */
int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	trace_kvm_wfi(*vcpu_pc(vcpu));
	kvm_vcpu_block(vcpu);
	return 1;
}

/**
 * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
 * @vcpu:	The VCPU pointer
 *
 * When exceptions occur while instructions are executed in Thumb IF-THEN
 * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
 * to do this little bit of work manually. The fields map like this:
 *
 * IT[7:0] -> CPSR[26:25],CPSR[15:10]
 */
static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
{
	unsigned long itbits, cond;
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_arm = !(cpsr & PSR_T_BIT);

	BUG_ON(is_arm && (cpsr & PSR_IT_MASK));

	if (!(cpsr & PSR_IT_MASK))
		return;

	cond = (cpsr & 0xe000) >> 13;
	itbits = (cpsr & 0x1c00) >> (10 - 2);
	itbits |= (cpsr & (0x3 << 25)) >> 25;

	/* Perform ITAdvance (see page A-52 in ARM DDI 0406C) */
	if ((itbits & 0x7) == 0)
		itbits = cond = 0;
	else
		itbits = (itbits << 1) & 0x1f;

	cpsr &= ~PSR_IT_MASK;
	cpsr |= cond << 13;
	cpsr |= (itbits & 0x1c) << (10 - 2);
	cpsr |= (itbits & 0x3) << 25;
	*vcpu_cpsr(vcpu) = cpsr;
}

/**
 * kvm_skip_instr - skip a trapped instruction and proceed to the next
 * @vcpu: The vcpu pointer
 */
void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
	bool is_thumb;

	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
	if (is_thumb && !is_wide_instr)
		*vcpu_pc(vcpu) += 2;
	else
		*vcpu_pc(vcpu) += 4;
	kvm_adjust_itstate(vcpu);
}


/******************************************************************************
 * Inject exceptions into the guest
 */

static u32 exc_vector_base(struct kvm_vcpu *vcpu)
{
	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
	u32 vbar = vcpu->arch.cp15[c12_VBAR];

	if (sctlr & SCTLR_V)
		return 0xffff0000;
	else /* always have security exceptions */
		return vbar;
}

/**
 * kvm_inject_undefined - inject an undefined exception into the guest
 * @vcpu: The VCPU to receive the undefined exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 *
 * Modelled after TakeUndefInstrException() pseudocode.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	u32 new_lr_value;
	u32 new_spsr_value;
	u32 cpsr = *vcpu_cpsr(vcpu);
	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
	bool is_thumb = (cpsr & PSR_T_BIT);
	u32 vect_offset = 4;
	u32 return_offset = (is_thumb) ? 2 : 4;

	new_spsr_value = cpsr;
	new_lr_value = *vcpu_pc(vcpu) - return_offset;

	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE;
	*vcpu_cpsr(vcpu) |= PSR_I_BIT;
	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);

	if (sctlr & SCTLR_TE)
		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
	if (sctlr & SCTLR_EE)
		*vcpu_cpsr(vcpu) |= PSR_E_BIT;

	/* Note: These now point to UND banked copies */
	*vcpu_spsr(vcpu) = cpsr;
	*vcpu_reg(vcpu, 14) = new_lr_value;

	/* Branch to exception vector */
	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
}

/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException
 * pseudocode.
 */
static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
{
	u32 new_lr_value;
	u32 new_spsr_value;
	u32 cpsr = *vcpu_cpsr(vcpu);
	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
	bool is_thumb = (cpsr & PSR_T_BIT);
	u32 vect_offset;
	u32 return_offset = (is_thumb) ? 4 : 0;
	bool is_lpae;

	new_spsr_value = cpsr;
	new_lr_value = *vcpu_pc(vcpu) + return_offset;

	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE;
	*vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT;
	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);

	if (sctlr & SCTLR_TE)
		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
	if (sctlr & SCTLR_EE)
		*vcpu_cpsr(vcpu) |= PSR_E_BIT;

	/* Note: These now point to ABT banked copies */
	*vcpu_spsr(vcpu) = cpsr;
	*vcpu_reg(vcpu, 14) = new_lr_value;

	if (is_pabt)
		vect_offset = 12;
	else
		vect_offset = 16;

	/* Branch to exception vector */
	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;

	if (is_pabt) {
		/* Set IFAR and IFSR */
		vcpu->arch.cp15[c6_IFAR] = addr;
		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
		/* Always give debug fault for now - should give guest a clue */
		if (is_lpae)
			vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22;
		else
			vcpu->arch.cp15[c5_IFSR] = 2;
	} else { /* !iabt */
		/* Set DFAR and DFSR */
		vcpu->arch.cp15[c6_DFAR] = addr;
		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
		/* Always give debug fault for now - should give guest a clue */
		if (is_lpae)
			vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22;
		else
			vcpu->arch.cp15[c5_DFSR] = 2;
	}

}

/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt(vcpu, false, addr);
}

/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt(vcpu, true, addr);
}
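
A standalone worked example of the ITAdvance step performed by kvm_adjust_itstate() above, on a sample Thumb CPSR (PSR_IT_MASK value as in asm/ptrace.h): advancing IT[4:0] = 0b00100 yields 0b01000, and advancing 0b01000 (low three bits clear, i.e. the last instruction of the IT block) clears the state entirely.

#include <assert.h>
#include <stdint.h>

#define PSR_IT_MASK	0x0600fc00	/* CPSR[26:25] | CPSR[15:10] */

static uint32_t it_advance(uint32_t cpsr)
{
	uint32_t cond, itbits;

	if (!(cpsr & PSR_IT_MASK))
		return cpsr;

	cond = (cpsr & 0xe000) >> 13;
	itbits = (cpsr & 0x1c00) >> (10 - 2);
	itbits |= (cpsr & (0x3 << 25)) >> 25;

	if ((itbits & 0x7) == 0)	/* ITAdvance, ARM DDI 0406C page A-52 */
		itbits = cond = 0;
	else
		itbits = (itbits << 1) & 0x1f;

	cpsr &= ~PSR_IT_MASK;
	cpsr |= cond << 13;
	cpsr |= (itbits & 0x1c) << (10 - 2);
	cpsr |= (itbits & 0x3) << 25;
	return cpsr;
}

int main(void)
{
	assert(it_advance(0x0400) == 0x0800);	/* IT[4:0]: 0b00100 -> 0b01000 */
	assert(it_advance(0x0800) == 0);	/* block finished: state cleared */
	return 0;
}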
222
arch/arm/kvm/guest.c
Normal file
@ -0,0 +1,222 @@
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <asm/uaccess.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>

#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ NULL }
};

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

static u64 core_reg_offset_from_id(u64 id)
{
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	u32 __user *uaddr = (u32 __user *)(long)reg->addr;
	struct kvm_regs *regs = &vcpu->arch.regs;
	u64 off;

	if (KVM_REG_SIZE(reg->id) != 4)
		return -ENOENT;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
		return -ENOENT;

	return put_user(((u32 *)regs)[off], uaddr);
}

static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	u32 __user *uaddr = (u32 __user *)(long)reg->addr;
	struct kvm_regs *regs = &vcpu->arch.regs;
	u64 off, val;

	if (KVM_REG_SIZE(reg->id) != 4)
		return -ENOENT;

	/* Our ID is an index into the kvm_regs struct. */
	off = core_reg_offset_from_id(reg->id);
	if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
		return -ENOENT;

	if (get_user(val, uaddr) != 0)
		return -EFAULT;

	if (off == KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr)) {
		unsigned long mode = val & MODE_MASK;
		switch (mode) {
		case USR_MODE:
		case FIQ_MODE:
		case IRQ_MODE:
		case SVC_MODE:
		case ABT_MODE:
		case UND_MODE:
			break;
		default:
			return -EINVAL;
		}
	}

	((u32 *)regs)[off] = val;
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

static unsigned long num_core_regs(void)
{
	return sizeof(struct kvm_regs) / sizeof(u32);
}

/**
 * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
 *
 * This is for all registers.
 */
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
{
	return num_core_regs() + kvm_arm_num_coproc_regs(vcpu);
}

/**
 * kvm_arm_copy_reg_indices - get indices of all registers.
 *
 * We do core registers right here, then we append coproc regs.
 */
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE;

	for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) {
		if (put_user(core_reg | i, uindices))
			return -EFAULT;
		uindices++;
	}

	return kvm_arm_copy_coproc_indices(vcpu, uindices);
}

int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
		return -EINVAL;

	/* Register group 16 means we want a core register. */
	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
		return get_core_reg(vcpu, reg);

	return kvm_arm_coproc_get_reg(vcpu, reg);
}

int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/* We currently use nothing arch-specific in upper 32 bits */
	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
		return -EINVAL;

	/* Register group 16 means we set a core register. */
	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
		return set_core_reg(vcpu, reg);

	return kvm_arm_coproc_set_reg(vcpu, reg);
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
			const struct kvm_vcpu_init *init)
{
	unsigned int i;

	/* We can only do a cortex A15 for now. */
	if (init->target != kvm_target_cpu())
		return -EINVAL;

	vcpu->arch.target = init->target;
	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);

	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */
	for (i = 0; i < sizeof(init->features) * 8; i++) {
		if (test_bit(i, (void *)init->features)) {
			if (i >= KVM_VCPU_MAX_FEATURES)
				return -ENOENT;
			set_bit(i, vcpu->arch.features);
		}
	}

	/* Now we know what it is, we can reset it. */
	return kvm_reset_vcpu(vcpu);
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}
114
arch/arm/kvm/init.S
Normal file
@ -0,0 +1,114 @@
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/linkage.h>
#include <asm/unified.h>
#include <asm/asm-offsets.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>

/********************************************************************
 * Hypervisor initialization
 *   - should be called with:
 *       r0,r1 = Hypervisor pgd pointer
 *       r2 = top of Hyp stack (kernel VA)
 *       r3 = pointer to hyp vectors
 */

	.text
	.pushsection	.hyp.idmap.text,"ax"
	.align 5
__kvm_hyp_init:
	.globl __kvm_hyp_init

	@ Hyp-mode exception vector
	W(b)	.
	W(b)	.
	W(b)	.
	W(b)	.
	W(b)	.
	W(b)	__do_hyp_init
	W(b)	.
	W(b)	.

__do_hyp_init:
	@ Set the HTTBR to point to the hypervisor PGD pointer passed
	mcrr	p15, 4, r0, r1, c2

	@ Set the HTCR and VTCR to the same shareability and cacheability
	@ settings as the non-secure TTBCR and with T0SZ == 0.
	mrc	p15, 4, r0, c2, c0, 2	@ HTCR
	ldr	r12, =HTCR_MASK
	bic	r0, r0, r12
	mrc	p15, 0, r1, c2, c0, 2	@ TTBCR
	and	r1, r1, #(HTCR_MASK & ~TTBCR_T0SZ)
	orr	r0, r0, r1
	mcr	p15, 4, r0, c2, c0, 2	@ HTCR

	mrc	p15, 4, r1, c2, c1, 2	@ VTCR
	ldr	r12, =VTCR_MASK
	bic	r1, r1, r12
	bic	r0, r0, #(~VTCR_HTCR_SH)	@ clear non-reusable HTCR bits
	orr	r1, r0, r1
	orr	r1, r1, #(KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S)
	mcr	p15, 4, r1, c2, c1, 2	@ VTCR

	@ Use the same memory attributes for hyp. accesses as the kernel
	@ (copy MAIRx to HMAIRx).
	mrc	p15, 0, r0, c10, c2, 0
	mcr	p15, 4, r0, c10, c2, 0
	mrc	p15, 0, r0, c10, c2, 1
	mcr	p15, 4, r0, c10, c2, 1

	@ Set the HSCTLR to:
	@  - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel)
	@  - Endianness: Kernel config
	@  - Fast Interrupt Features: Kernel config
	@  - Write permission implies XN: disabled
	@  - Instruction cache: enabled
	@  - Data/Unified cache: enabled
	@  - Memory alignment checks: enabled
	@  - MMU: enabled (this code must be run from an identity mapping)
	mrc	p15, 4, r0, c1, c0, 0	@ HSCTLR
	ldr	r12, =HSCTLR_MASK
	bic	r0, r0, r12
	mrc	p15, 0, r1, c1, c0, 0	@ SCTLR
	ldr	r12, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
	and	r1, r1, r12
 ARM(	ldr	r12, =(HSCTLR_M | HSCTLR_A)			)
 THUMB(	ldr	r12, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE)		)
	orr	r1, r1, r12
	orr	r0, r0, r1
	isb
	mcr	p15, 4, r0, c1, c0, 0	@ HSCTLR
	isb

	@ Set stack pointer and return to the kernel
	mov	sp, r2

	@ Set HVBAR to point to the HYP vectors
	mcr	p15, 4, r3, c12, c0, 0	@ HVBAR

	eret

	.ltorg

	.globl __kvm_hyp_init_end
__kvm_hyp_init_end:

	.popsection
478
arch/arm/kvm/interrupts.S
Normal file
@ -0,0 +1,478 @@
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/linkage.h>
#include <linux/const.h>
#include <asm/unified.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/vfpmacros.h>
#include "interrupts_head.S"

	.text

__kvm_hyp_code_start:
	.globl __kvm_hyp_code_start

/********************************************************************
 * Flush per-VMID TLBs
 *
 * void __kvm_tlb_flush_vmid(struct kvm *kvm);
 *
 * We rely on the hardware to broadcast the TLB invalidation to all CPUs
 * inside the inner-shareable domain (which is the case for all v7
 * implementations).  If we come across a non-IS SMP implementation, we'll
 * have to use an IPI based mechanism. Until then, we stick to the simple
 * hardware assisted version.
 */
ENTRY(__kvm_tlb_flush_vmid)
	push	{r2, r3}

	add	r0, r0, #KVM_VTTBR
	ldrd	r2, r3, [r0]
	mcrr	p15, 6, r2, r3, c2	@ Write VTTBR
	isb
	mcr	p15, 0, r0, c8, c3, 0	@ TLBIALLIS (rt ignored)
	dsb
	isb
	mov	r2, #0
	mov	r3, #0
	mcrr	p15, 6, r2, r3, c2	@ Back to VMID #0
	isb				@ Not necessary if followed by eret

	pop	{r2, r3}
	bx	lr
ENDPROC(__kvm_tlb_flush_vmid)

/********************************************************************
 * Flush TLBs and instruction caches of all CPUs inside the inner-shareable
 * domain, for all VMIDs
 *
 * void __kvm_flush_vm_context(void);
 */
ENTRY(__kvm_flush_vm_context)
	mov	r0, #0			@ rn parameter for c15 flushes is SBZ

	/* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */
	mcr	p15, 4, r0, c8, c3, 4
	/* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
	mcr	p15, 0, r0, c7, c1, 0
	dsb
	isb				@ Not necessary if followed by eret

	bx	lr
ENDPROC(__kvm_flush_vm_context)


/********************************************************************
 *  Hypervisor world-switch code
 *
 *
 * int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 */
ENTRY(__kvm_vcpu_run)
	@ Save the vcpu pointer
	mcr	p15, 4, vcpu, c13, c0, 2	@ HTPIDR

	save_host_regs

	@ Store hardware CP15 state and load guest state
	read_cp15_state store_to_vcpu = 0
	write_cp15_state read_from_vcpu = 1

	@ If the host kernel has not been configured with VFPv3 support,
	@ then it is safer if we deny guests from using it as well.
#ifdef CONFIG_VFPv3
	@ Set FPEXC_EN so the guest doesn't trap floating point instructions
	VFPFMRX r2, FPEXC		@ VMRS
	push	{r2}
	orr	r2, r2, #FPEXC_EN
	VFPFMXR FPEXC, r2		@ VMSR
#endif

	@ Configure Hyp-role
	configure_hyp_role vmentry

	@ Trap coprocessor CRx accesses
	set_hstr vmentry
	set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
	set_hdcr vmentry

	@ Write configured ID register into MIDR alias
	ldr	r1, [vcpu, #VCPU_MIDR]
	mcr	p15, 4, r1, c0, c0, 0

	@ Write guest view of MPIDR into VMPIDR
	ldr	r1, [vcpu, #CP15_OFFSET(c0_MPIDR)]
	mcr	p15, 4, r1, c0, c0, 5

	@ Set up guest memory translation
	ldr	r1, [vcpu, #VCPU_KVM]
	add	r1, r1, #KVM_VTTBR
	ldrd	r2, r3, [r1]
	mcrr	p15, 6, r2, r3, c2	@ Write VTTBR

	@ We're all done, just restore the GPRs and go to the guest
	restore_guest_regs
	clrex				@ Clear exclusive monitor
	eret

__kvm_vcpu_return:
	/*
	 * return convention:
	 * guest r0, r1, r2 saved on the stack
	 * r0: vcpu pointer
	 * r1: exception code
	 */
	save_guest_regs

	@ Set VMID == 0
	mov	r2, #0
	mov	r3, #0
	mcrr	p15, 6, r2, r3, c2	@ Write VTTBR

	@ Don't trap coprocessor accesses for host kernel
	set_hstr vmexit
	set_hdcr vmexit
	set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))

#ifdef CONFIG_VFPv3
	@ Save floating point registers if we let the guest use them.
	tst	r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
	bne	after_vfp_restore

	@ Switch VFP/NEON hardware state to the host's
	add	r7, vcpu, #VCPU_VFP_GUEST
	store_vfp_state r7
	add	r7, vcpu, #VCPU_VFP_HOST
	ldr	r7, [r7]
	restore_vfp_state r7

after_vfp_restore:
	@ Restore FPEXC_EN which we clobbered on entry
	pop	{r2}
	VFPFMXR FPEXC, r2
#endif

	@ Reset Hyp-role
	configure_hyp_role vmexit

	@ Let host read hardware MIDR
	mrc	p15, 0, r2, c0, c0, 0
	mcr	p15, 4, r2, c0, c0, 0

	@ Back to hardware MPIDR
	mrc	p15, 0, r2, c0, c0, 5
	mcr	p15, 4, r2, c0, c0, 5

	@ Store guest CP15 state and restore host state
	read_cp15_state store_to_vcpu = 1
	write_cp15_state read_from_vcpu = 0

	restore_host_regs
	clrex				@ Clear exclusive monitor
	mov	r0, r1			@ Return the return code
	mov	r1, #0			@ Clear upper bits in return value
	bx	lr			@ return to IOCTL

/********************************************************************
 * Call function in Hyp mode
 *
 *
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * This is not really a variadic function in the classic C-way and care must
 * be taken when calling this to ensure parameters are passed in registers
 * only, since the stack will change between the caller and the callee.
 *
 * Call the function with the first argument containing a pointer to the
 * function you wish to call in Hyp mode, and subsequent arguments will be
 * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the
 * function pointer can be passed).  The function being called must be mapped
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
 * passed in r0 and r1.
 *
 * The calling convention follows the standard AAPCS:
 *   r0 - r3: caller save
 *   r12:     caller save
 *   rest:    callee save
 */
ENTRY(kvm_call_hyp)
	hvc	#0
	bx	lr
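
/*
 * Usage sketch: kvm_tlb_flush_vmid() in arch/arm/kvm/mmu.c (later in this
 * commit) drives this trampoline as
 *
 *	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
 *
 * so the HYP function pointer travels in r0 and "kvm" lands in r0 once
 * host_switch_to_hyp below has shuffled the arguments down.
 */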

/********************************************************************
 * Hypervisor exception vector and handlers
 *
 *
 * The KVM/ARM Hypervisor ABI is defined as follows:
 *
 * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
 * instruction is issued since all traps are disabled when running the host
 * kernel as per the Hyp-mode initialization at boot time.
 *
 * HVC instructions cause a trap to the vector page + offset 0x18 (see hyp_hvc
 * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
 * host kernel) and they cause a trap to the vector page + offset 0xc when HVC
 * instructions are called from within Hyp-mode.
 *
 * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
 *    Switching to Hyp mode is done through a simple HVC #0 instruction. The
 *    exception vector code will check that the HVC comes from VMID==0 and if
 *    so will push the necessary state (SPSR, lr_usr) on the Hyp stack.
 *    - r0 contains a pointer to a HYP function
 *    - r1, r2, and r3 contain arguments to the above function.
 *    - The HYP function will be called with its arguments in r0, r1 and r2.
 *    On HYP function return, we return directly to SVC.
 *
 * Note that the above is used to execute code in Hyp-mode from a host-kernel
 * point of view, and is a different concept from performing a world-switch and
 * executing guest code in SVC mode (with a VMID != 0).
 */

/* Handle undef, svc, pabt, or dabt by crashing with a user notice */
.macro bad_exception exception_code, panic_str
	push	{r0-r2}
	mrrc	p15, 6, r0, r1, c2	@ Read VTTBR
	lsr	r1, r1, #16
	ands	r1, r1, #0xff
	beq	99f

	load_vcpu			@ Load VCPU pointer
	.if \exception_code == ARM_EXCEPTION_DATA_ABORT
	mrc	p15, 4, r2, c5, c2, 0	@ HSR
	mrc	p15, 4, r1, c6, c0, 0	@ HDFAR
	str	r2, [vcpu, #VCPU_HSR]
	str	r1, [vcpu, #VCPU_HxFAR]
	.endif
	.if \exception_code == ARM_EXCEPTION_PREF_ABORT
	mrc	p15, 4, r2, c5, c2, 0	@ HSR
	mrc	p15, 4, r1, c6, c0, 2	@ HIFAR
	str	r2, [vcpu, #VCPU_HSR]
	str	r1, [vcpu, #VCPU_HxFAR]
	.endif
	mov	r1, #\exception_code
	b	__kvm_vcpu_return

	@ We were in the host already. Let's craft a panic-ing return to SVC.
99:	mrs	r2, cpsr
	bic	r2, r2, #MODE_MASK
	orr	r2, r2, #SVC_MODE
THUMB(	orr	r2, r2, #PSR_T_BIT	)
	msr	spsr_cxsf, r2
	mrs	r1, ELR_hyp
	ldr	r2, =BSYM(panic)
	msr	ELR_hyp, r2
	ldr	r0, =\panic_str
	eret
.endm

	.text

	.align 5
__kvm_hyp_vector:
	.globl __kvm_hyp_vector

	@ Hyp-mode exception vector
	W(b)	hyp_reset
	W(b)	hyp_undef
	W(b)	hyp_svc
	W(b)	hyp_pabt
	W(b)	hyp_dabt
	W(b)	hyp_hvc
	W(b)	hyp_irq
	W(b)	hyp_fiq

	.align
hyp_reset:
	b	hyp_reset

	.align
hyp_undef:
	bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str

	.align
hyp_svc:
	bad_exception ARM_EXCEPTION_HVC, svc_die_str

	.align
hyp_pabt:
	bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str

	.align
hyp_dabt:
	bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str

	.align
hyp_hvc:
	/*
	 * Getting here is either because of a trap from a guest or from calling
	 * HVC from the host kernel, which means "switch to Hyp mode".
	 */
	push	{r0, r1, r2}

	@ Check syndrome register
	mrc	p15, 4, r1, c5, c2, 0	@ HSR
	lsr	r0, r1, #HSR_EC_SHIFT
#ifdef CONFIG_VFPv3
	cmp	r0, #HSR_EC_CP_0_13
	beq	switch_to_guest_vfp
#endif
	cmp	r0, #HSR_EC_HVC
	bne	guest_trap		@ Not HVC instr.

	/*
	 * Let's check if the HVC came from VMID 0 and allow simple
	 * switch to Hyp mode
	 */
	mrrc	p15, 6, r0, r2, c2
	lsr	r2, r2, #16
	and	r2, r2, #0xff
	cmp	r2, #0
	bne	guest_trap		@ Guest called HVC

host_switch_to_hyp:
	pop	{r0, r1, r2}

	push	{lr}
	mrs	lr, SPSR
	push	{lr}

	mov	lr, r0
	mov	r0, r1
	mov	r1, r2
	mov	r2, r3

THUMB(	orr	lr, #1)
	blx	lr			@ Call the HYP function

	pop	{lr}
	msr	SPSR_csxf, lr
	pop	{lr}
	eret

guest_trap:
	load_vcpu			@ Load VCPU pointer to r0
	str	r1, [vcpu, #VCPU_HSR]

	@ Check if we need the fault information
	lsr	r1, r1, #HSR_EC_SHIFT
	cmp	r1, #HSR_EC_IABT
	mrceq	p15, 4, r2, c6, c0, 2	@ HIFAR
	beq	2f
	cmp	r1, #HSR_EC_DABT
	bne	1f
	mrc	p15, 4, r2, c6, c0, 0	@ HDFAR

2:	str	r2, [vcpu, #VCPU_HxFAR]

	/*
	 * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
	 *
	 * Abort on the stage 2 translation for a memory access from a
	 * Non-secure PL1 or PL0 mode:
	 *
	 * For any Access flag fault or Translation fault, and also for any
	 * Permission fault on the stage 2 translation of a memory access
	 * made as part of a translation table walk for a stage 1 translation,
	 * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
	 * is UNKNOWN.
	 */

	/* Check for permission fault, and S1PTW */
	mrc	p15, 4, r1, c5, c2, 0	@ HSR
	and	r0, r1, #HSR_FSC_TYPE
	cmp	r0, #FSC_PERM
	tsteq	r1, #(1 << 7)		@ S1PTW
	mrcne	p15, 4, r2, c6, c0, 4	@ HPFAR
	bne	3f

	/* Resolve IPA using the xFAR */
	mcr	p15, 0, r2, c7, c8, 0	@ ATS1CPR
	isb
	mrrc	p15, 0, r0, r1, c7	@ PAR
	tst	r0, #1
	bne	4f			@ Failed translation
	ubfx	r2, r0, #12, #20
	lsl	r2, r2, #4
	orr	r2, r2, r1, lsl #24

3:	load_vcpu			@ Load VCPU pointer to r0
	str	r2, [r0, #VCPU_HPFAR]

1:	mov	r1, #ARM_EXCEPTION_HVC
	b	__kvm_vcpu_return

4:	pop	{r0, r1, r2}		@ Failed translation, return to guest
	eret

/*
 * If VFPv3 support is not available, then we will not switch the VFP
 * registers; however cp10 and cp11 accesses will still trap and fall back
 * to the regular coprocessor emulation code, which currently will
 * inject an undefined exception to the guest.
 */
#ifdef CONFIG_VFPv3
switch_to_guest_vfp:
	load_vcpu			@ Load VCPU pointer to r0
	push	{r3-r7}

	@ NEON/VFP used.  Turn on VFP access.
	set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11))

	@ Switch VFP/NEON hardware state to the guest's
	add	r7, r0, #VCPU_VFP_HOST
	ldr	r7, [r7]
	store_vfp_state r7
	add	r7, r0, #VCPU_VFP_GUEST
	restore_vfp_state r7

	pop	{r3-r7}
	pop	{r0-r2}
	eret
#endif

	.align
hyp_irq:
	push	{r0, r1, r2}
	mov	r1, #ARM_EXCEPTION_IRQ
	load_vcpu			@ Load VCPU pointer to r0
	b	__kvm_vcpu_return

	.align
hyp_fiq:
	b	hyp_fiq

	.ltorg

__kvm_hyp_code_end:
	.globl	__kvm_hyp_code_end

	.section ".rodata"

und_die_str:
	.ascii	"unexpected undefined exception in Hyp mode at: %#08x"
pabt_die_str:
	.ascii	"unexpected prefetch abort in Hyp mode at: %#08x"
dabt_die_str:
	.ascii	"unexpected data abort in Hyp mode at: %#08x"
svc_die_str:
	.ascii	"unexpected HVC/SVC trap in Hyp mode at: %#08x"
441
arch/arm/kvm/interrupts_head.S
Normal file
@ -0,0 +1,441 @@
#define VCPU_USR_REG(_reg_nr)	(VCPU_USR_REGS + (_reg_nr * 4))
#define VCPU_USR_SP		(VCPU_USR_REG(13))
#define VCPU_USR_LR		(VCPU_USR_REG(14))
#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4))
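
/*
 * For illustration: each GP register slot is 4 bytes wide, so VCPU_USR_SP
 * expands to VCPU_USR_REGS + 13 * 4 (52 bytes into the usr register file),
 * and CP15_OFFSET() indexes the 4-byte cp15 array entries the same way.
 */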

/*
 * Many of these macros need to access the VCPU structure, which is always
 * held in r0. These macros should never clobber r1, as it is used to hold the
 * exception code on the return path (except of course the macro that switches
 * all the registers before the final jump to the VM).
 */
vcpu	.req	r0		@ vcpu pointer always in r0

/* Clobbers {r2-r6} */
.macro store_vfp_state vfp_base
	@ The VFPFMRX and VFPFMXR macros are the VMRS and VMSR instructions
	VFPFMRX	r2, FPEXC
	@ Make sure VFP is enabled so we can touch the registers.
	orr	r6, r2, #FPEXC_EN
	VFPFMXR	FPEXC, r6

	VFPFMRX	r3, FPSCR
	tst	r2, #FPEXC_EX		@ Check for VFP Subarchitecture
	beq	1f
	@ If FPEXC_EX is 0, then FPINST/FPINST2 reads are unpredictable, so
	@ we only need to save them if FPEXC_EX is set.
	VFPFMRX	r4, FPINST
	tst	r2, #FPEXC_FP2V
	VFPFMRX	r5, FPINST2, ne		@ vmrsne
	bic	r6, r2, #FPEXC_EX	@ FPEXC_EX disable
	VFPFMXR	FPEXC, r6
1:
	VFPFSTMIA \vfp_base, r6		@ Save VFP registers
	stm	\vfp_base, {r2-r5}	@ Save FPEXC, FPSCR, FPINST, FPINST2
.endm

/* Assume FPEXC_EN is on and FPEXC_EX is off, clobbers {r2-r6} */
.macro restore_vfp_state vfp_base
	VFPFLDMIA \vfp_base, r6		@ Load VFP registers
	ldm	\vfp_base, {r2-r5}	@ Load FPEXC, FPSCR, FPINST, FPINST2

	VFPFMXR	FPSCR, r3
	tst	r2, #FPEXC_EX		@ Check for VFP Subarchitecture
	beq	1f
	VFPFMXR	FPINST, r4
	tst	r2, #FPEXC_FP2V
	VFPFMXR	FPINST2, r5, ne
1:
	VFPFMXR	FPEXC, r2		@ FPEXC	(last, in case !EN)
.endm

/* These are simply for the macros to work - values don't have meaning */
.equ usr, 0
.equ svc, 1
.equ abt, 2
.equ und, 3
.equ irq, 4
.equ fiq, 5

.macro push_host_regs_mode mode
	mrs	r2, SP_\mode
	mrs	r3, LR_\mode
	mrs	r4, SPSR_\mode
	push	{r2, r3, r4}
.endm

/*
 * Store all host persistent registers on the stack.
 * Clobbers all registers, in all modes, except r0 and r1.
 */
.macro save_host_regs
	/* Hyp regs. Only ELR_hyp (SPSR_hyp already saved) */
	mrs	r2, ELR_hyp
	push	{r2}

	/* usr regs */
	push	{r4-r12}	@ r0-r3 are always clobbered
	mrs	r2, SP_usr
	mov	r3, lr
	push	{r2, r3}

	push_host_regs_mode svc
	push_host_regs_mode abt
	push_host_regs_mode und
	push_host_regs_mode irq

	/* fiq regs */
	mrs	r2, r8_fiq
	mrs	r3, r9_fiq
	mrs	r4, r10_fiq
	mrs	r5, r11_fiq
	mrs	r6, r12_fiq
	mrs	r7, SP_fiq
	mrs	r8, LR_fiq
	mrs	r9, SPSR_fiq
	push	{r2-r9}
.endm

.macro pop_host_regs_mode mode
	pop	{r2, r3, r4}
	msr	SP_\mode, r2
	msr	LR_\mode, r3
	msr	SPSR_\mode, r4
.endm

/*
 * Restore all host registers from the stack.
 * Clobbers all registers, in all modes, except r0 and r1.
 */
.macro restore_host_regs
	pop	{r2-r9}
	msr	r8_fiq, r2
	msr	r9_fiq, r3
	msr	r10_fiq, r4
	msr	r11_fiq, r5
	msr	r12_fiq, r6
	msr	SP_fiq, r7
	msr	LR_fiq, r8
	msr	SPSR_fiq, r9

	pop_host_regs_mode irq
	pop_host_regs_mode und
	pop_host_regs_mode abt
	pop_host_regs_mode svc

	pop	{r2, r3}
	msr	SP_usr, r2
	mov	lr, r3
	pop	{r4-r12}

	pop	{r2}
	msr	ELR_hyp, r2
.endm

/*
 * Restore SP, LR and SPSR for a given mode. offset is the offset of
 * this mode's registers from the VCPU base.
 *
 * Assumes vcpu pointer in vcpu reg
 *
 * Clobbers r1, r2, r3, r4.
 */
.macro restore_guest_regs_mode mode, offset
	add	r1, vcpu, \offset
	ldm	r1, {r2, r3, r4}
	msr	SP_\mode, r2
	msr	LR_\mode, r3
	msr	SPSR_\mode, r4
.endm

/*
 * Restore all guest registers from the vcpu struct.
 *
 * Assumes vcpu pointer in vcpu reg
 *
 * Clobbers *all* registers.
 */
.macro restore_guest_regs
	restore_guest_regs_mode svc, #VCPU_SVC_REGS
	restore_guest_regs_mode abt, #VCPU_ABT_REGS
	restore_guest_regs_mode und, #VCPU_UND_REGS
	restore_guest_regs_mode irq, #VCPU_IRQ_REGS

	add	r1, vcpu, #VCPU_FIQ_REGS
	ldm	r1, {r2-r9}
	msr	r8_fiq, r2
	msr	r9_fiq, r3
	msr	r10_fiq, r4
	msr	r11_fiq, r5
	msr	r12_fiq, r6
	msr	SP_fiq, r7
	msr	LR_fiq, r8
	msr	SPSR_fiq, r9

	@ Load return state
	ldr	r2, [vcpu, #VCPU_PC]
	ldr	r3, [vcpu, #VCPU_CPSR]
	msr	ELR_hyp, r2
	msr	SPSR_cxsf, r3

	@ Load user registers
	ldr	r2, [vcpu, #VCPU_USR_SP]
	ldr	r3, [vcpu, #VCPU_USR_LR]
	msr	SP_usr, r2
	mov	lr, r3
	add	vcpu, vcpu, #(VCPU_USR_REGS)
	ldm	vcpu, {r0-r12}
.endm

/*
 * Save SP, LR and SPSR for a given mode. offset is the offset of
 * this mode's registers from the VCPU base.
 *
 * Assumes vcpu pointer in vcpu reg
 *
 * Clobbers r2, r3, r4, r5.
 */
.macro save_guest_regs_mode mode, offset
	add	r2, vcpu, \offset
	mrs	r3, SP_\mode
	mrs	r4, LR_\mode
	mrs	r5, SPSR_\mode
	stm	r2, {r3, r4, r5}
.endm

/*
 * Save all guest registers to the vcpu struct
 * Expects guest's r0, r1, r2 on the stack.
 *
 * Assumes vcpu pointer in vcpu reg
 *
 * Clobbers r2, r3, r4, r5.
 */
.macro save_guest_regs
	@ Store usr registers
	add	r2, vcpu, #VCPU_USR_REG(3)
	stm	r2, {r3-r12}
	add	r2, vcpu, #VCPU_USR_REG(0)
	pop	{r3, r4, r5}		@ r0, r1, r2
	stm	r2, {r3, r4, r5}
	mrs	r2, SP_usr
	mov	r3, lr
	str	r2, [vcpu, #VCPU_USR_SP]
	str	r3, [vcpu, #VCPU_USR_LR]

	@ Store return state
	mrs	r2, ELR_hyp
	mrs	r3, spsr
	str	r2, [vcpu, #VCPU_PC]
	str	r3, [vcpu, #VCPU_CPSR]

	@ Store other guest registers
	save_guest_regs_mode svc, #VCPU_SVC_REGS
	save_guest_regs_mode abt, #VCPU_ABT_REGS
	save_guest_regs_mode und, #VCPU_UND_REGS
	save_guest_regs_mode irq, #VCPU_IRQ_REGS
.endm

/* Reads cp15 registers from hardware and stores them in memory
 * @store_to_vcpu: If 0, registers are written in-order to the stack,
 *		   otherwise to the VCPU struct pointed to by vcpup
 *
 * Assumes vcpu pointer in vcpu reg
 *
 * Clobbers r2 - r12
 */
.macro read_cp15_state store_to_vcpu
	mrc	p15, 0, r2, c1, c0, 0	@ SCTLR
	mrc	p15, 0, r3, c1, c0, 2	@ CPACR
	mrc	p15, 0, r4, c2, c0, 2	@ TTBCR
	mrc	p15, 0, r5, c3, c0, 0	@ DACR
	mrrc	p15, 0, r6, r7, c2	@ TTBR 0
	mrrc	p15, 1, r8, r9, c2	@ TTBR 1
	mrc	p15, 0, r10, c10, c2, 0	@ PRRR
	mrc	p15, 0, r11, c10, c2, 1	@ NMRR
	mrc	p15, 2, r12, c0, c0, 0	@ CSSELR

	.if \store_to_vcpu == 0
	push	{r2-r12}		@ Push CP15 registers
	.else
	str	r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
	str	r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
	str	r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
	str	r5, [vcpu, #CP15_OFFSET(c3_DACR)]
	add	r2, vcpu, #CP15_OFFSET(c2_TTBR0)
	strd	r6, r7, [r2]
	add	r2, vcpu, #CP15_OFFSET(c2_TTBR1)
	strd	r8, r9, [r2]
	str	r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
	str	r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
	str	r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
	.endif

	mrc	p15, 0, r2, c13, c0, 1	@ CID
	mrc	p15, 0, r3, c13, c0, 2	@ TID_URW
	mrc	p15, 0, r4, c13, c0, 3	@ TID_URO
	mrc	p15, 0, r5, c13, c0, 4	@ TID_PRIV
	mrc	p15, 0, r6, c5, c0, 0	@ DFSR
	mrc	p15, 0, r7, c5, c0, 1	@ IFSR
	mrc	p15, 0, r8, c5, c1, 0	@ ADFSR
	mrc	p15, 0, r9, c5, c1, 1	@ AIFSR
	mrc	p15, 0, r10, c6, c0, 0	@ DFAR
	mrc	p15, 0, r11, c6, c0, 2	@ IFAR
	mrc	p15, 0, r12, c12, c0, 0	@ VBAR

	.if \store_to_vcpu == 0
	push	{r2-r12}		@ Push CP15 registers
	.else
	str	r2, [vcpu, #CP15_OFFSET(c13_CID)]
	str	r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
	str	r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
	str	r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
	str	r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
	str	r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
	str	r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
	str	r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
	str	r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
	str	r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
	str	r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
	.endif
.endm

/*
 * Reads cp15 registers from memory and writes them to hardware
 * @read_from_vcpu: If 0, registers are read in-order from the stack,
 *		    otherwise from the VCPU struct pointed to by vcpup
 *
 * Assumes vcpu pointer in vcpu reg
 */
.macro write_cp15_state read_from_vcpu
	.if \read_from_vcpu == 0
	pop	{r2-r12}
	.else
	ldr	r2, [vcpu, #CP15_OFFSET(c13_CID)]
	ldr	r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
	ldr	r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
	ldr	r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
	ldr	r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
	ldr	r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
	ldr	r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
	ldr	r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
	ldr	r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
	ldr	r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
	ldr	r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
	.endif

	mcr	p15, 0, r2, c13, c0, 1	@ CID
	mcr	p15, 0, r3, c13, c0, 2	@ TID_URW
	mcr	p15, 0, r4, c13, c0, 3	@ TID_URO
	mcr	p15, 0, r5, c13, c0, 4	@ TID_PRIV
	mcr	p15, 0, r6, c5, c0, 0	@ DFSR
	mcr	p15, 0, r7, c5, c0, 1	@ IFSR
	mcr	p15, 0, r8, c5, c1, 0	@ ADFSR
	mcr	p15, 0, r9, c5, c1, 1	@ AIFSR
	mcr	p15, 0, r10, c6, c0, 0	@ DFAR
	mcr	p15, 0, r11, c6, c0, 2	@ IFAR
	mcr	p15, 0, r12, c12, c0, 0	@ VBAR

	.if \read_from_vcpu == 0
	pop	{r2-r12}
	.else
	ldr	r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
	ldr	r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
	ldr	r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
	ldr	r5, [vcpu, #CP15_OFFSET(c3_DACR)]
	add	r12, vcpu, #CP15_OFFSET(c2_TTBR0)
	ldrd	r6, r7, [r12]
	add	r12, vcpu, #CP15_OFFSET(c2_TTBR1)
	ldrd	r8, r9, [r12]
	ldr	r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
	ldr	r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
	ldr	r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
	.endif

	mcr	p15, 0, r2, c1, c0, 0	@ SCTLR
	mcr	p15, 0, r3, c1, c0, 2	@ CPACR
	mcr	p15, 0, r4, c2, c0, 2	@ TTBCR
	mcr	p15, 0, r5, c3, c0, 0	@ DACR
	mcrr	p15, 0, r6, r7, c2	@ TTBR 0
	mcrr	p15, 1, r8, r9, c2	@ TTBR 1
	mcr	p15, 0, r10, c10, c2, 0	@ PRRR
	mcr	p15, 0, r11, c10, c2, 1	@ NMRR
	mcr	p15, 2, r12, c0, c0, 0	@ CSSELR
.endm

/*
 * Save the VGIC CPU state into memory
 *
 * Assumes vcpu pointer in vcpu reg
 */
.macro save_vgic_state
.endm

/*
 * Restore the VGIC CPU state from memory
 *
 * Assumes vcpu pointer in vcpu reg
 */
.macro restore_vgic_state
.endm

.equ vmentry,	0
.equ vmexit,	1

/* Configures the HSTR (Hyp System Trap Register) on entry/return
 * (hardware reset value is 0) */
.macro set_hstr operation
	mrc	p15, 4, r2, c1, c1, 3
	ldr	r3, =HSTR_T(15)
	.if \operation == vmentry
	orr	r2, r2, r3		@ Trap CR{15}
	.else
	bic	r2, r2, r3		@ Don't trap any CRx accesses
	.endif
	mcr	p15, 4, r2, c1, c1, 3
.endm

/* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
 * (hardware reset value is 0). Keep previous value in r2. */
.macro set_hcptr operation, mask
	mrc	p15, 4, r2, c1, c1, 2
	ldr	r3, =\mask
	.if \operation == vmentry
	orr	r3, r2, r3		@ Trap coproc-accesses defined in mask
	.else
	bic	r3, r2, r3		@ Don't trap defined coproc-accesses
	.endif
	mcr	p15, 4, r3, c1, c1, 2
.endm

/* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
 * (hardware reset value is 0) */
.macro set_hdcr operation
	mrc	p15, 4, r2, c1, c1, 1
	ldr	r3, =(HDCR_TPM|HDCR_TPMCR)
	.if \operation == vmentry
	orr	r2, r2, r3		@ Trap some perfmon accesses
	.else
	bic	r2, r2, r3		@ Don't trap any perfmon accesses
	.endif
	mcr	p15, 4, r2, c1, c1, 1
.endm

/* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */
.macro configure_hyp_role operation
	mrc	p15, 4, r2, c1, c1, 0	@ HCR
	bic	r2, r2, #HCR_VIRT_EXCP_MASK
	ldr	r3, =HCR_GUEST_MASK
	.if \operation == vmentry
	orr	r2, r2, r3
	ldr	r3, [vcpu, #VCPU_IRQ_LINES]
	orr	r2, r2, r3
	.else
	bic	r2, r2, r3
	.endif
	mcr	p15, 4, r2, c1, c1, 0
.endm

.macro load_vcpu
	mrc	p15, 4, vcpu, c13, c0, 2	@ HTPIDR
.endm
153
arch/arm/kvm/mmio.c
Normal file
@ -0,0 +1,153 @@
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>

#include "trace.h"

/**
 * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 *
 * This should only be called after returning from userspace for MMIO load
 * emulation.
 */
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	__u32 *dest;
	unsigned int len;
	int mask;

	if (!run->mmio.is_write) {
		dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt);
		memset(dest, 0, sizeof(int));

		len = run->mmio.len;
		if (len > 4)
			return -EINVAL;

		memcpy(dest, run->mmio.data, len);

		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
				*((u64 *)run->mmio.data));

		if (vcpu->arch.mmio_decode.sign_extend && len < 4) {
			mask = 1U << ((len * 8) - 1);
			*dest = (*dest ^ mask) - mask;
		}
	}

	return 0;
}
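
/*
 * Worked example of the xor/subtract sign extension above (illustrative):
 * for a 1-byte load that returned 0xff, mask = 1U << 7 = 0x80 and
 * (0xff ^ 0x80) - 0x80 = 0x7f - 0x80 = -1, i.e. 0xffffffff in the 32-bit
 * destination register.
 */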

static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
		      struct kvm_exit_mmio *mmio)
{
	unsigned long rt, len;
	bool is_write, sign_extend;

	if ((vcpu->arch.hsr >> 8) & 1) {
		/* cache operation on I/O addr, tell guest unsupported */
		kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
		return 1;
	}

	if ((vcpu->arch.hsr >> 7) & 1) {
		/* page table accesses IO mem: tell guest to fix its TTBR */
		kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
		return 1;
	}

	switch ((vcpu->arch.hsr >> 22) & 0x3) {
	case 0:
		len = 1;
		break;
	case 1:
		len = 2;
		break;
	case 2:
		len = 4;
		break;
	default:
		kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
		return -EFAULT;
	}

	is_write = vcpu->arch.hsr & HSR_WNR;
	sign_extend = vcpu->arch.hsr & HSR_SSE;
	rt = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT;

	if (kvm_vcpu_reg_is_pc(vcpu, rt)) {
		/* IO memory trying to read/write pc */
		kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
		return 1;
	}

	mmio->is_write = is_write;
	mmio->phys_addr = fault_ipa;
	mmio->len = len;
	vcpu->arch.mmio_decode.sign_extend = sign_extend;
	vcpu->arch.mmio_decode.rt = rt;

	/*
	 * The MMIO instruction is emulated and should not be re-executed
	 * in the guest.
	 */
	kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
	return 0;
}
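
/*
 * Example decode (illustrative): a faulting 16-bit guest store reports
 * SAS = 0b01 in HSR[23:22], so the switch above yields len = 2; HSR_WNR
 * flags it as a write and HSR_SRT_MASK/HSR_SRT_SHIFT name the source
 * register rt, whose value io_mem_abort() below copies into mmio.data.
 */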

int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
		 phys_addr_t fault_ipa)
{
	struct kvm_exit_mmio mmio;
	unsigned long rt;
	int ret;

	/*
	 * Prepare MMIO operation. First stash it in a private
	 * structure that we can use for in-kernel emulation. If the
	 * kernel can't handle it, copy it into run->mmio and let user
	 * space do its magic.
	 */

	if (vcpu->arch.hsr & HSR_ISV) {
		ret = decode_hsr(vcpu, fault_ipa, &mmio);
		if (ret)
			return ret;
	} else {
		kvm_err("load/store instruction decoding not implemented\n");
		return -ENOSYS;
	}

	rt = vcpu->arch.mmio_decode.rt;
	trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
					 KVM_TRACE_MMIO_READ_UNSATISFIED,
			mmio.len, fault_ipa,
			(mmio.is_write) ? *vcpu_reg(vcpu, rt) : 0);

	if (mmio.is_write)
		memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len);

	kvm_prepare_mmio(run, &mmio);
	return 0;
}
787
arch/arm/kvm/mmu.c
Normal file
@ -0,0 +1,787 @@
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <trace/events/kvm.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/mach/map.h>
#include <trace/events/kvm.h>

#include "trace.h"

extern char  __hyp_idmap_text_start[], __hyp_idmap_text_end[];

static DEFINE_MUTEX(kvm_hyp_pgd_mutex);

static void kvm_tlb_flush_vmid(struct kvm *kvm)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
}

static void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
	pte_val(*pte) = new_pte;
	/*
	 * flush_pmd_entry just takes a void pointer and cleans the necessary
	 * cache entries, so we can reuse the function for ptes.
	 */
	flush_pmd_entry(pte);
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  int min, int max)
{
	void *page;

	BUG_ON(max > KVM_NR_MEM_OBJS);
	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < max) {
		page = (void *)__get_free_page(PGALLOC_GFP);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc || !mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}

static void free_ptes(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;
	unsigned int i;

	for (i = 0; i < PTRS_PER_PMD; i++, addr += PMD_SIZE) {
		if (!pmd_none(*pmd) && pmd_table(*pmd)) {
			pte = pte_offset_kernel(pmd, addr);
			pte_free_kernel(NULL, pte);
		}
		pmd++;
	}
}

/**
 * free_hyp_pmds - free Hyp-mode level-2 tables and child level-3 tables
 *
 * Assumes this is a page table used strictly in Hyp-mode and therefore contains
 * only mappings in the kernel memory area, which is above PAGE_OFFSET.
 */
void free_hyp_pmds(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr;

	mutex_lock(&kvm_hyp_pgd_mutex);
	for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) {
		pgd = hyp_pgd + pgd_index(addr);
		pud = pud_offset(pgd, addr);

		if (pud_none(*pud))
			continue;
		BUG_ON(pud_bad(*pud));

		pmd = pmd_offset(pud, addr);
		free_ptes(pmd, addr);
		pmd_free(NULL, pmd);
		pud_clear(pud);
	}
	mutex_unlock(&kvm_hyp_pgd_mutex);
}

static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
				    unsigned long end)
{
	pte_t *pte;
	unsigned long addr;
	struct page *page;

	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
		pte = pte_offset_kernel(pmd, addr);
		BUG_ON(!virt_addr_valid(addr));
		page = virt_to_page(addr);
		kvm_set_pte(pte, mk_pte(page, PAGE_HYP));
	}
}

static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start,
				       unsigned long end,
				       unsigned long *pfn_base)
{
	pte_t *pte;
	unsigned long addr;

	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
		pte = pte_offset_kernel(pmd, addr);
		BUG_ON(pfn_valid(*pfn_base));
		kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE));
		(*pfn_base)++;
	}
}

static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
				   unsigned long end, unsigned long *pfn_base)
{
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, next;

	for (addr = start; addr < end; addr = next) {
		pmd = pmd_offset(pud, addr);

		BUG_ON(pmd_sect(*pmd));

		if (pmd_none(*pmd)) {
			pte = pte_alloc_one_kernel(NULL, addr);
			if (!pte) {
				kvm_err("Cannot allocate Hyp pte\n");
				return -ENOMEM;
			}
			pmd_populate_kernel(NULL, pmd, pte);
		}

		next = pmd_addr_end(addr, end);

		/*
		 * If pfn_base is NULL, we map kernel pages into HYP with the
		 * virtual address. Otherwise, this is considered an I/O
		 * mapping and we map the physical region starting at
		 * *pfn_base to [start, end[.
		 */
		if (!pfn_base)
			create_hyp_pte_mappings(pmd, addr, next);
		else
			create_hyp_io_pte_mappings(pmd, addr, next, pfn_base);
	}

	return 0;
}

static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base)
{
	unsigned long start = (unsigned long)from;
	unsigned long end = (unsigned long)to;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr, next;
	int err = 0;

	BUG_ON(start > end);
	if (start < PAGE_OFFSET)
		return -EINVAL;

	mutex_lock(&kvm_hyp_pgd_mutex);
	for (addr = start; addr < end; addr = next) {
		pgd = hyp_pgd + pgd_index(addr);
		pud = pud_offset(pgd, addr);

		if (pud_none_or_clear_bad(pud)) {
			pmd = pmd_alloc_one(NULL, addr);
			if (!pmd) {
				kvm_err("Cannot allocate Hyp pmd\n");
				err = -ENOMEM;
				goto out;
			}
			pud_populate(NULL, pud, pmd);
		}

		next = pgd_addr_end(addr, end);
		err = create_hyp_pmd_mappings(pud, addr, next, pfn_base);
		if (err)
			goto out;
	}
out:
	mutex_unlock(&kvm_hyp_pgd_mutex);
	return err;
}

/**
 * create_hyp_mappings - map a kernel virtual address range in Hyp mode
 * @from: The virtual kernel start address of the range
 * @to: The virtual kernel end address of the range (exclusive)
 *
 * The same virtual address as the kernel virtual address is also used in
 * Hyp-mode mapping to the same underlying physical pages.
 *
 * Note: Wrapping around zero in the "to" address is not supported.
 */
int create_hyp_mappings(void *from, void *to)
{
	return __create_hyp_mappings(from, to, NULL);
}
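
/*
 * Usage sketch (assumption: mirrors init_hyp_mode() in arch/arm/kvm/arm.c,
 * the caller referenced by the kvm_call_hyp documentation): the Hyp text
 * section is mapped at its kernel virtual address with
 *
 *	err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
 */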

/**
 * create_hyp_io_mappings - map a physical IO range in Hyp mode
 * @from: The virtual HYP start address of the range
 * @to: The virtual HYP end address of the range (exclusive)
 * @addr: The physical start address which gets mapped
 */
int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr)
{
	unsigned long pfn = __phys_to_pfn(addr);
	return __create_hyp_mappings(from, to, &pfn);
}

/**
 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
 * support either full 40-bit input addresses or limited to 32-bit input
 * addresses). Clears the allocated pages.
 *
 * Note we don't need locking here as this is only called when the VM is
 * created, which can only be done once.
 */
int kvm_alloc_stage2_pgd(struct kvm *kvm)
{
	pgd_t *pgd;

	if (kvm->arch.pgd != NULL) {
		kvm_err("kvm_arch already initialized?\n");
		return -EINVAL;
	}

	pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER);
	if (!pgd)
		return -ENOMEM;

	/* stage-2 pgd must be aligned to its size */
	VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1));

	memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t));
	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
	kvm->arch.pgd = pgd;

	return 0;
}

static void clear_pud_entry(pud_t *pud)
{
	pmd_t *pmd_table = pmd_offset(pud, 0);
	pud_clear(pud);
	pmd_free(NULL, pmd_table);
	put_page(virt_to_page(pud));
}

static void clear_pmd_entry(pmd_t *pmd)
{
	pte_t *pte_table = pte_offset_kernel(pmd, 0);
	pmd_clear(pmd);
	pte_free_kernel(NULL, pte_table);
	put_page(virt_to_page(pmd));
}

static bool pmd_empty(pmd_t *pmd)
{
	struct page *pmd_page = virt_to_page(pmd);
	return page_count(pmd_page) == 1;
}

static void clear_pte_entry(pte_t *pte)
{
	if (pte_present(*pte)) {
		kvm_set_pte(pte, __pte(0));
		put_page(virt_to_page(pte));
	}
}

static bool pte_empty(pte_t *pte)
{
	struct page *pte_page = virt_to_page(pte);
	return page_count(pte_page) == 1;
}

/**
 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
 * @kvm:   The VM pointer
 * @start: The intermediate physical base address of the range to unmap
 * @size:  The size of the area to unmap
 *
 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
 * destroying the VM), otherwise another faulting VCPU may come in and mess
 * with things behind our backs.
 */
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	phys_addr_t addr = start, end = start + size;
	u64 range;

	while (addr < end) {
		pgd = kvm->arch.pgd + pgd_index(addr);
		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			addr += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			addr += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		clear_pte_entry(pte);
		range = PAGE_SIZE;

		/* If we emptied the pte, walk back up the ladder */
		if (pte_empty(pte)) {
			clear_pmd_entry(pmd);
			range = PMD_SIZE;
			if (pmd_empty(pmd)) {
				clear_pud_entry(pud);
				range = PUD_SIZE;
			}
		}

		addr += range;
	}
}

/**
 * kvm_free_stage2_pgd - free all stage-2 tables
 * @kvm:	The KVM struct pointer for the VM.
 *
 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
 * underlying level-2 and level-3 tables before freeing the actual level-1 table
 * and setting the struct pointer to NULL.
 *
 * Note we don't need locking here as this is only called when the VM is
 * destroyed, which can only be done once.
 */
void kvm_free_stage2_pgd(struct kvm *kvm)
{
	if (kvm->arch.pgd == NULL)
		return;

	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
	free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
	kvm->arch.pgd = NULL;
}


static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
			  phys_addr_t addr, const pte_t *new_pte, bool iomap)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte, old_pte;

	/* Create 2nd stage page table mapping - Level 1 */
	pgd = kvm->arch.pgd + pgd_index(addr);
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pmd = mmu_memory_cache_alloc(cache);
		pud_populate(NULL, pud, pmd);
		pmd += pmd_index(addr);
		get_page(virt_to_page(pud));
	} else
		pmd = pmd_offset(pud, addr);

	/* Create 2nd stage page table mapping - Level 2 */
	if (pmd_none(*pmd)) {
		if (!cache)
			return 0; /* ignore calls from kvm_set_spte_hva */
		pte = mmu_memory_cache_alloc(cache);
		clean_pte_table(pte);
		pmd_populate_kernel(NULL, pmd, pte);
		pte += pte_index(addr);
		get_page(virt_to_page(pmd));
	} else
		pte = pte_offset_kernel(pmd, addr);

	if (iomap && pte_present(*pte))
		return -EFAULT;

	/* Create 2nd stage page table mapping - Level 3 */
	old_pte = *pte;
	kvm_set_pte(pte, *new_pte);
	if (pte_present(old_pte))
		kvm_tlb_flush_vmid(kvm);
	else
		get_page(virt_to_page(pte));

	return 0;
}

/**
 * kvm_phys_addr_ioremap - map a device range to guest IPA
 *
 * @kvm:	The KVM pointer
 * @guest_ipa:	The IPA at which to insert the mapping
 * @pa:		The physical address of the device
 * @size:	The size of the mapping
 */
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size)
{
	phys_addr_t addr, end;
	int ret = 0;
	unsigned long pfn;
	struct kvm_mmu_memory_cache cache = { 0, };

	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
	pfn = __phys_to_pfn(pa);

	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE | L_PTE_S2_RDWR);

		ret = mmu_topup_memory_cache(&cache, 2, 2);
		if (ret)
			goto out;
		spin_lock(&kvm->mmu_lock);
		ret = stage2_set_pte(kvm, &cache, addr, &pte, true);
		spin_unlock(&kvm->mmu_lock);
		if (ret)
			goto out;

		pfn++;
	}

out:
	mmu_free_memory_cache(&cache);
	return ret;
}
||||
|
||||
static void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
|
||||
{
|
||||
/*
|
||||
* If we are going to insert an instruction page and the icache is
|
||||
* either VIPT or PIPT, there is a potential problem where the host
|
||||
* (or another VM) may have used the same page as this guest, and we
|
||||
* read incorrect data from the icache. If we're using a PIPT cache,
|
||||
* we can invalidate just that page, but if we are using a VIPT cache
|
||||
* we need to invalidate the entire icache - damn shame - as written
|
||||
* in the ARM ARM (DDI 0406C.b - Page B3-1393).
|
||||
*
|
||||
* VIVT caches are tagged using both the ASID and the VMID and doesn't
|
||||
* need any kind of flushing (DDI 0406C.b - Page B3-1392).
|
||||
*/
|
||||
if (icache_is_pipt()) {
|
||||
unsigned long hva = gfn_to_hva(kvm, gfn);
|
||||
__cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
|
||||
} else if (!icache_is_vivt_asid_tagged()) {
|
||||
/* any kind of VIPT cache */
|
||||
__flush_icache_all();
|
||||
}
|
||||
}
|
||||
|
||||
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
||||
gfn_t gfn, struct kvm_memory_slot *memslot,
|
||||
unsigned long fault_status)
|
||||
{
|
||||
pte_t new_pte;
|
||||
pfn_t pfn;
|
||||
int ret;
|
||||
bool write_fault, writable;
|
||||
unsigned long mmu_seq;
|
||||
struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
|
||||
|
||||
write_fault = kvm_is_write_fault(vcpu->arch.hsr);
|
||||
if (fault_status == FSC_PERM && !write_fault) {
|
||||
kvm_err("Unexpected L2 read permission error\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
/* We need minimum second+third level pages */
|
||||
ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
mmu_seq = vcpu->kvm->mmu_notifier_seq;
|
||||
/*
|
||||
* Ensure the read of mmu_notifier_seq happens before we call
|
||||
* gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
|
||||
* the page we just got a reference to gets unmapped before we have a
|
||||
* chance to grab the mmu_lock, which ensure that if the page gets
|
||||
* unmapped afterwards, the call to kvm_unmap_hva will take it away
|
||||
* from us again properly. This smp_rmb() interacts with the smp_wmb()
|
||||
* in kvm_mmu_notifier_invalidate_<page|range_end>.
|
||||
*/
|
||||
smp_rmb();
|
||||
|
||||
pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
|
||||
if (is_error_pfn(pfn))
|
||||
return -EFAULT;
|
||||
|
||||
new_pte = pfn_pte(pfn, PAGE_S2);
|
||||
coherent_icache_guest_page(vcpu->kvm, gfn);
|
||||
|
||||
spin_lock(&vcpu->kvm->mmu_lock);
|
||||
if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
|
||||
goto out_unlock;
|
||||
if (writable) {
|
||||
pte_val(new_pte) |= L_PTE_S2_RDWR;
|
||||
kvm_set_pfn_dirty(pfn);
|
||||
}
|
||||
stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false);
|
||||
|
||||
out_unlock:
|
||||
spin_unlock(&vcpu->kvm->mmu_lock);
|
||||
kvm_release_pfn_clean(pfn);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_handle_guest_abort - handles all 2nd stage aborts
|
||||
* @vcpu: the VCPU pointer
|
||||
* @run: the kvm_run structure
|
||||
*
|
||||
* Any abort that gets to the host is almost guaranteed to be caused by a
|
||||
* missing second stage translation table entry, which can mean that either the
|
||||
* guest simply needs more memory and we must allocate an appropriate page or it
|
||||
* can mean that the guest tried to access I/O memory, which is emulated by user
|
||||
* space. The distinction is based on the IPA causing the fault and whether this
|
||||
* memory region has been registered as standard RAM by user space.
|
||||
*/
|
||||
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
||||
{
|
||||
unsigned long hsr_ec;
|
||||
unsigned long fault_status;
|
||||
phys_addr_t fault_ipa;
|
||||
struct kvm_memory_slot *memslot;
|
||||
bool is_iabt;
|
||||
gfn_t gfn;
|
||||
int ret, idx;
|
||||
|
||||
hsr_ec = vcpu->arch.hsr >> HSR_EC_SHIFT;
|
||||
is_iabt = (hsr_ec == HSR_EC_IABT);
|
||||
fault_ipa = ((phys_addr_t)vcpu->arch.hpfar & HPFAR_MASK) << 8;
|
||||
|
||||
trace_kvm_guest_fault(*vcpu_pc(vcpu), vcpu->arch.hsr,
|
||||
vcpu->arch.hxfar, fault_ipa);
|
||||
|
||||
/* Check the stage-2 fault is trans. fault or write fault */
|
||||
fault_status = (vcpu->arch.hsr & HSR_FSC_TYPE);
|
||||
if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
|
||||
kvm_err("Unsupported fault status: EC=%#lx DFCS=%#lx\n",
|
||||
hsr_ec, fault_status);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
idx = srcu_read_lock(&vcpu->kvm->srcu);
|
||||
|
||||
gfn = fault_ipa >> PAGE_SHIFT;
|
||||
if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
|
||||
if (is_iabt) {
|
||||
/* Prefetch Abort on I/O address */
|
||||
kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
|
||||
ret = 1;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
if (fault_status != FSC_FAULT) {
|
||||
kvm_err("Unsupported fault status on io memory: %#lx\n",
|
||||
fault_status);
|
||||
ret = -EFAULT;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/* Adjust page offset */
|
||||
fault_ipa |= vcpu->arch.hxfar & ~PAGE_MASK;
|
||||
ret = io_mem_abort(vcpu, run, fault_ipa);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
memslot = gfn_to_memslot(vcpu->kvm, gfn);
|
||||
if (!memslot->user_alloc) {
|
||||
kvm_err("non user-alloc memslots not supported\n");
|
||||
ret = -EINVAL;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
|
||||
if (ret == 0)
|
||||
ret = 1;
|
||||
out_unlock:
|
||||
srcu_read_unlock(&vcpu->kvm->srcu, idx);
|
||||
return ret;
|
||||
}

static void handle_hva_to_gpa(struct kvm *kvm,
			      unsigned long start,
			      unsigned long end,
			      void (*handler)(struct kvm *kvm,
					      gpa_t gpa, void *data),
			      void *data)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);

	/* we only care about the pages that the guest sees */
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;

		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			gpa_t gpa = gfn << PAGE_SHIFT;
			handler(kvm, gpa, data);
		}
	}
}
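
hva_to_gfn_memslot() itself is not shown in this hunk. The sketch below spells out the slot-relative arithmetic it is assumed to perform (the slot's base gfn plus the page index of the hva within the slot); the memslot values are entirely hypothetical.

/* Hedged sketch of the hva -> gfn mapping assumed by handle_hva_to_gpa();
 * sketch_memslot and its values are hypothetical, and SKETCH_PAGE_SHIFT
 * stands in for PAGE_SHIFT. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12

struct sketch_memslot {
	uint64_t base_gfn;		/* first guest frame backed by the slot */
	unsigned long userspace_addr;	/* start of the backing hva range */
	unsigned long npages;
};

static uint64_t sketch_hva_to_gfn(unsigned long hva,
				  const struct sketch_memslot *slot)
{
	return slot->base_gfn +
	       ((hva - slot->userspace_addr) >> SKETCH_PAGE_SHIFT);
}

int main(void)
{
	struct sketch_memslot slot = {
		.base_gfn = 0x80000,	/* guest RAM based at IPA 0x80000000 */
		.userspace_addr = 0x40000000UL,
		.npages = 256,
	};
	unsigned long hva = slot.userspace_addr + 5 * 4096 + 42;

	printf("hva %#lx -> gfn %#llx\n", hva,
	       (unsigned long long)sketch_hva_to_gfn(hva, &slot));
	return 0;
}

The gfn printed is 0x80005: page five of the slot, with the offset inside the page discarded, matching the gfn/gfn_end bounds the loop above iterates over.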

static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
	kvm_tlb_flush_vmid(kvm);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	unsigned long end = hva + PAGE_SIZE;

	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva(hva);
	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end)
{
	if (!kvm->arch.pgd)
		return 0;

	trace_kvm_unmap_hva_range(start, end);
	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
	return 0;
}

static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
{
	pte_t *pte = (pte_t *)data;

	stage2_set_pte(kvm, NULL, gpa, pte, false);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	unsigned long end = hva + PAGE_SIZE;
	pte_t stage2_pte;

	if (!kvm->arch.pgd)
		return;

	trace_kvm_set_spte_hva(hva);
	stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

phys_addr_t kvm_mmu_get_httbr(void)
{
	VM_BUG_ON(!virt_addr_valid(hyp_pgd));
	return virt_to_phys(hyp_pgd);
}

int kvm_mmu_init(void)
{
	if (!hyp_pgd) {
		kvm_err("Hyp mode PGD not allocated\n");
		return -ENOMEM;
	}

	return 0;
}

/**
 * kvm_clear_hyp_idmap - remove all idmaps from the hyp pgd
 *
 * Free the underlying pmds for all pgds in range and clear the pgds (but
 * don't free them) afterwards.
 */
void kvm_clear_hyp_idmap(void)
{
	unsigned long addr, end;
	unsigned long next;
	pgd_t *pgd = hyp_pgd;
	pud_t *pud;
	pmd_t *pmd;

	addr = virt_to_phys(__hyp_idmap_text_start);
	end = virt_to_phys(__hyp_idmap_text_end);

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		pud = pud_offset(pgd, addr);
		pmd = pmd_offset(pud, addr);

		pud_clear(pud);
		clean_pmd_entry(pmd);
		pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
	} while (pgd++, addr = next, addr < end);
}
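
The do/while above is the usual pgd range-walk idiom, and its correctness hangs on pgd_addr_end() clamping each step. A self-contained sketch of that clamping follows; the 21-bit shift matches classic (non-LPAE) ARM 2 MiB pgd entries and is illustrative only.

/* Hedged sketch of the pgd range-walk pattern used by kvm_clear_hyp_idmap();
 * SKETCH_PGDIR_SHIFT is an assumed value, and sketch_pgd_addr_end() mirrors
 * what pgd_addr_end() is expected to do. */
#include <stdio.h>

#define SKETCH_PGDIR_SHIFT	21
#define SKETCH_PGDIR_SIZE	(1UL << SKETCH_PGDIR_SHIFT)
#define SKETCH_PGDIR_MASK	(~(SKETCH_PGDIR_SIZE - 1))

/* Next pgd boundary, or 'end' on the final iteration. */
static unsigned long sketch_pgd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + SKETCH_PGDIR_SIZE) & SKETCH_PGDIR_MASK;
	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	unsigned long addr = 0x001ff000, end = 0x00601000, next;

	do {
		next = sketch_pgd_addr_end(addr, end);
		printf("pgd entry covers [%#lx, %#lx)\n", addr, next);
	} while (addr = next, addr < end);
	return 0;
}

Each printed range stays inside a single pgd entry, so the clear/free in the loop body never touches an entry the walk has not yet reached.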
108
arch/arm/kvm/psci.c
Normal file
@@ -0,0 +1,108 @@
/*
 * Copyright (C) 2012 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <linux/wait.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_psci.h>

/*
 * This is an implementation of the Power State Coordination Interface
 * as described in ARM document number ARM DEN 0022A.
 */

static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pause = true;
}

static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
	struct kvm *kvm = source_vcpu->kvm;
	struct kvm_vcpu *vcpu;
	wait_queue_head_t *wq;
	unsigned long cpu_id;
	phys_addr_t target_pc;

	cpu_id = *vcpu_reg(source_vcpu, 1);
	if (vcpu_mode_is_32bit(source_vcpu))
		cpu_id &= ~((u32) 0);

	if (cpu_id >= atomic_read(&kvm->online_vcpus))
		return KVM_PSCI_RET_INVAL;

	target_pc = *vcpu_reg(source_vcpu, 2);

	vcpu = kvm_get_vcpu(kvm, cpu_id);

	wq = kvm_arch_vcpu_wq(vcpu);
	if (!waitqueue_active(wq))
		return KVM_PSCI_RET_INVAL;

	kvm_reset_vcpu(vcpu);

	/* Gracefully handle Thumb2 entry point */
	if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
		target_pc &= ~((phys_addr_t) 1);
		vcpu_set_thumb(vcpu);
	}

	*vcpu_pc(vcpu) = target_pc;
	vcpu->arch.pause = false;
	smp_mb();		/* Make sure the above is visible */

	wake_up_interruptible(wq);

	return KVM_PSCI_RET_SUCCESS;
}

/**
 * kvm_psci_call - handle PSCI call if r0 value is in range
 * @vcpu: Pointer to the VCPU struct
 *
 * Handle PSCI calls from guests through traps from HVC or SMC instructions.
 * The calling convention is similar to SMC calls to the secure world, where
 * the function number is placed in r0. This function returns true if the
 * function number specified in r0 is within the PSCI range, and false
 * otherwise.
 */
bool kvm_psci_call(struct kvm_vcpu *vcpu)
{
	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
	unsigned long val;

	switch (psci_fn) {
	case KVM_PSCI_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = KVM_PSCI_RET_SUCCESS;
		break;
	case KVM_PSCI_FN_CPU_ON:
		val = kvm_psci_vcpu_on(vcpu);
		break;
	case KVM_PSCI_FN_CPU_SUSPEND:
	case KVM_PSCI_FN_MIGRATE:
		val = KVM_PSCI_RET_NI;
		break;

	default:
		return false;
	}

	*vcpu_reg(vcpu, 0) = val;
	return true;
}
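
For reference, a hedged guest-side counterpart to this handler: issuing CPU_ON through the HVC conduit. The snippet assumes an ARMv7 toolchain (e.g. arm-none-eabi-gcc) with the virtualization extensions available to the assembler; the function ID constant is a placeholder, since real guests take the ID from firmware or, with this series, from KVM's kvm_psci.h.

/* Hedged sketch of a 32-bit guest invoking PSCI CPU_ON via HVC #0.
 * SKETCH_PSCI_FN_CPU_ON is a placeholder value, not the real function ID. */
#define SKETCH_PSCI_FN_CPU_ON	0x95c1ba60UL	/* placeholder function ID */

unsigned long sketch_psci_cpu_on(unsigned long cpu_id,
				 unsigned long entry_point)
{
	register unsigned long r0 asm("r0") = SKETCH_PSCI_FN_CPU_ON;
	register unsigned long r1 asm("r1") = cpu_id;
	register unsigned long r2 asm("r2") = entry_point;

	asm volatile(".arch_extension virt\n"
		     "hvc #0"			/* immediate must be #0 */
		     : "+r" (r0)
		     : "r" (r1), "r" (r2)
		     : "memory");
	return r0;				/* PSCI return code comes back in r0 */
}

The register placement mirrors what kvm_psci_call() reads back out: the function number from r0, the target vcpu index from r1, and the entry point from r2, with the result written to r0 before the guest resumes.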
74
arch/arm/kvm/reset.c
Normal file
@@ -0,0 +1,74 @@
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>

#include <asm/unified.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_coproc.h>

/******************************************************************************
 * Cortex-A15 Reset Values
 */

static const int a15_max_cpu_idx = 3;

static struct kvm_regs a15_regs_reset = {
	.usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
};

/*******************************************************************************
 * Exported reset function
 */

/**
 * kvm_reset_vcpu - sets core registers and cp15 registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvm_regs *cpu_reset;

	switch (vcpu->arch.target) {
	case KVM_ARM_TARGET_CORTEX_A15:
		if (vcpu->vcpu_id > a15_max_cpu_idx)
			return -EINVAL;
		cpu_reset = &a15_regs_reset;
		vcpu->arch.midr = read_cpuid_id();
		break;
	default:
		return -ENODEV;
	}

	/* Reset core registers */
	memcpy(&vcpu->arch.regs, cpu_reset, sizeof(vcpu->arch.regs));

	/* Reset CP15 registers */
	kvm_reset_coprocs(vcpu);

	return 0;
}
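
The reset CPSR above is Supervisor mode with the A, I and F bits masked. A small sketch of the composition, with PSR bit values restated from the ARM ARM for illustration (assumed, not taken from this patch's headers):

/* Hedged sketch: composing the Cortex-A15 reset CPSR used above. */
#include <stdio.h>

#define SKETCH_SVC_MODE		0x13	/* Supervisor mode bits M[4:0] */
#define SKETCH_PSR_A_BIT	0x100	/* asynchronous abort mask */
#define SKETCH_PSR_I_BIT	0x080	/* IRQ mask */
#define SKETCH_PSR_F_BIT	0x040	/* FIQ mask */

int main(void)
{
	unsigned long cpsr = SKETCH_SVC_MODE | SKETCH_PSR_A_BIT |
			     SKETCH_PSR_I_BIT | SKETCH_PSR_F_BIT;

	printf("reset CPSR = %#lx\n", cpsr);	/* prints 0x1d3 */
	return 0;
}

0x1d3 is the architecturally defined ARM reset CPSR, which is why the reset table carries exactly this value.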
235
arch/arm/kvm/trace.h
Normal file
@@ -0,0 +1,235 @@
#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KVM_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

/*
 * Tracepoints for entry/exit to guest
 */
TRACE_EVENT(kvm_entry,
	TP_PROTO(unsigned long vcpu_pc),
	TP_ARGS(vcpu_pc),

	TP_STRUCT__entry(
		__field( unsigned long, vcpu_pc )
	),

	TP_fast_assign(
		__entry->vcpu_pc = vcpu_pc;
	),

	TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
);

TRACE_EVENT(kvm_exit,
	TP_PROTO(unsigned long vcpu_pc),
	TP_ARGS(vcpu_pc),

	TP_STRUCT__entry(
		__field( unsigned long, vcpu_pc )
	),

	TP_fast_assign(
		__entry->vcpu_pc = vcpu_pc;
	),

	TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
);

TRACE_EVENT(kvm_guest_fault,
	TP_PROTO(unsigned long vcpu_pc, unsigned long hsr,
		 unsigned long hxfar,
		 unsigned long long ipa),
	TP_ARGS(vcpu_pc, hsr, hxfar, ipa),

	TP_STRUCT__entry(
		__field( unsigned long, vcpu_pc )
		__field( unsigned long, hsr )
		__field( unsigned long, hxfar )
		__field( unsigned long long, ipa )
	),

	TP_fast_assign(
		__entry->vcpu_pc = vcpu_pc;
		__entry->hsr = hsr;
		__entry->hxfar = hxfar;
		__entry->ipa = ipa;
	),

	TP_printk("guest fault at PC %#08lx (hxfar %#08lx, "
		  "ipa %#16llx, hsr %#08lx)",
		  __entry->vcpu_pc, __entry->hxfar,
		  __entry->ipa, __entry->hsr)
);

TRACE_EVENT(kvm_irq_line,
	TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level),
	TP_ARGS(type, vcpu_idx, irq_num, level),

	TP_STRUCT__entry(
		__field( unsigned int, type )
		__field( int, vcpu_idx )
		__field( int, irq_num )
		__field( int, level )
	),

	TP_fast_assign(
		__entry->type = type;
		__entry->vcpu_idx = vcpu_idx;
		__entry->irq_num = irq_num;
		__entry->level = level;
	),

	TP_printk("Inject %s interrupt (%d), vcpu->idx: %d, num: %d, level: %d",
		  (__entry->type == KVM_ARM_IRQ_TYPE_CPU) ? "CPU" :
		  (__entry->type == KVM_ARM_IRQ_TYPE_PPI) ? "VGIC PPI" :
		  (__entry->type == KVM_ARM_IRQ_TYPE_SPI) ? "VGIC SPI" : "UNKNOWN",
		  __entry->type, __entry->vcpu_idx, __entry->irq_num, __entry->level)
);

TRACE_EVENT(kvm_mmio_emulate,
	TP_PROTO(unsigned long vcpu_pc, unsigned long instr,
		 unsigned long cpsr),
	TP_ARGS(vcpu_pc, instr, cpsr),

	TP_STRUCT__entry(
		__field( unsigned long, vcpu_pc )
		__field( unsigned long, instr )
		__field( unsigned long, cpsr )
	),

	TP_fast_assign(
		__entry->vcpu_pc = vcpu_pc;
		__entry->instr = instr;
		__entry->cpsr = cpsr;
	),

	TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)",
		  __entry->vcpu_pc, __entry->instr, __entry->cpsr)
);

/* Architecturally implementation defined CP15 register access */
TRACE_EVENT(kvm_emulate_cp15_imp,
	TP_PROTO(unsigned long Op1, unsigned long Rt1, unsigned long CRn,
		 unsigned long CRm, unsigned long Op2, bool is_write),
	TP_ARGS(Op1, Rt1, CRn, CRm, Op2, is_write),

	TP_STRUCT__entry(
		__field( unsigned int, Op1 )
		__field( unsigned int, Rt1 )
		__field( unsigned int, CRn )
		__field( unsigned int, CRm )
		__field( unsigned int, Op2 )
		__field( bool, is_write )
	),

	TP_fast_assign(
		__entry->is_write = is_write;
		__entry->Op1 = Op1;
		__entry->Rt1 = Rt1;
		__entry->CRn = CRn;
		__entry->CRm = CRm;
		__entry->Op2 = Op2;
	),

	TP_printk("Implementation defined CP15: %s\tp15, %u, r%u, c%u, c%u, %u",
		  (__entry->is_write) ? "mcr" : "mrc",
		  __entry->Op1, __entry->Rt1, __entry->CRn,
		  __entry->CRm, __entry->Op2)
);

TRACE_EVENT(kvm_wfi,
	TP_PROTO(unsigned long vcpu_pc),
	TP_ARGS(vcpu_pc),

	TP_STRUCT__entry(
		__field( unsigned long, vcpu_pc )
	),

	TP_fast_assign(
		__entry->vcpu_pc = vcpu_pc;
	),

	TP_printk("guest executed wfi at: 0x%08lx", __entry->vcpu_pc)
);

TRACE_EVENT(kvm_unmap_hva,
	TP_PROTO(unsigned long hva),
	TP_ARGS(hva),

	TP_STRUCT__entry(
		__field( unsigned long, hva )
	),

	TP_fast_assign(
		__entry->hva = hva;
	),

	TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva)
);

TRACE_EVENT(kvm_unmap_hva_range,
	TP_PROTO(unsigned long start, unsigned long end),
	TP_ARGS(start, end),

	TP_STRUCT__entry(
		__field( unsigned long, start )
		__field( unsigned long, end )
	),

	TP_fast_assign(
		__entry->start = start;
		__entry->end = end;
	),

	TP_printk("mmu notifier unmap range: %#08lx -- %#08lx",
		  __entry->start, __entry->end)
);

TRACE_EVENT(kvm_set_spte_hva,
	TP_PROTO(unsigned long hva),
	TP_ARGS(hva),

	TP_STRUCT__entry(
		__field( unsigned long, hva )
	),

	TP_fast_assign(
		__entry->hva = hva;
	),

	TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
);

TRACE_EVENT(kvm_hvc,
	TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
	TP_ARGS(vcpu_pc, r0, imm),

	TP_STRUCT__entry(
		__field( unsigned long, vcpu_pc )
		__field( unsigned long, r0 )
		__field( unsigned long, imm )
	),

	TP_fast_assign(
		__entry->vcpu_pc = vcpu_pc;
		__entry->r0 = r0;
		__entry->imm = imm;
	),

	TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx)",
		  __entry->vcpu_pc, __entry->r0, __entry->imm)
);

#endif /* _TRACE_KVM_H */

#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH arch/arm/kvm
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace

/* This part must be outside protection */
#include <trace/define_trace.h>

@@ -629,8 +629,9 @@ config ARM_THUMBEE
 	  make use of it. Say N for code that can run on CPUs without ThumbEE.

 config ARM_VIRT_EXT
-	bool "Native support for the ARM Virtualization Extensions"
-	depends on MMU && CPU_V7
+	bool
+	depends on MMU
+	default y if CPU_V7
 	help
 	  Enable the kernel to make use of the ARM Virtualization
 	  Extensions to install hypervisors without run-time firmware
@@ -640,11 +641,6 @@ config ARM_VIRT_EXT
 	  use of this feature. Refer to Documentation/arm/Booting for
 	  details.

-	  It is safe to enable this option even if the kernel may not be
-	  booted in HYP mode, may not have support for the
-	  virtualization extensions, or may be booted with a
-	  non-compliant bootloader.
-
 config SWP_EMULATE
 	bool "Emulate SWP/SWPB instructions"
 	depends on !CPU_USE_DOMAINS && CPU_V7
@@ -1,4 +1,6 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/slab.h>
+
 #include <asm/cputype.h>
 #include <asm/idmap.h>
@@ -6,6 +8,7 @@
 #include <asm/pgtable.h>
 #include <asm/sections.h>
 #include <asm/system_info.h>
+#include <asm/virt.h>

 pgd_t *idmap_pgd;

@@ -59,11 +62,17 @@ static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
 	} while (pud++, addr = next, addr != end);
 }

-static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
+static void identity_mapping_add(pgd_t *pgd, const char *text_start,
+				 const char *text_end, unsigned long prot)
 {
-	unsigned long prot, next;
+	unsigned long addr, end;
+	unsigned long next;
+
+	addr = virt_to_phys(text_start);
+	end = virt_to_phys(text_end);
+
+	prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;

-	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
 	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
 		prot |= PMD_BIT4;

@@ -74,28 +83,52 @@ static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long e
 	} while (pgd++, addr = next, addr != end);
 }

+#if defined(CONFIG_ARM_VIRT_EXT) && defined(CONFIG_ARM_LPAE)
+pgd_t *hyp_pgd;
+
+extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
+
+static int __init init_static_idmap_hyp(void)
+{
+	hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+	if (!hyp_pgd)
+		return -ENOMEM;
+
+	pr_info("Setting up static HYP identity map for 0x%p - 0x%p\n",
+		__hyp_idmap_text_start, __hyp_idmap_text_end);
+	identity_mapping_add(hyp_pgd, __hyp_idmap_text_start,
+			     __hyp_idmap_text_end, PMD_SECT_AP1);
+
+	return 0;
+}
+#else
+static int __init init_static_idmap_hyp(void)
+{
+	return 0;
+}
+#endif
+
 extern char __idmap_text_start[], __idmap_text_end[];

 static int __init init_static_idmap(void)
 {
-	phys_addr_t idmap_start, idmap_end;
+	int ret;

 	idmap_pgd = pgd_alloc(&init_mm);
 	if (!idmap_pgd)
 		return -ENOMEM;

-	/* Add an identity mapping for the physical address of the section. */
-	idmap_start = virt_to_phys((void *)__idmap_text_start);
-	idmap_end = virt_to_phys((void *)__idmap_text_end);
-
-	pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
-		(long long)idmap_start, (long long)idmap_end);
-	identity_mapping_add(idmap_pgd, idmap_start, idmap_end);
+	pr_info("Setting up static identity map for 0x%p - 0x%p\n",
+		__idmap_text_start, __idmap_text_end);
+	identity_mapping_add(idmap_pgd, __idmap_text_start,
+			     __idmap_text_end, 0);
+
+	ret = init_static_idmap_hyp();

 	/* Flush L1 for the hardware to see this page table content */
 	flush_cache_louis();

-	return 0;
+	return ret;
 }
 early_initcall(init_static_idmap);

@@ -57,6 +57,9 @@ static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
 static unsigned int ecc_mask __initdata = 0;
 pgprot_t pgprot_user;
 pgprot_t pgprot_kernel;
+pgprot_t pgprot_hyp_device;
+pgprot_t pgprot_s2;
+pgprot_t pgprot_s2_device;

 EXPORT_SYMBOL(pgprot_user);
 EXPORT_SYMBOL(pgprot_kernel);
@@ -66,34 +69,46 @@ struct cachepolicy {
 	unsigned int	cr_mask;
 	pmdval_t	pmd;
 	pteval_t	pte;
+	pteval_t	pte_s2;
 };

+#ifdef CONFIG_ARM_LPAE
+#define s2_policy(policy)	policy
+#else
+#define s2_policy(policy)	0
+#endif
+
 static struct cachepolicy cache_policies[] __initdata = {
 	{
 		.policy		= "uncached",
 		.cr_mask	= CR_W|CR_C,
 		.pmd		= PMD_SECT_UNCACHED,
 		.pte		= L_PTE_MT_UNCACHED,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
 	}, {
 		.policy		= "buffered",
 		.cr_mask	= CR_C,
 		.pmd		= PMD_SECT_BUFFERED,
 		.pte		= L_PTE_MT_BUFFERABLE,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),
 	}, {
 		.policy		= "writethrough",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WT,
 		.pte		= L_PTE_MT_WRITETHROUGH,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITETHROUGH),
 	}, {
 		.policy		= "writeback",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WB,
 		.pte		= L_PTE_MT_WRITEBACK,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
 	}, {
 		.policy		= "writealloc",
 		.cr_mask	= 0,
 		.pmd		= PMD_SECT_WBWA,
 		.pte		= L_PTE_MT_WRITEALLOC,
+		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),
 	}
 };

@@ -310,6 +325,7 @@ static void __init build_mem_type_table(void)
 	struct cachepolicy *cp;
 	unsigned int cr = get_cr();
 	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
+	pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
 	int cpu_arch = cpu_architecture();
 	int i;

@@ -421,6 +437,8 @@ static void __init build_mem_type_table(void)
 	 */
 	cp = &cache_policies[cachepolicy];
 	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
+	s2_pgprot = cp->pte_s2;
+	hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;

 	/*
 	 * ARMv6 and above have extended page tables.
@@ -444,6 +462,7 @@ static void __init build_mem_type_table(void)
 		user_pgprot |= L_PTE_SHARED;
 		kern_pgprot |= L_PTE_SHARED;
 		vecs_pgprot |= L_PTE_SHARED;
+		s2_pgprot |= L_PTE_SHARED;
 		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
 		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
 		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
@@ -498,6 +517,9 @@ static void __init build_mem_type_table(void)
 	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
 	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
 				 L_PTE_DIRTY | kern_pgprot);
+	pgprot_s2  = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
+	pgprot_s2_device  = __pgprot(s2_device_pgprot);
+	pgprot_hyp_device  = __pgprot(hyp_device_pgprot);

 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;

@@ -115,6 +115,7 @@ struct kvm_irq_level {
 	 * ACPI gsi notion of irq.
 	 * For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47..
 	 * For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23..
+	 * For ARM: See Documentation/virtual/kvm/api.txt
 	 */
 	union {
 		__u32 irq;
@@ -635,6 +636,7 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_IRQFD_RESAMPLE 82
 #define KVM_CAP_PPC_BOOKE_WATCHDOG 83
 #define KVM_CAP_PPC_HTAB_FD 84
+#define KVM_CAP_ARM_PSCI 87

 #ifdef KVM_CAP_IRQ_ROUTING

@@ -764,6 +766,11 @@ struct kvm_dirty_tlb {
 #define KVM_REG_SIZE_U512	0x0060000000000000ULL
 #define KVM_REG_SIZE_U1024	0x0070000000000000ULL

+struct kvm_reg_list {
+	__u64 n; /* number of regs */
+	__u64 reg[0];
+};
+
 struct kvm_one_reg {
 	__u64 id;
 	__u64 addr;
@@ -932,6 +939,8 @@ struct kvm_s390_ucas_mapping {
 #define KVM_SET_ONE_REG		  _IOW(KVMIO,  0xac, struct kvm_one_reg)
 /* VM is being stopped by host */
 #define KVM_KVMCLOCK_CTRL	  _IO(KVMIO,   0xad)
+#define KVM_ARM_VCPU_INIT	  _IOW(KVMIO,  0xae, struct kvm_vcpu_init)
+#define KVM_GET_REG_LIST	  _IOWR(KVMIO, 0xb0, struct kvm_reg_list)

 #define KVM_DEV_ASSIGN_ENABLE_IOMMU	(1 << 0)
 #define KVM_DEV_ASSIGN_PCI_2_3		(1 << 1)
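
The two ioctls added above pair up in userspace: KVM_ARM_VCPU_INIT targets the vcpu, after which KVM_GET_REG_LIST enumerates its registers. A hedged sketch of that flow (error handling trimmed; vcpu_fd is assumed to come from KVM_CREATE_VCPU, and SKETCH_TARGET_CORTEX_A15 stands in for the KVM_ARM_TARGET_CORTEX_A15 value defined elsewhere in this series):

/* Hedged userspace sketch of the new ARM vcpu ioctls; builds against an
 * ARM linux/kvm.h that carries the definitions added by this series. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define SKETCH_TARGET_CORTEX_A15 0	/* assumed target value */

int sketch_init_vcpu(int vcpu_fd)
{
	struct kvm_vcpu_init init;
	struct kvm_reg_list probe, *list;

	memset(&init, 0, sizeof(init));
	init.target = SKETCH_TARGET_CORTEX_A15;
	if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init) < 0)
		return -1;

	/* A too-small n fails with E2BIG but reports the required count... */
	probe.n = 0;
	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

	/* ...so a properly sized buffer can then fetch the register ids. */
	list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
	if (!list)
		return -1;
	list->n = probe.n;
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) == 0)
		printf("vcpu exposes %llu registers\n",
		       (unsigned long long)list->n);
	free(list);
	return 0;
}

The probe-then-allocate pattern is the usual way to size the kvm_reg_list buffer, since the register count depends on the vcpu target.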