// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/opcodes.h>
#include <trace/events/kvm.h>

#include "trace.h"

#define VCPU_NR_MODES 6
#define VCPU_REG_OFFSET_USR 0
#define VCPU_REG_OFFSET_FIQ 1
#define VCPU_REG_OFFSET_IRQ 2
#define VCPU_REG_OFFSET_SVC 3
#define VCPU_REG_OFFSET_ABT 4
#define VCPU_REG_OFFSET_UND 5
#define REG_OFFSET(_reg) \
	(offsetof(struct kvm_regs, _reg) / sizeof(u32))

#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num])

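/*
 * Offsets, in u32 words, of each guest register within struct kvm_regs,
 * indexed by [mode offset][register number r0-r14]. Registers that are
 * not banked for a given mode simply reuse the USR copies.
 */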
static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
	/* USR/SYS Registers */
	[VCPU_REG_OFFSET_USR] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
	},

	/* FIQ Registers */
	[VCPU_REG_OFFSET_FIQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7),
		REG_OFFSET(fiq_regs[0]), /* r8 */
		REG_OFFSET(fiq_regs[1]), /* r9 */
		REG_OFFSET(fiq_regs[2]), /* r10 */
		REG_OFFSET(fiq_regs[3]), /* r11 */
		REG_OFFSET(fiq_regs[4]), /* r12 */
		REG_OFFSET(fiq_regs[5]), /* r13 */
		REG_OFFSET(fiq_regs[6]), /* r14 */
	},

	/* IRQ Registers */
	[VCPU_REG_OFFSET_IRQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(irq_regs[0]), /* r13 */
		REG_OFFSET(irq_regs[1]), /* r14 */
	},

	/* SVC Registers */
	[VCPU_REG_OFFSET_SVC] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(svc_regs[0]), /* r13 */
		REG_OFFSET(svc_regs[1]), /* r14 */
	},

	/* ABT Registers */
	[VCPU_REG_OFFSET_ABT] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(abt_regs[0]), /* r13 */
		REG_OFFSET(abt_regs[1]), /* r14 */
	},

	/* UND Registers */
	[VCPU_REG_OFFSET_UND] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(und_regs[0]), /* r13 */
		REG_OFFSET(und_regs[1]), /* r14 */
	},
};

/*
 * Return a pointer to the register number valid in the current mode of
 * the virtual CPU.
 */
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
{
	unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs;
	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;

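	/*
	 * Convert the CPSR mode field into an index into vcpu_reg_offsets:
	 * USR/FIQ/IRQ/SVC map to 0-3 once MODE32_BIT is cleared; the
	 * remaining modes are translated explicitly below.
	 */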
	switch (mode) {
	case USR_MODE...SVC_MODE:
		mode &= ~MODE32_BIT; /* 0 ... 3 */
		break;

	case ABT_MODE:
		mode = VCPU_REG_OFFSET_ABT;
		break;

	case UND_MODE:
		mode = VCPU_REG_OFFSET_UND;
		break;

	case SYSTEM_MODE:
		mode = VCPU_REG_OFFSET_USR;
		break;

	default:
		BUG();
	}

	return reg_array + vcpu_reg_offsets[mode][reg_num];
}

/*
 * Return the SPSR for the current mode of the virtual CPU.
 */
unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu)
{
	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
	switch (mode) {
	case SVC_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr;
	case ABT_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr;
	case UND_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr;
	case IRQ_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr;
	case FIQ_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr;
	default:
		BUG();
	}
}

/******************************************************************************
 * Inject exceptions into the guest
 */

/**
 * kvm_inject_vabt - inject an async abort / SError into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
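	/* Pend a virtual abort for the guest by setting HCR.VA. */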
	*vcpu_hcr(vcpu) |= HCR_VA;
}