// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/sstep.h>
#include "timing.h"
#include "trace.h"

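/*
 * Facility-availability helpers: if the guest currently has the relevant
 * MSR bit clear, queue the matching "facility unavailable" interrupt and
 * return true so the caller can bail out with EMULATE_DONE and let the
 * guest take the interrupt instead of emulating the access.
 */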
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_ALTIVEC
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
		kvmppc_core_queue_vec_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_ALTIVEC */

/*
 * XXX to do:
 * lfiwax, lfiwzx
 * vector loads and stores
 *
 * Instructions that trap when used on cache-inhibited mappings
 * are not emulated here: multiple and string instructions,
 * lq/stq, and the load-reserve/store-conditional instructions.
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 inst;
	int ra, rs, rt;
	enum emulation_result emulated = EMULATE_FAIL;
	int advance = 1;
	struct instruction_op op;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	ra = get_ra(inst);
	rs = get_rs(inst);
	rt = get_rt(inst);

	/*
	 * Reset the MMIO emulation state; the individual cases below fill
	 * it in as needed before handing the access to the
	 * kvmppc_handle_*() helpers.
	 */
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;
	vcpu->arch.mmio_vmx_copy_nums = 0;
	vcpu->arch.mmio_vmx_offset = 0;
	vcpu->arch.mmio_host_swabbed = 0;

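	/*
	 * analyse_instr() (from asm/sstep.h) decodes the instruction into
	 * 'op'; the switch below maps each decoded load/store type onto the
	 * matching kvmppc_handle_*() MMIO access.
	 */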
	emulated = EMULATE_FAIL;
	vcpu->arch.regs.msr = vcpu->arch.shared->msr;
	if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
		int type = op.type & INSTR_TYPE_MASK;
		int size = GETSIZE(op.type);

		switch (type) {
		case LOAD: {
			int instr_byte_swap = op.type & BYTEREV;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
						op.reg, size, !instr_byte_swap);
			else
				emulated = kvmppc_handle_load(run, vcpu,
						op.reg, size, !instr_byte_swap);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
		}
#ifdef CONFIG_PPC_FPU
		case LOAD_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.type & SIGNEXT)
				emulated = kvmppc_handle_loads(run, vcpu,
					KVM_MMIO_REG_FPR|op.reg, size, 1);
			else
				emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|op.reg, size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case LOAD_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

			if (size == 16) { /* lvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* lvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* lvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* lvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

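			/*
			 * Index of the accessed element within its naturally
			 * aligned 16-byte quadword, in units of the access
			 * size.
			 */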
			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

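			/*
			 * A 16-byte lvx is emulated as two 8-byte MMIO
			 * accesses; the smaller element loads go out as a
			 * single access of their own size.
			 */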
			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_load(run,
						vcpu, KVM_MMIO_REG_VMX|op.reg,
						8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_load(run, vcpu,
						KVM_MMIO_REG_VMX|op.reg,
						size, 1);
			}

			break;
#endif
#ifdef CONFIG_VSX
		case LOAD_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

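			/*
			 * Pick the copy type from the element size; the
			 * SPLAT variants replicate the single loaded element
			 * across the target register.
			 */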
			if (op.element_size == 8) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			} else if (op.element_size == 4) {
				if (op.vsx_flags & VSX_SPLAT)
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD_LOAD_DUMP;
				else
					vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			} else
				break;

			if (size < op.element_size) {
				/* precision convert case: lxsspx, etc */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* lxvw4x, lxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|op.reg, io_size_each,
					1, op.type & SIGNEXT);
			break;
		}
#endif
		case STORE:
			/* If byte reversal is needed, op.val has already
			 * been reversed by analyse_instr().
			 */
			emulated = kvmppc_handle_store(run, vcpu, op.val,
					size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#ifdef CONFIG_PPC_FPU
		case STORE_FP:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;

			/* The FP registers need to be flushed so that
			 * kvmppc_handle_store() can read actual FP vals
			 * from vcpu->arch.
			 */
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_FP);

			if (op.type & FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, op.reg), size, 1);

			if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
				kvmppc_set_gpr(vcpu, op.update_reg, op.ea);

			break;
#endif
#ifdef CONFIG_ALTIVEC
		case STORE_VMX:
			if (kvmppc_check_altivec_disabled(vcpu))
				return EMULATE_DONE;

			/* Hardware enforces alignment of VMX accesses. */
			vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
			vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);

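			/*
			 * Flush the guest's VMX registers to vcpu->arch so
			 * the store below reads up-to-date values.
			 */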
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VEC);
			if (size == 16) { /* stvx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_DWORD;
			} else if (size == 4) { /* stvewx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_WORD;
			} else if (size == 2) { /* stvehx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_HWORD;
			} else if (size == 1) { /* stvebx */
				vcpu->arch.mmio_copy_type =
						KVMPPC_VMX_COPY_BYTE;
			} else
				break;

			vcpu->arch.mmio_vmx_offset =
				(vcpu->arch.vaddr_accessed & 0xf)/size;

			if (size == 16) {
				vcpu->arch.mmio_vmx_copy_nums = 2;
				emulated = kvmppc_handle_vmx_store(run,
						vcpu, op.reg, 8, 1);
			} else {
				vcpu->arch.mmio_vmx_copy_nums = 1;
				emulated = kvmppc_handle_vmx_store(run,
						vcpu, op.reg, size, 1);
			}

			break;
#endif
#ifdef CONFIG_VSX
		case STORE_VSX: {
			int io_size_each;

			if (op.vsx_flags & VSX_CHECK_VEC) {
				if (kvmppc_check_altivec_disabled(vcpu))
					return EMULATE_DONE;
			} else {
				if (kvmppc_check_vsx_disabled(vcpu))
					return EMULATE_DONE;
			}

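			/*
			 * As above: flush the guest's VSX registers to
			 * vcpu->arch before reading them for the store.
			 */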
			if (vcpu->kvm->arch.kvm_ops->giveup_ext)
				vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
						MSR_VSX);

			if (op.vsx_flags & VSX_FPCONV)
				vcpu->arch.mmio_sp64_extend = 1;

			if (op.element_size == 8)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_DWORD;
			else if (op.element_size == 4)
				vcpu->arch.mmio_copy_type =
						KVMPPC_VSX_COPY_WORD;
			else
				break;

			if (size < op.element_size) {
				/* precision conversion case, like stxsspx */
				vcpu->arch.mmio_vsx_copy_nums = 1;
				io_size_each = size;
			} else { /* stxvw4x, stxvd2x, etc */
				vcpu->arch.mmio_vsx_copy_nums =
					size/op.element_size;
				io_size_each = op.element_size;
			}

			emulated = kvmppc_handle_vsx_store(run, vcpu,
					op.reg, io_size_each, 1);
			break;
		}
#endif
		case CACHEOP:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence.
			 */
			emulated = EMULATE_DONE;
			break;
		default:
			break;
		}
	}

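	/*
	 * On failure, leave the PC pointing at the instruction and send the
	 * guest a program interrupt.
	 */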
	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}