// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>

#include "trace.h"

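/*
 * kvm_mmio_write_buf -- Store an MMIO value into a data buffer
 *
 * @buf:  destination buffer, at least @len bytes long
 * @len:  access size in bytes (1, 2, 4 or 8)
 * @data: value to store, truncated to @len bytes
 *
 * The value is copied in host byte order, matching what
 * kvm_mmio_read_buf() expects on the way back.
 */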
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data)
{
	void *datap = NULL;
	union {
		u8	byte;
		u16	hword;
		u32	word;
		u64	dword;
	} tmp;

	switch (len) {
	case 1:
		tmp.byte	= data;
		datap		= &tmp.byte;
		break;
	case 2:
		tmp.hword	= data;
		datap		= &tmp.hword;
		break;
	case 4:
		tmp.word	= data;
		datap		= &tmp.word;
		break;
	case 8:
		tmp.dword	= data;
		datap		= &tmp.dword;
		break;
	}

	memcpy(buf, datap, len);
}

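/*
 * kvm_mmio_read_buf -- Load an MMIO value from a data buffer
 *
 * @buf: source buffer, as filled in by userspace or by in-kernel emulation
 * @len: access size in bytes (1, 2, 4 or 8)
 *
 * Returns the value zero-extended to an unsigned long; sign extension and
 * endianness conversion are left to the caller.
 */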
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len)
{
	unsigned long data = 0;
	union {
		u16	hword;
		u32	word;
		u64	dword;
	} tmp;

	switch (len) {
	case 1:
		data = *(u8 *)buf;
		break;
	case 2:
		memcpy(&tmp.hword, buf, len);
		data = tmp.hword;
		break;
	case 4:
		memcpy(&tmp.word, buf, len);
		data = tmp.word;
		break;
	case 8:
		memcpy(&tmp.dword, buf, len);
		data = tmp.dword;
		break;
	}

	return data;
}

/**
 * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
 *			     or in-kernel IO emulation
 *
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 */
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long data;
	unsigned int len;
	int mask;

	/* Detect an already handled MMIO return */
	if (unlikely(!vcpu->mmio_needed))
		return 0;

	vcpu->mmio_needed = 0;

	if (!kvm_vcpu_dabt_iswrite(vcpu)) {
		len = kvm_vcpu_dabt_get_as(vcpu);
		data = kvm_mmio_read_buf(run->mmio.data, len);

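		/*
		 * Sign-extend narrow loads: with mask set to the sign bit
		 * of the access, (data ^ mask) - mask propagates that bit
		 * into all higher bits of data.
		 */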
		if (kvm_vcpu_dabt_issext(vcpu) &&
		    len < sizeof(unsigned long)) {
			mask = 1U << ((len * 8) - 1);
			data = (data ^ mask) - mask;
		}

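		/* If SF is clear the destination is a 32-bit register: drop the upper bits. */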
		if (!kvm_vcpu_dabt_issf(vcpu))
			data = data & 0xffffffff;

		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
			       &data);
		data = vcpu_data_host_to_guest(vcpu, data, len);
		vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu), data);
	}

	/*
	 * The MMIO instruction is emulated and should not be re-executed
	 * in the guest.
	 */
	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

	return 0;
}

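/*
 * io_mem_abort -- Handle a guest data abort that hit an MMIO region
 *
 * @vcpu:      The VCPU pointer
 * @run:       The VCPU run struct
 * @fault_ipa: The faulting intermediate physical address
 *
 * Returns 1 if the access was completed in the kernel (or a fault was
 * injected back into the guest), 0 if userspace must finish the access
 * (run is set up for KVM_EXIT_MMIO or KVM_EXIT_ARM_NISV), or a negative
 * error code.
 */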
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
		 phys_addr_t fault_ipa)
{
	unsigned long data;
	unsigned long rt;
	int ret;
	bool is_write;
	int len;
	u8 data_buf[8];

	/*
	 * No valid syndrome? Ask userspace for help if it has
	 * volunteered to do so, and bail out otherwise.
	 */
	if (!kvm_vcpu_dabt_isvalid(vcpu)) {
		if (vcpu->kvm->arch.return_nisv_io_abort_to_user) {
			run->exit_reason = KVM_EXIT_ARM_NISV;
			run->arm_nisv.esr_iss = kvm_vcpu_dabt_iss_nisv_sanitized(vcpu);
			run->arm_nisv.fault_ipa = fault_ipa;
			return 0;
		}

		kvm_pr_unimpl("Data abort outside memslots with no valid syndrome info\n");
		return -ENOSYS;
	}

	/* Page table accesses IO mem: tell guest to fix its TTBR */
	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		return 1;
	}

	/*
	 * Prepare the MMIO operation. First decode the syndrome data we get
	 * from the CPU. Then check whether some in-kernel emulation is
	 * responsible, otherwise let user space do its magic.
	 */
	is_write = kvm_vcpu_dabt_iswrite(vcpu);
	len = kvm_vcpu_dabt_get_as(vcpu);
	rt = kvm_vcpu_dabt_get_rd(vcpu);

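	/*
	 * For a write, convert the guest register value to host endianness
	 * and stage it in data_buf; for a read, data_buf is filled by the
	 * in-kernel handler (or later by userspace via run->mmio.data).
	 */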
	if (is_write) {
		data = vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt),
					       len);

		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, &data);
		kvm_mmio_write_buf(data_buf, len, data);

		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
				       data_buf);
	} else {
		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
			       fault_ipa, NULL);

		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
				      data_buf);
	}

	/* Now prepare kvm_run for the potential return to userland. */
	run->mmio.is_write	= is_write;
	run->mmio.phys_addr	= fault_ipa;
	run->mmio.len		= len;
	vcpu->mmio_needed	= 1;

	if (!ret) {
		/* We handled the access successfully in the kernel. */
		if (!is_write)
			memcpy(run->mmio.data, data_buf, len);
		vcpu->stat.mmio_exit_kernel++;
		kvm_handle_mmio_return(vcpu, run);
		return 1;
	}

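	/*
	 * No in-kernel device claimed the access: copy write data into the
	 * run struct and let userspace complete it via KVM_EXIT_MMIO.
	 */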
	if (is_write)
		memcpy(run->mmio.data, data_buf, len);
	vcpu->stat.mmio_exit_user++;
	run->exit_reason = KVM_EXIT_MMIO;
	return 0;
}