/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
|
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sysreg.h>
.text
|
|
|
|
.pushsection .hyp.idmap.text, "ax"
|
|
|
|
|
|
|
|
.align 11
|
|
|
|
|
|
|
|
ENTRY(__kvm_hyp_init)
|
|
|
|
ventry __invalid // Synchronous EL2t
|
|
|
|
ventry __invalid // IRQ EL2t
|
|
|
|
ventry __invalid // FIQ EL2t
|
|
|
|
ventry __invalid // Error EL2t
|
|
|
|
|
|
|
|
ventry __invalid // Synchronous EL2h
|
|
|
|
ventry __invalid // IRQ EL2h
|
|
|
|
ventry __invalid // FIQ EL2h
|
|
|
|
ventry __invalid // Error EL2h
|
|
|
|
|
|
|
|
ventry __do_hyp_init // Synchronous 64-bit EL1
|
|
|
|
ventry __invalid // IRQ 64-bit EL1
|
|
|
|
ventry __invalid // FIQ 64-bit EL1
|
|
|
|
ventry __invalid // Error 64-bit EL1
|
|
|
|
|
|
|
|
ventry __invalid // Synchronous 32-bit EL1
|
|
|
|
ventry __invalid // IRQ 32-bit EL1
|
|
|
|
ventry __invalid // FIQ 32-bit EL1
|
|
|
|
ventry __invalid // Error 32-bit EL1
|
|
|
|
|
|
|
|
__invalid:
|
|
|
|
b .
|
|
|
|
|
|
|
|
/*
|
|
|
|
* x0: HYP boot pgd
|
|
|
|
* x1: HYP pgd
|
|
|
|
* x2: HYP stack
|
|
|
|
* x3: HYP vectors
|
|
|
|
*/
|
|
|
|
__do_hyp_init:
|
|
|
|
|
|
|
|
msr ttbr0_el2, x0
|
|
|
|
|
|
|
|
mrs x4, tcr_el1
|
|
|
|
ldr x5, =TCR_EL2_MASK
|
|
|
|
and x4, x4, x5
|
2016-02-11 01:46:53 +07:00
|
|
|
mov x5, #TCR_EL2_RES1
|
2012-12-18 00:07:52 +07:00
|
|
|
orr x4, x4, x5
|
2015-03-19 23:42:28 +07:00
|
|
|
|
|
|
|
#ifndef CONFIG_ARM64_VA_BITS_48
|
|
|
|
/*
|
|
|
|
* If we are running with VA_BITS < 48, we may be running with an extra
|
|
|
|
* level of translation in the ID map. This is only the case if system
|
|
|
|
* RAM is out of range for the currently configured page size and number
|
|
|
|
* of translation levels, in which case we will also need the extra
|
|
|
|
* level for the HYP ID map, or we won't be able to enable the EL2 MMU.
|
|
|
|
*
|
|
|
|
* However, at EL2, there is only one TTBR register, and we can't switch
|
|
|
|
* between translation tables *and* update TCR_EL2.T0SZ at the same
|
|
|
|
* time. Bottom line: we need the extra level in *both* our translation
|
|
|
|
* tables.
|
|
|
|
*
|
|
|
|
* So use the same T0SZ value we use for the ID map.
|
|
|
|
*/
|
|
|
|
ldr_l x5, idmap_t0sz
|
|
|
|
bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
|
|
|
|
#endif
|
2014-03-07 15:49:25 +07:00
|
|
|
/*
|
|
|
|
* Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
|
2015-01-29 20:19:45 +07:00
|
|
|
* TCR_EL2.
|
2014-03-07 15:49:25 +07:00
|
|
|
*/
|
|
|
|
mrs x5, ID_AA64MMFR0_EL1
|
|
|
|
bfi x4, x5, #16, #3
|
2016-02-11 01:46:53 +07:00
|
|
|
|
|
|
|
msr tcr_el2, x4
|
|
|
|
|
2012-12-18 00:07:52 +07:00
|
|
|
mrs x4, mair_el1
|
|
|
|
msr mair_el2, x4
|
|
|
|
isb
|
|
|
|
|
2014-07-31 13:53:23 +07:00
|
|
|
/* Invalidate the stale TLBs from Bootloader */
|
|
|
|
tlbi alle2
|
|
|
|
dsb sy
|
|
|
|
|
2013-11-06 01:29:45 +07:00
|
|
|
mrs x4, sctlr_el2
|
2016-04-27 23:47:01 +07:00
|
|
|
and x4, x4, #SCTLR_ELx_EE // preserve endianness of EL2
|
|
|
|
ldr x5, =SCTLR_ELx_FLAGS
|
2013-11-06 01:29:45 +07:00
|
|
|
orr x4, x4, x5
|
2012-12-18 00:07:52 +07:00
|
|
|
msr sctlr_el2, x4
|
|
|
|
isb
|
|
|
|
|
2015-03-19 23:42:28 +07:00
|
|
|
/* Skip the trampoline dance if we merged the boot and runtime PGDs */
|
|
|
|
cmp x0, x1
|
|
|
|
b.eq merged
|
|
|
|
|
2012-12-18 00:07:52 +07:00
|
|
|
/* MMU is now enabled. Get ready for the trampoline dance */
|
|
|
|
ldr x4, =TRAMPOLINE_VA
|
|
|
|
adr x5, target
|
|
|
|
bfi x4, x5, #0, #PAGE_SHIFT
|
|
|
|
br x4
|
|
|
|
|
|
|
|
target: /* We're now in the trampoline code, switch page tables */
|
|
|
|
msr ttbr0_el2, x1
|
|
|
|
isb
|
|
|
|
|
|
|
|
/* Invalidate the old TLBs */
|
|
|
|
tlbi alle2
|
|
|
|
dsb sy
|
|
|
|
|
2015-03-19 23:42:28 +07:00
|
|
|
merged:
|
2012-12-18 00:07:52 +07:00
|
|
|
/* Set the stack and new vectors */
|
|
|
|
kern_hyp_va x2
|
|
|
|
mov sp, x2
|
|
|
|
kern_hyp_va x3
|
|
|
|
msr vbar_el2, x3
|
|
|
|
|
|
|
|
/* Hello, World! */
|
|
|
|
eret
|
|
|
|
ENDPROC(__kvm_hyp_init)
|
|
|
|
|
arm64: kvm: allows kvm cpu hotplug
The current kvm implementation on arm64 does cpu-specific initialization
at system boot, and has no way to gracefully shutdown a core in terms of
kvm. This prevents kexec from rebooting the system at EL2.
This patch adds a cpu tear-down function and also puts an existing cpu-init
code into a separate function, kvm_arch_hardware_disable() and
kvm_arch_hardware_enable() respectively.
We don't need the arm64 specific cpu hotplug hook any more.
Since this patch modifies common code between arm and arm64, one stub
definition, __cpu_reset_hyp_mode(), is added on arm side to avoid
compilation errors.
Signed-off-by: AKASHI Takahiro <takahiro.akashi@linaro.org>
[Rebase, added separate VHE init/exit path, changed resets use of
kvm_call_hyp() to the __version, en/disabled hardware in init_subsystems(),
added icache maintenance to __kvm_hyp_reset() and removed lr restore, removed
guest-enter after teardown handling]
Signed-off-by: James Morse <james.morse@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
2016-04-27 23:47:05 +07:00
|
|
|
/*
 * Reset kvm back to the hyp stub. This is the trampoline dance in
 * reverse. If kvm used an extended idmap, __extended_idmap_trampoline
 * calls this code directly in the idmap. In this case switching to the
 * boot tables is a no-op.
 *
 * x0: HYP boot pgd
 * x1: HYP phys_idmap_start
 */
ENTRY(__kvm_hyp_reset)
	/* We're in trampoline code in VA, switch back to boot page tables */
	msr	ttbr0_el2, x0
	isb

	/* Ensure the PA branch doesn't find a stale tlb entry or stale code. */
	ic	iallu
	tlbi	alle2
	dsb	sy
	isb

	/*
	 * Branch into PA space: keep the page bits of phys_idmap_start and
	 * splice in our own offset within the page, then jump through the
	 * identity-mapped alias so the MMU can be turned off safely.
	 */
	adr	x0, 1f
	bfi	x1, x0, #0, #PAGE_SHIFT
	br	x1

	/* We're now in idmap, disable MMU */
1:	mrs	x0, sctlr_el2
	ldr	x1, =SCTLR_ELx_FLAGS
	bic	x0, x0, x1		// Clear SCTL_M and etc
	msr	sctlr_el2, x0
	isb

	/* Invalidate the old TLBs */
	tlbi	alle2
	dsb	sy

	/* Install stub vectors, restoring the pre-KVM HVC interface */
	adr_l	x0, __hyp_stub_vectors
	msr	vbar_el2, x0

	/* Return to the caller at EL1 */
	eret
ENDPROC(__kvm_hyp_reset)

	/* Dump the literal pool (ldr =const above) inside this section. */
	.ltorg

	.popsection