/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/assembler.h>
#include <asm/boot.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/virt.h>

#include "efi-header.S"

#define __PHYS_OFFSET	(KERNEL_START - TEXT_OFFSET)

#if (TEXT_OFFSET & 0xfff) != 0
#error TEXT_OFFSET must be at least 4KB aligned
#elif (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#elif TEXT_OFFSET > 0x1fffff
#error TEXT_OFFSET must be less than 2MB
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * This code is mostly position independent so you call this at
 * __pa(PAGE_OFFSET + TEXT_OFFSET).
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD
_head:
	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
#ifdef CONFIG_EFI
	/*
	 * This add instruction has no meaningful effect except that
	 * its opcode forms the magic "MZ" signature required by UEFI.
	 */
	add	x13, x18, #0x16
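	/*
	 * The instruction above should assemble to 0x91005a4d; stored
	 * little-endian, its first two bytes are 0x4d 0x5a ("MZ"), the
	 * signature UEFI expects at offset 0 of a PE/COFF image.
	 */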
	b	stext
#else
	b	stext				// branch to kernel start, magic
	.long	0				// reserved
#endif
	le64sym	_kernel_offset_le		// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le			// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le		// Informative flags, little-endian
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.ascii	"ARM\x64"			// Magic number
#ifdef CONFIG_EFI
	.long	pe_header - _head		// Offset to the PE header.

pe_header:
	__EFI_PE_HEADER
#else
	.long	0				// reserved
#endif

	__INIT

	/*
	 * The following callee saved general purpose registers are used on the
	 * primary lowlevel boot path:
	 *
	 *  Register   Scope                      Purpose
	 *  x21        stext() .. start_kernel()  FDT pointer passed at boot in x0
	 *  x23        stext() .. start_kernel()  physical misalignment/KASLR offset
	 *  x28        __create_page_tables()     callee preserved temp register
	 *  x19/x20    __primary_switch()         callee preserved temp registers
	 */
ENTRY(stext)
	bl	preserve_boot_args
	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
	adrp	x23, __PHYS_OFFSET
	and	x23, x23, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
	bl	set_cpu_boot_mode_flag
	bl	__create_page_tables
	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	bl	__cpu_setup			// initialise processor
	b	__primary_switch
ENDPROC(stext)

/*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 */
preserve_boot_args:
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	dmb	sy				// needed before dc ivac with
						// MMU off

	mov	x1, #0x20			// 4 x 8 bytes
	b	__inval_dcache_area		// tail call
ENDPROC(preserve_boot_args)

/*
 * Macro to create a table entry to the next page.
 *
 *	tbl:	page table address
 *	virt:	virtual address
 *	shift:	#imm page table shift
 *	ptrs:	#imm pointers per table page
 *
 * Preserves:	virt
 * Corrupts:	ptrs, tmp1, tmp2
 * Returns:	tbl -> next level table page address
 */
	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
	add	\tmp1, \tbl, #PAGE_SIZE
	phys_to_pte \tmp2, \tmp1
	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
	lsr	\tmp1, \virt, #\shift
	sub	\ptrs, \ptrs, #1
	and	\tmp1, \tmp1, \ptrs		// table index
	str	\tmp2, [\tbl, \tmp1, lsl #3]
	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
	.endm
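	/*
	 * Roughly equivalent C, assuming 8-byte descriptors:
	 *
	 *	tbl[(virt >> shift) & (ptrs - 1)] =
	 *		phys_to_pte(tbl + PAGE_SIZE) | PMD_TYPE_TABLE;
	 *	tbl += PAGE_SIZE;		// now the next-level table
	 */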

/*
 * Macro to populate page table entries, these entries can be pointers to the next level
 * or last level entries pointing to physical memory.
 *
 *	tbl:	page table address
 *	rtbl:	pointer to page table or physical memory
 *	index:	start index to write
 *	eindex:	end index to write - [index, eindex] written to
 *	flags:	flags for pagetable entry to or in
 *	inc:	increment to rtbl between each entry
 *	tmp1:	temporary variable
 *
 * Preserves:	tbl, eindex, flags, inc
 * Corrupts:	index, tmp1
 * Returns:	rtbl
 */
	.macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
.Lpe\@:	phys_to_pte \tmp1, \rtbl
	orr	\tmp1, \tmp1, \flags		// tmp1 = table entry
	str	\tmp1, [\tbl, \index, lsl #3]
	add	\rtbl, \rtbl, \inc		// rtbl = pa next level
	add	\index, \index, #1
	cmp	\index, \eindex
	b.ls	.Lpe\@
	.endm
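	/*
	 * Roughly equivalent C:
	 *
	 *	for (i = index; i <= eindex; i++, rtbl += inc)
	 *		tbl[i] = phys_to_pte(rtbl) | flags;
	 */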

/*
 * Compute indices of table entries from virtual address range. If multiple entries
 * were needed in the previous page table level then the next page table level is assumed
 * to be composed of multiple pages. (This effectively scales the end index).
 *
 *	vstart:	virtual address of start of range
 *	vend:	virtual address of end of range
 *	shift:	shift used to transform virtual address into index
 *	ptrs:	number of entries in page table
 *	istart:	index in table corresponding to vstart
 *	iend:	index in table corresponding to vend
 *	count:	On entry: how many extra entries were required in previous level, scales
 *		our end index.
 *		On exit: returns how many extra entries required for next page table level
 *
 * Preserves:	vstart, vend, shift, ptrs
 * Returns:	istart, iend, count
 */
	.macro compute_indices, vstart, vend, shift, ptrs, istart, iend, count
	lsr	\iend, \vend, \shift
	mov	\istart, \ptrs
	sub	\istart, \istart, #1
	and	\iend, \iend, \istart		// iend = (vend >> shift) & (ptrs - 1)
	mov	\istart, \ptrs
	mul	\istart, \istart, \count
	add	\iend, \iend, \istart		// iend += count * ptrs
						// our entries span multiple tables

	lsr	\istart, \vstart, \shift
	mov	\count, \ptrs
	sub	\count, \count, #1
	and	\istart, \istart, \count

	sub	\count, \iend, \istart
	.endm
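	/*
	 * Roughly equivalent C (count is the number of extra entries the
	 * previous level needed, and is rewritten for the next level):
	 *
	 *	iend   = ((vend >> shift) & (ptrs - 1)) + ptrs * count;
	 *	istart = (vstart >> shift) & (ptrs - 1);
	 *	count  = iend - istart;
	 */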

/*
 * Map memory for specified virtual address range. Each level of page table needed supports
 * multiple entries. If a level requires n entries the next page table level is assumed to be
 * formed from n pages.
 *
 *	tbl:	location of page table
 *	rtbl:	address to be used for first level page table entry (typically tbl + PAGE_SIZE)
 *	vstart:	start address to map
 *	vend:	end address to map - we map [vstart, vend]
 *	flags:	flags to use to map last level entries
 *	phys:	physical address corresponding to vstart - physical memory is contiguous
 *	pgds:	the number of pgd entries
 *
 * Temporaries:	istart, iend, tmp, count, sv - these need to be different registers
 * Preserves:	vstart, vend, flags
 * Corrupts:	tbl, rtbl, istart, iend, tmp, count, sv
 */
	.macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
	add \rtbl, \tbl, #PAGE_SIZE
	mov \sv, \rtbl
	mov \count, #0
	compute_indices \vstart, \vend, #PGDIR_SHIFT, \pgds, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv
	mov \sv, \rtbl

#if SWAPPER_PGTABLE_LEVELS > 3
	compute_indices \vstart, \vend, #PUD_SHIFT, #PTRS_PER_PUD, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv
	mov \sv, \rtbl
#endif

#if SWAPPER_PGTABLE_LEVELS > 2
	compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #PTRS_PER_PMD, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov \tbl, \sv
#endif

	compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #PTRS_PER_PTE, \istart, \iend, \count
	bic \count, \phys, #SWAPPER_BLOCK_SIZE - 1
	populate_entries \tbl, \count, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
	.endm
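	/*
	 * The net effect: entries covering [vstart, vend] are written at every
	 * level, next-level tables are taken from the pages that follow tbl,
	 * and the last level maps blocks of SWAPPER_BLOCK_SIZE starting at
	 * phys rounded down to a block boundary.
	 */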

/*
 * Setup the initial page tables. We only setup the barest amount which is
 * required to get the kernel running. The following sections are required:
 *   - identity mapping to enable the MMU (low address, TTBR0)
 *   - first few MB of the kernel linear mapping to jump to once the MMU has
 *     been enabled
 */
__create_page_tables:
	mov	x28, lr

	/*
	 * Invalidate the idmap and swapper page tables to avoid potential
	 * dirty cache lines being evicted.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x1, swapper_pg_end
	sub	x1, x1, x0
	bl	__inval_dcache_area

	/*
	 * Clear the idmap and swapper page tables.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x1, swapper_pg_end
	sub	x1, x1, x0
1:	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	subs	x1, x1, #64
	b.ne	1b

	mov	x7, SWAPPER_MM_MMUFLAGS

	/*
	 * Create the identity mapping.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)

	/*
	 * VA_BITS may be too small to allow for an ID mapping to be created
	 * that covers system RAM if that is located sufficiently high in the
	 * physical address space. So for the ID map, use an extended virtual
	 * range in that case, and configure an additional translation level
	 * if needed.
	 *
	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
	 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
	 * this number conveniently equals the number of leading zeroes in
	 * the physical address of __idmap_text_end.
	 */
	adrp	x5, __idmap_text_end
	clz	x5, x5
	cmp	x5, TCR_T0SZ(VA_BITS)		// default T0SZ small enough?
	b.ge	1f				// .. then skip VA range extension
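	/*
	 * For example, if __idmap_text_end sat at physical 0x80_0000_0000
	 * (bit 39 set), clz would give 24, i.e. a 40-bit range; with
	 * VA_BITS == 39 the default T0SZ of 25 would be too large, so the
	 * VA range is extended below.
	 */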

	adr_l	x6, idmap_t0sz
	str	x5, [x6]
	dmb	sy
	dc	ivac, x6			// Invalidate potentially stale cache line

#if (VA_BITS < 48)
#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
#define EXTRA_PTRS	(1 << (PHYS_MASK_SHIFT - EXTRA_SHIFT))

	/*
	 * If VA_BITS < 48, we have to configure an additional table level.
	 * First, we have to verify our assumption that the current value of
	 * VA_BITS was chosen such that all translation levels are fully
	 * utilised, and that lowering T0SZ will always result in an additional
	 * translation level to be configured.
	 */
#if VA_BITS != EXTRA_SHIFT
#error "Mismatch between VA_BITS and page size/number of translation levels"
#endif

	mov	x4, EXTRA_PTRS
	create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6
#else
	/*
	 * If VA_BITS == 48, we don't have to configure an additional
	 * translation level, but the top-level table has more entries.
	 */
	mov	x4, #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT)
	str_l	x4, idmap_ptrs_per_pgd, x5
#endif
1:
	ldr_l	x4, idmap_ptrs_per_pgd
	mov	x5, x3				// __pa(__idmap_text_start)
	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)

	map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14

	/*
	 * Map the kernel image (starting with PHYS_OFFSET).
	 */
	adrp	x0, swapper_pg_dir
	mov_q	x5, KIMAGE_VADDR + TEXT_OFFSET	// compile time __va(_text)
	add	x5, x5, x23			// add KASLR displacement
	mov	x4, PTRS_PER_PGD
	adrp	x6, _end			// runtime __pa(_end)
	adrp	x3, _text			// runtime __pa(_text)
	sub	x6, x6, x3			// _end - _text
	add	x6, x6, x5			// runtime __va(_end)

	map_memory x0, x1, x5, x6, x7, x3, x4, x10, x11, x12, x13, x14

	/*
	 * Since the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate the idmap and swapper page
	 * tables again to remove any speculatively loaded cache lines.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x1, swapper_pg_end
	sub	x1, x1, x0
	dmb	sy
	bl	__inval_dcache_area

	ret	x28
ENDPROC(__create_page_tables)
	.ltorg

/*
 * The following fragment of code is executed with the MMU enabled.
 *
 *   x0 = __PHYS_OFFSET
 */
__primary_switched:
	adrp	x4, init_thread_union
	add	sp, x4, #THREAD_SIZE
	adr_l	x5, init_task
	msr	sp_el0, x5			// Save thread_info

	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address
	isb

	stp	xzr, x30, [sp, #-16]!
	mov	x29, sp

	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	ldr_l	x4, kimage_vaddr		// Save the offset between
	sub	x4, x4, x0			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings

	// Clear BSS
	adr_l	x0, __bss_start
	mov	x1, xzr
	adr_l	x2, __bss_stop
	sub	x2, x2, x0
	bl	__pi_memset
	dsb	ishst				// Make zero page visible to PTW

#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
#ifdef CONFIG_RANDOMIZE_BASE
	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
	b.ne	0f
	mov	x0, x21				// pass FDT address in x0
	bl	kaslr_early_init		// parse FDT for KASLR options
	cbz	x0, 0f				// KASLR disabled? just proceed
	orr	x23, x23, x0			// record KASLR offset
	ldp	x29, x30, [sp], #16		// we must enable KASLR, return
	ret					// to __primary_switch()
0:
#endif
	add	sp, sp, #16
	mov	x29, #0
	mov	x30, #0
	b	start_kernel
ENDPROC(__primary_switched)

/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".idmap.text","awx"

ENTRY(kimage_vaddr)
	.quad	_text - TEXT_OFFSET

/*
 * If we're fortunate enough to boot at EL2, ensure that the world is
 * sane before dropping to EL1.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
 * booted in EL1 or EL2 respectively.
 */
ENTRY(el2_setup)
	msr	SPsel, #1			// We want to use SP_EL{1,2}
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.eq	1f
	mov_q	x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
	msr	sctlr_el1, x0
	mov	w0, #BOOT_CPU_MODE_EL1		// This cpu booted in EL1
	isb
	ret

1:	mov_q	x0, (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
	msr	sctlr_el2, x0

#ifdef CONFIG_ARM64_VHE
	/*
	 * Check for VHE being present. For the rest of the EL2 setup,
	 * x2 being non-zero indicates that we do have VHE, and that the
	 * kernel is intended to run at EL2.
	 */
	mrs	x2, id_aa64mmfr1_el1
	ubfx	x2, x2, #8, #4
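	/*
	 * Bits [11:8] of ID_AA64MMFR1_EL1 are the VH field; a non-zero value
	 * should indicate that the Virtualization Host Extensions are
	 * implemented.
	 */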
#else
	mov	x2, xzr
#endif

	/* Hyp configuration. */
	mov	x0, #HCR_RW			// 64-bit EL1
	cbz	x2, set_hcr
	orr	x0, x0, #HCR_TGE		// Enable Host Extensions
	orr	x0, x0, #HCR_E2H
set_hcr:
	msr	hcr_el2, x0
	isb

	/*
	 * Allow Non-secure EL1 and EL0 to access physical timer and counter.
	 * This is not necessary for VHE, since the host kernel runs in EL2,
	 * and EL0 accesses are configured in the later stage of boot process.
	 * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
	 * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
	 * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1
	 * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
	 * EL2.
	 */
	cbnz	x2, 1f
	mrs	x0, cnthctl_el2
	orr	x0, x0, #3			// Enable EL1 physical timers
	msr	cnthctl_el2, x0
1:
	msr	cntvoff_el2, xzr		// Clear virtual offset

#ifdef CONFIG_ARM_GIC_V3
	/* GICv3 system register access */
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #24, #4
	cmp	x0, #1
	b.ne	3f

	mrs_s	x0, SYS_ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	SYS_ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	mrs_s	x0, SYS_ICC_SRE_EL2		// Read SRE back,
	tbz	x0, #0, 3f			// and check that it sticks
	msr_s	SYS_ICH_HCR_EL2, xzr		// Reset ICC_HCR_EL2 to defaults

3:
#endif

	/* Populate ID registers. */
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1

#ifdef CONFIG_COMPAT
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
#endif

	/* EL2 debug */
	mrs	x1, id_aa64dfr0_el1		// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	x0, x1, #8, #4
	cmp	x0, #1
	b.lt	4f				// Skip if no PMU present
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
4:
	csel	x3, xzr, x0, lt			// all PMU counters from EL1
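	/*
	 * x3 now holds PMCR_EL0.N (the counter count) in its low bits, or zero
	 * if there is no PMU; when written to MDCR_EL2 below this presumably
	 * lands in MDCR_EL2.HPMN, keeping all counters accessible from EL1.
	 */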

	/* Statistical profiling */
	ubfx	x0, x1, #32, #4			// Check ID_AA64DFR0_EL1 PMSVer
	cbz	x0, 7f				// Skip if SPE not present
	cbnz	x2, 6f				// VHE?
	mrs_s	x4, SYS_PMBIDR_EL1		// If SPE available at EL2,
	and	x4, x4, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
	cbnz	x4, 5f				// then permit sampling of physical
	mov	x4, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
		      1 << SYS_PMSCR_EL2_PA_SHIFT)
	msr_s	SYS_PMSCR_EL2, x4		// addresses and physical counter
5:
	mov	x1, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
	orr	x3, x3, x1			// If we don't have VHE, then
	b	7f				// use EL1&0 translation.
6:						// For VHE, use EL2 translation
	orr	x3, x3, #MDCR_EL2_TPMS		// and disable access from EL1
7:
	msr	mdcr_el2, x3			// Configure debug traps

	/* LORegions */
	mrs	x1, id_aa64mmfr1_el1
	ubfx	x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4
	cbz	x0, 1f
	msr_s	SYS_LORC_EL1, xzr
1:
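	/*
	 * With LORC_EL1.EN cleared, LoadLOAcquire/StoreLORelease should behave
	 * as plain Load-Acquire/Store-Release (ARM ARM D7.2.67), so KVM can
	 * later present LORegions to guests as unimplemented.
	 */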

	/* Stage-2 translation */
	msr	vttbr_el2, xzr

	cbz	x2, install_el2_stub

	mov	w0, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	isb
	ret

install_el2_stub:
	/*
	 * When VHE is not in use, early init of EL2 and EL1 needs to be
	 * done here.
	 * When VHE _is_ in use, EL1 will not be used in the host and
	 * requires no configuration, and all non-hyp-specific EL2 setup
	 * will be done via the _EL1 system register aliases in __cpu_setup.
	 */
	mov_q	x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
	msr	sctlr_el1, x0

	/* Coprocessor traps. */
	mov	x0, #0x33ff
	msr	cptr_el2, x0			// Disable copro. traps to EL2

	/* SVE register access */
	mrs	x1, id_aa64pfr0_el1
	ubfx	x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
	cbz	x1, 7f

	bic	x0, x0, #CPTR_EL2_TZ		// Also disable SVE traps
	msr	cptr_el2, x0			// Disable copro. traps to EL2
	isb
	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
	msr_s	SYS_ZCR_EL2, x1			// length for EL1.

	/* Hypervisor stub */
7:	adr_l	x0, __hyp_stub_vectors
	msr	vbar_el2, x0

	/* spsr */
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, x0
	msr	elr_el2, lr
	mov	w0, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	eret
ENDPROC(el2_setup)

/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in w0. See arch/arm64/include/asm/virt.h for more info.
 */
set_cpu_boot_mode_flag:
	adr_l	x1, __boot_cpu_mode
	cmp	w0, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	str	w0, [x1]			// This CPU has booted in EL1
	dmb	sy
	dc	ivac, x1			// Invalidate potentially stale cache line
	ret
ENDPROC(set_cpu_boot_mode_flag)

/*
 * These values are written with the MMU off, but read with the MMU on.
 * Writers will invalidate the corresponding address, discarding up to a
 * 'Cache Writeback Granule' (CWG) worth of data. The linker script ensures
 * sufficient alignment that the CWG doesn't overlap another section.
 */
	.pushsection ".mmuoff.data.write", "aw"
/*
 * We need to find out the CPU boot mode long after boot, so we need to
 * store it in a writable variable.
 *
 * This is not in .bss, because we set it sufficiently early that the boot-time
 * zeroing of .bss would clobber it.
 */
ENTRY(__boot_cpu_mode)
	.long	BOOT_CPU_MODE_EL2
	.long	BOOT_CPU_MODE_EL1
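/*
 * set_cpu_boot_mode_flag stores EL1 boots into the first word and EL2 boots
 * into the second, so the two words should end up equal only when every CPU
 * entered the kernel at the same exception level.
 */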
/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 */
ENTRY(__early_cpu_boot_status)
	.long	0

	.popsection

/*
 * This provides a "holding pen" for platforms to hold all secondary
 * cores until we're ready for them to initialise.
 */
ENTRY(secondary_holding_pen)
	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
	bl	set_cpu_boot_mode_flag
	mrs	x0, mpidr_el1
	mov_q	x1, MPIDR_HWID_BITMASK
	and	x0, x0, x1
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x0
	b.eq	secondary_startup
	wfe
	b	pen
ENDPROC(secondary_holding_pen)

/*
 * Secondary entry point that jumps straight into the kernel. Only to
 * be used where CPUs are brought online dynamically by the kernel.
 */
ENTRY(secondary_entry)
	bl	el2_setup			// Drop to EL1
	bl	set_cpu_boot_mode_flag
	b	secondary_startup
ENDPROC(secondary_entry)

secondary_startup:
	/*
	 * Common entry point for secondary CPUs.
	 */
	bl	__cpu_setup			// initialise processor
	bl	__enable_mmu
	ldr	x8, =__secondary_switched
	br	x8
ENDPROC(secondary_startup)

__secondary_switched:
	adr_l	x5, vectors
	msr	vbar_el1, x5
	isb

	adr_l	x0, secondary_data
	ldr	x1, [x0, #CPU_BOOT_STACK]	// get secondary_data.stack
	mov	sp, x1
	ldr	x2, [x0, #CPU_BOOT_TASK]
	msr	sp_el0, x2
	mov	x29, #0
	mov	x30, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)

/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 *
 * update_early_cpu_boot_status tmp, status
 *  - Corrupts tmp1, tmp2
 *  - Writes 'status' to __early_cpu_boot_status and makes sure
 *    it is committed to memory.
 */

	.macro	update_early_cpu_boot_status status, tmp1, tmp2
	mov	\tmp2, #\status
	adr_l	\tmp1, __early_cpu_boot_status
	str	\tmp2, [\tmp1]
	dmb	sy
	dc	ivac, \tmp1			// Invalidate potentially stale cache line
	.endm

/*
 * Enable the MMU.
 *
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *
 * Returns to the caller via x30/lr. This requires the caller to be covered
 * by the .idmap.text section.
 *
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU
 */
ENTRY(__enable_mmu)
	mrs	x1, ID_AA64MMFR0_EL1
	ubfx	x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
	b.ne	__no_granule_support
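	/*
	 * ID_AA64MMFR0_TGRAN_SHIFT/_SUPPORTED are presumably the <asm/sysreg.h>
	 * aliases for the TGRAN field matching the configured page size, so a
	 * mismatch here means the CPU cannot run with this granule.
	 */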
	update_early_cpu_boot_status 0, x1, x2
	adrp	x1, idmap_pg_dir
	adrp	x2, swapper_pg_dir
	phys_to_ttbr x3, x1
	phys_to_ttbr x4, x2
	msr	ttbr0_el1, x3			// load TTBR0
	msr	ttbr1_el1, x4			// load TTBR1
	isb
	msr	sctlr_el1, x0
	isb
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh
	isb
	ret
ENDPROC(__enable_mmu)

__no_granule_support:
	/* Indicate that this CPU can't boot and is stuck in the kernel */
	update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x1, x2
1:
	wfe
	wfi
	b	1b
ENDPROC(__no_granule_support)

#ifdef CONFIG_RELOCATABLE
__relocate_kernel:
	/*
	 * Iterate over each entry in the relocation table, and apply the
	 * relocations in place.
	 */
	ldr	w9, =__rela_offset		// offset to reloc table
	ldr	w10, =__rela_size		// size of reloc table

	mov_q	x11, KIMAGE_VADDR		// default virtual offset
	add	x11, x11, x23			// actual virtual offset
	add	x9, x9, x11			// __va(.rela)
	add	x10, x9, x10			// __va(.rela) + sizeof(.rela)

0:	cmp	x9, x10
	b.hs	1f
	ldp	x11, x12, [x9], #24
	ldr	x13, [x9, #-8]
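	// Each Elf64_Rela entry is 24 bytes: r_offset (-> x11), r_info (-> x12),
	// followed by r_addend (re-read into x13).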
	cmp	w12, #R_AARCH64_RELATIVE
	b.ne	0b
	add	x13, x13, x23			// relocate
	str	x13, [x11, x23]
	b	0b
1:	ret
ENDPROC(__relocate_kernel)
#endif

__primary_switch:
#ifdef CONFIG_RANDOMIZE_BASE
	mov	x19, x0				// preserve new SCTLR_EL1 value
	mrs	x20, sctlr_el1			// preserve old SCTLR_EL1 value
#endif

	bl	__enable_mmu
#ifdef CONFIG_RELOCATABLE
	bl	__relocate_kernel
#ifdef CONFIG_RANDOMIZE_BASE
	ldr	x8, =__primary_switched
	adrp	x0, __PHYS_OFFSET
	blr	x8

	/*
	 * If we return here, we have a KASLR displacement in x23 which we need
	 * to take into account by discarding the current kernel mapping and
	 * creating a new one.
	 */
	pre_disable_mmu_workaround
	msr	sctlr_el1, x20			// disable the MMU
	isb
	bl	__create_page_tables		// recreate kernel mapping

	tlbi	vmalle1				// Remove any stale TLB entries
	dsb	nsh

	msr	sctlr_el1, x19			// re-enable the MMU
	isb
	ic	iallu				// flush instructions fetched
	dsb	nsh				// via old mapping
	isb

	bl	__relocate_kernel
#endif
#endif
	ldr	x8, =__primary_switched
	adrp	x0, __PHYS_OFFSET
	br	x8
ENDPROC(__primary_switch)