Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-05 09:26:44 +07:00)
Merge branches 'for-next/acpi', 'for-next/cpufeatures', 'for-next/csum', 'for-next/e0pd', 'for-next/entry', 'for-next/kbuild', 'for-next/kexec/cleanup', 'for-next/kexec/file-kdump', 'for-next/misc', 'for-next/nofpsimd', 'for-next/perf' and 'for-next/scs' into for-next/core
* for-next/acpi: ACPI/IORT: Fix 'Number of IDs' handling in iort_id_map()
* for-next/cpufeatures: (2 commits) arm64: Introduce ID_ISAR6 CPU register ...
* for-next/csum: (2 commits) arm64: csum: Fix pathological zero-length calls ...
* for-next/e0pd: (7 commits) arm64: kconfig: Fix alignment of E0PD help text ...
* for-next/entry: (5 commits) arm64: entry: cleanup sp_el0 manipulation ...
* for-next/kbuild: (4 commits) arm64: kbuild: remove compressed images on 'make ARCH=arm64 (dist)clean' ...
* for-next/kexec/cleanup: (11 commits) Revert "arm64: kexec: make dtb_mem always enabled" ...
* for-next/kexec/file-kdump: (2 commits) arm64: kexec_file: add crash dump support ...
* for-next/misc: (12 commits) arm64: entry: Avoid empty alternatives entries ...
* for-next/nofpsimd: (7 commits) arm64: nofpsmid: Handle TIF_FOREIGN_FPSTATE flag cleanly ...
* for-next/perf: (2 commits) perf/imx_ddr: Fix cpu hotplug state cleanup ...
* for-next/scs: (6 commits) arm64: kernel: avoid x18 in __cpu_soft_restart ...
Parents:
  3c23b83a88  8e3747beff  c2c24edb1d  e717d93b1c  3e3934176a  d7bbd6c1b0
  1595fe299e  3751e728ce  108eae2d4d  52f73c383b  9ee68b314e  500d14affd
Commit: 4f6cdf296c
@@ -200,6 +200,12 @@ infrastructure:
      +------------------------------+---------+---------+
      | Name                         |  bits   | visible |
      +------------------------------+---------+---------+
+     | I8MM                         | [55-52] |    y    |
+     +------------------------------+---------+---------+
+     | DGH                          | [51-48] |    y    |
+     +------------------------------+---------+---------+
+     | BF16                         | [47-44] |    y    |
+     +------------------------------+---------+---------+
      | SB                           | [39-36] |    y    |
      +------------------------------+---------+---------+
      | FRINTTS                      | [35-32] |    y    |
@@ -234,10 +240,18 @@ infrastructure:
      +------------------------------+---------+---------+
      | Name                         |  bits   | visible |
      +------------------------------+---------+---------+
+     | F64MM                        | [59-56] |    y    |
+     +------------------------------+---------+---------+
+     | F32MM                        | [55-52] |    y    |
+     +------------------------------+---------+---------+
+     | I8MM                         | [47-44] |    y    |
+     +------------------------------+---------+---------+
      | SM4                          | [43-40] |    y    |
      +------------------------------+---------+---------+
      | SHA3                         | [35-32] |    y    |
      +------------------------------+---------+---------+
+     | BF16                         | [23-20] |    y    |
+     +------------------------------+---------+---------+
      | BitPerm                      | [19-16] |    y    |
      +------------------------------+---------+---------+
      | AES                          | [7-4]   |    y    |
@@ -204,6 +204,33 @@ HWCAP2_FRINT
 
     Functionality implied by ID_AA64ISAR1_EL1.FRINTTS == 0b0001.
 
+HWCAP2_SVEI8MM
+
+    Functionality implied by ID_AA64ZFR0_EL1.I8MM == 0b0001.
+
+HWCAP2_SVEF32MM
+
+    Functionality implied by ID_AA64ZFR0_EL1.F32MM == 0b0001.
+
+HWCAP2_SVEF64MM
+
+    Functionality implied by ID_AA64ZFR0_EL1.F64MM == 0b0001.
+
+HWCAP2_SVEBF16
+
+    Functionality implied by ID_AA64ZFR0_EL1.BF16 == 0b0001.
+
+HWCAP2_I8MM
+
+    Functionality implied by ID_AA64ISAR1_EL1.I8MM == 0b0001.
+
+HWCAP2_BF16
+
+    Functionality implied by ID_AA64ISAR1_EL1.BF16 == 0b0001.
+
+HWCAP2_DGH
+
+    Functionality implied by ID_AA64ISAR1_EL1.DGH == 0b0001.
+
 4. Unused AT_HWCAP bits
 -----------------------
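A minimal userspace sketch (not part of the patch) of how the hwcaps documented above are meant to be consumed: via getauxval(AT_HWCAP2), as elf_hwcaps.rst describes. The fallback define mirrors the value the commit adds to the uapi header further down; everything else is standard libc.

	#include <stdio.h>
	#include <sys/auxv.h>

	#ifndef HWCAP2_I8MM
	#define HWCAP2_I8MM	(1UL << 13)	/* value from the uapi hwcap.h hunk below */
	#endif

	int main(void)
	{
		unsigned long hwcap2 = getauxval(AT_HWCAP2);

		/* Only trust the hwcap bit, never the raw ID register, from userspace. */
		printf("I8MM %s\n", (hwcap2 & HWCAP2_I8MM) ? "supported" : "not supported");
		return 0;
	}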
@@ -161,6 +161,7 @@ config ARM64
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_FUNCTION_ARG_ACCESS_API
+	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select HAVE_RCU_TABLE_FREE
 	select HAVE_RSEQ
 	select HAVE_STACKPROTECTOR
@@ -301,6 +302,9 @@ config ARCH_SUPPORTS_UPROBES
 config ARCH_PROC_KCORE_TEXT
 	def_bool y
 
+config BROKEN_GAS_INST
+	def_bool !$(as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n)
+
 config KASAN_SHADOW_OFFSET
 	hex
 	depends on KASAN
@@ -1363,6 +1367,11 @@ config ARM64_PAN
 	  instruction if the cpu does not implement the feature.
 
 config ARM64_LSE_ATOMICS
+	bool
+	default ARM64_USE_LSE_ATOMICS
+	depends on $(as-instr,.arch_extension lse)
+
+config ARM64_USE_LSE_ATOMICS
 	bool "Atomic instructions"
 	depends on JUMP_LABEL
 	default y
@@ -1484,6 +1493,22 @@ config ARM64_PTR_AUTH
 
 endmenu
 
+menu "ARMv8.5 architectural features"
+
+config ARM64_E0PD
+	bool "Enable support for E0PD"
+	default y
+	help
+	  E0PD (part of the ARMv8.5 extensions) allows us to ensure
+	  that EL0 accesses made via TTBR1 always fault in constant time,
+	  providing similar benefits to KASLR as those provided by KPTI, but
+	  with lower overhead and without disrupting legitimate access to
+	  kernel memory such as SPE.
+
+	  This option enables E0PD for TTBR1 where available.
+
+endmenu
+
 config ARM64_SVE
 	bool "ARM Scalable Vector Extension support"
 	default y
@@ -1544,7 +1569,7 @@ config ARM64_MODULE_PLTS
 
 config ARM64_PSEUDO_NMI
 	bool "Support for NMI-like interrupts"
-	select CONFIG_ARM_GIC_V3
+	select ARM_GIC_V3
 	help
 	  Adds support for mimicking Non-Maskable Interrupts through the use of
 	  GIC interrupt priority. This support requires version 3 or later of
@@ -30,11 +30,8 @@ LDFLAGS_vmlinux += --fix-cortex-a53-843419
   endif
 endif
 
-# Check for binutils support for specific extensions
-lseinstr := $(call as-instr,.arch_extension lse,-DCONFIG_AS_LSE=1)
-
-ifeq ($(CONFIG_ARM64_LSE_ATOMICS), y)
-  ifeq ($(lseinstr),)
+ifeq ($(CONFIG_ARM64_USE_LSE_ATOMICS), y)
+  ifneq ($(CONFIG_ARM64_LSE_ATOMICS), y)
 $(warning LSE atomics not supported by binutils)
   endif
 endif
@@ -45,19 +42,15 @@ cc_has_k_constraint := $(call try-run,echo \
 	return 0; \
 }' | $(CC) -S -x c -o "$$TMP" -,,-DCONFIG_CC_HAS_K_CONSTRAINT=1)
 
-ifeq ($(CONFIG_ARM64), y)
-brokengasinst := $(call as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n,,-DCONFIG_BROKEN_GAS_INST=1)
-
-  ifneq ($(brokengasinst),)
+ifeq ($(CONFIG_BROKEN_GAS_INST),y)
 $(warning Detected assembler with broken .inst; disassembly will be unreliable)
-  endif
 endif
 
-KBUILD_CFLAGS	+= -mgeneral-regs-only $(lseinstr) $(brokengasinst) \
+KBUILD_CFLAGS	+= -mgeneral-regs-only \
 		   $(compat_vdso) $(cc_has_k_constraint)
 KBUILD_CFLAGS	+= -fno-asynchronous-unwind-tables
 KBUILD_CFLAGS	+= $(call cc-disable-warning, psabi)
-KBUILD_AFLAGS	+= $(lseinstr) $(brokengasinst) $(compat_vdso)
+KBUILD_AFLAGS	+= $(compat_vdso)
 
 KBUILD_CFLAGS	+= $(call cc-option,-mabi=lp64)
 KBUILD_AFLAGS	+= $(call cc-option,-mabi=lp64)
@@ -16,7 +16,7 @@
 
 OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
 
-targets := Image Image.gz
+targets := Image Image.bz2 Image.gz Image.lz4 Image.lzma Image.lzo
 
 $(obj)/Image: vmlinux FORCE
 	$(call if_changed,objcopy)
@@ -35,13 +35,16 @@ void apply_alternatives_module(void *start, size_t length);
 static inline void apply_alternatives_module(void *start, size_t length) { }
 #endif
 
-#define ALTINSTR_ENTRY(feature,cb)					      \
+#define ALTINSTR_ENTRY(feature)						      \
 	" .word 661b - .\n"				/* label           */ \
-	" .if " __stringify(cb) " == 0\n"				      \
 	" .word 663f - .\n"				/* new instruction */ \
-	" .else\n"							      \
+	" .hword " __stringify(feature) "\n"		/* feature bit     */ \
+	" .byte 662b-661b\n"				/* source len      */ \
+	" .byte 664f-663f\n"				/* replacement len */
+
+#define ALTINSTR_ENTRY_CB(feature, cb)					      \
+	" .word 661b - .\n"				/* label           */ \
 	" .word " __stringify(cb) "- .\n"		/* callback        */ \
-	" .endif\n"							      \
 	" .hword " __stringify(feature) "\n"		/* feature bit     */ \
 	" .byte 662b-661b\n"				/* source len      */ \
 	" .byte 664f-663f\n"				/* replacement len */
@@ -62,15 +65,14 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
  *
  * Alternatives with callbacks do not generate replacement instructions.
  */
-#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled, cb)	\
+#define __ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg_enabled)	\
 	".if "__stringify(cfg_enabled)" == 1\n"				\
 	"661:\n\t"							\
 	oldinstr "\n"							\
 	"662:\n"							\
 	".pushsection .altinstructions,\"a\"\n"				\
-	ALTINSTR_ENTRY(feature,cb)					\
+	ALTINSTR_ENTRY(feature)						\
 	".popsection\n"							\
-	" .if " __stringify(cb) " == 0\n"				\
 	".pushsection .altinstr_replacement, \"a\"\n"			\
 	"663:\n\t"							\
 	newinstr "\n"							\
@@ -78,17 +80,25 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
 	".popsection\n\t"						\
 	".org	. - (664b-663b) + (662b-661b)\n\t"			\
 	".org	. - (662b-661b) + (664b-663b)\n"			\
-	".else\n\t"							\
+	".endif\n"
+
+#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb)	\
+	".if "__stringify(cfg_enabled)" == 1\n"				\
+	"661:\n\t"							\
+	oldinstr "\n"							\
+	"662:\n"							\
+	".pushsection .altinstructions,\"a\"\n"				\
+	ALTINSTR_ENTRY_CB(feature, cb)					\
+	".popsection\n"							\
 	"663:\n\t"							\
 	"664:\n\t"							\
-	".endif\n"							\
 	".endif\n"
 
 #define _ALTERNATIVE_CFG(oldinstr, newinstr, feature, cfg, ...)	\
-	__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg), 0)
+	__ALTERNATIVE_CFG(oldinstr, newinstr, feature, IS_ENABLED(cfg))
 
 #define ALTERNATIVE_CB(oldinstr, cb) \
-	__ALTERNATIVE_CFG(oldinstr, "NOT_AN_INSTRUCTION", ARM64_CB_PATCH, 1, cb)
+	__ALTERNATIVE_CFG_CB(oldinstr, ARM64_CB_PATCH, 1, cb)
 #else
 
 #include <asm/assembler.h>
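For context, a hedged usage sketch of the two macro families the hunks above split apart (the capability name ARM64_EXAMPLE_CAP and the callback example_cb are hypothetical, not from this patch): ALTERNATIVE() records replacement instruction bytes, while ALTERNATIVE_CB() now goes through ALTINSTR_ENTRY_CB()/__ALTERNATIVE_CFG_CB() and records a patching callback instead.

	/* Sketch only; names marked hypothetical are not defined by this commit. */
	void example_cb(struct alt_instr *alt, __le32 *origptr,
			__le32 *updptr, int nr_inst);	/* hypothetical callback */

	static inline void example_patch_sites(void)
	{
		/* Patched to "dmb ish" at boot if the (hypothetical) cap is set. */
		asm volatile(ALTERNATIVE("nop", "dmb ish", ARM64_EXAMPLE_CAP));

		/* No replacement bytes; example_cb() rewrites the nop at patch time. */
		asm volatile(ALTERNATIVE_CB("nop", example_cb));
	}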
@@ -40,12 +40,6 @@
 	msr	daif, \flags
 	.endm
 
-	/* Only on aarch64 pstate, PSR_D_BIT is different for aarch32 */
-	.macro	inherit_daif, pstate:req, tmp:req
-	and	\tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
-	msr	daif, \tmp
-	.endm
-
 	/* IRQ is the lowest priority flag, unconditionally unmask the rest. */
 	.macro enable_da_f
 	msr	daifclr, #(8 | 4 | 1)
@@ -85,13 +79,6 @@
 9990:
 	.endm
 
-/*
- * SMP data memory barrier
- */
-	.macro	smp_dmb, opt
-	dmb	\opt
-	.endm
-
 /*
  * RAS Error Synchronization barrier
  */
@@ -12,7 +12,7 @@
 
 #include <linux/stringify.h>
 
-#if IS_ENABLED(CONFIG_ARM64_LSE_ATOMICS) && IS_ENABLED(CONFIG_AS_LSE)
+#ifdef CONFIG_ARM64_LSE_ATOMICS
 #define __LL_SC_FALLBACK(asm_ops)					\
 "	b	3f\n"							\
 "	.subsection	1\n"						\
@@ -14,6 +14,7 @@
 static inline void __lse_atomic_##op(int i, atomic_t *v)		\
 {									\
 	asm volatile(							\
+	__LSE_PREAMBLE							\
 "	" #asm_op "	%w[i], %[v]\n"					\
 	: [i] "+r" (i), [v] "+Q" (v->counter)				\
 	: "r" (v));							\
@@ -30,6 +31,7 @@ ATOMIC_OP(add, stadd)
 static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v)	\
 {									\
 	asm volatile(							\
+	__LSE_PREAMBLE							\
 "	" #asm_op #mb "	%w[i], %w[i], %[v]"				\
 	: [i] "+r" (i), [v] "+Q" (v->counter)				\
 	: "r" (v)							\
@@ -58,6 +60,7 @@ static inline int __lse_atomic_add_return##name(int i, atomic_t *v)	\
 	u32 tmp;							\
 									\
 	asm volatile(							\
+	__LSE_PREAMBLE							\
 	"	ldadd" #mb "	%w[i], %w[tmp], %[v]\n"			\
 	"	add	%w[i], %w[i], %w[tmp]"				\
 	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
@@ -77,6 +80,7 @@ ATOMIC_OP_ADD_RETURN( , al, "memory")
 static inline void __lse_atomic_and(int i, atomic_t *v)
 {
 	asm volatile(
+	__LSE_PREAMBLE
 	"	mvn	%w[i], %w[i]\n"
 	"	stclr	%w[i], %[v]"
 	: [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -87,6 +91,7 @@ static inline void __lse_atomic_and(int i, atomic_t *v)
 static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v)	\
 {									\
 	asm volatile(							\
+	__LSE_PREAMBLE							\
 	"	mvn	%w[i], %w[i]\n"					\
 	"	ldclr" #mb "	%w[i], %w[i], %[v]"			\
 	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
@@ -106,6 +111,7 @@ ATOMIC_FETCH_OP_AND( , al, "memory")
 static inline void __lse_atomic_sub(int i, atomic_t *v)
 {
 	asm volatile(
+	__LSE_PREAMBLE
 	"	neg	%w[i], %w[i]\n"
 	"	stadd	%w[i], %[v]"
 	: [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -118,6 +124,7 @@ static inline int __lse_atomic_sub_return##name(int i, atomic_t *v)	\
 	u32 tmp;							\
 									\
 	asm volatile(							\
+	__LSE_PREAMBLE							\
 	"	neg	%w[i], %w[i]\n"					\
 	"	ldadd" #mb "	%w[i], %w[tmp], %[v]\n"			\
 	"	add	%w[i], %w[i], %w[tmp]"				\
@@ -139,6 +146,7 @@ ATOMIC_OP_SUB_RETURN( , al, "memory")
 static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v)	\
 {									\
 	asm volatile(							\
+	__LSE_PREAMBLE							\
 	"	neg	%w[i], %w[i]\n"					\
 	"	ldadd" #mb "	%w[i], %w[i], %[v]"			\
 	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
@@ -159,6 +167,7 @@ ATOMIC_FETCH_OP_SUB( , al, "memory")
 static inline void __lse_atomic64_##op(s64 i, atomic64_t *v)		\
 {									\
 	asm volatile(							\
+	__LSE_PREAMBLE							\
 "	" #asm_op "	%[i], %[v]\n"					\
 	: [i] "+r" (i), [v] "+Q" (v->counter)				\
 	: "r" (v));							\
@@ -175,6 +184,7 @@ ATOMIC64_OP(add, stadd)
 static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
 {									\
 	asm volatile(							\
+	__LSE_PREAMBLE							\
 "	" #asm_op #mb "	%[i], %[i], %[v]"				\
 	: [i] "+r" (i), [v] "+Q" (v->counter)				\
 	: "r" (v)							\
@@ -203,6 +213,7 @@ static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
 	unsigned long tmp;						\
 									\
 	asm volatile(							\
+	__LSE_PREAMBLE							\
 	"	ldadd" #mb "	%[i], %x[tmp], %[v]\n"			\
 	"	add	%[i], %[i], %x[tmp]"				\
 	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
@@ -222,6 +233,7 @@ ATOMIC64_OP_ADD_RETURN( , al, "memory")
 static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
 {
 	asm volatile(
+	__LSE_PREAMBLE
 	"	mvn	%[i], %[i]\n"
 	"	stclr	%[i], %[v]"
 	: [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -232,6 +244,7 @@ static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
 static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
 {									\
 	asm volatile(							\
+	__LSE_PREAMBLE							\
 	"	mvn	%[i], %[i]\n"					\
 	"	ldclr" #mb "	%[i], %[i], %[v]"			\
 	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
@@ -251,6 +264,7 @@ ATOMIC64_FETCH_OP_AND( , al, "memory")
 static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
 {
 	asm volatile(
+	__LSE_PREAMBLE
 	"	neg	%[i], %[i]\n"
 	"	stadd	%[i], %[v]"
 	: [i] "+&r" (i), [v] "+Q" (v->counter)
@@ -263,6 +277,7 @@ static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)	\
 	unsigned long tmp;						\
 									\
 	asm volatile(							\
+	__LSE_PREAMBLE							\
 	"	neg	%[i], %[i]\n"					\
 	"	ldadd" #mb "	%[i], %x[tmp], %[v]\n"			\
 	"	add	%[i], %[i], %x[tmp]"				\
@@ -284,6 +299,7 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory")
 static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v)	\
 {									\
 	asm volatile(							\
+	__LSE_PREAMBLE							\
 	"	neg	%[i], %[i]\n"					\
 	"	ldadd" #mb "	%[i], %[i], %[v]"			\
 	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
@@ -305,6 +321,7 @@ static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
 	unsigned long tmp;
 
 	asm volatile(
+	__LSE_PREAMBLE
 	"1:	ldr	%x[tmp], %[v]\n"
 	"	subs	%[ret], %x[tmp], #1\n"
 	"	b.lt	2f\n"
@@ -332,6 +349,7 @@ __lse__cmpxchg_case_##name##sz(volatile void *ptr,			\
 	unsigned long tmp;						\
 									\
 	asm volatile(							\
+	__LSE_PREAMBLE							\
 	"	mov	%" #w "[tmp], %" #w "[old]\n"			\
 	"	cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n"	\
 	"	mov	%" #w "[ret], %" #w "[tmp]"			\
@@ -379,6 +397,7 @@ __lse__cmpxchg_double##name(unsigned long old1,				\
 	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
 									\
 	asm volatile(							\
+	__LSE_PREAMBLE							\
 	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
 	"	eor	%[old1], %[old1], %[oldval1]\n"			\
 	"	eor	%[old2], %[old2], %[oldval2]\n"			\
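A brief context sketch (not part of the patch): the __lse_* helpers touched above sit behind the generic atomic_*() API, and whether the LSE or the LL/SC implementation runs is decided at runtime (see system_uses_lse_atomics() in the lse.h hunks further down). Generic callers never reference these helpers directly.

	#include <linux/atomic.h>
	#include <linux/printk.h>

	static atomic_t example_counter = ATOMIC_INIT(0);

	static void example_bump(void)
	{
		atomic_add(2, &example_counter);		/* may compile to stadd on LSE CPUs */
		if (atomic_fetch_sub(1, &example_counter) == 2)	/* ldadd with a negated argument */
			pr_debug("counter was 2\n");
	}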
|
@ -35,6 +35,9 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
|
|||||||
}
|
}
|
||||||
#define ip_fast_csum ip_fast_csum
|
#define ip_fast_csum ip_fast_csum
|
||||||
|
|
||||||
|
extern unsigned int do_csum(const unsigned char *buff, int len);
|
||||||
|
#define do_csum do_csum
|
||||||
|
|
||||||
#include <asm-generic/checksum.h>
|
#include <asm-generic/checksum.h>
|
||||||
|
|
||||||
#endif /* __ASM_CHECKSUM_H */
|
#endif /* __ASM_CHECKSUM_H */
|
||||||
|
@ -39,6 +39,7 @@ struct cpuinfo_arm64 {
|
|||||||
u32 reg_id_isar3;
|
u32 reg_id_isar3;
|
||||||
u32 reg_id_isar4;
|
u32 reg_id_isar4;
|
||||||
u32 reg_id_isar5;
|
u32 reg_id_isar5;
|
||||||
|
u32 reg_id_isar6;
|
||||||
u32 reg_id_mmfr0;
|
u32 reg_id_mmfr0;
|
||||||
u32 reg_id_mmfr1;
|
u32 reg_id_mmfr1;
|
||||||
u32 reg_id_mmfr2;
|
u32 reg_id_mmfr2;
|
||||||
|
@ -56,7 +56,8 @@
|
|||||||
#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46
|
#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM 46
|
||||||
#define ARM64_WORKAROUND_1542419 47
|
#define ARM64_WORKAROUND_1542419 47
|
||||||
#define ARM64_WORKAROUND_1319367 48
|
#define ARM64_WORKAROUND_1319367 48
|
||||||
|
#define ARM64_HAS_E0PD 49
|
||||||
|
|
||||||
#define ARM64_NCAPS 49
|
#define ARM64_NCAPS 50
|
||||||
|
|
||||||
#endif /* __ASM_CPUCAPS_H */
|
#endif /* __ASM_CPUCAPS_H */
|
||||||
|
@ -613,6 +613,11 @@ static inline bool system_has_prio_mask_debugging(void)
|
|||||||
system_uses_irq_prio_masking();
|
system_uses_irq_prio_masking();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline bool system_capabilities_finalized(void)
|
||||||
|
{
|
||||||
|
return static_branch_likely(&arm64_const_caps_ready);
|
||||||
|
}
|
||||||
|
|
||||||
#define ARM64_BP_HARDEN_UNKNOWN -1
|
#define ARM64_BP_HARDEN_UNKNOWN -1
|
||||||
#define ARM64_BP_HARDEN_WA_NEEDED 0
|
#define ARM64_BP_HARDEN_WA_NEEDED 0
|
||||||
#define ARM64_BP_HARDEN_NOT_REQUIRED 1
|
#define ARM64_BP_HARDEN_NOT_REQUIRED 1
|
||||||
|
@ -85,6 +85,8 @@
|
|||||||
#define QCOM_CPU_PART_FALKOR_V1 0x800
|
#define QCOM_CPU_PART_FALKOR_V1 0x800
|
||||||
#define QCOM_CPU_PART_FALKOR 0xC00
|
#define QCOM_CPU_PART_FALKOR 0xC00
|
||||||
#define QCOM_CPU_PART_KRYO 0x200
|
#define QCOM_CPU_PART_KRYO 0x200
|
||||||
|
#define QCOM_CPU_PART_KRYO_3XX_SILVER 0x803
|
||||||
|
#define QCOM_CPU_PART_KRYO_4XX_SILVER 0x805
|
||||||
|
|
||||||
#define NVIDIA_CPU_PART_DENVER 0x003
|
#define NVIDIA_CPU_PART_DENVER 0x003
|
||||||
#define NVIDIA_CPU_PART_CARMEL 0x004
|
#define NVIDIA_CPU_PART_CARMEL 0x004
|
||||||
@ -111,6 +113,8 @@
|
|||||||
#define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
|
#define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
|
||||||
#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
|
#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
|
||||||
#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
|
#define MIDR_QCOM_KRYO MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO)
|
||||||
|
#define MIDR_QCOM_KRYO_3XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_3XX_SILVER)
|
||||||
|
#define MIDR_QCOM_KRYO_4XX_SILVER MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_KRYO_4XX_SILVER)
|
||||||
#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
|
#define MIDR_NVIDIA_DENVER MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_DENVER)
|
||||||
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
|
#define MIDR_NVIDIA_CARMEL MIDR_CPU_MODEL(ARM_CPU_IMP_NVIDIA, NVIDIA_CPU_PART_CARMEL)
|
||||||
#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
|
#define MIDR_FUJITSU_A64FX MIDR_CPU_MODEL(ARM_CPU_IMP_FUJITSU, FUJITSU_CPU_PART_A64FX)
|
||||||
|
@ -45,8 +45,8 @@ void do_sysinstr(unsigned int esr, struct pt_regs *regs);
|
|||||||
void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
|
void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
|
||||||
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
|
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
|
||||||
void do_cp15instr(unsigned int esr, struct pt_regs *regs);
|
void do_cp15instr(unsigned int esr, struct pt_regs *regs);
|
||||||
void el0_svc_handler(struct pt_regs *regs);
|
void do_el0_svc(struct pt_regs *regs);
|
||||||
void el0_svc_compat_handler(struct pt_regs *regs);
|
void do_el0_svc_compat(struct pt_regs *regs);
|
||||||
void do_el0_ia_bp_hardening(unsigned long addr, unsigned int esr,
|
void do_el0_ia_bp_hardening(unsigned long addr, unsigned int esr,
|
||||||
struct pt_regs *regs);
|
struct pt_regs *regs);
|
||||||
|
|
||||||
|
@ -86,6 +86,13 @@
|
|||||||
#define KERNEL_HWCAP_SVESM4 __khwcap2_feature(SVESM4)
|
#define KERNEL_HWCAP_SVESM4 __khwcap2_feature(SVESM4)
|
||||||
#define KERNEL_HWCAP_FLAGM2 __khwcap2_feature(FLAGM2)
|
#define KERNEL_HWCAP_FLAGM2 __khwcap2_feature(FLAGM2)
|
||||||
#define KERNEL_HWCAP_FRINT __khwcap2_feature(FRINT)
|
#define KERNEL_HWCAP_FRINT __khwcap2_feature(FRINT)
|
||||||
|
#define KERNEL_HWCAP_SVEI8MM __khwcap2_feature(SVEI8MM)
|
||||||
|
#define KERNEL_HWCAP_SVEF32MM __khwcap2_feature(SVEF32MM)
|
||||||
|
#define KERNEL_HWCAP_SVEF64MM __khwcap2_feature(SVEF64MM)
|
||||||
|
#define KERNEL_HWCAP_SVEBF16 __khwcap2_feature(SVEBF16)
|
||||||
|
#define KERNEL_HWCAP_I8MM __khwcap2_feature(I8MM)
|
||||||
|
#define KERNEL_HWCAP_DGH __khwcap2_feature(DGH)
|
||||||
|
#define KERNEL_HWCAP_BF16 __khwcap2_feature(BF16)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* This yields a mask that user programs can use to figure out what
|
* This yields a mask that user programs can use to figure out what
|
||||||
|
@ -96,6 +96,10 @@ static inline void crash_post_resume(void) {}
|
|||||||
struct kimage_arch {
|
struct kimage_arch {
|
||||||
void *dtb;
|
void *dtb;
|
||||||
unsigned long dtb_mem;
|
unsigned long dtb_mem;
|
||||||
|
/* Core ELF header buffer */
|
||||||
|
void *elf_headers;
|
||||||
|
unsigned long elf_headers_mem;
|
||||||
|
unsigned long elf_headers_sz;
|
||||||
};
|
};
|
||||||
|
|
||||||
extern const struct kexec_file_ops kexec_image_ops;
|
extern const struct kexec_file_ops kexec_image_ops;
|
||||||
|
@ -547,7 +547,7 @@ static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
|
|||||||
* wrong, and hyp will crash and burn when it uses any
|
* wrong, and hyp will crash and burn when it uses any
|
||||||
* cpus_have_const_cap() wrapper.
|
* cpus_have_const_cap() wrapper.
|
||||||
*/
|
*/
|
||||||
BUG_ON(!static_branch_likely(&arm64_const_caps_ready));
|
BUG_ON(!system_capabilities_finalized());
|
||||||
__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
|
__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -4,7 +4,9 @@
|
|||||||
|
|
||||||
#include <asm/atomic_ll_sc.h>
|
#include <asm/atomic_ll_sc.h>
|
||||||
|
|
||||||
#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
|
#ifdef CONFIG_ARM64_LSE_ATOMICS
|
||||||
|
|
||||||
|
#define __LSE_PREAMBLE ".arch armv8-a+lse\n"
|
||||||
|
|
||||||
#include <linux/compiler_types.h>
|
#include <linux/compiler_types.h>
|
||||||
#include <linux/export.h>
|
#include <linux/export.h>
|
||||||
@ -14,8 +16,6 @@
|
|||||||
#include <asm/atomic_lse.h>
|
#include <asm/atomic_lse.h>
|
||||||
#include <asm/cpucaps.h>
|
#include <asm/cpucaps.h>
|
||||||
|
|
||||||
__asm__(".arch_extension lse");
|
|
||||||
|
|
||||||
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
|
extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
|
||||||
extern struct static_key_false arm64_const_caps_ready;
|
extern struct static_key_false arm64_const_caps_ready;
|
||||||
|
|
||||||
@ -34,9 +34,9 @@ static inline bool system_uses_lse_atomics(void)
|
|||||||
|
|
||||||
/* In-line patching at runtime */
|
/* In-line patching at runtime */
|
||||||
#define ARM64_LSE_ATOMIC_INSN(llsc, lse) \
|
#define ARM64_LSE_ATOMIC_INSN(llsc, lse) \
|
||||||
ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS)
|
ALTERNATIVE(llsc, __LSE_PREAMBLE lse, ARM64_HAS_LSE_ATOMICS)
|
||||||
|
|
||||||
#else /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
|
#else /* CONFIG_ARM64_LSE_ATOMICS */
|
||||||
|
|
||||||
static inline bool system_uses_lse_atomics(void) { return false; }
|
static inline bool system_uses_lse_atomics(void) { return false; }
|
||||||
|
|
||||||
@ -44,5 +44,5 @@ static inline bool system_uses_lse_atomics(void) { return false; }
|
|||||||
|
|
||||||
#define ARM64_LSE_ATOMIC_INSN(llsc, lse) llsc
|
#define ARM64_LSE_ATOMIC_INSN(llsc, lse) llsc
|
||||||
|
|
||||||
#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
|
#endif /* CONFIG_ARM64_LSE_ATOMICS */
|
||||||
#endif /* __ASM_LSE_H */
|
#endif /* __ASM_LSE_H */
|
||||||
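A minimal sketch of the pattern these lse.h hunks establish (the wrapper function is hypothetical, not from the patch): instead of a file-wide __asm__(".arch_extension lse"), every LSE asm block now carries its own architecture directive via __LSE_PREAMBLE, which is also prepended to the "lse" side of ARM64_LSE_ATOMIC_INSN().

	/* Sketch, mirroring the __lse_atomic_* helpers shown earlier. */
	static inline void example_stadd(int i, int *counter)
	{
		asm volatile(
		__LSE_PREAMBLE			/* ".arch armv8-a+lse\n" for this block only */
		"	stadd	%w[i], %[v]\n"
		: [i] "+r" (i), [v] "+Q" (*counter)
		: "r" (counter));
	}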
|
@ -29,52 +29,11 @@ typedef struct {
|
|||||||
*/
|
*/
|
||||||
#define ASID(mm) ((mm)->context.id.counter & 0xffff)
|
#define ASID(mm) ((mm)->context.id.counter & 0xffff)
|
||||||
|
|
||||||
|
extern bool arm64_use_ng_mappings;
|
||||||
|
|
||||||
static inline bool arm64_kernel_unmapped_at_el0(void)
|
static inline bool arm64_kernel_unmapped_at_el0(void)
|
||||||
{
|
{
|
||||||
return IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0) &&
|
return arm64_use_ng_mappings;
|
||||||
cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline bool arm64_kernel_use_ng_mappings(void)
|
|
||||||
{
|
|
||||||
bool tx1_bug;
|
|
||||||
|
|
||||||
/* What's a kpti? Use global mappings if we don't know. */
|
|
||||||
if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
|
|
||||||
return false;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Note: this function is called before the CPU capabilities have
|
|
||||||
* been configured, so our early mappings will be global. If we
|
|
||||||
* later determine that kpti is required, then
|
|
||||||
* kpti_install_ng_mappings() will make them non-global.
|
|
||||||
*/
|
|
||||||
if (arm64_kernel_unmapped_at_el0())
|
|
||||||
return true;
|
|
||||||
|
|
||||||
if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
|
|
||||||
return false;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* KASLR is enabled so we're going to be enabling kpti on non-broken
|
|
||||||
* CPUs regardless of their susceptibility to Meltdown. Rather
|
|
||||||
* than force everybody to go through the G -> nG dance later on,
|
|
||||||
* just put down non-global mappings from the beginning.
|
|
||||||
*/
|
|
||||||
if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
|
|
||||||
tx1_bug = false;
|
|
||||||
#ifndef MODULE
|
|
||||||
} else if (!static_branch_likely(&arm64_const_caps_ready)) {
|
|
||||||
extern const struct midr_range cavium_erratum_27456_cpus[];
|
|
||||||
|
|
||||||
tx1_bug = is_midr_in_range_list(read_cpuid_id(),
|
|
||||||
cavium_erratum_27456_cpus);
|
|
||||||
#endif
|
|
||||||
} else {
|
|
||||||
tx1_bug = __cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456);
|
|
||||||
}
|
|
||||||
|
|
||||||
return !tx1_bug && kaslr_offset() > 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
typedef void (*bp_hardening_cb_t)(void);
|
typedef void (*bp_hardening_cb_t)(void);
|
||||||
@ -128,6 +87,7 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
|
|||||||
pgprot_t prot, bool page_mappings_only);
|
pgprot_t prot, bool page_mappings_only);
|
||||||
extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
|
extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot);
|
||||||
extern void mark_linear_text_alias_ro(void);
|
extern void mark_linear_text_alias_ro(void);
|
||||||
|
extern bool kaslr_requires_kpti(void);
|
||||||
|
|
||||||
#define INIT_MM_CONTEXT(name) \
|
#define INIT_MM_CONTEXT(name) \
|
||||||
.pgd = init_pg_dir,
|
.pgd = init_pg_dir,
|
||||||
|
@ -110,6 +110,7 @@
|
|||||||
#define PUD_TABLE_BIT (_AT(pudval_t, 1) << 1)
|
#define PUD_TABLE_BIT (_AT(pudval_t, 1) << 1)
|
||||||
#define PUD_TYPE_MASK (_AT(pudval_t, 3) << 0)
|
#define PUD_TYPE_MASK (_AT(pudval_t, 3) << 0)
|
||||||
#define PUD_TYPE_SECT (_AT(pudval_t, 1) << 0)
|
#define PUD_TYPE_SECT (_AT(pudval_t, 1) << 0)
|
||||||
|
#define PUD_SECT_RDONLY (_AT(pudval_t, 1) << 7) /* AP[2] */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Level 2 descriptor (PMD).
|
* Level 2 descriptor (PMD).
|
||||||
@ -292,6 +293,8 @@
|
|||||||
#define TCR_HD (UL(1) << 40)
|
#define TCR_HD (UL(1) << 40)
|
||||||
#define TCR_NFD0 (UL(1) << 53)
|
#define TCR_NFD0 (UL(1) << 53)
|
||||||
#define TCR_NFD1 (UL(1) << 54)
|
#define TCR_NFD1 (UL(1) << 54)
|
||||||
|
#define TCR_E0PD0 (UL(1) << 55)
|
||||||
|
#define TCR_E0PD1 (UL(1) << 56)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* TTBR.
|
* TTBR.
|
||||||
|
@ -26,8 +26,8 @@
|
|||||||
#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
|
#define _PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
|
||||||
#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
|
#define _PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
|
||||||
|
|
||||||
#define PTE_MAYBE_NG (arm64_kernel_use_ng_mappings() ? PTE_NG : 0)
|
#define PTE_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PTE_NG : 0)
|
||||||
#define PMD_MAYBE_NG (arm64_kernel_use_ng_mappings() ? PMD_SECT_NG : 0)
|
#define PMD_MAYBE_NG (arm64_kernel_unmapped_at_el0() ? PMD_SECT_NG : 0)
|
||||||
|
|
||||||
#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG)
|
#define PROT_DEFAULT (_PROT_DEFAULT | PTE_MAYBE_NG)
|
||||||
#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
|
#define PROT_SECT_DEFAULT (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
|
||||||
|
@ -8,7 +8,6 @@
|
|||||||
#include <asm-generic/sections.h>
|
#include <asm-generic/sections.h>
|
||||||
|
|
||||||
extern char __alt_instructions[], __alt_instructions_end[];
|
extern char __alt_instructions[], __alt_instructions_end[];
|
||||||
extern char __exception_text_start[], __exception_text_end[];
|
|
||||||
extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
|
extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
|
||||||
extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
|
extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
|
||||||
extern char __hyp_text_start[], __hyp_text_end[];
|
extern char __hyp_text_start[], __hyp_text_end[];
|
||||||
|
@ -26,6 +26,8 @@ DECLARE_PER_CPU(bool, fpsimd_context_busy);
|
|||||||
static __must_check inline bool may_use_simd(void)
|
static __must_check inline bool may_use_simd(void)
|
||||||
{
|
{
|
||||||
/*
|
/*
|
||||||
|
* We must make sure that the SVE has been initialized properly
|
||||||
|
* before using the SIMD in kernel.
|
||||||
* fpsimd_context_busy is only set while preemption is disabled,
|
* fpsimd_context_busy is only set while preemption is disabled,
|
||||||
* and is clear whenever preemption is enabled. Since
|
* and is clear whenever preemption is enabled. Since
|
||||||
* this_cpu_read() is atomic w.r.t. preemption, fpsimd_context_busy
|
* this_cpu_read() is atomic w.r.t. preemption, fpsimd_context_busy
|
||||||
@ -33,8 +35,10 @@ static __must_check inline bool may_use_simd(void)
|
|||||||
* migrated, and if it's clear we cannot be migrated to a CPU
|
* migrated, and if it's clear we cannot be migrated to a CPU
|
||||||
* where it is set.
|
* where it is set.
|
||||||
*/
|
*/
|
||||||
return !in_irq() && !irqs_disabled() && !in_nmi() &&
|
return !WARN_ON(!system_capabilities_finalized()) &&
|
||||||
!this_cpu_read(fpsimd_context_busy);
|
system_supports_fpsimd() &&
|
||||||
|
!in_irq() && !irqs_disabled() && !in_nmi() &&
|
||||||
|
!this_cpu_read(fpsimd_context_busy);
|
||||||
}
|
}
|
||||||
|
|
||||||
#else /* ! CONFIG_KERNEL_MODE_NEON */
|
#else /* ! CONFIG_KERNEL_MODE_NEON */
|
||||||
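For context, the typical caller pattern around the check changed above (a sketch; neon_impl() and scalar_fallback() are hypothetical helpers): callers gate kernel-mode NEON on may_use_simd(), so with the extra checks they now also fall back cleanly when FPSIMD is absent or the capabilities are not yet finalized.

	#include <asm/neon.h>
	#include <asm/simd.h>

	static void example_do_work(void)
	{
		if (may_use_simd()) {
			kernel_neon_begin();
			neon_impl();		/* hypothetical SIMD path */
			kernel_neon_end();
		} else {
			scalar_fallback();	/* hypothetical non-SIMD path */
		}
	}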
|
@ -146,6 +146,7 @@
|
|||||||
#define SYS_ID_ISAR4_EL1 sys_reg(3, 0, 0, 2, 4)
|
#define SYS_ID_ISAR4_EL1 sys_reg(3, 0, 0, 2, 4)
|
||||||
#define SYS_ID_ISAR5_EL1 sys_reg(3, 0, 0, 2, 5)
|
#define SYS_ID_ISAR5_EL1 sys_reg(3, 0, 0, 2, 5)
|
||||||
#define SYS_ID_MMFR4_EL1 sys_reg(3, 0, 0, 2, 6)
|
#define SYS_ID_MMFR4_EL1 sys_reg(3, 0, 0, 2, 6)
|
||||||
|
#define SYS_ID_ISAR6_EL1 sys_reg(3, 0, 0, 2, 7)
|
||||||
|
|
||||||
#define SYS_MVFR0_EL1 sys_reg(3, 0, 0, 3, 0)
|
#define SYS_MVFR0_EL1 sys_reg(3, 0, 0, 3, 0)
|
||||||
#define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1)
|
#define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1)
|
||||||
@ -538,6 +539,18 @@
|
|||||||
SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\
|
SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\
|
||||||
ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1)
|
ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1)
|
||||||
|
|
||||||
|
/* MAIR_ELx memory attributes (used by Linux) */
|
||||||
|
#define MAIR_ATTR_DEVICE_nGnRnE UL(0x00)
|
||||||
|
#define MAIR_ATTR_DEVICE_nGnRE UL(0x04)
|
||||||
|
#define MAIR_ATTR_DEVICE_GRE UL(0x0c)
|
||||||
|
#define MAIR_ATTR_NORMAL_NC UL(0x44)
|
||||||
|
#define MAIR_ATTR_NORMAL_WT UL(0xbb)
|
||||||
|
#define MAIR_ATTR_NORMAL UL(0xff)
|
||||||
|
#define MAIR_ATTR_MASK UL(0xff)
|
||||||
|
|
||||||
|
/* Position the attr at the correct index */
|
||||||
|
#define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8))
|
||||||
|
|
||||||
/* id_aa64isar0 */
|
/* id_aa64isar0 */
|
||||||
#define ID_AA64ISAR0_TS_SHIFT 52
|
#define ID_AA64ISAR0_TS_SHIFT 52
|
||||||
#define ID_AA64ISAR0_FHM_SHIFT 48
|
#define ID_AA64ISAR0_FHM_SHIFT 48
|
||||||
@ -553,6 +566,10 @@
|
|||||||
#define ID_AA64ISAR0_AES_SHIFT 4
|
#define ID_AA64ISAR0_AES_SHIFT 4
|
||||||
|
|
||||||
/* id_aa64isar1 */
|
/* id_aa64isar1 */
|
||||||
|
#define ID_AA64ISAR1_I8MM_SHIFT 52
|
||||||
|
#define ID_AA64ISAR1_DGH_SHIFT 48
|
||||||
|
#define ID_AA64ISAR1_BF16_SHIFT 44
|
||||||
|
#define ID_AA64ISAR1_SPECRES_SHIFT 40
|
||||||
#define ID_AA64ISAR1_SB_SHIFT 36
|
#define ID_AA64ISAR1_SB_SHIFT 36
|
||||||
#define ID_AA64ISAR1_FRINTTS_SHIFT 32
|
#define ID_AA64ISAR1_FRINTTS_SHIFT 32
|
||||||
#define ID_AA64ISAR1_GPI_SHIFT 28
|
#define ID_AA64ISAR1_GPI_SHIFT 28
|
||||||
@ -605,12 +622,20 @@
|
|||||||
#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2
|
#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2
|
||||||
|
|
||||||
/* id_aa64zfr0 */
|
/* id_aa64zfr0 */
|
||||||
|
#define ID_AA64ZFR0_F64MM_SHIFT 56
|
||||||
|
#define ID_AA64ZFR0_F32MM_SHIFT 52
|
||||||
|
#define ID_AA64ZFR0_I8MM_SHIFT 44
|
||||||
#define ID_AA64ZFR0_SM4_SHIFT 40
|
#define ID_AA64ZFR0_SM4_SHIFT 40
|
||||||
#define ID_AA64ZFR0_SHA3_SHIFT 32
|
#define ID_AA64ZFR0_SHA3_SHIFT 32
|
||||||
|
#define ID_AA64ZFR0_BF16_SHIFT 20
|
||||||
#define ID_AA64ZFR0_BITPERM_SHIFT 16
|
#define ID_AA64ZFR0_BITPERM_SHIFT 16
|
||||||
#define ID_AA64ZFR0_AES_SHIFT 4
|
#define ID_AA64ZFR0_AES_SHIFT 4
|
||||||
#define ID_AA64ZFR0_SVEVER_SHIFT 0
|
#define ID_AA64ZFR0_SVEVER_SHIFT 0
|
||||||
|
|
||||||
|
#define ID_AA64ZFR0_F64MM 0x1
|
||||||
|
#define ID_AA64ZFR0_F32MM 0x1
|
||||||
|
#define ID_AA64ZFR0_I8MM 0x1
|
||||||
|
#define ID_AA64ZFR0_BF16 0x1
|
||||||
#define ID_AA64ZFR0_SM4 0x1
|
#define ID_AA64ZFR0_SM4 0x1
|
||||||
#define ID_AA64ZFR0_SHA3 0x1
|
#define ID_AA64ZFR0_SHA3 0x1
|
||||||
#define ID_AA64ZFR0_BITPERM 0x1
|
#define ID_AA64ZFR0_BITPERM 0x1
|
||||||
@ -655,6 +680,7 @@
|
|||||||
#define ID_AA64MMFR1_VMIDBITS_16 2
|
#define ID_AA64MMFR1_VMIDBITS_16 2
|
||||||
|
|
||||||
/* id_aa64mmfr2 */
|
/* id_aa64mmfr2 */
|
||||||
|
#define ID_AA64MMFR2_E0PD_SHIFT 60
|
||||||
#define ID_AA64MMFR2_FWB_SHIFT 40
|
#define ID_AA64MMFR2_FWB_SHIFT 40
|
||||||
#define ID_AA64MMFR2_AT_SHIFT 32
|
#define ID_AA64MMFR2_AT_SHIFT 32
|
||||||
#define ID_AA64MMFR2_LVA_SHIFT 16
|
#define ID_AA64MMFR2_LVA_SHIFT 16
|
||||||
@ -679,6 +705,14 @@
|
|||||||
#define ID_ISAR5_AES_SHIFT 4
|
#define ID_ISAR5_AES_SHIFT 4
|
||||||
#define ID_ISAR5_SEVL_SHIFT 0
|
#define ID_ISAR5_SEVL_SHIFT 0
|
||||||
|
|
||||||
|
#define ID_ISAR6_I8MM_SHIFT 24
|
||||||
|
#define ID_ISAR6_BF16_SHIFT 20
|
||||||
|
#define ID_ISAR6_SPECRES_SHIFT 16
|
||||||
|
#define ID_ISAR6_SB_SHIFT 12
|
||||||
|
#define ID_ISAR6_FHM_SHIFT 8
|
||||||
|
#define ID_ISAR6_DP_SHIFT 4
|
||||||
|
#define ID_ISAR6_JSCVT_SHIFT 0
|
||||||
|
|
||||||
#define MVFR0_FPROUND_SHIFT 28
|
#define MVFR0_FPROUND_SHIFT 28
|
||||||
#define MVFR0_FPSHVEC_SHIFT 24
|
#define MVFR0_FPSHVEC_SHIFT 24
|
||||||
#define MVFR0_FPSQRT_SHIFT 20
|
#define MVFR0_FPSQRT_SHIFT 20
|
||||||
|
@ -65,5 +65,12 @@
|
|||||||
#define HWCAP2_SVESM4 (1 << 6)
|
#define HWCAP2_SVESM4 (1 << 6)
|
||||||
#define HWCAP2_FLAGM2 (1 << 7)
|
#define HWCAP2_FLAGM2 (1 << 7)
|
||||||
#define HWCAP2_FRINT (1 << 8)
|
#define HWCAP2_FRINT (1 << 8)
|
||||||
|
#define HWCAP2_SVEI8MM (1 << 9)
|
||||||
|
#define HWCAP2_SVEF32MM (1 << 10)
|
||||||
|
#define HWCAP2_SVEF64MM (1 << 11)
|
||||||
|
#define HWCAP2_SVEBF16 (1 << 12)
|
||||||
|
#define HWCAP2_I8MM (1 << 13)
|
||||||
|
#define HWCAP2_BF16 (1 << 14)
|
||||||
|
#define HWCAP2_DGH (1 << 15)
|
||||||
|
|
||||||
#endif /* _UAPI__ASM_HWCAP_H */
|
#endif /* _UAPI__ASM_HWCAP_H */
|
||||||
|
@ -618,7 +618,8 @@ static struct insn_emulation_ops setend_ops = {
|
|||||||
};
|
};
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Invoked as late_initcall, since not needed before init spawned.
|
* Invoked as core_initcall, which guarantees that the instruction
|
||||||
|
* emulation is ready for userspace.
|
||||||
*/
|
*/
|
||||||
static int __init armv8_deprecated_init(void)
|
static int __init armv8_deprecated_init(void)
|
||||||
{
|
{
|
||||||
|
@ -42,11 +42,11 @@ ENTRY(__cpu_soft_restart)
|
|||||||
mov x0, #HVC_SOFT_RESTART
|
mov x0, #HVC_SOFT_RESTART
|
||||||
hvc #0 // no return
|
hvc #0 // no return
|
||||||
|
|
||||||
1: mov x18, x1 // entry
|
1: mov x8, x1 // entry
|
||||||
mov x0, x2 // arg0
|
mov x0, x2 // arg0
|
||||||
mov x1, x3 // arg1
|
mov x1, x3 // arg1
|
||||||
mov x2, x4 // arg2
|
mov x2, x4 // arg2
|
||||||
br x18
|
br x8
|
||||||
ENDPROC(__cpu_soft_restart)
|
ENDPROC(__cpu_soft_restart)
|
||||||
|
|
||||||
.popsection
|
.popsection
|
||||||
|
@ -548,6 +548,8 @@ static const struct midr_range spectre_v2_safe_list[] = {
|
|||||||
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
|
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
|
||||||
MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
|
MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
|
||||||
MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
|
MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
|
||||||
|
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
|
||||||
|
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
|
||||||
{ /* sentinel */ }
|
{ /* sentinel */ }
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -32,9 +32,7 @@ static unsigned long elf_hwcap __read_mostly;
|
|||||||
#define COMPAT_ELF_HWCAP_DEFAULT \
|
#define COMPAT_ELF_HWCAP_DEFAULT \
|
||||||
(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
|
(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
|
||||||
COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
|
COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
|
||||||
COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
|
COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
|
||||||
COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
|
|
||||||
COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
|
|
||||||
COMPAT_HWCAP_LPAE)
|
COMPAT_HWCAP_LPAE)
|
||||||
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
|
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
|
||||||
unsigned int compat_elf_hwcap2 __read_mostly;
|
unsigned int compat_elf_hwcap2 __read_mostly;
|
||||||
@ -47,19 +45,23 @@ static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM6
|
|||||||
/* Need also bit for ARM64_CB_PATCH */
|
/* Need also bit for ARM64_CB_PATCH */
|
||||||
DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
|
DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);
|
||||||
|
|
||||||
|
bool arm64_use_ng_mappings = false;
|
||||||
|
EXPORT_SYMBOL(arm64_use_ng_mappings);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Flag to indicate if we have computed the system wide
|
* Flag to indicate if we have computed the system wide
|
||||||
* capabilities based on the boot time active CPUs. This
|
* capabilities based on the boot time active CPUs. This
|
||||||
* will be used to determine if a new booting CPU should
|
* will be used to determine if a new booting CPU should
|
||||||
* go through the verification process to make sure that it
|
* go through the verification process to make sure that it
|
||||||
* supports the system capabilities, without using a hotplug
|
* supports the system capabilities, without using a hotplug
|
||||||
* notifier.
|
* notifier. This is also used to decide if we could use
|
||||||
|
* the fast path for checking constant CPU caps.
|
||||||
*/
|
*/
|
||||||
static bool sys_caps_initialised;
|
DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
|
||||||
|
EXPORT_SYMBOL(arm64_const_caps_ready);
|
||||||
static inline void set_sys_caps_initialised(void)
|
static inline void finalize_system_capabilities(void)
|
||||||
{
|
{
|
||||||
sys_caps_initialised = true;
|
static_branch_enable(&arm64_const_caps_ready);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
|
static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
|
||||||
@ -135,6 +137,10 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
|
|||||||
};
|
};
|
||||||
|
|
||||||
static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
|
static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
|
||||||
|
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_I8MM_SHIFT, 4, 0),
|
||||||
|
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DGH_SHIFT, 4, 0),
|
||||||
|
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_BF16_SHIFT, 4, 0),
|
||||||
|
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SPECRES_SHIFT, 4, 0),
|
||||||
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
|
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
|
||||||
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0),
|
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0),
|
||||||
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
|
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
|
||||||
@ -176,10 +182,18 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
|
|||||||
};
|
};
|
||||||
|
|
||||||
static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
|
static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
|
||||||
|
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
|
||||||
|
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F64MM_SHIFT, 4, 0),
|
||||||
|
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
|
||||||
|
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F32MM_SHIFT, 4, 0),
|
||||||
|
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
|
||||||
|
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_I8MM_SHIFT, 4, 0),
|
||||||
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
|
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
|
||||||
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
|
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
|
||||||
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
|
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
|
||||||
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
|
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
|
||||||
|
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
|
||||||
|
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BF16_SHIFT, 4, 0),
|
||||||
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
|
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
|
||||||
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
|
FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
|
||||||
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
|
ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
|
||||||
@ -225,6 +239,7 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
|
|||||||
};
|
};
|
||||||
|
|
||||||
static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
|
static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
|
||||||
|
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0),
|
||||||
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
|
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
|
||||||
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
|
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
|
||||||
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
|
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
|
||||||
@ -313,6 +328,17 @@ static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
|
|||||||
ARM64_FTR_END,
|
ARM64_FTR_END,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
static const struct arm64_ftr_bits ftr_id_isar6[] = {
|
||||||
|
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_I8MM_SHIFT, 4, 0),
|
||||||
|
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_BF16_SHIFT, 4, 0),
|
||||||
|
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SPECRES_SHIFT, 4, 0),
|
||||||
|
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SB_SHIFT, 4, 0),
|
||||||
|
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_FHM_SHIFT, 4, 0),
|
||||||
|
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_DP_SHIFT, 4, 0),
|
||||||
|
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_JSCVT_SHIFT, 4, 0),
|
||||||
|
ARM64_FTR_END,
|
||||||
|
};
|
||||||
|
|
||||||
static const struct arm64_ftr_bits ftr_id_pfr0[] = {
|
static const struct arm64_ftr_bits ftr_id_pfr0[] = {
|
||||||
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* State3 */
|
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0), /* State3 */
|
||||||
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0), /* State2 */
|
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0), /* State2 */
|
||||||
@@ -396,6 +422,7 @@ static const struct __ftr_reg_entry {
 	ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
 	ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
 	ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
+	ARM64_FTR_REG(SYS_ID_ISAR6_EL1, ftr_id_isar6),

 	/* Op1 = 0, CRn = 0, CRm = 3 */
 	ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
@@ -600,6 +627,7 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
 		init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
 		init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
 		init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
+		init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
 		init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
 		init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
 		init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
@@ -753,6 +781,8 @@ void update_cpu_features(int cpu,
 					      info->reg_id_isar4, boot->reg_id_isar4);
 		taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
 					      info->reg_id_isar5, boot->reg_id_isar5);
+		taint |= check_update_ftr_reg(SYS_ID_ISAR6_EL1, cpu,
+					      info->reg_id_isar6, boot->reg_id_isar6);

 		/*
 		 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
@@ -785,7 +815,7 @@ void update_cpu_features(int cpu,

 	/* Probe vector lengths, unless we already gave up on SVE */
 	if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
-	    !sys_caps_initialised)
+	    !system_capabilities_finalized())
 		sve_update_vq_map();
 }

@@ -831,6 +861,7 @@ static u64 __read_sysreg_by_encoding(u32 sys_id)
 	read_sysreg_case(SYS_ID_ISAR3_EL1);
 	read_sysreg_case(SYS_ID_ISAR4_EL1);
 	read_sysreg_case(SYS_ID_ISAR5_EL1);
+	read_sysreg_case(SYS_ID_ISAR6_EL1);
 	read_sysreg_case(SYS_MVFR0_EL1);
 	read_sysreg_case(SYS_MVFR1_EL1);
 	read_sysreg_case(SYS_MVFR2_EL1);
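The ID_ISAR6 entries above are all registered as FTR_LOWER_SAFE, meaning the sanitised system-wide value of each field is the lowest value reported by any CPU, so a feature is only exposed when every CPU in the system implements it. A standalone sketch of that rule (illustrative only, not code from this commit; the helper name is made up):

	#include <stdint.h>

	/* Hypothetical helper mirroring the FTR_LOWER_SAFE rule: the system-wide
	 * value of an unsigned ID-register field is the minimum seen across CPUs. */
	static uint64_t ftr_lower_safe(uint64_t system_val, uint64_t cpu_val)
	{
		return cpu_val < system_val ? cpu_val : system_val;
	}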
@@ -965,6 +996,46 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
 	return has_cpuid_feature(entry, scope);
 }

+/*
+ * This check is triggered during the early boot before the cpufeature
+ * is initialised. Checking the status on the local CPU allows the boot
+ * CPU to detect the need for non-global mappings and thus avoiding a
+ * pagetable re-write after all the CPUs are booted. This check will be
+ * anyway run on individual CPUs, allowing us to get the consistent
+ * state once the SMP CPUs are up and thus make the switch to non-global
+ * mappings if required.
+ */
+bool kaslr_requires_kpti(void)
+{
+	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+		return false;
+
+	/*
+	 * E0PD does a similar job to KPTI so can be used instead
+	 * where available.
+	 */
+	if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
+		u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
+		if (cpuid_feature_extract_unsigned_field(mmfr2,
+						ID_AA64MMFR2_E0PD_SHIFT))
+			return false;
+	}
+
+	/*
+	 * Systems affected by Cavium erratum 24756 are incompatible
+	 * with KPTI.
+	 */
+	if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
+		extern const struct midr_range cavium_erratum_27456_cpus[];
+
+		if (is_midr_in_range_list(read_cpuid_id(),
+					  cavium_erratum_27456_cpus))
+			return false;
+	}
+
+	return kaslr_offset() > 0;
+}
+
 static bool __meltdown_safe = true;
 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */

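kaslr_requires_kpti() above only needs a single 4-bit unsigned field out of ID_AA64MMFR2_EL1. A minimal sketch of what that extraction amounts to, in plain C (illustrative only; the 4-bit width matches the ARM64_FTR_BITS entries earlier in this diff, and the helper name is made up):

	#include <stdint.h>

	/* Pull a 4-bit unsigned ID-register field at the given shift. A non-zero
	 * E0PD field means E0PD is implemented, so KPTI is not needed purely for
	 * KASLR hardening. */
	static unsigned int extract_id_field(uint64_t reg, unsigned int shift)
	{
		return (unsigned int)((reg >> shift) & 0xf);
	}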
@@ -975,6 +1046,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 	static const struct midr_range kpti_safe_list[] = {
 		MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
 		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
 		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
@@ -1008,7 +1080,7 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
 	}

 	/* Useful for KASLR robustness */
-	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
+	if (kaslr_requires_kpti()) {
 		if (!__kpti_forced) {
 			str = "KASLR";
 			__kpti_forced = 1;
@@ -1043,7 +1115,6 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 	extern kpti_remap_fn idmap_kpti_install_ng_mappings;
 	kpti_remap_fn *remap_fn;

-	static bool kpti_applied = false;
 	int cpu = smp_processor_id();

 	/*
@@ -1051,7 +1122,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 	 * it already or we have KASLR enabled and therefore have not
 	 * created any global mappings at all.
 	 */
-	if (kpti_applied || kaslr_offset() > 0)
+	if (arm64_use_ng_mappings)
 		return;

 	remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
@@ -1061,7 +1132,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 	cpu_uninstall_idmap();

 	if (!cpu)
-		kpti_applied = true;
+		arm64_use_ng_mappings = true;

 	return;
 }
@@ -1251,6 +1322,14 @@ static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap)
 }
 #endif /* CONFIG_ARM64_PTR_AUTH */

+#ifdef CONFIG_ARM64_E0PD
+static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap)
+{
+	if (this_cpu_has_cap(ARM64_HAS_E0PD))
+		sysreg_clear_set(tcr_el1, 0, TCR_E0PD1);
+}
+#endif /* CONFIG_ARM64_E0PD */
+
 #ifdef CONFIG_ARM64_PSEUDO_NMI
 static bool enable_pseudo_nmi;

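cpu_enable_e0pd() uses sysreg_clear_set(), which performs a read-modify-write of the named system register and only writes back when the value actually changes. A rough standalone sketch of that shape (illustrative only, not the kernel macro); in the call above the clear mask is 0 and the set mask is TCR_E0PD1:

	#include <stdint.h>

	/* Compute the new register value for a clear-then-set update; the caller
	 * would skip the write-back when the result equals the old value. */
	static uint64_t clear_set(uint64_t old, uint64_t clear, uint64_t set)
	{
		return (old & ~clear) | set;
	}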
@@ -1291,7 +1370,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.cpu_enable = cpu_enable_pan,
 	},
 #endif /* CONFIG_ARM64_PAN */
-#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
+#ifdef CONFIG_ARM64_LSE_ATOMICS
 	{
 		.desc = "LSE atomic instructions",
 		.capability = ARM64_HAS_LSE_ATOMICS,
@@ -1302,7 +1381,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.sign = FTR_UNSIGNED,
 		.min_field_value = 2,
 	},
-#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+#endif /* CONFIG_ARM64_LSE_ATOMICS */
 	{
 		.desc = "Software prefetching using PRFM",
 		.capability = ARM64_HAS_NO_HW_PREFETCH,
@@ -1368,7 +1447,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		/* FP/SIMD is not implemented */
 		.capability = ARM64_HAS_NO_FPSIMD,
-		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
 		.min_field_value = 0,
 		.matches = has_no_fpsimd,
 	},
@@ -1566,6 +1645,19 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.sign = FTR_UNSIGNED,
 		.min_field_value = 1,
 	},
+#endif
+#ifdef CONFIG_ARM64_E0PD
+	{
+		.desc = "E0PD",
+		.capability = ARM64_HAS_E0PD,
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.sys_reg = SYS_ID_AA64MMFR2_EL1,
+		.sign = FTR_UNSIGNED,
+		.field_pos = ID_AA64MMFR2_E0PD_SHIFT,
+		.matches = has_cpuid_feature,
+		.min_field_value = 1,
+		.cpu_enable = cpu_enable_e0pd,
+	},
 #endif
 	{},
 };
@@ -1596,6 +1688,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.match_list = list,						\
 	}

+#define HWCAP_CAP_MATCH(match, cap_type, cap)				\
+	{								\
+		__HWCAP_CAP(#cap, cap_type, cap)			\
+		.matches = match,					\
+	}
+
 #ifdef CONFIG_ARM64_PTR_AUTH
 static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
 	{
@@ -1651,6 +1749,9 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC),
 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FRINTTS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FRINT),
 	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SB),
+	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_BF16_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_BF16),
+	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DGH_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DGH),
+	HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_I8MM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM),
 	HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT),
 #ifdef CONFIG_ARM64_SVE
 	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE),
@@ -1658,8 +1759,12 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES_PMULL, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BITPERM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BITPERM, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
+	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BF16_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BF16, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SHA3_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SHA3, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
 	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SM4_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SM4, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
+	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_I8MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_I8MM, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
+	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F32MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F32MM, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
+	HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F64MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F64MM, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
 #endif
 	HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS),
 #ifdef CONFIG_ARM64_PTR_AUTH
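The new hwcaps above are what userspace sees via the auxiliary vector. A small probe program, assuming these capabilities are reported in the second AArch64 hwcap word as HWCAP2_I8MM/HWCAP2_BF16/HWCAP2_DGH (the macro name and bit position below are assumptions, not taken from this diff):

	#include <stdio.h>
	#include <sys/auxv.h>

	#ifndef HWCAP2_I8MM
	#define HWCAP2_I8MM	(1UL << 13)	/* assumed bit position */
	#endif

	int main(void)
	{
		unsigned long hwcap2 = getauxval(AT_HWCAP2);

		/* Non-zero means every CPU in the system advertises the Int8
		 * matrix multiplication instructions. */
		printf("i8mm: %s\n", (hwcap2 & HWCAP2_I8MM) ? "yes" : "no");
		return 0;
	}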
@@ -1669,8 +1774,35 @@ static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
 	{},
 };

+#ifdef CONFIG_COMPAT
+static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
+{
+	/*
+	 * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available,
+	 * in line with that of arm32 as in vfp_init(). We make sure that the
+	 * check is future proof, by making sure value is non-zero.
+	 */
+	u32 mvfr1;
+
+	WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
+	if (scope == SCOPE_SYSTEM)
+		mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1);
+	else
+		mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);
+
+	return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
+		cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
+		cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT);
+}
+#endif
+
 static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
 #ifdef CONFIG_COMPAT
+	HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
+	HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
+	/* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */
+	HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
+	HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
 	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
 	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
 	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
@@ -1974,7 +2106,7 @@ void check_local_cpu_capabilities(void)
 	 * Otherwise, this CPU should verify that it has all the system
 	 * advertised capabilities.
 	 */
-	if (!sys_caps_initialised)
+	if (!system_capabilities_finalized())
 		update_cpu_capabilities(SCOPE_LOCAL_CPU);
 	else
 		verify_local_cpu_capabilities();
@@ -1988,14 +2120,6 @@ static void __init setup_boot_cpu_capabilities(void)
 	enable_cpu_capabilities(SCOPE_BOOT_CPU);
 }

-DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
-EXPORT_SYMBOL(arm64_const_caps_ready);
-
-static void __init mark_const_caps_ready(void)
-{
-	static_branch_enable(&arm64_const_caps_ready);
-}
-
 bool this_cpu_has_cap(unsigned int n)
 {
 	if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) {
@@ -2054,7 +2178,6 @@ void __init setup_cpu_features(void)
 	u32 cwg;

 	setup_system_capabilities();
-	mark_const_caps_ready();
 	setup_elf_hwcaps(arm64_elf_hwcaps);

 	if (system_supports_32bit_el0())
@@ -2067,7 +2190,7 @@
 	minsigstksz_setup();

 	/* Advertise that we have computed the system capabilities */
-	set_sys_caps_initialised();
+	finalize_system_capabilities();

 	/*
 	 * Check for sane CTR_EL0.CWG value.
|
@ -84,6 +84,13 @@ static const char *const hwcap_str[] = {
|
|||||||
"svesm4",
|
"svesm4",
|
||||||
"flagm2",
|
"flagm2",
|
||||||
"frint",
|
"frint",
|
||||||
|
"svei8mm",
|
||||||
|
"svef32mm",
|
||||||
|
"svef64mm",
|
||||||
|
"svebf16",
|
||||||
|
"i8mm",
|
||||||
|
"bf16",
|
||||||
|
"dgh",
|
||||||
NULL
|
NULL
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -360,6 +367,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
|
|||||||
info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
|
info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
|
||||||
info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
|
info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
|
||||||
info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
|
info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
|
||||||
|
info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1);
|
||||||
info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
|
info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
|
||||||
info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
|
info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
|
||||||
info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
|
info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
|
||||||
|
@ -36,14 +36,14 @@ static void notrace el1_pc(struct pt_regs *regs, unsigned long esr)
|
|||||||
}
|
}
|
||||||
NOKPROBE_SYMBOL(el1_pc);
|
NOKPROBE_SYMBOL(el1_pc);
|
||||||
|
|
||||||
static void el1_undef(struct pt_regs *regs)
|
static void notrace el1_undef(struct pt_regs *regs)
|
||||||
{
|
{
|
||||||
local_daif_inherit(regs);
|
local_daif_inherit(regs);
|
||||||
do_undefinstr(regs);
|
do_undefinstr(regs);
|
||||||
}
|
}
|
||||||
NOKPROBE_SYMBOL(el1_undef);
|
NOKPROBE_SYMBOL(el1_undef);
|
||||||
|
|
||||||
static void el1_inv(struct pt_regs *regs, unsigned long esr)
|
static void notrace el1_inv(struct pt_regs *regs, unsigned long esr)
|
||||||
{
|
{
|
||||||
local_daif_inherit(regs);
|
local_daif_inherit(regs);
|
||||||
bad_mode(regs, 0, esr);
|
bad_mode(regs, 0, esr);
|
||||||
@ -215,7 +215,7 @@ static void notrace el0_svc(struct pt_regs *regs)
|
|||||||
if (system_uses_irq_prio_masking())
|
if (system_uses_irq_prio_masking())
|
||||||
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
|
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
|
||||||
|
|
||||||
el0_svc_handler(regs);
|
do_el0_svc(regs);
|
||||||
}
|
}
|
||||||
NOKPROBE_SYMBOL(el0_svc);
|
NOKPROBE_SYMBOL(el0_svc);
|
||||||
|
|
||||||
@ -281,7 +281,7 @@ static void notrace el0_svc_compat(struct pt_regs *regs)
|
|||||||
if (system_uses_irq_prio_masking())
|
if (system_uses_irq_prio_masking())
|
||||||
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
|
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
|
||||||
|
|
||||||
el0_svc_compat_handler(regs);
|
do_el0_svc_compat(regs);
|
||||||
}
|
}
|
||||||
NOKPROBE_SYMBOL(el0_svc_compat);
|
NOKPROBE_SYMBOL(el0_svc_compat);
|
||||||
|
|
||||||
|
@ -60,16 +60,16 @@
|
|||||||
.macro kernel_ventry, el, label, regsize = 64
|
.macro kernel_ventry, el, label, regsize = 64
|
||||||
.align 7
|
.align 7
|
||||||
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
|
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
|
||||||
alternative_if ARM64_UNMAP_KERNEL_AT_EL0
|
|
||||||
.if \el == 0
|
.if \el == 0
|
||||||
|
alternative_if ARM64_UNMAP_KERNEL_AT_EL0
|
||||||
.if \regsize == 64
|
.if \regsize == 64
|
||||||
mrs x30, tpidrro_el0
|
mrs x30, tpidrro_el0
|
||||||
msr tpidrro_el0, xzr
|
msr tpidrro_el0, xzr
|
||||||
.else
|
.else
|
||||||
mov x30, xzr
|
mov x30, xzr
|
||||||
.endif
|
.endif
|
||||||
.endif
|
|
||||||
alternative_else_nop_endif
|
alternative_else_nop_endif
|
||||||
|
.endif
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
sub sp, sp, #S_FRAME_SIZE
|
sub sp, sp, #S_FRAME_SIZE
|
||||||
@ -167,9 +167,13 @@ alternative_cb_end
|
|||||||
.if \el == 0
|
.if \el == 0
|
||||||
clear_gp_regs
|
clear_gp_regs
|
||||||
mrs x21, sp_el0
|
mrs x21, sp_el0
|
||||||
ldr_this_cpu tsk, __entry_task, x20 // Ensure MDSCR_EL1.SS is clear,
|
ldr_this_cpu tsk, __entry_task, x20
|
||||||
ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug
|
msr sp_el0, tsk
|
||||||
disable_step_tsk x19, x20 // exceptions when scheduling.
|
|
||||||
|
// Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
|
||||||
|
// when scheduling.
|
||||||
|
ldr x19, [tsk, #TSK_TI_FLAGS]
|
||||||
|
disable_step_tsk x19, x20
|
||||||
|
|
||||||
apply_ssbd 1, x22, x23
|
apply_ssbd 1, x22, x23
|
||||||
|
|
||||||
@ -232,13 +236,6 @@ alternative_else_nop_endif
|
|||||||
str w21, [sp, #S_SYSCALLNO]
|
str w21, [sp, #S_SYSCALLNO]
|
||||||
.endif
|
.endif
|
||||||
|
|
||||||
/*
|
|
||||||
* Set sp_el0 to current thread_info.
|
|
||||||
*/
|
|
||||||
.if \el == 0
|
|
||||||
msr sp_el0, tsk
|
|
||||||
.endif
|
|
||||||
|
|
||||||
/* Save pmr */
|
/* Save pmr */
|
||||||
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
|
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
|
||||||
mrs_s x20, SYS_ICC_PMR_EL1
|
mrs_s x20, SYS_ICC_PMR_EL1
|
||||||
|
@ -269,6 +269,7 @@ static void sve_free(struct task_struct *task)
|
|||||||
*/
|
*/
|
||||||
static void task_fpsimd_load(void)
|
static void task_fpsimd_load(void)
|
||||||
{
|
{
|
||||||
|
WARN_ON(!system_supports_fpsimd());
|
||||||
WARN_ON(!have_cpu_fpsimd_context());
|
WARN_ON(!have_cpu_fpsimd_context());
|
||||||
|
|
||||||
if (system_supports_sve() && test_thread_flag(TIF_SVE))
|
if (system_supports_sve() && test_thread_flag(TIF_SVE))
|
||||||
@ -289,6 +290,7 @@ static void fpsimd_save(void)
|
|||||||
this_cpu_ptr(&fpsimd_last_state);
|
this_cpu_ptr(&fpsimd_last_state);
|
||||||
/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
|
/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
|
||||||
|
|
||||||
|
WARN_ON(!system_supports_fpsimd());
|
||||||
WARN_ON(!have_cpu_fpsimd_context());
|
WARN_ON(!have_cpu_fpsimd_context());
|
||||||
|
|
||||||
if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
|
if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
|
||||||
@ -1092,6 +1094,7 @@ void fpsimd_bind_task_to_cpu(void)
|
|||||||
struct fpsimd_last_state_struct *last =
|
struct fpsimd_last_state_struct *last =
|
||||||
this_cpu_ptr(&fpsimd_last_state);
|
this_cpu_ptr(&fpsimd_last_state);
|
||||||
|
|
||||||
|
WARN_ON(!system_supports_fpsimd());
|
||||||
last->st = ¤t->thread.uw.fpsimd_state;
|
last->st = ¤t->thread.uw.fpsimd_state;
|
||||||
last->sve_state = current->thread.sve_state;
|
last->sve_state = current->thread.sve_state;
|
||||||
last->sve_vl = current->thread.sve_vl;
|
last->sve_vl = current->thread.sve_vl;
|
||||||
@ -1114,6 +1117,7 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
|
|||||||
struct fpsimd_last_state_struct *last =
|
struct fpsimd_last_state_struct *last =
|
||||||
this_cpu_ptr(&fpsimd_last_state);
|
this_cpu_ptr(&fpsimd_last_state);
|
||||||
|
|
||||||
|
WARN_ON(!system_supports_fpsimd());
|
||||||
WARN_ON(!in_softirq() && !irqs_disabled());
|
WARN_ON(!in_softirq() && !irqs_disabled());
|
||||||
|
|
||||||
last->st = st;
|
last->st = st;
|
||||||
@@ -1128,8 +1132,19 @@ void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
  */
 void fpsimd_restore_current_state(void)
 {
-	if (!system_supports_fpsimd())
+	/*
+	 * For the tasks that were created before we detected the absence of
+	 * FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(),
+	 * e.g, init. This could be then inherited by the children processes.
+	 * If we later detect that the system doesn't support FP/SIMD,
+	 * we must clear the flag for all the tasks to indicate that the
+	 * FPSTATE is clean (as we can't have one) to avoid looping for ever in
+	 * do_notify_resume().
+	 */
+	if (!system_supports_fpsimd()) {
+		clear_thread_flag(TIF_FOREIGN_FPSTATE);
 		return;
+	}

 	get_cpu_fpsimd_context();

@ -1148,7 +1163,7 @@ void fpsimd_restore_current_state(void)
|
|||||||
*/
|
*/
|
||||||
void fpsimd_update_current_state(struct user_fpsimd_state const *state)
|
void fpsimd_update_current_state(struct user_fpsimd_state const *state)
|
||||||
{
|
{
|
||||||
if (!system_supports_fpsimd())
|
if (WARN_ON(!system_supports_fpsimd()))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
get_cpu_fpsimd_context();
|
get_cpu_fpsimd_context();
|
||||||
@ -1179,7 +1194,13 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
|
|||||||
void fpsimd_flush_task_state(struct task_struct *t)
|
void fpsimd_flush_task_state(struct task_struct *t)
|
||||||
{
|
{
|
||||||
t->thread.fpsimd_cpu = NR_CPUS;
|
t->thread.fpsimd_cpu = NR_CPUS;
|
||||||
|
/*
|
||||||
|
* If we don't support fpsimd, bail out after we have
|
||||||
|
* reset the fpsimd_cpu for this task and clear the
|
||||||
|
* FPSTATE.
|
||||||
|
*/
|
||||||
|
if (!system_supports_fpsimd())
|
||||||
|
return;
|
||||||
barrier();
|
barrier();
|
||||||
set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
|
set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);
|
||||||
|
|
||||||
@ -1193,6 +1214,7 @@ void fpsimd_flush_task_state(struct task_struct *t)
|
|||||||
*/
|
*/
|
||||||
static void fpsimd_flush_cpu_state(void)
|
static void fpsimd_flush_cpu_state(void)
|
||||||
{
|
{
|
||||||
|
WARN_ON(!system_supports_fpsimd());
|
||||||
__this_cpu_write(fpsimd_last_state.st, NULL);
|
__this_cpu_write(fpsimd_last_state.st, NULL);
|
||||||
set_thread_flag(TIF_FOREIGN_FPSTATE);
|
set_thread_flag(TIF_FOREIGN_FPSTATE);
|
||||||
}
|
}
|
||||||
@ -1203,6 +1225,8 @@ static void fpsimd_flush_cpu_state(void)
|
|||||||
*/
|
*/
|
||||||
void fpsimd_save_and_flush_cpu_state(void)
|
void fpsimd_save_and_flush_cpu_state(void)
|
||||||
{
|
{
|
||||||
|
if (!system_supports_fpsimd())
|
||||||
|
return;
|
||||||
WARN_ON(preemptible());
|
WARN_ON(preemptible());
|
||||||
__get_cpu_fpsimd_context();
|
__get_cpu_fpsimd_context();
|
||||||
fpsimd_save();
|
fpsimd_save();
|
||||||
|
@@ -182,9 +182,48 @@ int arch_hibernation_header_restore(void *addr)
 }
 EXPORT_SYMBOL(arch_hibernation_header_restore);

+static int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
+		       unsigned long dst_addr,
+		       pgprot_t pgprot)
+{
+	pgd_t *pgdp;
+	pud_t *pudp;
+	pmd_t *pmdp;
+	pte_t *ptep;
+
+	pgdp = pgd_offset_raw(trans_pgd, dst_addr);
+	if (pgd_none(READ_ONCE(*pgdp))) {
+		pudp = (void *)get_safe_page(GFP_ATOMIC);
+		if (!pudp)
+			return -ENOMEM;
+		pgd_populate(&init_mm, pgdp, pudp);
+	}
+
+	pudp = pud_offset(pgdp, dst_addr);
+	if (pud_none(READ_ONCE(*pudp))) {
+		pmdp = (void *)get_safe_page(GFP_ATOMIC);
+		if (!pmdp)
+			return -ENOMEM;
+		pud_populate(&init_mm, pudp, pmdp);
+	}
+
+	pmdp = pmd_offset(pudp, dst_addr);
+	if (pmd_none(READ_ONCE(*pmdp))) {
+		ptep = (void *)get_safe_page(GFP_ATOMIC);
+		if (!ptep)
+			return -ENOMEM;
+		pmd_populate_kernel(&init_mm, pmdp, ptep);
+	}
+
+	ptep = pte_offset_kernel(pmdp, dst_addr);
+	set_pte(ptep, pfn_pte(virt_to_pfn(page), PAGE_KERNEL_EXEC));
+
+	return 0;
+}
+
 /*
  * Copies length bytes, starting at src_start into an new page,
- * perform cache maintentance, then maps it at the specified address low
+ * perform cache maintenance, then maps it at the specified address low
  * address as executable.
  *
  * This is used by hibernate to copy the code it needs to execute when
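For orientation, the rewritten create_safe_exec_page() in the next hunk drives this helper roughly as follows (condensed from the hunk below, error handling trimmed):

	void *page = (void *)get_safe_page(GFP_ATOMIC);	/* copy of the exit code */

	memcpy(page, src_start, length);
	__flush_icache_range((unsigned long)page, (unsigned long)page + length);

	trans_pgd = (void *)get_safe_page(GFP_ATOMIC);	/* empty top-level table */
	rc = trans_pgd_map_page(trans_pgd, page, dst_addr, PAGE_KERNEL_EXEC);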
@ -196,64 +235,26 @@ EXPORT_SYMBOL(arch_hibernation_header_restore);
|
|||||||
*/
|
*/
|
||||||
static int create_safe_exec_page(void *src_start, size_t length,
|
static int create_safe_exec_page(void *src_start, size_t length,
|
||||||
unsigned long dst_addr,
|
unsigned long dst_addr,
|
||||||
phys_addr_t *phys_dst_addr,
|
phys_addr_t *phys_dst_addr)
|
||||||
void *(*allocator)(gfp_t mask),
|
|
||||||
gfp_t mask)
|
|
||||||
{
|
{
|
||||||
int rc = 0;
|
void *page = (void *)get_safe_page(GFP_ATOMIC);
|
||||||
pgd_t *trans_pgd;
|
pgd_t *trans_pgd;
|
||||||
pgd_t *pgdp;
|
int rc;
|
||||||
pud_t *pudp;
|
|
||||||
pmd_t *pmdp;
|
|
||||||
pte_t *ptep;
|
|
||||||
unsigned long dst = (unsigned long)allocator(mask);
|
|
||||||
|
|
||||||
if (!dst) {
|
if (!page)
|
||||||
rc = -ENOMEM;
|
return -ENOMEM;
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
memcpy((void *)dst, src_start, length);
|
memcpy(page, src_start, length);
|
||||||
__flush_icache_range(dst, dst + length);
|
__flush_icache_range((unsigned long)page, (unsigned long)page + length);
|
||||||
|
|
||||||
trans_pgd = allocator(mask);
|
trans_pgd = (void *)get_safe_page(GFP_ATOMIC);
|
||||||
if (!trans_pgd) {
|
if (!trans_pgd)
|
||||||
rc = -ENOMEM;
|
return -ENOMEM;
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
pgdp = pgd_offset_raw(trans_pgd, dst_addr);
|
rc = trans_pgd_map_page(trans_pgd, page, dst_addr,
|
||||||
if (pgd_none(READ_ONCE(*pgdp))) {
|
PAGE_KERNEL_EXEC);
|
||||||
pudp = allocator(mask);
|
if (rc)
|
||||||
if (!pudp) {
|
return rc;
|
||||||
rc = -ENOMEM;
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
pgd_populate(&init_mm, pgdp, pudp);
|
|
||||||
}
|
|
||||||
|
|
||||||
pudp = pud_offset(pgdp, dst_addr);
|
|
||||||
if (pud_none(READ_ONCE(*pudp))) {
|
|
||||||
pmdp = allocator(mask);
|
|
||||||
if (!pmdp) {
|
|
||||||
rc = -ENOMEM;
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
pud_populate(&init_mm, pudp, pmdp);
|
|
||||||
}
|
|
||||||
|
|
||||||
pmdp = pmd_offset(pudp, dst_addr);
|
|
||||||
if (pmd_none(READ_ONCE(*pmdp))) {
|
|
||||||
ptep = allocator(mask);
|
|
||||||
if (!ptep) {
|
|
||||||
rc = -ENOMEM;
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
pmd_populate_kernel(&init_mm, pmdp, ptep);
|
|
||||||
}
|
|
||||||
|
|
||||||
ptep = pte_offset_kernel(pmdp, dst_addr);
|
|
||||||
set_pte(ptep, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC));
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Load our new page tables. A strict BBM approach requires that we
|
* Load our new page tables. A strict BBM approach requires that we
|
||||||
@ -269,13 +270,12 @@ static int create_safe_exec_page(void *src_start, size_t length,
|
|||||||
*/
|
*/
|
||||||
cpu_set_reserved_ttbr0();
|
cpu_set_reserved_ttbr0();
|
||||||
local_flush_tlb_all();
|
local_flush_tlb_all();
|
||||||
write_sysreg(phys_to_ttbr(virt_to_phys(pgdp)), ttbr0_el1);
|
write_sysreg(phys_to_ttbr(virt_to_phys(trans_pgd)), ttbr0_el1);
|
||||||
isb();
|
isb();
|
||||||
|
|
||||||
*phys_dst_addr = virt_to_phys((void *)dst);
|
*phys_dst_addr = virt_to_phys(page);
|
||||||
|
|
||||||
out:
|
return 0;
|
||||||
return rc;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#define dcache_clean_range(start, end) __flush_dcache_area(start, (end - start))
|
#define dcache_clean_range(start, end) __flush_dcache_area(start, (end - start))
|
||||||
@ -450,7 +450,7 @@ static int copy_pud(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
|
|||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
} else {
|
} else {
|
||||||
set_pud(dst_pudp,
|
set_pud(dst_pudp,
|
||||||
__pud(pud_val(pud) & ~PMD_SECT_RDONLY));
|
__pud(pud_val(pud) & ~PUD_SECT_RDONLY));
|
||||||
}
|
}
|
||||||
} while (dst_pudp++, src_pudp++, addr = next, addr != end);
|
} while (dst_pudp++, src_pudp++, addr = next, addr != end);
|
||||||
|
|
||||||
@@ -476,6 +476,24 @@ static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
 	return 0;
 }

+static int trans_pgd_create_copy(pgd_t **dst_pgdp, unsigned long start,
+			  unsigned long end)
+{
+	int rc;
+	pgd_t *trans_pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
+
+	if (!trans_pgd) {
+		pr_err("Failed to allocate memory for temporary page tables.\n");
+		return -ENOMEM;
+	}
+
+	rc = copy_page_tables(trans_pgd, start, end);
+	if (!rc)
+		*dst_pgdp = trans_pgd;
+
+	return rc;
+}
+
 /*
  * Setup then Resume from the hibernate image using swsusp_arch_suspend_exit().
  *
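trans_pgd_create_copy() is consumed a few hunks further down by swsusp_arch_resume(); condensed from that hunk, the call is simply:

	rc = trans_pgd_create_copy(&tmp_pg_dir, PAGE_OFFSET, PAGE_END);
	if (rc)
		return rc;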
@ -484,7 +502,7 @@ static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
|
|||||||
*/
|
*/
|
||||||
int swsusp_arch_resume(void)
|
int swsusp_arch_resume(void)
|
||||||
{
|
{
|
||||||
int rc = 0;
|
int rc;
|
||||||
void *zero_page;
|
void *zero_page;
|
||||||
size_t exit_size;
|
size_t exit_size;
|
||||||
pgd_t *tmp_pg_dir;
|
pgd_t *tmp_pg_dir;
|
||||||
@ -497,15 +515,9 @@ int swsusp_arch_resume(void)
|
|||||||
* Create a second copy of just the linear map, and use this when
|
* Create a second copy of just the linear map, and use this when
|
||||||
* restoring.
|
* restoring.
|
||||||
*/
|
*/
|
||||||
tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
|
rc = trans_pgd_create_copy(&tmp_pg_dir, PAGE_OFFSET, PAGE_END);
|
||||||
if (!tmp_pg_dir) {
|
|
||||||
pr_err("Failed to allocate memory for temporary page tables.\n");
|
|
||||||
rc = -ENOMEM;
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, PAGE_END);
|
|
||||||
if (rc)
|
if (rc)
|
||||||
goto out;
|
return rc;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We need a zero page that is zero before & after resume in order to
|
* We need a zero page that is zero before & after resume in order to
|
||||||
@ -514,8 +526,7 @@ int swsusp_arch_resume(void)
|
|||||||
zero_page = (void *)get_safe_page(GFP_ATOMIC);
|
zero_page = (void *)get_safe_page(GFP_ATOMIC);
|
||||||
if (!zero_page) {
|
if (!zero_page) {
|
||||||
pr_err("Failed to allocate zero page.\n");
|
pr_err("Failed to allocate zero page.\n");
|
||||||
rc = -ENOMEM;
|
return -ENOMEM;
|
||||||
goto out;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -530,11 +541,10 @@ int swsusp_arch_resume(void)
|
|||||||
*/
|
*/
|
||||||
rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
|
rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
|
||||||
(unsigned long)hibernate_exit,
|
(unsigned long)hibernate_exit,
|
||||||
&phys_hibernate_exit,
|
&phys_hibernate_exit);
|
||||||
(void *)get_safe_page, GFP_ATOMIC);
|
|
||||||
if (rc) {
|
if (rc) {
|
||||||
pr_err("Failed to create safe executable page for hibernate_exit code.\n");
|
pr_err("Failed to create safe executable page for hibernate_exit code.\n");
|
||||||
goto out;
|
return rc;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -561,8 +571,7 @@ int swsusp_arch_resume(void)
|
|||||||
resume_hdr.reenter_kernel, restore_pblist,
|
resume_hdr.reenter_kernel, restore_pblist,
|
||||||
resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
|
resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
|
||||||
|
|
||||||
out:
|
return 0;
|
||||||
return rc;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
int hibernate_resume_nonboot_cpu_disable(void)
|
int hibernate_resume_nonboot_cpu_disable(void)
|
||||||
|
@ -47,10 +47,6 @@ static void *image_load(struct kimage *image,
|
|||||||
struct kexec_segment *kernel_segment;
|
struct kexec_segment *kernel_segment;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
/* We don't support crash kernels yet. */
|
|
||||||
if (image->type == KEXEC_TYPE_CRASH)
|
|
||||||
return ERR_PTR(-EOPNOTSUPP);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We require a kernel with an unambiguous Image header. Per
|
* We require a kernel with an unambiguous Image header. Per
|
||||||
* Documentation/arm64/booting.rst, this is the case when image_size
|
* Documentation/arm64/booting.rst, this is the case when image_size
|
||||||
|
@ -160,18 +160,6 @@ void machine_kexec(struct kimage *kimage)
|
|||||||
|
|
||||||
kexec_image_info(kimage);
|
kexec_image_info(kimage);
|
||||||
|
|
||||||
pr_debug("%s:%d: control_code_page: %p\n", __func__, __LINE__,
|
|
||||||
kimage->control_code_page);
|
|
||||||
pr_debug("%s:%d: reboot_code_buffer_phys: %pa\n", __func__, __LINE__,
|
|
||||||
&reboot_code_buffer_phys);
|
|
||||||
pr_debug("%s:%d: reboot_code_buffer: %p\n", __func__, __LINE__,
|
|
||||||
reboot_code_buffer);
|
|
||||||
pr_debug("%s:%d: relocate_new_kernel: %p\n", __func__, __LINE__,
|
|
||||||
arm64_relocate_new_kernel);
|
|
||||||
pr_debug("%s:%d: relocate_new_kernel_size: 0x%lx(%lu) bytes\n",
|
|
||||||
__func__, __LINE__, arm64_relocate_new_kernel_size,
|
|
||||||
arm64_relocate_new_kernel_size);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Copy arm64_relocate_new_kernel to the reboot_code_buffer for use
|
* Copy arm64_relocate_new_kernel to the reboot_code_buffer for use
|
||||||
* after the kernel is shut down.
|
* after the kernel is shut down.
|
||||||
|
@ -17,12 +17,15 @@
|
|||||||
#include <linux/memblock.h>
|
#include <linux/memblock.h>
|
||||||
#include <linux/of_fdt.h>
|
#include <linux/of_fdt.h>
|
||||||
#include <linux/random.h>
|
#include <linux/random.h>
|
||||||
|
#include <linux/slab.h>
|
||||||
#include <linux/string.h>
|
#include <linux/string.h>
|
||||||
#include <linux/types.h>
|
#include <linux/types.h>
|
||||||
#include <linux/vmalloc.h>
|
#include <linux/vmalloc.h>
|
||||||
#include <asm/byteorder.h>
|
#include <asm/byteorder.h>
|
||||||
|
|
||||||
/* relevant device tree properties */
|
/* relevant device tree properties */
|
||||||
|
#define FDT_PROP_KEXEC_ELFHDR "linux,elfcorehdr"
|
||||||
|
#define FDT_PROP_MEM_RANGE "linux,usable-memory-range"
|
||||||
#define FDT_PROP_INITRD_START "linux,initrd-start"
|
#define FDT_PROP_INITRD_START "linux,initrd-start"
|
||||||
#define FDT_PROP_INITRD_END "linux,initrd-end"
|
#define FDT_PROP_INITRD_END "linux,initrd-end"
|
||||||
#define FDT_PROP_BOOTARGS "bootargs"
|
#define FDT_PROP_BOOTARGS "bootargs"
|
||||||
@ -40,6 +43,10 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image)
|
|||||||
vfree(image->arch.dtb);
|
vfree(image->arch.dtb);
|
||||||
image->arch.dtb = NULL;
|
image->arch.dtb = NULL;
|
||||||
|
|
||||||
|
vfree(image->arch.elf_headers);
|
||||||
|
image->arch.elf_headers = NULL;
|
||||||
|
image->arch.elf_headers_sz = 0;
|
||||||
|
|
||||||
return kexec_image_post_load_cleanup_default(image);
|
return kexec_image_post_load_cleanup_default(image);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -55,6 +62,31 @@ static int setup_dtb(struct kimage *image,
|
|||||||
|
|
||||||
off = ret;
|
off = ret;
|
||||||
|
|
||||||
|
ret = fdt_delprop(dtb, off, FDT_PROP_KEXEC_ELFHDR);
|
||||||
|
if (ret && ret != -FDT_ERR_NOTFOUND)
|
||||||
|
goto out;
|
||||||
|
ret = fdt_delprop(dtb, off, FDT_PROP_MEM_RANGE);
|
||||||
|
if (ret && ret != -FDT_ERR_NOTFOUND)
|
||||||
|
goto out;
|
||||||
|
|
||||||
|
if (image->type == KEXEC_TYPE_CRASH) {
|
||||||
|
/* add linux,elfcorehdr */
|
||||||
|
ret = fdt_appendprop_addrrange(dtb, 0, off,
|
||||||
|
FDT_PROP_KEXEC_ELFHDR,
|
||||||
|
image->arch.elf_headers_mem,
|
||||||
|
image->arch.elf_headers_sz);
|
||||||
|
if (ret)
|
||||||
|
return (ret == -FDT_ERR_NOSPACE ? -ENOMEM : -EINVAL);
|
||||||
|
|
||||||
|
/* add linux,usable-memory-range */
|
||||||
|
ret = fdt_appendprop_addrrange(dtb, 0, off,
|
||||||
|
FDT_PROP_MEM_RANGE,
|
||||||
|
crashk_res.start,
|
||||||
|
crashk_res.end - crashk_res.start + 1);
|
||||||
|
if (ret)
|
||||||
|
return (ret == -FDT_ERR_NOSPACE ? -ENOMEM : -EINVAL);
|
||||||
|
}
|
||||||
|
|
||||||
/* add bootargs */
|
/* add bootargs */
|
||||||
if (cmdline) {
|
if (cmdline) {
|
||||||
ret = fdt_setprop_string(dtb, off, FDT_PROP_BOOTARGS, cmdline);
|
ret = fdt_setprop_string(dtb, off, FDT_PROP_BOOTARGS, cmdline);
|
||||||
@ -125,8 +157,8 @@ static int setup_dtb(struct kimage *image,
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* More space needed so that we can add initrd, bootargs, kaslr-seed, and
|
* More space needed so that we can add initrd, bootargs, kaslr-seed,
|
||||||
* rng-seed.
|
* rng-seed, userable-memory-range and elfcorehdr.
|
||||||
*/
|
*/
|
||||||
#define DTB_EXTRA_SPACE 0x1000
|
#define DTB_EXTRA_SPACE 0x1000
|
||||||
|
|
||||||
@ -174,6 +206,43 @@ static int create_dtb(struct kimage *image,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int prepare_elf_headers(void **addr, unsigned long *sz)
|
||||||
|
{
|
||||||
|
struct crash_mem *cmem;
|
||||||
|
unsigned int nr_ranges;
|
||||||
|
int ret;
|
||||||
|
u64 i;
|
||||||
|
phys_addr_t start, end;
|
||||||
|
|
||||||
|
nr_ranges = 1; /* for exclusion of crashkernel region */
|
||||||
|
for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
|
||||||
|
MEMBLOCK_NONE, &start, &end, NULL)
|
||||||
|
nr_ranges++;
|
||||||
|
|
||||||
|
cmem = kmalloc(sizeof(struct crash_mem) +
|
||||||
|
sizeof(struct crash_mem_range) * nr_ranges, GFP_KERNEL);
|
||||||
|
if (!cmem)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
|
cmem->max_nr_ranges = nr_ranges;
|
||||||
|
cmem->nr_ranges = 0;
|
||||||
|
for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
|
||||||
|
MEMBLOCK_NONE, &start, &end, NULL) {
|
||||||
|
cmem->ranges[cmem->nr_ranges].start = start;
|
||||||
|
cmem->ranges[cmem->nr_ranges].end = end - 1;
|
||||||
|
cmem->nr_ranges++;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Exclude crashkernel region */
|
||||||
|
ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
|
||||||
|
|
||||||
|
if (!ret)
|
||||||
|
ret = crash_prepare_elf64_headers(cmem, true, addr, sz);
|
||||||
|
|
||||||
|
kfree(cmem);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
int load_other_segments(struct kimage *image,
|
int load_other_segments(struct kimage *image,
|
||||||
unsigned long kernel_load_addr,
|
unsigned long kernel_load_addr,
|
||||||
unsigned long kernel_size,
|
unsigned long kernel_size,
|
||||||
@ -181,14 +250,43 @@ int load_other_segments(struct kimage *image,
|
|||||||
char *cmdline)
|
char *cmdline)
|
||||||
{
|
{
|
||||||
struct kexec_buf kbuf;
|
struct kexec_buf kbuf;
|
||||||
void *dtb = NULL;
|
void *headers, *dtb = NULL;
|
||||||
unsigned long initrd_load_addr = 0, dtb_len;
|
unsigned long headers_sz, initrd_load_addr = 0, dtb_len;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
kbuf.image = image;
|
kbuf.image = image;
|
||||||
/* not allocate anything below the kernel */
|
/* not allocate anything below the kernel */
|
||||||
kbuf.buf_min = kernel_load_addr + kernel_size;
|
kbuf.buf_min = kernel_load_addr + kernel_size;
|
||||||
|
|
||||||
|
/* load elf core header */
|
||||||
|
if (image->type == KEXEC_TYPE_CRASH) {
|
||||||
|
ret = prepare_elf_headers(&headers, &headers_sz);
|
||||||
|
if (ret) {
|
||||||
|
pr_err("Preparing elf core header failed\n");
|
||||||
|
goto out_err;
|
||||||
|
}
|
||||||
|
|
||||||
|
kbuf.buffer = headers;
|
||||||
|
kbuf.bufsz = headers_sz;
|
||||||
|
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
|
||||||
|
kbuf.memsz = headers_sz;
|
||||||
|
kbuf.buf_align = SZ_64K; /* largest supported page size */
|
||||||
|
kbuf.buf_max = ULONG_MAX;
|
||||||
|
kbuf.top_down = true;
|
||||||
|
|
||||||
|
ret = kexec_add_buffer(&kbuf);
|
||||||
|
if (ret) {
|
||||||
|
vfree(headers);
|
||||||
|
goto out_err;
|
||||||
|
}
|
||||||
|
image->arch.elf_headers = headers;
|
||||||
|
image->arch.elf_headers_mem = kbuf.mem;
|
||||||
|
image->arch.elf_headers_sz = headers_sz;
|
||||||
|
|
||||||
|
pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
|
||||||
|
image->arch.elf_headers_mem, headers_sz, headers_sz);
|
||||||
|
}
|
||||||
|
|
||||||
/* load initrd */
|
/* load initrd */
|
||||||
if (initrd) {
|
if (initrd) {
|
||||||
kbuf.buffer = initrd;
|
kbuf.buffer = initrd;
|
||||||
|
@ -646,6 +646,6 @@ asmlinkage void __sched arm64_preempt_schedule_irq(void)
|
|||||||
* Only allow a task to be preempted once cpufeatures have been
|
* Only allow a task to be preempted once cpufeatures have been
|
||||||
* enabled.
|
* enabled.
|
||||||
*/
|
*/
|
||||||
if (static_branch_likely(&arm64_const_caps_ready))
|
if (system_capabilities_finalized())
|
||||||
preempt_schedule_irq();
|
preempt_schedule_irq();
|
||||||
}
|
}
|
||||||
|
@ -615,6 +615,13 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int fpr_active(struct task_struct *target, const struct user_regset *regset)
|
||||||
|
{
|
||||||
|
if (!system_supports_fpsimd())
|
||||||
|
return -ENODEV;
|
||||||
|
return regset->n;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* TODO: update fp accessors for lazy context switching (sync/flush hwstate)
|
* TODO: update fp accessors for lazy context switching (sync/flush hwstate)
|
||||||
*/
|
*/
|
||||||
@ -637,6 +644,9 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
|
|||||||
unsigned int pos, unsigned int count,
|
unsigned int pos, unsigned int count,
|
||||||
void *kbuf, void __user *ubuf)
|
void *kbuf, void __user *ubuf)
|
||||||
{
|
{
|
||||||
|
if (!system_supports_fpsimd())
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
if (target == current)
|
if (target == current)
|
||||||
fpsimd_preserve_current_state();
|
fpsimd_preserve_current_state();
|
||||||
|
|
||||||
@ -676,6 +686,9 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
|
|||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
if (!system_supports_fpsimd())
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
|
ret = __fpr_set(target, regset, pos, count, kbuf, ubuf, 0);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
@ -1134,6 +1147,7 @@ static const struct user_regset aarch64_regsets[] = {
|
|||||||
*/
|
*/
|
||||||
.size = sizeof(u32),
|
.size = sizeof(u32),
|
||||||
.align = sizeof(u32),
|
.align = sizeof(u32),
|
||||||
|
.active = fpr_active,
|
||||||
.get = fpr_get,
|
.get = fpr_get,
|
||||||
.set = fpr_set
|
.set = fpr_set
|
||||||
},
|
},
|
||||||
@ -1348,6 +1362,9 @@ static int compat_vfp_get(struct task_struct *target,
|
|||||||
compat_ulong_t fpscr;
|
compat_ulong_t fpscr;
|
||||||
int ret, vregs_end_pos;
|
int ret, vregs_end_pos;
|
||||||
|
|
||||||
|
if (!system_supports_fpsimd())
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
uregs = &target->thread.uw.fpsimd_state;
|
uregs = &target->thread.uw.fpsimd_state;
|
||||||
|
|
||||||
if (target == current)
|
if (target == current)
|
||||||
@ -1381,6 +1398,9 @@ static int compat_vfp_set(struct task_struct *target,
|
|||||||
compat_ulong_t fpscr;
|
compat_ulong_t fpscr;
|
||||||
int ret, vregs_end_pos;
|
int ret, vregs_end_pos;
|
||||||
|
|
||||||
|
if (!system_supports_fpsimd())
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
uregs = &target->thread.uw.fpsimd_state;
|
uregs = &target->thread.uw.fpsimd_state;
|
||||||
|
|
||||||
vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
|
vregs_end_pos = VFP_STATE_SIZE - sizeof(compat_ulong_t);
|
||||||
@ -1438,6 +1458,7 @@ static const struct user_regset aarch32_regsets[] = {
|
|||||||
.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
|
.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
|
||||||
.size = sizeof(compat_ulong_t),
|
.size = sizeof(compat_ulong_t),
|
||||||
.align = sizeof(compat_ulong_t),
|
.align = sizeof(compat_ulong_t),
|
||||||
|
.active = fpr_active,
|
||||||
.get = compat_vfp_get,
|
.get = compat_vfp_get,
|
||||||
.set = compat_vfp_set
|
.set = compat_vfp_set
|
||||||
},
|
},
|
||||||
|
@ -285,6 +285,13 @@ void __init setup_arch(char **cmdline_p)
|
|||||||
|
|
||||||
*cmdline_p = boot_command_line;
|
*cmdline_p = boot_command_line;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If know now we are going to need KPTI then use non-global
|
||||||
|
* mappings from the start, avoiding the cost of rewriting
|
||||||
|
* everything later.
|
||||||
|
*/
|
||||||
|
arm64_use_ng_mappings = kaslr_requires_kpti();
|
||||||
|
|
||||||
early_fixmap_init();
|
early_fixmap_init();
|
||||||
early_ioremap_init();
|
early_ioremap_init();
|
||||||
|
|
||||||
|
@ -371,6 +371,8 @@ static int parse_user_sigframe(struct user_ctxs *user,
|
|||||||
goto done;
|
goto done;
|
||||||
|
|
||||||
case FPSIMD_MAGIC:
|
case FPSIMD_MAGIC:
|
||||||
|
if (!system_supports_fpsimd())
|
||||||
|
goto invalid;
|
||||||
if (user->fpsimd)
|
if (user->fpsimd)
|
||||||
goto invalid;
|
goto invalid;
|
||||||
|
|
||||||
@ -506,7 +508,7 @@ static int restore_sigframe(struct pt_regs *regs,
|
|||||||
if (err == 0)
|
if (err == 0)
|
||||||
err = parse_user_sigframe(&user, sf);
|
err = parse_user_sigframe(&user, sf);
|
||||||
|
|
||||||
if (err == 0) {
|
if (err == 0 && system_supports_fpsimd()) {
|
||||||
if (!user.fpsimd)
|
if (!user.fpsimd)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
@ -623,7 +625,7 @@ static int setup_sigframe(struct rt_sigframe_user_layout *user,
|
|||||||
|
|
||||||
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
|
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
|
||||||
|
|
||||||
if (err == 0) {
|
if (err == 0 && system_supports_fpsimd()) {
|
||||||
struct fpsimd_context __user *fpsimd_ctx =
|
struct fpsimd_context __user *fpsimd_ctx =
|
||||||
apply_user_offset(user, user->fpsimd_offset);
|
apply_user_offset(user, user->fpsimd_offset);
|
||||||
err |= preserve_fpsimd_context(fpsimd_ctx);
|
err |= preserve_fpsimd_context(fpsimd_ctx);
|
||||||
|
@ -223,7 +223,7 @@ static int compat_restore_sigframe(struct pt_regs *regs,
|
|||||||
err |= !valid_user_regs(®s->user_regs, current);
|
err |= !valid_user_regs(®s->user_regs, current);
|
||||||
|
|
||||||
aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
|
aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
|
||||||
if (err == 0)
|
if (err == 0 && system_supports_fpsimd())
|
||||||
err |= compat_restore_vfp_context(&aux->vfp);
|
err |= compat_restore_vfp_context(&aux->vfp);
|
||||||
|
|
||||||
return err;
|
return err;
|
||||||
@ -419,7 +419,7 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf,
|
|||||||
|
|
||||||
aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
|
aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
|
||||||
|
|
||||||
if (err == 0)
|
if (err == 0 && system_supports_fpsimd())
|
||||||
err |= compat_preserve_vfp_context(&aux->vfp);
|
err |= compat_preserve_vfp_context(&aux->vfp);
|
||||||
__put_user_error(0, &aux->end_magic, err);
|
__put_user_error(0, &aux->end_magic, err);
|
||||||
|
|
||||||
|
@ -37,7 +37,7 @@ static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
|
|||||||
|
|
||||||
/* Unsupported */
|
/* Unsupported */
|
||||||
if (state == ARM64_SSBD_UNKNOWN)
|
if (state == ARM64_SSBD_UNKNOWN)
|
||||||
return -EINVAL;
|
return -ENODEV;
|
||||||
|
|
||||||
/* Treat the unaffected/mitigated state separately */
|
/* Treat the unaffected/mitigated state separately */
|
||||||
if (state == ARM64_SSBD_MITIGATED) {
|
if (state == ARM64_SSBD_MITIGATED) {
|
||||||
@ -102,7 +102,7 @@ static int ssbd_prctl_get(struct task_struct *task)
|
|||||||
{
|
{
|
||||||
switch (arm64_get_ssbd_state()) {
|
switch (arm64_get_ssbd_state()) {
|
||||||
case ARM64_SSBD_UNKNOWN:
|
case ARM64_SSBD_UNKNOWN:
|
||||||
return -EINVAL;
|
return -ENODEV;
|
||||||
case ARM64_SSBD_FORCE_ENABLE:
|
case ARM64_SSBD_FORCE_ENABLE:
|
||||||
return PR_SPEC_DISABLE;
|
return PR_SPEC_DISABLE;
|
||||||
case ARM64_SSBD_KERNEL:
|
case ARM64_SSBD_KERNEL:
|
||||||
|
@ -154,14 +154,14 @@ static inline void sve_user_discard(void)
|
|||||||
sve_user_disable();
|
sve_user_disable();
|
||||||
}
|
}
|
||||||
|
|
||||||
void el0_svc_handler(struct pt_regs *regs)
|
void do_el0_svc(struct pt_regs *regs)
|
||||||
{
|
{
|
||||||
sve_user_discard();
|
sve_user_discard();
|
||||||
el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table);
|
el0_svc_common(regs, regs->regs[8], __NR_syscalls, sys_call_table);
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_COMPAT
|
#ifdef CONFIG_COMPAT
|
||||||
void el0_svc_compat_handler(struct pt_regs *regs)
|
void do_el0_svc_compat(struct pt_regs *regs)
|
||||||
{
|
{
|
||||||
el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls,
|
el0_svc_common(regs, regs->regs[7], __NR_compat_syscalls,
|
||||||
compat_sys_call_table);
|
compat_sys_call_table);
|
||||||
|
@@ -22,7 +22,12 @@
 	.text
 	.pushsection	.hyp.text, "ax"
 
+/*
+ * We treat x18 as callee-saved as the host may use it as a platform
+ * register (e.g. for shadow call stack).
+ */
 .macro save_callee_saved_regs ctxt
+	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
 	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
 	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
 	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
@@ -32,6 +37,8 @@
 .endm
 
 .macro restore_callee_saved_regs ctxt
+	// We require \ctxt is not x18-x28
+	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
 	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
 	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
 	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
@@ -48,7 +55,7 @@ ENTRY(__guest_enter)
 	// x0: vcpu
 	// x1: host context
 	// x2-x17: clobbered by macros
-	// x18: guest context
+	// x29: guest context
 
 	// Store the host regs
 	save_callee_saved_regs x1
@@ -67,31 +74,28 @@ alternative_else_nop_endif
 	ret
 
 1:
-	add	x18, x0, #VCPU_CONTEXT
+	add	x29, x0, #VCPU_CONTEXT
 
 	// Macro ptrauth_switch_to_guest format:
 	// 	ptrauth_switch_to_guest(guest cxt, tmp1, tmp2, tmp3)
 	// The below macro to restore guest keys is not implemented in C code
 	// as it may cause Pointer Authentication key signing mismatch errors
 	// when this feature is enabled for kernel code.
-	ptrauth_switch_to_guest x18, x0, x1, x2
+	ptrauth_switch_to_guest x29, x0, x1, x2
 
 	// Restore guest regs x0-x17
-	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
-	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
-	ldp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
-	ldp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
-	ldp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
-	ldp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
-	ldp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
-	ldp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
-	ldp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]
+	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
+	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
+	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
+	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]
+	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
+	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
+	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
+	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
+	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]
 
-	// Restore guest regs x19-x29, lr
-	restore_callee_saved_regs x18
+	// Restore guest regs x18-x29, lr
+	restore_callee_saved_regs x29
 
-	// Restore guest reg x18
-	ldr	x18,      [x18, #CPU_XREG_OFFSET(18)]
-
 	// Do not touch any register after this!
 	eret
@@ -114,7 +118,7 @@ ENTRY(__guest_exit)
 	// Retrieve the guest regs x0-x1 from the stack
 	ldp	x2, x3, [sp], #16	// x0, x1
 
-	// Store the guest regs x0-x1 and x4-x18
+	// Store the guest regs x0-x1 and x4-x17
 	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
 	stp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
 	stp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
@@ -123,9 +127,8 @@ ENTRY(__guest_exit)
 	stp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
 	stp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
 	stp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]
-	str	x18,      [x1, #CPU_XREG_OFFSET(18)]
 
-	// Store the guest regs x19-x29, lr
+	// Store the guest regs x18-x29, lr
 	save_callee_saved_regs x1
 
 	get_host_ctxt	x2, x3
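The CPU_XREG_OFFSET() immediates used above are simply byte offsets of the general registers within the saved guest context structure, which is why the save/restore macros can address any xN slot directly. A tiny standalone illustration of where such offsets come from (the struct below is a stand-in, not the kernel's kvm_regs):

#include <stddef.h>
#include <stdio.h>

struct guest_regs_like {		/* stand-in for the real guest context */
	unsigned long regs[31];
	unsigned long sp;
	unsigned long pc;
};

/* Analogue of CPU_XREG_OFFSET(n): byte offset of general register n. */
#define XREG_OFFSET(n)	offsetof(struct guest_regs_like, regs[n])

int main(void)
{
	printf("x18 lives at byte offset %zu, x29 at %zu\n",
	       XREG_OFFSET(18), XREG_OFFSET(29));
	return 0;
}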
@@ -28,7 +28,15 @@
 /* Check whether the FP regs were dirtied while in the host-side run loop: */
 static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
 {
-	if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
+	/*
+	 * When the system doesn't support FP/SIMD, we cannot rely on
+	 * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
+	 * abort on the very first access to FP and thus we should never
+	 * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
+	 * trap the accesses.
+	 */
+	if (!system_supports_fpsimd() ||
+	    vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
 		vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
 				      KVM_ARM64_FP_HOST);
 
@@ -1424,7 +1424,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	ID_SANITISED(ID_ISAR4_EL1),
 	ID_SANITISED(ID_ISAR5_EL1),
 	ID_SANITISED(ID_MMFR4_EL1),
-	ID_UNALLOCATED(2,7),
+	ID_SANITISED(ID_ISAR6_EL1),
 
 	/* CRm=3 */
 	ID_SANITISED(MVFR0_EL1),
@@ -1,9 +1,9 @@
 # SPDX-License-Identifier: GPL-2.0
 lib-y		:= clear_user.o delay.o copy_from_user.o	\
 		   copy_to_user.o copy_in_user.o copy_page.o	\
-		   clear_page.o memchr.o memcpy.o memmove.o memset.o	\
-		   memcmp.o strcmp.o strncmp.o strlen.o strnlen.o	\
-		   strchr.o strrchr.o tishift.o
+		   clear_page.o csum.o memchr.o memcpy.o memmove.o	\
+		   memset.o memcmp.o strcmp.o strncmp.o strlen.o	\
+		   strnlen.o strchr.o strrchr.o tishift.o
 
 ifeq ($(CONFIG_KERNEL_MODE_NEON), y)
 obj-$(CONFIG_XOR_BLOCKS)	+= xor-neon.o
@@ -34,45 +34,45 @@ alternative_else_nop_endif
 	ldp	x14, x15, [x1, #96]
 	ldp	x16, x17, [x1, #112]
 
-	mov	x18, #(PAGE_SIZE - 128)
+	add	x0, x0, #256
 	add	x1, x1, #128
 1:
-	subs	x18, x18, #128
+	tst	x0, #(PAGE_SIZE - 1)
 
 alternative_if ARM64_HAS_NO_HW_PREFETCH
 	prfm	pldl1strm, [x1, #384]
 alternative_else_nop_endif
 
-	stnp	x2, x3, [x0]
+	stnp	x2, x3, [x0, #-256]
 	ldp	x2, x3, [x1]
-	stnp	x4, x5, [x0, #16]
+	stnp	x4, x5, [x0, #16 - 256]
 	ldp	x4, x5, [x1, #16]
-	stnp	x6, x7, [x0, #32]
+	stnp	x6, x7, [x0, #32 - 256]
 	ldp	x6, x7, [x1, #32]
-	stnp	x8, x9, [x0, #48]
+	stnp	x8, x9, [x0, #48 - 256]
 	ldp	x8, x9, [x1, #48]
-	stnp	x10, x11, [x0, #64]
+	stnp	x10, x11, [x0, #64 - 256]
 	ldp	x10, x11, [x1, #64]
-	stnp	x12, x13, [x0, #80]
+	stnp	x12, x13, [x0, #80 - 256]
 	ldp	x12, x13, [x1, #80]
-	stnp	x14, x15, [x0, #96]
+	stnp	x14, x15, [x0, #96 - 256]
 	ldp	x14, x15, [x1, #96]
-	stnp	x16, x17, [x0, #112]
+	stnp	x16, x17, [x0, #112 - 256]
 	ldp	x16, x17, [x1, #112]
 
 	add	x0, x0, #128
 	add	x1, x1, #128
 
-	b.gt	1b
+	b.ne	1b
 
-	stnp	x2, x3, [x0]
-	stnp	x4, x5, [x0, #16]
-	stnp	x6, x7, [x0, #32]
-	stnp	x8, x9, [x0, #48]
-	stnp	x10, x11, [x0, #64]
-	stnp	x12, x13, [x0, #80]
-	stnp	x14, x15, [x0, #96]
-	stnp	x16, x17, [x0, #112]
+	stnp	x2, x3, [x0, #-256]
+	stnp	x4, x5, [x0, #16 - 256]
+	stnp	x6, x7, [x0, #32 - 256]
+	stnp	x8, x9, [x0, #48 - 256]
+	stnp	x10, x11, [x0, #64 - 256]
+	stnp	x12, x13, [x0, #80 - 256]
+	stnp	x14, x15, [x0, #96 - 256]
+	stnp	x16, x17, [x0, #112 - 256]
 
 	ret
ENDPROC(copy_page)
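The restructured copy_page loop above no longer needs a dedicated counter register (x18): the destination pointer is pre-biased by 256 bytes and the loop terminates when its low bits wrap to a page boundary (the tst/b.ne pair). A minimal C sketch of that termination trick only; it deliberately ignores the software pipelining of the loads and stores in the real routine:

#include <string.h>

#define PAGE_SIZE	4096UL
#define CHUNK		128UL

/* Assumes dst is page-aligned, as copy_page() requires. */
static void copy_page_sketch(unsigned char *dst, const unsigned char *src)
{
	do {
		memcpy(dst, src, CHUNK);
		dst += CHUNK;
		src += CHUNK;
		/* dst started page-aligned, so its low bits become zero
		 * exactly when one full page has been copied. */
	} while ((unsigned long)dst & (PAGE_SIZE - 1));
}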
arch/arm64/lib/csum.c (new file, 126 lines)
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2019-2020 Arm Ltd.
+
+#include <linux/compiler.h>
+#include <linux/kasan-checks.h>
+#include <linux/kernel.h>
+
+#include <net/checksum.h>
+
+/* Looks dumb, but generates nice-ish code */
+static u64 accumulate(u64 sum, u64 data)
+{
+	__uint128_t tmp = (__uint128_t)sum + data;
+	return tmp + (tmp >> 64);
+}
+
+unsigned int do_csum(const unsigned char *buff, int len)
+{
+	unsigned int offset, shift, sum;
+	const u64 *ptr;
+	u64 data, sum64 = 0;
+
+	if (unlikely(len == 0))
+		return 0;
+
+	offset = (unsigned long)buff & 7;
+	/*
+	 * This is to all intents and purposes safe, since rounding down cannot
+	 * result in a different page or cache line being accessed, and @buff
+	 * should absolutely not be pointing to anything read-sensitive. We do,
+	 * however, have to be careful not to piss off KASAN, which means using
+	 * unchecked reads to accommodate the head and tail, for which we'll
+	 * compensate with an explicit check up-front.
+	 */
+	kasan_check_read(buff, len);
+	ptr = (u64 *)(buff - offset);
+	len = len + offset - 8;
+
+	/*
+	 * Head: zero out any excess leading bytes. Shifting back by the same
+	 * amount should be at least as fast as any other way of handling the
+	 * odd/even alignment, and means we can ignore it until the very end.
+	 */
+	shift = offset * 8;
+	data = READ_ONCE_NOCHECK(*ptr++);
+#ifdef __LITTLE_ENDIAN
+	data = (data >> shift) << shift;
+#else
+	data = (data << shift) >> shift;
+#endif
+
+	/*
+	 * Body: straightforward aligned loads from here on (the paired loads
+	 * underlying the quadword type still only need dword alignment). The
+	 * main loop strictly excludes the tail, so the second loop will always
+	 * run at least once.
+	 */
+	while (unlikely(len > 64)) {
+		__uint128_t tmp1, tmp2, tmp3, tmp4;
+
+		tmp1 = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
+		tmp2 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 2));
+		tmp3 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 4));
+		tmp4 = READ_ONCE_NOCHECK(*(__uint128_t *)(ptr + 6));
+
+		len -= 64;
+		ptr += 8;
+
+		/* This is the "don't dump the carry flag into a GPR" idiom */
+		tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+		tmp2 += (tmp2 >> 64) | (tmp2 << 64);
+		tmp3 += (tmp3 >> 64) | (tmp3 << 64);
+		tmp4 += (tmp4 >> 64) | (tmp4 << 64);
+		tmp1 = ((tmp1 >> 64) << 64) | (tmp2 >> 64);
+		tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+		tmp3 = ((tmp3 >> 64) << 64) | (tmp4 >> 64);
+		tmp3 += (tmp3 >> 64) | (tmp3 << 64);
+		tmp1 = ((tmp1 >> 64) << 64) | (tmp3 >> 64);
+		tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+		tmp1 = ((tmp1 >> 64) << 64) | sum64;
+		tmp1 += (tmp1 >> 64) | (tmp1 << 64);
+		sum64 = tmp1 >> 64;
+	}
+	while (len > 8) {
+		__uint128_t tmp;
+
+		sum64 = accumulate(sum64, data);
+		tmp = READ_ONCE_NOCHECK(*(__uint128_t *)ptr);
+
+		len -= 16;
+		ptr += 2;
+
+#ifdef __LITTLE_ENDIAN
+		data = tmp >> 64;
+		sum64 = accumulate(sum64, tmp);
+#else
+		data = tmp;
+		sum64 = accumulate(sum64, tmp >> 64);
+#endif
+	}
+	if (len > 0) {
+		sum64 = accumulate(sum64, data);
+		data = READ_ONCE_NOCHECK(*ptr);
+		len -= 8;
+	}
+	/*
+	 * Tail: zero any over-read bytes similarly to the head, again
+	 * preserving odd/even alignment.
+	 */
+	shift = len * -8;
+#ifdef __LITTLE_ENDIAN
+	data = (data << shift) >> shift;
+#else
+	data = (data >> shift) << shift;
+#endif
+	sum64 = accumulate(sum64, data);
+
+	/* Finally, folding */
+	sum64 += (sum64 >> 32) | (sum64 << 32);
+	sum = sum64 >> 32;
+	sum += (sum >> 16) | (sum << 16);
+	if (offset & 1)
+		return (u16)swab32(sum);
+
+	return sum >> 16;
+}
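do_csum() above keeps a running 64-bit ones'-complement sum by folding each addition's carry straight back in (the accumulate() helper), and only folds down to 32 and then 16 bits at the very end. A standalone sketch of the same folding on made-up data, not the kernel routine itself:

#include <stdint.h>
#include <stdio.h>

static uint64_t accumulate(uint64_t sum, uint64_t data)
{
	__uint128_t tmp = (__uint128_t)sum + data;

	/* Fold the carry out of the 128-bit addition back into the sum. */
	return (uint64_t)(tmp + (tmp >> 64));
}

int main(void)
{
	uint64_t words[] = { 0xffffffffffffffffULL, 0x1ULL, 0x0123456789abcdefULL };
	uint64_t sum = 0;
	uint32_t sum32;

	for (unsigned int i = 0; i < sizeof(words) / sizeof(words[0]); i++)
		sum = accumulate(sum, words[i]);

	/* Fold 64 -> 32 -> 16 bits, mirroring the tail of do_csum(). */
	sum += (sum >> 32) | (sum << 32);
	sum32 = sum >> 32;
	sum32 += (sum32 >> 16) | (sum32 << 16);
	printf("16-bit folded sum: 0x%04x\n", (uint16_t)(sum32 >> 16));
	return 0;
}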
@@ -29,15 +29,9 @@ static cpumask_t tlb_flush_pending;
 #define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
 #define ASID_FIRST_VERSION	(1UL << asid_bits)
 
-#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
-#define NUM_USER_ASIDS		(ASID_FIRST_VERSION >> 1)
-#define asid2idx(asid)		(((asid) & ~ASID_MASK) >> 1)
-#define idx2asid(idx)		(((idx) << 1) & ~ASID_MASK)
-#else
-#define NUM_USER_ASIDS		(ASID_FIRST_VERSION)
+#define NUM_USER_ASIDS		ASID_FIRST_VERSION
 #define asid2idx(asid)		((asid) & ~ASID_MASK)
 #define idx2asid(idx)		asid2idx(idx)
-#endif
 
 /* Get the ASIDBits supported by the current CPU */
 static u32 get_cpu_asid_bits(void)
@@ -77,13 +71,33 @@ void verify_cpu_asid_bits(void)
 	}
 }
 
+static void set_kpti_asid_bits(void)
+{
+	unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
+	/*
+	 * In case of KPTI kernel/user ASIDs are allocated in
+	 * pairs, the bottom bit distinguishes the two: if it
+	 * is set, then the ASID will map only userspace. Thus
+	 * mark even as reserved for kernel.
+	 */
+	memset(asid_map, 0xaa, len);
+}
+
+static void set_reserved_asid_bits(void)
+{
+	if (arm64_kernel_unmapped_at_el0())
+		set_kpti_asid_bits();
+	else
+		bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+}
+
 static void flush_context(void)
 {
 	int i;
 	u64 asid;
 
 	/* Update the list of reserved ASIDs and the ASID bitmap. */
-	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+	set_reserved_asid_bits();
 
 	for_each_possible_cpu(i) {
 		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
@@ -261,6 +275,14 @@ static int asids_init(void)
 		panic("Failed to allocate bitmap for %lu ASIDs\n",
 		      NUM_USER_ASIDS);
 
+	/*
+	 * We cannot call set_reserved_asid_bits() here because CPU
+	 * caps are not finalized yet, so it is safer to assume KPTI
+	 * and reserve kernel ASID's from beginning.
+	 */
+	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
+		set_kpti_asid_bits();
+
 	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
 	return 0;
 }
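set_kpti_asid_bits() above relies on kernel/user ASIDs coming in pairs that differ only in bit 0, so half of the allocation bitmap can be pre-reserved with a single memset of 0xaa (binary 10101010). A standalone sketch of that trick; the bitmap size is made up and the parity convention is the one stated in the kernel comment:

#include <stdio.h>
#include <string.h>

#define NUM_ASIDS 256			/* arbitrary size for the sketch */

int main(void)
{
	unsigned char map[NUM_ASIDS / 8];
	int reserved = 0;

	/* One memset marks every other bitmap entry: 0xaa == 0b10101010. */
	memset(map, 0xaa, sizeof(map));

	for (int i = 0; i < NUM_ASIDS; i++)
		reserved += (map[i / 8] >> (i % 8)) & 1;

	printf("%d of %d ASIDs pre-reserved, one per kernel/user pair\n",
	       reserved, NUM_ASIDS);
	return 0;
}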
@@ -54,7 +54,7 @@ static int change_memory_common(unsigned long addr, int numpages,
 				pgprot_t set_mask, pgprot_t clear_mask)
 {
 	unsigned long start = addr;
-	unsigned long size = PAGE_SIZE*numpages;
+	unsigned long size = PAGE_SIZE * numpages;
 	unsigned long end = start + size;
 	struct vm_struct *area;
 	int i;
@@ -42,7 +42,14 @@
 #define TCR_KASAN_FLAGS 0
 #endif
 
-#define MAIR(attr, mt)	((attr) << ((mt) * 8))
+/* Default MAIR_EL1 */
+#define MAIR_EL1_SET							\
+	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
+	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
+	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) |		\
+	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
+	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
+	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT))
 
 #ifdef CONFIG_CPU_PM
 /**
@@ -250,15 +257,15 @@ ENTRY(idmap_kpti_install_ng_mappings)
 	/* We're the boot CPU. Wait for the others to catch up */
 	sevl
 1:	wfe
-	ldaxr	w18, [flag_ptr]
-	eor	w18, w18, num_cpus
-	cbnz	w18, 1b
+	ldaxr	w17, [flag_ptr]
+	eor	w17, w17, num_cpus
+	cbnz	w17, 1b
 
 	/* We need to walk swapper, so turn off the MMU. */
 	pre_disable_mmu_workaround
-	mrs	x18, sctlr_el1
-	bic	x18, x18, #SCTLR_ELx_M
-	msr	sctlr_el1, x18
+	mrs	x17, sctlr_el1
+	bic	x17, x17, #SCTLR_ELx_M
+	msr	sctlr_el1, x17
 	isb
 
 	/* Everybody is enjoying the idmap, so we can rewrite swapper. */
@@ -281,9 +288,9 @@ skip_pgd:
 	isb
 
 	/* We're done: fire up the MMU again */
-	mrs	x18, sctlr_el1
-	orr	x18, x18, #SCTLR_ELx_M
-	msr	sctlr_el1, x18
+	mrs	x17, sctlr_el1
+	orr	x17, x17, #SCTLR_ELx_M
+	msr	sctlr_el1, x17
 	isb
 
 	/*
@@ -353,34 +360,9 @@ skip_pte:
 	b.ne	do_pte
 	b	next_pmd
 
-	/* Secondary CPUs end up here */
-__idmap_kpti_secondary:
-	/* Uninstall swapper before surgery begins */
-	__idmap_cpu_set_reserved_ttbr1 x18, x17
-
-	/* Increment the flag to let the boot CPU we're ready */
-1:	ldxr	w18, [flag_ptr]
-	add	w18, w18, #1
-	stxr	w17, w18, [flag_ptr]
-	cbnz	w17, 1b
-
-	/* Wait for the boot CPU to finish messing around with swapper */
-	sevl
-1:	wfe
-	ldxr	w18, [flag_ptr]
-	cbnz	w18, 1b
-
-	/* All done, act like nothing happened */
-	offset_ttbr1 swapper_ttb, x18
-	msr	ttbr1_el1, swapper_ttb
-	isb
-	ret
-
 	.unreq	cpu
 	.unreq	num_cpus
 	.unreq	swapper_pa
-	.unreq	swapper_ttb
-	.unreq	flag_ptr
 	.unreq	cur_pgdp
 	.unreq	end_pgdp
 	.unreq	pgd
@@ -393,6 +375,32 @@ __idmap_kpti_secondary:
 	.unreq	cur_ptep
 	.unreq	end_ptep
 	.unreq	pte
+
+	/* Secondary CPUs end up here */
+__idmap_kpti_secondary:
+	/* Uninstall swapper before surgery begins */
+	__idmap_cpu_set_reserved_ttbr1 x16, x17
+
+	/* Increment the flag to let the boot CPU we're ready */
+1:	ldxr	w16, [flag_ptr]
+	add	w16, w16, #1
+	stxr	w17, w16, [flag_ptr]
+	cbnz	w17, 1b
+
+	/* Wait for the boot CPU to finish messing around with swapper */
+	sevl
+1:	wfe
+	ldxr	w16, [flag_ptr]
+	cbnz	w16, 1b
+
+	/* All done, act like nothing happened */
+	offset_ttbr1 swapper_ttb, x16
+	msr	ttbr1_el1, swapper_ttb
+	isb
+	ret
+
+	.unreq	swapper_ttb
+	.unreq	flag_ptr
 ENDPROC(idmap_kpti_install_ng_mappings)
 	.popsection
 #endif
@@ -416,23 +424,9 @@ ENTRY(__cpu_setup)
 	enable_dbg			// since this is per-cpu
 	reset_pmuserenr_el0 x0		// Disable PMU access from EL0
 	/*
-	 * Memory region attributes for LPAE:
-	 *
-	 *   n = AttrIndx[2:0]
-	 *			n	MAIR
-	 *   DEVICE_nGnRnE	000	00000000
-	 *   DEVICE_nGnRE	001	00000100
-	 *   DEVICE_GRE		010	00001100
-	 *   NORMAL_NC		011	01000100
-	 *   NORMAL		100	11111111
-	 *   NORMAL_WT		101	10111011
+	 * Memory region attributes
 	 */
-	ldr	x5, =MAIR(0x00, MT_DEVICE_nGnRnE) |	\
-		     MAIR(0x04, MT_DEVICE_nGnRE) |	\
-		     MAIR(0x0c, MT_DEVICE_GRE) |	\
-		     MAIR(0x44, MT_NORMAL_NC) |		\
-		     MAIR(0xff, MT_NORMAL) |		\
-		     MAIR(0xbb, MT_NORMAL_WT)
+	mov_q	x5, MAIR_EL1_SET
 	msr	mair_el1, x5
 	/*
 	 * Prepare SCTLR
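The removed MAIR() helper and its comment table show the underlying encoding: each memory type owns one byte of MAIR_EL1, indexed by the type number, and the new MAIR_EL1_SET just builds the same value from named fields at compile time. A small worked example of the byte packing, using the attribute values and index order taken from the removed comment table:

#include <stdint.h>
#include <stdio.h>

/* attr occupies byte 'mt' of the 64-bit MAIR register */
#define MAIR(attr, mt)	((uint64_t)(attr) << ((mt) * 8))

enum { MT_DEVICE_nGnRnE, MT_DEVICE_nGnRE, MT_DEVICE_GRE,
       MT_NORMAL_NC, MT_NORMAL, MT_NORMAL_WT };

int main(void)
{
	uint64_t mair = MAIR(0x00, MT_DEVICE_nGnRnE) |
			MAIR(0x04, MT_DEVICE_nGnRE)  |
			MAIR(0x0c, MT_DEVICE_GRE)    |
			MAIR(0x44, MT_NORMAL_NC)     |
			MAIR(0xff, MT_NORMAL)        |
			MAIR(0xbb, MT_NORMAL_WT);

	/* Prints 0x0000bbff440c0400: MT_NORMAL (index 4) lands in bits [39:32]. */
	printf("MAIR_EL1 = 0x%016llx\n", (unsigned long long)mair);
	return 0;
}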
@@ -633,13 +633,17 @@ static int ddr_perf_probe(struct platform_device *pdev)
 
 	if (ret < 0) {
 		dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
-		goto ddr_perf_err;
+		goto cpuhp_state_err;
 	}
 
 	pmu->cpuhp_state = ret;
 
 	/* Register the pmu instance for cpu hotplug */
-	cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+	ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+	if (ret) {
+		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
+		goto cpuhp_instance_err;
+	}
 
 	/* Request irq */
 	irq = of_irq_get(np, 0);
@@ -673,9 +677,10 @@ static int ddr_perf_probe(struct platform_device *pdev)
 	return 0;
 
 ddr_perf_err:
-	if (pmu->cpuhp_state)
-		cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+cpuhp_instance_err:
+	cpuhp_remove_multi_state(pmu->cpuhp_state);
+cpuhp_state_err:
 	ida_simple_remove(&ddr_ida, pmu->id);
 	dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
 	return ret;
@@ -686,6 +691,7 @@ static int ddr_perf_remove(struct platform_device *pdev)
 	struct ddr_pmu *pmu = platform_get_drvdata(pdev);
 
 	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
+	cpuhp_remove_multi_state(pmu->cpuhp_state);
 	irq_set_affinity_hint(pmu->irq, NULL);
 
 	perf_pmu_unregister(&pmu->pmu);
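The relabelled error path above is the usual kernel "goto ladder": each label undoes exactly the steps that had already succeeded, in reverse order, so a failure at any point only rolls back what it must. A minimal standalone sketch of the pattern (the setup_a/setup_b names are hypothetical, not the driver's functions):

#include <stdio.h>

static int setup_a(void) { return 0; }		/* pretend these can fail */
static int setup_b(void) { return -1; }
static void undo_a(void) { puts("undo a"); }

static int probe(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		goto err;

	ret = setup_b();
	if (ret)
		goto err_undo_a;		/* only undoes what succeeded */

	return 0;

err_undo_a:
	undo_a();
err:
	fprintf(stderr, "probe failed: %d\n", ret);
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}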
@@ -337,38 +337,44 @@ void hisi_uncore_pmu_disable(struct pmu *pmu)
 	hisi_pmu->ops->stop_counters(hisi_pmu);
 }
 
 
 /*
- * Read Super CPU cluster and CPU cluster ID from MPIDR_EL1.
- * If multi-threading is supported, On Huawei Kunpeng 920 SoC whose cpu
- * core is tsv110, CCL_ID is the low 3-bits in MPIDR[Aff2] and SCCL_ID
- * is the upper 5-bits of Aff2 field; while for other cpu types, SCCL_ID
- * is in MPIDR[Aff3] and CCL_ID is in MPIDR[Aff2], if not, SCCL_ID
- * is in MPIDR[Aff2] and CCL_ID is in MPIDR[Aff1].
+ * The Super CPU Cluster (SCCL) and CPU Cluster (CCL) IDs can be
+ * determined from the MPIDR_EL1, but the encoding varies by CPU:
+ *
+ * - For MT variants of TSV110:
+ *   SCCL is Aff2[7:3], CCL is Aff2[2:0]
+ *
+ * - For other MT parts:
+ *   SCCL is Aff3[7:0], CCL is Aff2[7:0]
+ *
+ * - For non-MT parts:
+ *   SCCL is Aff2[7:0], CCL is Aff1[7:0]
  */
-static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id)
+static void hisi_read_sccl_and_ccl_id(int *scclp, int *cclp)
 {
 	u64 mpidr = read_cpuid_mpidr();
+	int aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3);
+	int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+	int aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	bool mt = mpidr & MPIDR_MT_BITMASK;
+	int sccl, ccl;
 
-	if (mpidr & MPIDR_MT_BITMASK) {
-		if (read_cpuid_part_number() == HISI_CPU_PART_TSV110) {
-			int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2);
-
-			if (sccl_id)
-				*sccl_id = aff2 >> 3;
-			if (ccl_id)
-				*ccl_id = aff2 & 0x7;
-		} else {
-			if (sccl_id)
-				*sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 3);
-			if (ccl_id)
-				*ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
-		}
+	if (mt && read_cpuid_part_number() == HISI_CPU_PART_TSV110) {
+		sccl = aff2 >> 3;
+		ccl = aff2 & 0x7;
+	} else if (mt) {
+		sccl = aff3;
+		ccl = aff2;
 	} else {
-		if (sccl_id)
-			*sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
-		if (ccl_id)
-			*ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+		sccl = aff2;
+		ccl = aff1;
 	}
+
+	if (scclp)
+		*scclp = sccl;
+	if (cclp)
+		*cclp = ccl;
 }
 
 /*
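A standalone sketch of the affinity-field slicing the helper relies on: MPIDR_EL1 carries Aff0-Aff2 in bytes 0-2, the MT flag in bit 24 and Aff3 in byte 4, and the three cases listed in the new comment just read those fields differently. The macro names and the MPIDR value below are made up for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MPIDR_MT_BIT		(1ULL << 24)
#define AFF(mpidr, level)	((int)(((mpidr) >> ((level) == 3 ? 32 : (level) * 8)) & 0xff))

static void read_sccl_ccl(uint64_t mpidr, bool tsv110, int *sccl, int *ccl)
{
	bool mt = mpidr & MPIDR_MT_BIT;

	if (mt && tsv110) {		/* SCCL = Aff2[7:3], CCL = Aff2[2:0] */
		*sccl = AFF(mpidr, 2) >> 3;
		*ccl  = AFF(mpidr, 2) & 0x7;
	} else if (mt) {		/* SCCL = Aff3, CCL = Aff2 */
		*sccl = AFF(mpidr, 3);
		*ccl  = AFF(mpidr, 2);
	} else {			/* SCCL = Aff2, CCL = Aff1 */
		*sccl = AFF(mpidr, 2);
		*ccl  = AFF(mpidr, 1);
	}
}

int main(void)
{
	int sccl, ccl;

	read_sccl_ccl(0x0000000081290300ULL, true, &sccl, &ccl);	/* made-up value */
	printf("SCCL=%d CCL=%d\n", sccl, ccl);
	return 0;
}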
@@ -11,6 +11,8 @@
 #define PROT_WRITE	0x2		/* page can be written */
 #define PROT_EXEC	0x4		/* page can be executed */
 #define PROT_SEM	0x8		/* page may be used for atomic ops */
+/* 0x10		reserved for arch-specific use */
+/* 0x20		reserved for arch-specific use */
 #define PROT_NONE	0x0		/* page can not be accessed */
 #define PROT_GROWSDOWN	0x01000000	/* mprotect flag: extend change to start of growsdown vma */
 #define PROT_GROWSUP	0x02000000	/* mprotect flag: extend change to end of growsup vma */
@@ -159,6 +159,10 @@ static int do_kexec_load(unsigned long entry, unsigned long nr_segments,
 
 	kimage_terminate(image);
 
+	ret = machine_kexec_post_load(image);
+	if (ret)
+		goto out;
+
 	/* Install the new kernel and uninstall the old */
 	image = xchg(dest_image, image);
 
@@ -589,6 +589,12 @@ static void kimage_free_extra_pages(struct kimage *image)
 	kimage_free_page_list(&image->unusable_pages);
 
 }
 
+int __weak machine_kexec_post_load(struct kimage *image)
+{
+	return 0;
+}
+
 void kimage_terminate(struct kimage *image)
 {
 	if (*image->entry != 0)
@@ -1171,7 +1177,7 @@ int kernel_kexec(void)
 		 * CPU hotplug again; so re-enable it here.
 		 */
 		cpu_hotplug_enable();
-		pr_emerg("Starting new kernel\n");
+		pr_notice("Starting new kernel\n");
 		machine_shutdown();
 	}
 
@@ -441,6 +441,10 @@ SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
 
 	kimage_terminate(image);
 
+	ret = machine_kexec_post_load(image);
+	if (ret)
+		goto out;
+
 	/*
 	 * Free up any temporary buffers allocated which are not needed
 	 * after image has been loaded
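machine_kexec_post_load() is introduced as a __weak default so the generic load paths above always have something to call, while an architecture that needs post-load fixups (the arm64 kdump work in this merge is the intended user) supplies its own non-weak definition and the linker prefers it. A standalone sketch of the weak-symbol mechanism itself, with a made-up body:

#include <stdio.h>

struct kimage;				/* opaque for this sketch */

/*
 * Generic default, marked weak: if another object file in the link provides
 * a non-weak machine_kexec_post_load(), that definition wins and this one is
 * discarded. That is how an architecture hooks in its post-load work.
 */
int __attribute__((weak)) machine_kexec_post_load(struct kimage *image)
{
	(void)image;
	puts("generic no-op post-load hook");
	return 0;
}

int main(void)
{
	/* With no arch override linked in, the weak default runs. */
	return machine_kexec_post_load(NULL);
}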
@@ -13,6 +13,8 @@ void kimage_terminate(struct kimage *image);
 int kimage_is_destination_range(struct kimage *image,
 				unsigned long start, unsigned long end);
 
+int machine_kexec_post_load(struct kimage *image);
+
 extern struct mutex kexec_mutex;
 
 #ifdef CONFIG_KEXEC_FILE
@@ -223,7 +223,7 @@ KASAN_SANITIZE_stackdepot.o := n
 KCOV_INSTRUMENT_stackdepot.o := n
 
 libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
-	       fdt_empty_tree.o
+	       fdt_empty_tree.o fdt_addresses.o
 $(foreach file, $(libfdt_files), \
 	$(eval CFLAGS_$(file) = -I $(srctree)/scripts/dtc/libfdt))
 lib-$(CONFIG_LIBFDT) += $(libfdt_files)

lib/fdt_addresses.c (new file, 2 lines)
@@ -0,0 +1,2 @@
+#include <linux/libfdt_env.h>
+#include "../scripts/dtc/libfdt/fdt_addresses.c"
@@ -31,6 +31,10 @@ cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -E -x c /dev/null -o /de
 # Return y if the linker supports <flag>, n otherwise
 ld-option = $(success,$(LD) -v $(1))
 
+# $(as-instr,<instr>)
+# Return y if the assembler supports <instr>, n otherwise
+as-instr = $(success,printf "%b\n" "$(1)" | $(CC) $(CLANG_FLAGS) -c -x assembler -o /dev/null -)
+
 # check if $(CC) and $(LD) exist
 $(error-if,$(failure,command -v $(CC)),compiler '$(CC)' not found)
 $(error-if,$(failure,command -v $(LD)),linker '$(LD)' not found)
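A sketch of how a Kconfig file can then use the new macro to gate a symbol on assembler support for an instruction; the symbol name and the instruction chosen here (the ARMv8.5 speculation barrier) are illustrative and not taken from this merge:

config AS_HAS_SB_BARRIER
	def_bool $(as-instr,sb)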