/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/processor.h
 *
 * Copyright (C) 1995-1999 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_PROCESSOR_H
#define __ASM_PROCESSOR_H

#define KERNEL_DS		UL(-1)
#define USER_DS			((UL(1) << MAX_USER_VA_BITS) - 1)

/*
 * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
 * no point in shifting all network buffers by 2 bytes just to make some IP
 * header fields appear aligned in memory, potentially sacrificing some DMA
 * performance on some platforms.
 */
#define NET_IP_ALIGN	0
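/*
 * Usage note (illustrative): the network stack and drivers consume this
 * value as extra headroom when setting up receive buffers, e.g.
 *
 *	skb_reserve(skb, NET_IP_ALIGN);
 *
 * so defining it as 0 simply makes that reservation a no-op on arm64.
 */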

#ifndef __ASSEMBLY__

#include <linux/build_bug.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/stddef.h>
#include <linux/string.h>

#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/hw_breakpoint.h>
#include <asm/lse.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pointer_auth.h>
#include <asm/ptrace.h>
#include <asm/types.h>

/*
 * TASK_SIZE - the maximum size of a user space task.
 * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
 */

#define DEFAULT_MAP_WINDOW_64	(UL(1) << VA_BITS_MIN)
#define TASK_SIZE_64		(UL(1) << vabits_actual)
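/*
 * Note: TASK_SIZE_64 tracks the VA size discovered at boot (vabits_actual),
 * while DEFAULT_MAP_WINDOW_64 is capped at the compile-time minimum
 * (VA_BITS_MIN), so on 52-bit VA hardware the range above the default
 * window is only handed out to tasks that explicitly request it.
 */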

#ifdef CONFIG_COMPAT
#if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS)
/*
 * With CONFIG_ARM64_64K_PAGES enabled, the last page is occupied
 * by the compat vectors page.
 */
#define TASK_SIZE_32		UL(0x100000000)
#else
#define TASK_SIZE_32		(UL(0x100000000) - PAGE_SIZE)
#endif /* CONFIG_ARM64_64K_PAGES && CONFIG_KUSER_HELPERS */
#define TASK_SIZE		(test_thread_flag(TIF_32BIT) ? \
				TASK_SIZE_32 : TASK_SIZE_64)
#define TASK_SIZE_OF(tsk)	(test_tsk_thread_flag(tsk, TIF_32BIT) ? \
				TASK_SIZE_32 : TASK_SIZE_64)
#define DEFAULT_MAP_WINDOW	(test_thread_flag(TIF_32BIT) ? \
				TASK_SIZE_32 : DEFAULT_MAP_WINDOW_64)
#else
#define TASK_SIZE		TASK_SIZE_64
#define DEFAULT_MAP_WINDOW	DEFAULT_MAP_WINDOW_64
#endif /* CONFIG_COMPAT */

#ifdef CONFIG_ARM64_FORCE_52BIT
#define STACK_TOP_MAX		TASK_SIZE_64
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(TASK_SIZE / 4))
#else
#define STACK_TOP_MAX		DEFAULT_MAP_WINDOW_64
#define TASK_UNMAPPED_BASE	(PAGE_ALIGN(DEFAULT_MAP_WINDOW / 4))
#endif /* CONFIG_ARM64_FORCE_52BIT */

#ifdef CONFIG_COMPAT
#define AARCH32_VECTORS_BASE	0xffff0000
#define STACK_TOP		(test_thread_flag(TIF_32BIT) ? \
				AARCH32_VECTORS_BASE : STACK_TOP_MAX)
#else
#define STACK_TOP		STACK_TOP_MAX
#endif /* CONFIG_COMPAT */

#ifndef CONFIG_ARM64_FORCE_52BIT
#define arch_get_mmap_end(addr) ((addr > DEFAULT_MAP_WINDOW) ? TASK_SIZE :\
				DEFAULT_MAP_WINDOW)

#define arch_get_mmap_base(addr, base) ((addr > DEFAULT_MAP_WINDOW) ? \
					base + TASK_SIZE - DEFAULT_MAP_WINDOW :\
					base)
#endif /* CONFIG_ARM64_FORCE_52BIT */
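/*
 * Together these implement the large-VA opt-in: an unhinted mmap() stays
 * inside DEFAULT_MAP_WINDOW, and only a hint address above the window
 * extends the search range up to the full TASK_SIZE.
 */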

extern phys_addr_t arm64_dma_phys_limit;
#define ARCH_LOW_ADDRESS_LIMIT	(arm64_dma_phys_limit - 1)

struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* Have we suspended stepping by a debugger? */
	int			suspended_step;
	/* Allow breakpoints and watchpoints to be disabled for this thread. */
	int			bps_disabled;
	int			wps_disabled;
	/* Hardware breakpoints pinned to this task. */
	struct perf_event	*hbp_break[ARM_MAX_BRP];
	struct perf_event	*hbp_watch[ARM_MAX_WRP];
#endif
};

struct cpu_context {
	unsigned long x19;
	unsigned long x20;
	unsigned long x21;
	unsigned long x22;
	unsigned long x23;
	unsigned long x24;
	unsigned long x25;
	unsigned long x26;
	unsigned long x27;
	unsigned long x28;
	unsigned long fp;
	unsigned long sp;
	unsigned long pc;
};
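
/*
 * Note: x19-x28 and fp are the callee-saved registers of the AAPCS64
 * procedure call standard, and pc holds the saved link register, so this
 * is exactly the state cpu_switch_to() must preserve across a context
 * switch; caller-saved registers are handled by the normal call sequence.
 */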

struct thread_struct {
	struct cpu_context	cpu_context;	/* cpu context */

	/*
	 * Whitelisted fields for hardened usercopy:
	 * Maintainers must ensure manually that this contains no
	 * implicit padding.
	 */
	struct {
		unsigned long	tp_value;	/* TLS register */
		unsigned long	tp2_value;
		struct user_fpsimd_state fpsimd_state;
	} uw;

	unsigned int		fpsimd_cpu;
	void			*sve_state;	/* SVE registers, if any */
	unsigned int		sve_vl;		/* SVE vector length */
	unsigned int		sve_vl_onexec;	/* SVE vl after next exec */
	unsigned long		fault_address;	/* fault info */
	unsigned long		fault_code;	/* ESR_EL1 value */
	struct debug_info	debug;		/* debugging */
#ifdef CONFIG_ARM64_PTR_AUTH
	struct ptrauth_keys	keys_user;
#endif
};

static inline void arch_thread_struct_whitelist(unsigned long *offset,
						unsigned long *size)
{
	/* Verify that there is no padding among the whitelisted fields: */
	BUILD_BUG_ON(sizeof_field(struct thread_struct, uw) !=
		     sizeof_field(struct thread_struct, uw.tp_value) +
		     sizeof_field(struct thread_struct, uw.tp2_value) +
		     sizeof_field(struct thread_struct, uw.fpsimd_state));

	*offset = offsetof(struct thread_struct, uw);
	*size = sizeof_field(struct thread_struct, uw);
}

#ifdef CONFIG_COMPAT
#define task_user_tls(t)						\
({									\
	unsigned long *__tls;						\
	if (is_compat_thread(task_thread_info(t)))			\
		__tls = &(t)->thread.uw.tp2_value;			\
	else								\
		__tls = &(t)->thread.uw.tp_value;			\
	__tls;								\
})
#else
#define task_user_tls(t)	(&(t)->thread.uw.tp_value)
#endif
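/*
 * Note: for compat tasks uw.tp_value backs the AArch32 TLS register
 * (TPIDRRO_EL0), so task_user_tls() hands back uw.tp2_value as the slot
 * shadowing TPIDR_EL0 instead.
 */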

/* Sync TPIDR_EL0 back to thread_struct for current */
void tls_preserve_current_state(void);

#define INIT_THREAD {				\
	.fpsimd_cpu = NR_CPUS,			\
}

static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
{
	memset(regs, 0, sizeof(*regs));
	forget_syscall(regs);
	regs->pc = pc;

	if (system_uses_irq_prio_masking())
		regs->pmr_save = GIC_PRIO_IRQON;
}

static inline void set_ssbs_bit(struct pt_regs *regs)
{
	regs->pstate |= PSR_SSBS_BIT;
}

static inline void set_compat_ssbs_bit(struct pt_regs *regs)
{
	regs->pstate |= PSR_AA32_SSBS_BIT;
}
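
/*
 * Both start_thread() and compat_start_thread() below leave SSBS set for
 * new userspace threads, permitting speculative store bypass, unless the
 * SSBD mitigation has been forced on, in which case the PSTATE bit stays
 * clear.
 */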

static inline void start_thread(struct pt_regs *regs, unsigned long pc,
				unsigned long sp)
{
	start_thread_common(regs, pc);
	regs->pstate = PSR_MODE_EL0t;

	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
		set_ssbs_bit(regs);

	regs->sp = sp;
}

#ifdef CONFIG_COMPAT
static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
				       unsigned long sp)
{
	start_thread_common(regs, pc);
	regs->pstate = PSR_AA32_MODE_USR;
	if (pc & 1)		/* bit 0 of the entry PC selects Thumb state */
		regs->pstate |= PSR_AA32_T_BIT;

#ifdef __AARCH64EB__
	regs->pstate |= PSR_AA32_E_BIT;
#endif

	if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
		set_compat_ssbs_bit(regs);

	regs->compat_sp = sp;
}
#endif

/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

unsigned long get_wchan(struct task_struct *p);

static inline void cpu_relax(void)
{
	asm volatile("yield" ::: "memory");
}

/* Thread switching */
extern struct task_struct *cpu_switch_to(struct task_struct *prev,
					 struct task_struct *next);

#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
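/*
 * This relies on the saved user pt_regs living at the very top of the
 * task's kernel stack: task_stack_page() + THREAD_SIZE is the stack end,
 * and the frame sits immediately below it.
 */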

#define KSTK_EIP(tsk)	((unsigned long)task_pt_regs(tsk)->pc)
#define KSTK_ESP(tsk)	user_stack_pointer(task_pt_regs(tsk))

/*
 * Prefetching support
 */
#define ARCH_HAS_PREFETCH
static inline void prefetch(const void *ptr)
{
	asm volatile("prfm pldl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_PREFETCHW
static inline void prefetchw(const void *ptr)
{
	asm volatile("prfm pstl1keep, %a0\n" : : "p" (ptr));
}

#define ARCH_HAS_SPINLOCK_PREFETCH
static inline void spin_lock_prefetch(const void *ptr)
{
	asm volatile(ARM64_LSE_ATOMIC_INSN(
		     "prfm pstl1strm, %a0",
		     "nop") : : "p" (ptr));
}
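
/*
 * PRFM hint mnemonics used above: pld/pst select prefetch-for-load vs
 * prefetch-for-store, l1 targets the L1 cache, and keep vs strm choose a
 * temporal ("keep in cache") vs streaming (use-once) policy. When the CPU
 * advertises LSE atomics, the alternatives framework patches the
 * spin_lock_prefetch() hint to a nop.
 */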

extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */
extern void __init minsigstksz_setup(void);

/*
 * Not at the top of the file due to a direct #include cycle between
 * <asm/fpsimd.h> and <asm/processor.h>. Deferring this #include
 * ensures that contents of processor.h are visible to fpsimd.h even if
 * processor.h is included first.
 *
 * These prctl helpers are the only things in this file that require
 * fpsimd.h. The core code expects them to be in this header.
 */
#include <asm/fpsimd.h>

/* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
#define SVE_SET_VL(arg)	sve_set_current_vl(arg)
#define SVE_GET_VL()	sve_get_current_vl()
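/*
 * Userspace reaches these via prctl(), e.g. (illustrative)
 *
 *	prctl(PR_SVE_SET_VL, vl | PR_SVE_VL_INHERIT);
 *	vl = prctl(PR_SVE_GET_VL) & PR_SVE_VL_LEN_MASK;
 *
 * with the flag and mask encodings defined in <linux/prctl.h>.
 */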

/* PR_PAC_RESET_KEYS prctl */
#define PAC_RESET_KEYS(tsk, arg)	ptrauth_prctl_reset_keys(tsk, arg)
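/*
 * Example (illustrative): a thread can discard its pointer authentication
 * keys with prctl(PR_PAC_RESET_KEYS, PR_PAC_APIAKEY, 0, 0, 0), or pass 0
 * as the key argument to reset all of them.
 */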

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */
long set_tagged_addr_ctrl(unsigned long arg);
long get_tagged_addr_ctrl(void);
#define SET_TAGGED_ADDR_CTRL(arg)	set_tagged_addr_ctrl(arg)
#define GET_TAGGED_ADDR_CTRL()		get_tagged_addr_ctrl()
#endif
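/*
 * Example (illustrative): syscalls reject tagged pointers until the task
 * opts in with prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE,
 * 0, 0, 0).
 */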

/*
 * For CONFIG_GCC_PLUGIN_STACKLEAK
 *
 * These need to be macros because otherwise we get stuck in a nightmare
 * of header definitions for the use of task_stack_page.
 */

#define current_top_of_stack()						\
({									\
	struct stack_info _info;					\
	BUG_ON(!on_accessible_stack(current, current_stack_pointer, &_info)); \
	_info.high;							\
})
#define on_thread_stack()	(on_task_stack(current, current_stack_pointer, NULL))

#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */