/*
 * Based on arch/arm/include/asm/ptrace.h
 *
 * Copyright (C) 1996-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PTRACE_H
#define __ASM_PTRACE_H

#include <uapi/asm/ptrace.h>

/* Current Exception Level values, as contained in CurrentEL */
#define CurrentEL_EL1		(1 << 2)
#define CurrentEL_EL2		(2 << 2)
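
/*
 * Illustrative sketch (not part of this header): early boot code can
 * compare the CurrentEL system register against these values to decide
 * which exception level it was entered at, along the lines of:
 *
 *	u64 el;
 *
 *	asm volatile("mrs %0, CurrentEL" : "=r" (el));
 *	if (el == CurrentEL_EL2)
 *		configure_el2();	// hypothetical helper
 */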

/* Additional SPSR bits not exposed in the UABI */
#define PSR_IL_BIT		(1 << 20)

/* AArch32-specific ptrace requests */
#define COMPAT_PTRACE_GETREGS		12
#define COMPAT_PTRACE_SETREGS		13
#define COMPAT_PTRACE_GET_THREAD_AREA	22
#define COMPAT_PTRACE_SET_SYSCALL	23
#define COMPAT_PTRACE_GETVFPREGS	27
#define COMPAT_PTRACE_SETVFPREGS	28
#define COMPAT_PTRACE_GETHBPREGS	29
#define COMPAT_PTRACE_SETHBPREGS	30
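
/*
 * Illustrative sketch (userspace, not part of this header): a 32-bit
 * tracer issues these as ordinary ptrace requests, e.g. cancelling the
 * pending syscall of a tracee stopped at a syscall-enter stop:
 *
 *	#include <sys/ptrace.h>
 *
 *	// request 23 == COMPAT_PTRACE_SET_SYSCALL; -1 cancels the syscall
 *	ptrace(23, pid, 0, (void *)-1L);
 */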

/* SPSR_ELx bits for exceptions taken from AArch32 */
#define PSR_AA32_MODE_MASK	0x0000001f
#define PSR_AA32_MODE_USR	0x00000010
#define PSR_AA32_MODE_FIQ	0x00000011
#define PSR_AA32_MODE_IRQ	0x00000012
#define PSR_AA32_MODE_SVC	0x00000013
#define PSR_AA32_MODE_ABT	0x00000017
#define PSR_AA32_MODE_HYP	0x0000001a
#define PSR_AA32_MODE_UND	0x0000001b
#define PSR_AA32_MODE_SYS	0x0000001f
#define PSR_AA32_T_BIT		0x00000020
#define PSR_AA32_F_BIT		0x00000040
#define PSR_AA32_I_BIT		0x00000080
#define PSR_AA32_A_BIT		0x00000100
#define PSR_AA32_E_BIT		0x00000200
#define PSR_AA32_SSBS_BIT	0x00800000
#define PSR_AA32_DIT_BIT	0x01000000
#define PSR_AA32_Q_BIT		0x08000000
#define PSR_AA32_V_BIT		0x10000000
#define PSR_AA32_C_BIT		0x20000000
#define PSR_AA32_Z_BIT		0x40000000
#define PSR_AA32_N_BIT		0x80000000
#define PSR_AA32_IT_MASK	0x0600fc00	/* If-Then execution state mask */
#define PSR_AA32_GE_MASK	0x000f0000

#ifdef CONFIG_CPU_BIG_ENDIAN
#define PSR_AA32_ENDSTATE	PSR_AA32_E_BIT
#else
#define PSR_AA32_ENDSTATE	0
#endif
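
/*
 * Illustrative sketch (not part of this header): given the saved SPSR of
 * an exception taken from AArch32, the mode field and Thumb state can be
 * decoded as:
 *
 *	bool from_user  = (spsr & PSR_AA32_MODE_MASK) == PSR_AA32_MODE_USR;
 *	bool from_thumb = spsr & PSR_AA32_T_BIT;
 */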

/*
 * AArch32 CPSR bits, as seen in AArch32: DIT lives at bit 21 here, but at
 * bit 24 (PSR_AA32_DIT_BIT) in the SPSR_ELx view above, hence the
 * conversion helpers below.
 */
#define COMPAT_PSR_DIT_BIT	0x00200000

/*
 * These are 'magic' values for PTRACE_PEEKUSR that return info about where a
 * process is located in memory.
 */
#define COMPAT_PT_TEXT_ADDR		0x10000
#define COMPAT_PT_DATA_ADDR		0x10004
#define COMPAT_PT_TEXT_END_ADDR		0x10008
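
/*
 * Illustrative sketch (userspace, not part of this header): a 32-bit
 * tracer reads these pseudo-offsets with PTRACE_PEEKUSER, e.g.:
 *
 *	long text_addr = ptrace(PTRACE_PEEKUSER, pid, (void *)0x10000, 0);
 */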

/*
 * If pt_regs.syscallno == NO_SYSCALL, then the thread is not executing
 * a syscall -- i.e., its most recent entry into the kernel from
 * userspace was not via SVC, or otherwise a tracer cancelled the syscall.
 *
 * This must have the value -1, for ABI compatibility with ptrace etc.
 */
#define NO_SYSCALL (-1)

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>

/* sizeof(struct user) for AArch32 */
#define COMPAT_USER_SZ	296

/* Architecturally defined mapping between AArch32 and AArch64 registers */
#define compat_usr(x)	regs[(x)]
#define compat_fp	regs[11]
#define compat_sp	regs[13]
#define compat_lr	regs[14]
#define compat_sp_hyp	regs[15]
#define compat_lr_irq	regs[16]
#define compat_sp_irq	regs[17]
#define compat_lr_svc	regs[18]
#define compat_sp_svc	regs[19]
#define compat_lr_abt	regs[20]
#define compat_sp_abt	regs[21]
#define compat_lr_und	regs[22]
#define compat_sp_und	regs[23]
#define compat_r8_fiq	regs[24]
#define compat_r9_fiq	regs[25]
#define compat_r10_fiq	regs[26]
#define compat_r11_fiq	regs[27]
#define compat_r12_fiq	regs[28]
#define compat_sp_fiq	regs[29]
#define compat_lr_fiq	regs[30]
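
/*
 * Illustrative sketch (not part of this header): these token-paste onto a
 * struct pt_regs member access, so for a compat task's saved registers:
 *
 *	unsigned long sp32 = regs->compat_sp;	// expands to regs->regs[13]
 */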

static inline unsigned long compat_psr_to_pstate(const unsigned long psr)
{
	unsigned long pstate;

	pstate = psr & ~COMPAT_PSR_DIT_BIT;

	if (psr & COMPAT_PSR_DIT_BIT)
		pstate |= PSR_AA32_DIT_BIT;

	return pstate;
}

static inline unsigned long pstate_to_compat_psr(const unsigned long pstate)
{
	unsigned long psr;

	psr = pstate & ~PSR_AA32_DIT_BIT;

	if (pstate & PSR_AA32_DIT_BIT)
		psr |= COMPAT_PSR_DIT_BIT;

	return psr;
}
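
/*
 * Illustrative sketch (not part of this header): the two helpers above
 * are inverses, so a DIT flag set by 32-bit userspace survives a round
 * trip through the SPSR_ELx layout:
 *
 *	unsigned long cpsr = 0x00200010;	// USR mode, DIT set
 *
 *	WARN_ON(pstate_to_compat_psr(compat_psr_to_pstate(cpsr)) != cpsr);
 */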

/*
 * This struct defines the way the registers are stored on the stack during an
 * exception. Note that sizeof(struct pt_regs) has to be a multiple of 16 (for
 * stack alignment). struct user_pt_regs must form a prefix of struct pt_regs.
 */
struct pt_regs {
	union {
		struct user_pt_regs user_regs;
		struct {
			u64 regs[31];
			u64 sp;
			u64 pc;
			u64 pstate;
		};
	};
	u64 orig_x0;
	/*
	 * syscallno is semantically an int (and must be -1 == NO_SYSCALL when
	 * no syscall is in progress). It is laid out endian-dependently so
	 * that the 64-bit store in the entry code maps the syscall number
	 * onto the low 32 bits.
	 */
#ifdef __AARCH64EB__
	u32 unused2;
	s32 syscallno;
#else
	s32 syscallno;
	u32 unused2;
#endif

	u64 orig_addr_limit;
	u64 unused;	// maintain 16 byte alignment
	/*
	 * The frame record embedded here gives the unwinder a pt_regs at a
	 * fixed offset from the frame pointer passed to exception handlers.
	 */
	u64 stackframe[2];
};
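
/*
 * Illustrative sketch (not part of this header): because struct
 * user_pt_regs forms a prefix of struct pt_regs, ptrace can expose the
 * user-visible registers by aliasing, e.g.:
 *
 *	struct user_pt_regs *ur = &regs->user_regs;	// same storage:
 *	ur->pc;						// regs->pc
 */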

static inline bool in_syscall(struct pt_regs const *regs)
{
	return regs->syscallno != NO_SYSCALL;
}

static inline void forget_syscall(struct pt_regs *regs)
{
	regs->syscallno = NO_SYSCALL;
}
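
/*
 * Illustrative sketch (not part of this header): marking the thread as no
 * longer in a syscall is just:
 *
 *	forget_syscall(regs);
 *	WARN_ON(in_syscall(regs));	// syscallno is now NO_SYSCALL (-1)
 */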

#define MAX_REG_OFFSET offsetof(struct pt_regs, pstate)

#define arch_has_single_step()	(1)

#ifdef CONFIG_COMPAT
#define compat_thumb_mode(regs) \
	(((regs)->pstate & PSR_AA32_T_BIT))
#else
#define compat_thumb_mode(regs) (0)
#endif

#define user_mode(regs)	\
	(((regs)->pstate & PSR_MODE_MASK) == PSR_MODE_EL0t)

#define compat_user_mode(regs)	\
	(((regs)->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) == \
	 (PSR_MODE32_BIT | PSR_MODE_EL0t))

#define processor_mode(regs) \
	((regs)->pstate & PSR_MODE_MASK)

#define interrupts_enabled(regs) \
	(!((regs)->pstate & PSR_I_BIT))

#define fast_interrupts_enabled(regs) \
	(!((regs)->pstate & PSR_F_BIT))
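
/*
 * Illustrative sketch (not part of this header): fault handlers commonly
 * branch on these predicates, e.g.:
 *
 *	if (user_mode(regs))
 *		deliver_signal_to_task();	// hypothetical helper
 *	else
 *		die("kernel fault", regs, 0);
 */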

#define GET_USP(regs) \
	(!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp)

#define SET_USP(ptregs, value) \
	(!compat_user_mode(ptregs) ? ((ptregs)->sp = value) : ((ptregs)->compat_sp = value))

extern int regs_query_register_offset(const char *name);
extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
					       unsigned int n);

/**
 * regs_get_register() - get register value from its offset
 * @regs:	pt_regs from which the register value is read
 * @offset:	offset of the register in struct pt_regs.
 *
 * regs_get_register() returns the value of the register whose offset from
 * @regs is @offset. If @offset is bigger than MAX_REG_OFFSET, this
 * returns 0.
 */
static inline u64 regs_get_register(struct pt_regs *regs, unsigned int offset)
{
	u64 val = 0;

	WARN_ON(offset & 7);

	offset >>= 3;
	switch (offset) {
	case 0 ... 30:
		val = regs->regs[offset];
		break;
	case offsetof(struct pt_regs, sp) >> 3:
		val = regs->sp;
		break;
	case offsetof(struct pt_regs, pc) >> 3:
		val = regs->pc;
		break;
	case offsetof(struct pt_regs, pstate) >> 3:
		val = regs->pstate;
		break;
	default:
		val = 0;
	}

	return val;
}
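
/*
 * Illustrative sketch (not part of this header): a kprobe handler might
 * fetch the second argument register (x1) of the probed function as:
 *
 *	u64 arg1 = regs_get_register(regs, offsetof(struct pt_regs, regs[1]));
 */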

/*
 * Read a register given an architectural register index r.
 * This handles the common case where 31 means XZR, not SP.
 */
static inline unsigned long pt_regs_read_reg(const struct pt_regs *regs, int r)
{
	return (r == 31) ? 0 : regs->regs[r];
}

/*
 * Write a register given an architectural register index r.
 * This handles the common case where 31 means XZR, not SP.
 */
static inline void pt_regs_write_reg(struct pt_regs *regs, int r,
				     unsigned long val)
{
	if (r != 31)
		regs->regs[r] = val;
}
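
/*
 * Illustrative sketch (not part of this header): instruction emulation
 * uses this pair so that a destination of XZR is silently discarded, e.g.
 * when emulating a trapped "mrs <Xt>, <sysreg>" read:
 *
 *	pt_regs_write_reg(regs, rt, sysreg_value);	// no-op if rt == 31
 */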

/* Valid only for Kernel mode traps. */
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
	return regs->sp;
}

static inline unsigned long regs_return_value(struct pt_regs *regs)
{
	return regs->regs[0];
}

/* We must avoid circular header include via sched.h */
struct task_struct;
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task);

#define GET_IP(regs)		((unsigned long)(regs)->pc)
#define SET_IP(regs, value)	((regs)->pc = ((u64) (value)))

#define GET_FP(ptregs)		((unsigned long)(ptregs)->regs[29])
#define SET_FP(ptregs, value)	((ptregs)->regs[29] = ((u64) (value)))

#include <asm-generic/ptrace.h>

#define procedure_link_pointer(regs)	((regs)->regs[30])

static inline void procedure_link_pointer_set(struct pt_regs *regs,
					      unsigned long val)
{
	procedure_link_pointer(regs) = val;
}
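
/*
 * Illustrative sketch (not part of this header): return-probe style code
 * can hijack the saved link register through this accessor, e.g.:
 *
 *	orig_ret = procedure_link_pointer(regs);
 *	procedure_link_pointer_set(regs, (unsigned long)tramp);	// hypothetical trampoline
 */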

#undef profile_pc
extern unsigned long profile_pc(struct pt_regs *regs);

#endif /* __ASSEMBLY__ */
#endif