/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/m68k/kernel/ptrace.c"
 *  Copyright (C) 1994 by Hamish Macdonald
 *  Taken from linux/kernel/ptrace.c and modified for M680x0.
 *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
 *
 * Modified by Cort Dougan (cort@hq.fsmlabs.com)
 * and Paul Mackerras (paulus@samba.org).
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file README.legal in the main directory of
 * this archive for more details.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/seccomp.h>
#include <linux/audit.h>
#include <trace/syscall.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/context_tracking.h>
#include <linux/nospec.h>

#include <linux/uaccess.h>
#include <linux/pkeys.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/asm-prototypes.h>
#include <asm/debug.h>
#include <asm/hw_breakpoint.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * The parameter save area on the stack is used to store arguments being passed
 * to the callee function and is located at a fixed offset from the stack pointer.
 */
#ifdef CONFIG_PPC32
#define PARAMETER_SAVE_AREA_OFFSET	24  /* bytes */
#else /* CONFIG_PPC32 */
#define PARAMETER_SAVE_AREA_OFFSET	48  /* bytes */
#endif

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define STR(s)	#s			/* convert to string */
#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define GPR_OFFSET_NAME(num)	\
	{.name = STR(r##num), .offset = offsetof(struct pt_regs, gpr[num])}, \
	{.name = STR(gpr##num), .offset = offsetof(struct pt_regs, gpr[num])}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

#define TVSO(f)	(offsetof(struct thread_vr_state, f))
#define TFSO(f)	(offsetof(struct thread_fp_state, f))
#define TSO(f)	(offsetof(struct thread_struct, f))

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	GPR_OFFSET_NAME(31),
	REG_OFFSET_NAME(nip),
	REG_OFFSET_NAME(msr),
	REG_OFFSET_NAME(ctr),
	REG_OFFSET_NAME(link),
	REG_OFFSET_NAME(xer),
	REG_OFFSET_NAME(ccr),
#ifdef CONFIG_PPC64
	REG_OFFSET_NAME(softe),
#else
	REG_OFFSET_NAME(mq),
#endif
	REG_OFFSET_NAME(trap),
	REG_OFFSET_NAME(dar),
	REG_OFFSET_NAME(dsisr),
	REG_OFFSET_END,
};

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static void flush_tmregs_to_thread(struct task_struct *tsk)
{
	/*
	 * If task is not current, it will have been flushed already to
	 * its thread_struct during __switch_to().
	 *
	 * A reclaim flushes ALL the state, or if not in TM it saves the
	 * TM SPRs in the appropriate thread structures from live.
	 */
	if ((!cpu_has_feature(CPU_FTR_TM)) || (tsk != current))
		return;

	if (MSR_TM_SUSPENDED(mfmsr())) {
		tm_reclaim_current(TM_CAUSE_SIGNAL);
	} else {
		tm_enable();
		tm_save_sprs(&(tsk->thread));
	}
}
#else
static inline void flush_tmregs_to_thread(struct task_struct *tsk) { }
#endif

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_query_register_name() - query register name from its offset
 * @offset:	the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL;
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;
	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}
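
/*
 * Illustrative usage (not part of the original file): kprobe/trace-event
 * fetch code can translate a register name into a pt_regs slot, e.g.
 *
 *	int off = regs_query_register_offset("nip");
 *	if (off >= 0)
 *		val = regs_get_register(regs, off);
 *
 * regs_get_register() is assumed to be the asm/ptrace.h accessor.
 */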

/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Set of msr bits that gdb can change on behalf of a process.
 */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
#define MSR_DEBUGCHANGE	0
#else
#define MSR_DEBUGCHANGE	(MSR_SE | MSR_BE)
#endif

/*
 * Max register writeable via put_reg
 */
#ifdef CONFIG_PPC32
#define PT_MAX_PUT_REG	PT_MQ
#else
#define PT_MAX_PUT_REG	PT_CCR
#endif

static unsigned long get_user_msr(struct task_struct *task)
{
	return task->thread.regs->msr | task->thread.fpexc_mode;
}

static int set_user_msr(struct task_struct *task, unsigned long msr)
{
	task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
	task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static unsigned long get_user_ckpt_msr(struct task_struct *task)
{
	return task->thread.ckpt_regs.msr | task->thread.fpexc_mode;
}

static int set_user_ckpt_msr(struct task_struct *task, unsigned long msr)
{
	task->thread.ckpt_regs.msr &= ~MSR_DEBUGCHANGE;
	task->thread.ckpt_regs.msr |= msr & MSR_DEBUGCHANGE;
	return 0;
}

static int set_user_ckpt_trap(struct task_struct *task, unsigned long trap)
{
	task->thread.ckpt_regs.trap = trap & 0xfff0;
	return 0;
}
#endif

#ifdef CONFIG_PPC64
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
	*data = task->thread.dscr;
	return 0;
}

static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
	task->thread.dscr = dscr;
	task->thread.dscr_inherit = 1;
	return 0;
}
#else
static int get_user_dscr(struct task_struct *task, unsigned long *data)
{
	return -EIO;
}

static int set_user_dscr(struct task_struct *task, unsigned long dscr)
{
	return -EIO;
}
#endif

/*
 * We prevent mucking around with the reserved area of trap
 * which is used internally by the kernel.
 */
static int set_user_trap(struct task_struct *task, unsigned long trap)
{
	task->thread.regs->trap = trap & 0xfff0;
	return 0;
}

/*
 * Get contents of register REGNO in task TASK.
 */
int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
{
	unsigned int regs_max;

	if ((task->thread.regs == NULL) || !data)
		return -EIO;

	if (regno == PT_MSR) {
		*data = get_user_msr(task);
		return 0;
	}

	if (regno == PT_DSCR)
		return get_user_dscr(task, data);

#ifdef CONFIG_PPC64
	/*
	 * softe copies paca->irq_soft_mask variable state. Since irq_soft_mask
	 * is no longer used as a flag, force userspace to always see the softe
	 * value as 1, which means interrupts are not soft-disabled.
	 */
	if (regno == PT_SOFTE) {
		*data = 1;
		return 0;
	}
#endif

	regs_max = sizeof(struct user_pt_regs) / sizeof(unsigned long);
	if (regno < regs_max) {
		regno = array_index_nospec(regno, regs_max);
		*data = ((unsigned long *)task->thread.regs)[regno];
		return 0;
	}

	return -EIO;
}

/*
 * Write contents of register REGNO in task TASK.
 */
int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
{
	if (task->thread.regs == NULL)
		return -EIO;

	if (regno == PT_MSR)
		return set_user_msr(task, data);
	if (regno == PT_TRAP)
		return set_user_trap(task, data);
	if (regno == PT_DSCR)
		return set_user_dscr(task, data);

	if (regno <= PT_MAX_PUT_REG) {
		regno = array_index_nospec(regno, PT_MAX_PUT_REG + 1);
		((unsigned long *)task->thread.regs)[regno] = data;
		return 0;
	}
	return -EIO;
}
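
/*
 * Illustrative note (not part of the original file): these helpers back the
 * PTRACE_PEEKUSER/PTRACE_POKEUSER requests, where userspace addresses the
 * register area in word-sized slots, roughly:
 *
 *	long nip = ptrace(PTRACE_PEEKUSER, pid,
 *			  (void *)(PT_NIP * sizeof(long)), NULL);
 *
 * which reaches ptrace_get_reg() with regno == PT_NIP.
 */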

static int gpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int i, ret;

	if (target->thread.regs == NULL)
		return -EIO;

	if (!FULL_REGS(target->thread.regs)) {
		/* We have a partial register set.  Fill 14-31 with bogus values */
		for (i = 14; i < 32; i++)
			target->thread.regs->gpr[i] = NV_REG_POISON;
	}

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.regs,
				  0, offsetof(struct pt_regs, msr));
	if (!ret) {
		unsigned long msr = get_user_msr(target);
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
					  offsetof(struct pt_regs, msr),
					  offsetof(struct pt_regs, msr) +
					  sizeof(msr));
	}

	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.regs->orig_gpr3,
					  offsetof(struct pt_regs, orig_gpr3),
					  sizeof(struct user_pt_regs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct user_pt_regs), -1);

	return ret;
}

static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	unsigned long reg;
	int ret;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.regs,
				 0, PT_MSR * sizeof(reg));

	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_MSR * sizeof(reg),
					 (PT_MSR + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_msr(target, reg);
	}

	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.regs->orig_gpr3,
					 PT_ORIG_R3 * sizeof(reg),
					 (PT_MAX_PUT_REG + 1) * sizeof(reg));

	if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_MAX_PUT_REG + 1) * sizeof(reg),
			PT_TRAP * sizeof(reg));

	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_TRAP * sizeof(reg),
					 (PT_TRAP + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_trap(target, reg);
	}

	if (!ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_TRAP + 1) * sizeof(reg), -1);

	return ret;
}
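
/*
 * Illustrative note (not part of the original file): gpr_get()/gpr_set()
 * are assumed to be wired into the NT_PRSTATUS regset, so a debugger would
 * typically reach them via
 *
 *	struct iovec iov = { &gpregs, sizeof(gpregs) };
 *	ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
 */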

/*
 * Regardless of transactions, 'fp_state' holds the current running
 * value of all FPR registers and 'ckfp_state' holds the last checkpointed
 * value of all FPR registers for the current transaction.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *	u64	fpr[32];
 *	u64	fpscr;
 * };
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
#ifdef CONFIG_VSX
	u64 buf[33];
	int i;

	flush_fp_to_thread(target);

	/* copy to local buffer then write that out */
	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.TS_FPR(i);
	buf[32] = target->thread.fp_state.fpscr;
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
#else
	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
		     offsetof(struct thread_fp_state, fpr[32]));

	flush_fp_to_thread(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fp_state, 0, -1);
#endif
}

/*
 * Regardless of transactions, 'fp_state' holds the current running
 * value of all FPR registers and 'ckfp_state' holds the last checkpointed
 * value of all FPR registers for the current transaction.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *	u64	fpr[32];
 *	u64	fpscr;
 * };
 */
static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
#ifdef CONFIG_VSX
	u64 buf[33];
	int i;

	flush_fp_to_thread(target);

	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.TS_FPR(i);
	buf[32] = target->thread.fp_state.fpscr;

	/* copy to local buffer then write that out */
	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
	if (i)
		return i;

	for (i = 0; i < 32 ; i++)
		target->thread.TS_FPR(i) = buf[i];
	target->thread.fp_state.fpscr = buf[32];
	return 0;
#else
	BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
		     offsetof(struct thread_fp_state, fpr[32]));

	flush_fp_to_thread(target);

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fp_state, 0, -1);
#endif
}
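
/*
 * Illustrative note (not part of the original file): fpr_get()/fpr_set()
 * are assumed to serve the NT_PRFPREG regset, so the layout described above
 * has to stay in sync with what userspace reads via PTRACE_GETREGSET with
 * NT_PRFPREG (or the legacy PTRACE_GETFPREGS request).
 */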

#ifdef CONFIG_ALTIVEC
/*
 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
 * The transfer totals 34 quadwords.  Quadwords 0-31 contain the
 * corresponding vector registers.  Quadword 32 contains the vscr as the
 * last word (offset 12) within that quadword.  Quadword 33 contains the
 * vrsave as the first word (offset 0) within the quadword.
 *
 * This definition of the VMX state is compatible with the current PPC32
 * ptrace interface.  This allows signal handling and ptrace to use the
 * same structures.  This also simplifies the implementation of a bi-arch
 * (combined 32- and 64-bit) gdb.
 */

static int vr_active(struct task_struct *target,
		     const struct user_regset *regset)
{
	flush_altivec_to_thread(target);
	return target->thread.used_vr ? regset->n : 0;
}

/*
 * Regardless of transactions, 'vr_state' holds the current running
 * value of all the VMX registers and 'ckvr_state' holds the last
 * checkpointed value of all the VMX registers for the current
 * transaction to fall back on in case it aborts.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *	vector128	vr[32];
 *	vector128	vscr;
 *	vector128	vrsave;
 * };
 */
static int vr_get(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  void *kbuf, void __user *ubuf)
{
	int ret;

	flush_altivec_to_thread(target);

	BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
		     offsetof(struct thread_vr_state, vr[32]));

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.vr_state, 0,
				  33 * sizeof(vector128));
	if (!ret) {
		/*
		 * Copy out only the low-order word of vrsave.
		 */
		int start, end;
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));

		vrsave.word = target->thread.vrsave;

		start = 33 * sizeof(vector128);
		end = start + sizeof(vrsave);
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
					  start, end);
	}

	return ret;
}

/*
 * Regardless of transactions, 'vr_state' holds the current running
 * value of all the VMX registers and 'ckvr_state' holds the last
 * checkpointed value of all the VMX registers for the current
 * transaction to fall back on in case it aborts.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *	vector128	vr[32];
 *	vector128	vscr;
 *	vector128	vrsave;
 * };
 */
static int vr_set(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;

	flush_altivec_to_thread(target);

	BUILD_BUG_ON(offsetof(struct thread_vr_state, vscr) !=
		     offsetof(struct thread_vr_state, vr[32]));

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.vr_state, 0,
				 33 * sizeof(vector128));
	if (!ret && count > 0) {
		/*
		 * We use only the first word of vrsave.
		 */
		int start, end;
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));

		vrsave.word = target->thread.vrsave;

		start = 33 * sizeof(vector128);
		end = start + sizeof(vrsave);
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
					 start, end);
		if (!ret)
			target->thread.vrsave = vrsave.word;
	}

	return ret;
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
/*
 * Currently to set and get all the vsx state, you need to call
 * the fp and VMX calls as well.  This only get/sets the lower 32
 * 128bit VSX registers.
 */

static int vsr_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	flush_vsx_to_thread(target);
	return target->thread.used_vsr ? regset->n : 0;
}

/*
 * Regardless of transactions, 'fp_state' holds the current running
 * value of all FPR registers and 'ckfp_state' holds the last
 * checkpointed value of all FPR registers for the current
 * transaction.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *	u64	vsx[32];
 * };
 */
static int vsr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	u64 buf[32];
	int ret, i;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);
	flush_vsx_to_thread(target);

	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  buf, 0, 32 * sizeof(double));

	return ret;
}

/*
 * Regardless of transactions, 'fp_state' holds the current running
 * value of all FPR registers and 'ckfp_state' holds the last
 * checkpointed value of all FPR registers for the current
 * transaction.
 *
 * Userspace interface buffer layout:
 *
 * struct data {
 *	u64	vsx[32];
 * };
 */
static int vsr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	u64 buf[32];
	int ret, i;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);
	flush_vsx_to_thread(target);

	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 buf, 0, 32 * sizeof(double));
	if (!ret)
		for (i = 0; i < 32 ; i++)
			target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];

	return ret;
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE

/*
 * For get_evrregs/set_evrregs functions 'data' has the following layout:
 *
 * struct {
 *   u32 evr[32];
 *   u64 acc;
 *   u32 spefscr;
 * }
 */

static int evr_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	flush_spe_to_thread(target);
	return target->thread.used_spe ? regset->n : 0;
}

static int evr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.evr,
				  0, sizeof(target->thread.evr));

	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.acc,
					  sizeof(target->thread.evr), -1);

	return ret;
}

static int evr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.evr,
				 0, sizeof(target->thread.evr));

	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.acc,
					 sizeof(target->thread.evr), -1);

	return ret;
}
#endif /* CONFIG_SPE */

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/**
 * tm_cgpr_active - get active number of registers in CGPR
 * @target:	The target task.
 * @regset:	The user regset structure.
 *
 * This function checks for the active number of available
 * registers in transaction checkpointed GPR category.
 */
static int tm_cgpr_active(struct task_struct *target,
			  const struct user_regset *regset)
{
	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return 0;

	return regset->n;
}

/**
 * tm_cgpr_get - get CGPR registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy from.
 * @ubuf:	User buffer to copy into.
 *
 * This function gets transaction checkpointed GPR registers.
 *
 * When the transaction is active, 'ckpt_regs' holds all the checkpointed
 * GPR register values for the current transaction to fall back on if it
 * aborts in between. This function gets those checkpointed GPR registers.
 * The userspace interface buffer layout is as follows.
 *
 * struct data {
 *	struct pt_regs ckpt_regs;
 * };
 */
static int tm_cgpr_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	int ret;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.ckpt_regs,
				  0, offsetof(struct pt_regs, msr));
	if (!ret) {
		unsigned long msr = get_user_ckpt_msr(target);

		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
					  offsetof(struct pt_regs, msr),
					  offsetof(struct pt_regs, msr) +
					  sizeof(msr));
	}

	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.ckpt_regs.orig_gpr3,
					  offsetof(struct pt_regs, orig_gpr3),
					  sizeof(struct user_pt_regs));
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct user_pt_regs), -1);

	return ret;
}

/*
 * tm_cgpr_set - set the CGPR registers
 * @target:	The target task.
 * @regset:	The user regset structure.
 * @pos:	The buffer position.
 * @count:	Number of bytes to copy.
 * @kbuf:	Kernel buffer to copy into.
 * @ubuf:	User buffer to copy from.
 *
 * This function sets in transaction checkpointed GPR registers.
 *
 * When the transaction is active, 'ckpt_regs' holds the checkpointed
 * GPR register values for the current transaction to fall back on if it
 * aborts in between. This function sets those checkpointed GPR registers.
 * The userspace interface buffer layout is as follows.
 *
 * struct data {
 *	struct pt_regs ckpt_regs;
 * };
 */
static int tm_cgpr_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned long reg;
	int ret;

	if (!cpu_has_feature(CPU_FTR_TM))
		return -ENODEV;

	if (!MSR_TM_ACTIVE(target->thread.regs->msr))
		return -ENODATA;

	flush_tmregs_to_thread(target);
	flush_fp_to_thread(target);
	flush_altivec_to_thread(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.ckpt_regs,
				 0, PT_MSR * sizeof(reg));

	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_MSR * sizeof(reg),
					 (PT_MSR + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_ckpt_msr(target, reg);
	}

	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.ckpt_regs.orig_gpr3,
					 PT_ORIG_R3 * sizeof(reg),
					 (PT_MAX_PUT_REG + 1) * sizeof(reg));

	if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_MAX_PUT_REG + 1) * sizeof(reg),
			PT_TRAP * sizeof(reg));

	if (!ret && count > 0) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_TRAP * sizeof(reg),
					 (PT_TRAP + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_ckpt_trap(target, reg);
	}

	if (!ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_TRAP + 1) * sizeof(reg), -1);

	return ret;
}
|
2016-07-28 09:57:37 +07:00
|
|
|
|
|
|
|
/**
|
|
|
|
* tm_cfpr_active - get active number of registers in CFPR
|
|
|
|
* @target: The target task.
|
|
|
|
* @regset: The user regset structure.
|
|
|
|
*
|
|
|
|
* This function checks for the active number of available
|
|
|
|
* registers in the transaction checkpointed FPR category.
|
|
|
|
*/
|
|
|
|
static int tm_cfpr_active(struct task_struct *target,
|
|
|
|
const struct user_regset *regset)
|
|
|
|
{
|
|
|
|
if (!cpu_has_feature(CPU_FTR_TM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return regset->n;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* tm_cfpr_get - get CFPR registers
|
|
|
|
* @target: The target task.
|
|
|
|
* @regset: The user regset structure.
|
|
|
|
* @pos: The buffer position.
|
|
|
|
* @count: Number of bytes to copy.
|
|
|
|
* @kbuf: Kernel buffer to copy from.
|
|
|
|
* @ubuf: User buffer to copy into.
|
|
|
|
*
|
|
|
|
* This function gets the in-transaction checkpointed FPR registers.
|
|
|
|
*
|
2016-09-23 13:18:25 +07:00
|
|
|
* When the transaction is active 'ckfp_state' holds the checkpointed
|
2016-07-28 09:57:37 +07:00
|
|
|
* values for the current transaction to fall back on if it aborts
|
|
|
|
* in between. This function gets those checkpointed FPR registers.
|
|
|
|
* The userspace interface buffer layout is as follows.
|
|
|
|
*
|
|
|
|
* struct data {
|
|
|
|
* u64 fpr[32];
|
|
|
|
* u64 fpscr;
|
|
|
|
*};
|
|
|
|
*/
|
|
|
|
static int tm_cfpr_get(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
void *kbuf, void __user *ubuf)
|
|
|
|
{
|
|
|
|
u64 buf[33];
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (!cpu_has_feature(CPU_FTR_TM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
|
|
|
return -ENODATA;
|
|
|
|
|
2016-09-23 13:18:24 +07:00
|
|
|
flush_tmregs_to_thread(target);
|
2016-07-28 09:57:37 +07:00
|
|
|
flush_fp_to_thread(target);
|
|
|
|
flush_altivec_to_thread(target);
|
|
|
|
|
|
|
|
/* copy to local buffer then write that out */
|
|
|
|
for (i = 0; i < 32 ; i++)
|
2016-09-23 13:18:25 +07:00
|
|
|
buf[i] = target->thread.TS_CKFPR(i);
|
|
|
|
buf[32] = target->thread.ckfp_state.fpscr;
|
2016-07-28 09:57:37 +07:00
|
|
|
return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* tm_cfpr_set - set CFPR registers
|
|
|
|
* @target: The target task.
|
|
|
|
* @regset: The user regset structure.
|
|
|
|
* @pos: The buffer position.
|
|
|
|
* @count: Number of bytes to copy.
|
|
|
|
* @kbuf: Kernel buffer to copy into.
|
|
|
|
* @ubuf: User buffer to copy from.
|
|
|
|
*
|
|
|
|
* This function sets the in-transaction checkpointed FPR registers.
|
|
|
|
*
|
2016-09-23 13:18:25 +07:00
|
|
|
* When the transaction is active 'ckfp_state' holds the checkpointed
|
2016-07-28 09:57:37 +07:00
|
|
|
* FPR register values for the current transaction to fall back on
|
|
|
|
* if it aborts in between. This function sets these checkpointed
|
|
|
|
* FPR registers. The userspace interface buffer layout is as follows.
|
|
|
|
*
|
|
|
|
* struct data {
|
|
|
|
* u64 fpr[32];
|
|
|
|
* u64 fpscr;
|
|
|
|
*};
|
|
|
|
*/
|
|
|
|
static int tm_cfpr_set(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
const void *kbuf, const void __user *ubuf)
|
|
|
|
{
|
|
|
|
u64 buf[33];
|
|
|
|
int i;
|
|
|
|
|
|
|
|
if (!cpu_has_feature(CPU_FTR_TM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
|
|
|
return -ENODATA;
|
|
|
|
|
2016-09-23 13:18:24 +07:00
|
|
|
flush_tmregs_to_thread(target);
|
2016-07-28 09:57:37 +07:00
|
|
|
flush_fp_to_thread(target);
|
|
|
|
flush_altivec_to_thread(target);
|
|
|
|
|
2017-01-05 23:50:57 +07:00
|
|
|
for (i = 0; i < 32; i++)
|
|
|
|
buf[i] = target->thread.TS_CKFPR(i);
|
|
|
|
buf[32] = target->thread.ckfp_state.fpscr;
|
|
|
|
|
2016-07-28 09:57:37 +07:00
|
|
|
/* copy to local buffer then write that out */
|
|
|
|
i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
|
|
|
|
if (i)
|
|
|
|
return i;
|
|
|
|
for (i = 0; i < 32 ; i++)
|
2016-09-23 13:18:25 +07:00
|
|
|
target->thread.TS_CKFPR(i) = buf[i];
|
|
|
|
target->thread.ckfp_state.fpscr = buf[32];
|
2016-07-28 09:57:37 +07:00
|
|
|
return 0;
|
|
|
|
}
|
2016-07-28 09:57:38 +07:00
|
|
|
|
|
|
|
/**
|
|
|
|
* tm_cvmx_active - get active number of registers in CVMX
|
|
|
|
* @target: The target task.
|
|
|
|
* @regset: The user regset structure.
|
|
|
|
*
|
|
|
|
* This function checks for the active number of available
|
|
|
|
* registers in the transaction checkpointed VMX category.
|
|
|
|
*/
|
|
|
|
static int tm_cvmx_active(struct task_struct *target,
|
|
|
|
const struct user_regset *regset)
|
|
|
|
{
|
|
|
|
if (!cpu_has_feature(CPU_FTR_TM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return regset->n;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* tm_cvmx_get - get CVMX registers
|
|
|
|
* @target: The target task.
|
|
|
|
* @regset: The user regset structure.
|
|
|
|
* @pos: The buffer position.
|
|
|
|
* @count: Number of bytes to copy.
|
|
|
|
* @kbuf: Kernel buffer to copy from.
|
|
|
|
* @ubuf: User buffer to copy into.
|
|
|
|
*
|
|
|
|
* This function gets the in-transaction checkpointed VMX registers.
|
|
|
|
*
|
2016-09-23 13:18:25 +07:00
|
|
|
* When the transaction is active 'ckvr_state' and 'ckvrsave' hold
|
2016-07-28 09:57:38 +07:00
|
|
|
* the checkpointed values for the current transaction to fall
|
|
|
|
* back on if it aborts in between. The userspace interface buffer
|
|
|
|
* layout is as follows.
|
|
|
|
*
|
|
|
|
* struct data {
|
|
|
|
* vector128 vr[32];
|
|
|
|
* vector128 vscr;
|
|
|
|
* vector128 vrsave;
|
|
|
|
*};
|
|
|
|
*/
|
|
|
|
static int tm_cvmx_get(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
void *kbuf, void __user *ubuf)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
|
|
|
|
|
|
|
|
if (!cpu_has_feature(CPU_FTR_TM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
|
|
|
return -ENODATA;
|
|
|
|
|
|
|
|
/* Flush the state */
|
2016-09-23 13:18:24 +07:00
|
|
|
flush_tmregs_to_thread(target);
|
2016-07-28 09:57:38 +07:00
|
|
|
flush_fp_to_thread(target);
|
|
|
|
flush_altivec_to_thread(target);
|
|
|
|
|
|
|
|
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
2016-09-23 13:18:25 +07:00
|
|
|
&target->thread.ckvr_state, 0,
|
2016-07-28 09:57:38 +07:00
|
|
|
33 * sizeof(vector128));
|
|
|
|
if (!ret) {
|
|
|
|
/*
|
|
|
|
* Copy out only the low-order word of vrsave.
|
|
|
|
*/
|
|
|
|
union {
|
|
|
|
elf_vrreg_t reg;
|
|
|
|
u32 word;
|
|
|
|
} vrsave;
|
|
|
|
memset(&vrsave, 0, sizeof(vrsave));
|
2016-09-23 13:18:25 +07:00
|
|
|
vrsave.word = target->thread.ckvrsave;
|
2016-07-28 09:57:38 +07:00
|
|
|
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
|
|
|
|
33 * sizeof(vector128), -1);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
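/*
 * Illustrative sketch (not part of this file) of the 34-slot buffer the
 * comment above describes: vr[0..31], then VSCR in its own vector128
 * slot, then a final slot whose first 32-bit word carries VRSAVE while
 * the remainder is zero-filled, mirroring the union used in
 * tm_cvmx_get() above and tm_cvmx_set() below. The struct and helper
 * names are hypothetical; this assumes <sys/ptrace.h>, <sys/uio.h> and
 * an <elf.h> that defines NT_PPC_TM_CVMX.
 */
#if 0	/* example only, never compiled as part of the kernel */
#include <stdint.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <elf.h>

struct tm_cvmx_dump {
	uint8_t  vr[32][16];	/* checkpointed VR0-VR31 */
	uint8_t  vscr[16];	/* checkpointed VSCR */
	uint32_t vrsave_word;	/* low-order word of the vrsave slot */
	uint8_t  vrsave_pad[12];
};

static long read_tm_ckpt_vmx(pid_t pid, struct tm_cvmx_dump *d)
{
	struct iovec iov = { .iov_base = d, .iov_len = sizeof(*d) };

	return ptrace(PTRACE_GETREGSET, pid,
		      (void *)(long)NT_PPC_TM_CVMX, &iov);
}
#endif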
|
|
|
|
|
|
|
|
/**
|
|
|
|
* tm_cvmx_set - set CVMX registers
|
|
|
|
* @target: The target task.
|
|
|
|
* @regset: The user regset structure.
|
|
|
|
* @pos: The buffer position.
|
|
|
|
* @count: Number of bytes to copy.
|
|
|
|
* @kbuf: Kernel buffer to copy into.
|
|
|
|
* @ubuf: User buffer to copy from.
|
|
|
|
*
|
|
|
|
* This function sets the in-transaction checkpointed VMX registers.
|
|
|
|
*
|
2016-09-23 13:18:25 +07:00
|
|
|
* When the transaction is active 'ckvr_state' and 'ckvrsave' hold
|
2016-07-28 09:57:38 +07:00
|
|
|
* the checkpointed values for the current transaction to fall
|
|
|
|
* back on if it aborts in between. The userspace interface buffer
|
|
|
|
* layout is as follows.
|
|
|
|
*
|
|
|
|
* struct data {
|
|
|
|
* vector128 vr[32];
|
|
|
|
* vector128 vscr;
|
|
|
|
* vector128 vrsave;
|
|
|
|
*};
|
|
|
|
*/
|
|
|
|
static int tm_cvmx_set(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
const void *kbuf, const void __user *ubuf)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
BUILD_BUG_ON(TVSO(vscr) != TVSO(vr[32]));
|
|
|
|
|
|
|
|
if (!cpu_has_feature(CPU_FTR_TM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
|
|
|
return -ENODATA;
|
|
|
|
|
2016-09-23 13:18:24 +07:00
|
|
|
flush_tmregs_to_thread(target);
|
2016-07-28 09:57:38 +07:00
|
|
|
flush_fp_to_thread(target);
|
|
|
|
flush_altivec_to_thread(target);
|
|
|
|
|
|
|
|
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
2016-09-23 13:18:25 +07:00
|
|
|
&target->thread.ckvr_state, 0,
|
2016-07-28 09:57:38 +07:00
|
|
|
33 * sizeof(vector128));
|
|
|
|
if (!ret && count > 0) {
|
|
|
|
/*
|
|
|
|
* We use only the low-order word of vrsave.
|
|
|
|
*/
|
|
|
|
union {
|
|
|
|
elf_vrreg_t reg;
|
|
|
|
u32 word;
|
|
|
|
} vrsave;
|
|
|
|
memset(&vrsave, 0, sizeof(vrsave));
|
2016-09-23 13:18:25 +07:00
|
|
|
vrsave.word = target->thread.ckvrsave;
|
2016-07-28 09:57:38 +07:00
|
|
|
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
|
|
|
|
33 * sizeof(vector128), -1);
|
|
|
|
if (!ret)
|
2016-09-23 13:18:25 +07:00
|
|
|
target->thread.ckvrsave = vrsave.word;
|
2016-07-28 09:57:38 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
2016-07-28 09:57:39 +07:00
|
|
|
|
|
|
|
/**
|
|
|
|
* tm_cvsx_active - get active number of registers in CVSX
|
|
|
|
* @target: The target task.
|
|
|
|
* @regset: The user regset structure.
|
|
|
|
*
|
|
|
|
* This function checks for the active number of available
|
|
|
|
* registers in the transaction checkpointed VSX category.
|
|
|
|
*/
|
|
|
|
static int tm_cvsx_active(struct task_struct *target,
|
|
|
|
const struct user_regset *regset)
|
|
|
|
{
|
|
|
|
if (!cpu_has_feature(CPU_FTR_TM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
flush_vsx_to_thread(target);
|
|
|
|
return target->thread.used_vsr ? regset->n : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* tm_cvsx_get - get CVSX registers
|
|
|
|
* @target: The target task.
|
|
|
|
* @regset: The user regset structure.
|
|
|
|
* @pos: The buffer position.
|
|
|
|
* @count: Number of bytes to copy.
|
|
|
|
* @kbuf: Kernel buffer to copy from.
|
|
|
|
* @ubuf: User buffer to copy into.
|
|
|
|
*
|
|
|
|
* This function gets the in-transaction checkpointed VSX registers.
|
|
|
|
*
|
2016-09-23 13:18:25 +07:00
|
|
|
* When the transaction is active 'ckfp_state' holds the checkpointed
|
2016-07-28 09:57:39 +07:00
|
|
|
* values for the current transaction to fall back on if it aborts
|
|
|
|
* in between. This function gets those checkpointed VSX registers.
|
|
|
|
* The userspace interface buffer layout is as follows.
|
|
|
|
*
|
|
|
|
* struct data {
|
|
|
|
* u64 vsx[32];
|
|
|
|
*};
|
|
|
|
*/
|
|
|
|
static int tm_cvsx_get(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
void *kbuf, void __user *ubuf)
|
|
|
|
{
|
|
|
|
u64 buf[32];
|
|
|
|
int ret, i;
|
|
|
|
|
|
|
|
if (!cpu_has_feature(CPU_FTR_TM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
|
|
|
return -ENODATA;
|
|
|
|
|
|
|
|
/* Flush the state */
|
2016-09-23 13:18:24 +07:00
|
|
|
flush_tmregs_to_thread(target);
|
2016-07-28 09:57:39 +07:00
|
|
|
flush_fp_to_thread(target);
|
|
|
|
flush_altivec_to_thread(target);
|
|
|
|
flush_vsx_to_thread(target);
|
|
|
|
|
|
|
|
for (i = 0; i < 32 ; i++)
|
2016-09-23 13:18:25 +07:00
|
|
|
buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
|
2016-07-28 09:57:39 +07:00
|
|
|
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
|
|
|
buf, 0, 32 * sizeof(double));
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* tm_cvsx_set - set CVSX registers
|
|
|
|
* @target: The target task.
|
|
|
|
* @regset: The user regset structure.
|
|
|
|
* @pos: The buffer position.
|
|
|
|
* @count: Number of bytes to copy.
|
|
|
|
* @kbuf: Kernel buffer to copy into.
|
|
|
|
* @ubuf: User buffer to copy from.
|
|
|
|
*
|
|
|
|
* This function sets the in-transaction checkpointed VSX registers.
|
|
|
|
*
|
2016-09-23 13:18:25 +07:00
|
|
|
* When the transaction is active 'ckfp_state' holds the checkpointed
|
2016-07-28 09:57:39 +07:00
|
|
|
* VSX register values for the current transaction to fall back on
|
|
|
|
* if it aborts in between. This function sets these checkpointed
|
|
|
|
* VSX registers. The userspace interface buffer layout is as follows.
|
|
|
|
*
|
|
|
|
* struct data {
|
|
|
|
* u64 vsx[32];
|
|
|
|
*};
|
|
|
|
*/
|
|
|
|
static int tm_cvsx_set(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
const void *kbuf, const void __user *ubuf)
|
|
|
|
{
|
|
|
|
u64 buf[32];
|
|
|
|
int ret, i;
|
|
|
|
|
|
|
|
if (!cpu_has_feature(CPU_FTR_TM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
|
|
|
return -ENODATA;
|
|
|
|
|
|
|
|
/* Flush the state */
|
2016-09-23 13:18:24 +07:00
|
|
|
flush_tmregs_to_thread(target);
|
2016-07-28 09:57:39 +07:00
|
|
|
flush_fp_to_thread(target);
|
|
|
|
flush_altivec_to_thread(target);
|
|
|
|
flush_vsx_to_thread(target);
|
|
|
|
|
2017-01-05 23:50:57 +07:00
|
|
|
for (i = 0; i < 32 ; i++)
|
|
|
|
buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
|
|
|
|
|
2016-07-28 09:57:39 +07:00
|
|
|
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
|
|
|
buf, 0, 32 * sizeof(double));
|
2016-09-23 13:18:24 +07:00
|
|
|
if (!ret)
|
|
|
|
for (i = 0; i < 32 ; i++)
|
2016-09-23 13:18:25 +07:00
|
|
|
target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
|
2016-07-28 09:57:39 +07:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
2016-07-28 09:57:40 +07:00
|
|
|
|
|
|
|
/**
|
|
|
|
* tm_spr_active - get active number of registers in TM SPR
|
|
|
|
* @target: The target task.
|
|
|
|
* @regset: The user regset structure.
|
|
|
|
*
|
|
|
|
* This function checks the active number of available
|
|
|
|
* registers in the transactional memory SPR category.
|
|
|
|
*/
|
|
|
|
static int tm_spr_active(struct task_struct *target,
|
|
|
|
const struct user_regset *regset)
|
|
|
|
{
|
|
|
|
if (!cpu_has_feature(CPU_FTR_TM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
return regset->n;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* tm_spr_get - get the TM related SPR registers
|
|
|
|
* @target: The target task.
|
|
|
|
* @regset: The user regset structure.
|
|
|
|
* @pos: The buffer position.
|
|
|
|
* @count: Number of bytes to copy.
|
|
|
|
* @kbuf: Kernel buffer to copy from.
|
|
|
|
* @ubuf: User buffer to copy into.
|
|
|
|
*
|
|
|
|
* This function gets transactional memory related SPR registers.
|
|
|
|
* The userspace interface buffer layout is as follows.
|
|
|
|
*
|
|
|
|
* struct {
|
|
|
|
* u64 tm_tfhar;
|
|
|
|
* u64 tm_texasr;
|
|
|
|
* u64 tm_tfiar;
|
|
|
|
* };
|
|
|
|
*/
|
|
|
|
static int tm_spr_get(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
void *kbuf, void __user *ubuf)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/* Build tests */
|
|
|
|
BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
|
|
|
|
BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
|
|
|
|
BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
|
|
|
|
|
|
|
|
if (!cpu_has_feature(CPU_FTR_TM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
/* Flush the states */
|
2016-09-23 13:18:24 +07:00
|
|
|
flush_tmregs_to_thread(target);
|
2016-07-28 09:57:40 +07:00
|
|
|
flush_fp_to_thread(target);
|
|
|
|
flush_altivec_to_thread(target);
|
|
|
|
|
|
|
|
/* TFHAR register */
|
|
|
|
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.tm_tfhar, 0, sizeof(u64));
|
|
|
|
|
|
|
|
/* TEXASR register */
|
|
|
|
if (!ret)
|
|
|
|
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.tm_texasr, sizeof(u64),
|
|
|
|
2 * sizeof(u64));
|
|
|
|
|
|
|
|
/* TFIAR register */
|
|
|
|
if (!ret)
|
|
|
|
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.tm_tfiar,
|
|
|
|
2 * sizeof(u64), 3 * sizeof(u64));
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* tm_spr_set - set the TM related SPR registers
|
|
|
|
* @target: The target task.
|
|
|
|
* @regset: The user regset structure.
|
|
|
|
* @pos: The buffer position.
|
|
|
|
* @count: Number of bytes to copy.
|
|
|
|
* @kbuf: Kernel buffer to copy into.
|
|
|
|
* @ubuf: User buffer to copy from.
|
|
|
|
*
|
|
|
|
* This function sets transactional memory related SPR registers.
|
|
|
|
* The userspace interface buffer layout is as follows.
|
|
|
|
*
|
|
|
|
* struct {
|
|
|
|
* u64 tm_tfhar;
|
|
|
|
* u64 tm_texasr;
|
|
|
|
* u64 tm_tfiar;
|
|
|
|
* };
|
|
|
|
*/
|
|
|
|
static int tm_spr_set(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
const void *kbuf, const void __user *ubuf)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/* Build tests */
|
|
|
|
BUILD_BUG_ON(TSO(tm_tfhar) + sizeof(u64) != TSO(tm_texasr));
|
|
|
|
BUILD_BUG_ON(TSO(tm_texasr) + sizeof(u64) != TSO(tm_tfiar));
|
|
|
|
BUILD_BUG_ON(TSO(tm_tfiar) + sizeof(u64) != TSO(ckpt_regs));
|
|
|
|
|
|
|
|
if (!cpu_has_feature(CPU_FTR_TM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
/* Flush the states */
|
2016-09-23 13:18:24 +07:00
|
|
|
flush_tmregs_to_thread(target);
|
2016-07-28 09:57:40 +07:00
|
|
|
flush_fp_to_thread(target);
|
|
|
|
flush_altivec_to_thread(target);
|
|
|
|
|
|
|
|
/* TFHAR register */
|
|
|
|
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.tm_tfhar, 0, sizeof(u64));
|
|
|
|
|
|
|
|
/* TEXASR register */
|
|
|
|
if (!ret)
|
|
|
|
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.tm_texasr, sizeof(u64),
|
|
|
|
2 * sizeof(u64));
|
|
|
|
|
|
|
|
/* TFIAR register */
|
|
|
|
if (!ret)
|
|
|
|
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.tm_tfiar,
|
|
|
|
2 * sizeof(u64), 3 * sizeof(u64));
|
|
|
|
return ret;
|
|
|
|
}
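/*
 * Illustrative sketch (not part of this file): NT_PPC_TM_SPR is three
 * u64s in the order TFHAR, TEXASR, TFIAR, exactly as the copyout and
 * copyin offsets above walk thread_struct. The struct and helper names
 * are hypothetical; this assumes <sys/ptrace.h>, <sys/uio.h> and an
 * <elf.h> that defines NT_PPC_TM_SPR.
 */
#if 0	/* example only, never compiled as part of the kernel */
#include <stdint.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <elf.h>

struct tm_sprs {
	uint64_t tfhar;		/* TM failure handler address */
	uint64_t texasr;	/* TM exception and status register */
	uint64_t tfiar;		/* TM failure instruction address */
};

static long read_tm_sprs(pid_t pid, struct tm_sprs *s)
{
	struct iovec iov = { .iov_base = s, .iov_len = sizeof(*s) };

	return ptrace(PTRACE_GETREGSET, pid,
		      (void *)(long)NT_PPC_TM_SPR, &iov);
}
#endif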
|
2016-07-28 09:57:41 +07:00
|
|
|
|
|
|
|
static int tm_tar_active(struct task_struct *target,
|
|
|
|
const struct user_regset *regset)
|
|
|
|
{
|
|
|
|
if (!cpu_has_feature(CPU_FTR_TM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
if (MSR_TM_ACTIVE(target->thread.regs->msr))
|
|
|
|
return regset->n;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int tm_tar_get(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
void *kbuf, void __user *ubuf)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!cpu_has_feature(CPU_FTR_TM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
|
|
|
return -ENODATA;
|
|
|
|
|
|
|
|
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.tm_tar, 0, sizeof(u64));
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int tm_tar_set(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
const void *kbuf, const void __user *ubuf)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!cpu_has_feature(CPU_FTR_TM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
|
|
|
return -ENODATA;
|
|
|
|
|
|
|
|
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.tm_tar, 0, sizeof(u64));
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int tm_ppr_active(struct task_struct *target,
|
|
|
|
const struct user_regset *regset)
|
|
|
|
{
|
|
|
|
if (!cpu_has_feature(CPU_FTR_TM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
if (MSR_TM_ACTIVE(target->thread.regs->msr))
|
|
|
|
return regset->n;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
static int tm_ppr_get(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
void *kbuf, void __user *ubuf)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!cpu_has_feature(CPU_FTR_TM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
|
|
|
return -ENODATA;
|
|
|
|
|
|
|
|
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.tm_ppr, 0, sizeof(u64));
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int tm_ppr_set(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
const void *kbuf, const void __user *ubuf)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!cpu_has_feature(CPU_FTR_TM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
|
|
|
return -ENODATA;
|
|
|
|
|
|
|
|
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.tm_ppr, 0, sizeof(u64));
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int tm_dscr_active(struct task_struct *target,
|
|
|
|
const struct user_regset *regset)
|
|
|
|
{
|
|
|
|
if (!cpu_has_feature(CPU_FTR_TM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
if (MSR_TM_ACTIVE(target->thread.regs->msr))
|
|
|
|
return regset->n;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int tm_dscr_get(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
void *kbuf, void __user *ubuf)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!cpu_has_feature(CPU_FTR_TM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
|
|
|
return -ENODATA;
|
|
|
|
|
|
|
|
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.tm_dscr, 0, sizeof(u64));
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int tm_dscr_set(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
const void *kbuf, const void __user *ubuf)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!cpu_has_feature(CPU_FTR_TM))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
if (!MSR_TM_ACTIVE(target->thread.regs->msr))
|
|
|
|
return -ENODATA;
|
|
|
|
|
|
|
|
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.tm_dscr, 0, sizeof(u64));
|
|
|
|
return ret;
|
|
|
|
}
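/*
 * Illustrative sketch (not part of this file): NT_PPC_TM_CTAR,
 * NT_PPC_TM_CPPR and NT_PPC_TM_CDSCR are each a single u64, so one
 * hypothetical helper can service all three checkpointed-SPR notes.
 * Like the handlers above, the call fails with ENODATA when the tracee
 * has no active transaction. Assumes <sys/ptrace.h>, <sys/uio.h> and
 * an <elf.h> that defines the NT_PPC_TM_* note numbers.
 */
#if 0	/* example only, never compiled as part of the kernel */
#include <stdint.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <elf.h>

static long read_tm_ckpt_spr(pid_t pid, unsigned long note, uint64_t *val)
{
	struct iovec iov = { .iov_base = val, .iov_len = sizeof(*val) };

	return ptrace(PTRACE_GETREGSET, pid, (void *)note, &iov);
}

/* e.g. read_tm_ckpt_spr(pid, NT_PPC_TM_CTAR, &ckpt_tar); */
#endif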
|
2016-07-28 09:57:40 +07:00
|
|
|
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
|
2007-06-04 12:15:44 +07:00
|
|
|
|
powerpc/ptrace: Enable support for NT_PPPC_TAR, NT_PPC_PPR, NT_PPC_DSCR
This patch enables support for running TAR, PPR, DSCR registers
related ELF core notes NT_PPPC_TAR, NT_PPC_PPR, NT_PPC_DSCR based
ptrace requests through PTRACE_GETREGSET, PTRACE_SETREGSET calls.
This is achieved through adding three new register sets REGSET_TAR,
REGSET_PPR, REGSET_DSCR in powerpc corresponding to the ELF core
note sections added in this regad. It implements the get, set and
active functions for all these new register sets added.
Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Signed-off-by: Simon Guo <wei.guo.simon@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2016-07-28 09:57:42 +07:00
|
|
|
#ifdef CONFIG_PPC64
|
|
|
|
static int ppr_get(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
void *kbuf, void __user *ubuf)
|
|
|
|
{
|
2016-09-06 18:21:50 +07:00
|
|
|
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
2018-10-12 20:15:16 +07:00
|
|
|
&target->thread.regs->ppr, 0, sizeof(u64));
|
2016-07-28 09:57:42 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static int ppr_set(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
const void *kbuf, const void __user *ubuf)
|
|
|
|
{
|
2016-09-06 18:21:50 +07:00
|
|
|
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
2018-10-12 20:15:16 +07:00
|
|
|
&target->thread.regs->ppr, 0, sizeof(u64));
|
2016-07-28 09:57:42 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static int dscr_get(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
void *kbuf, void __user *ubuf)
|
|
|
|
{
|
2016-09-06 18:21:50 +07:00
|
|
|
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.dscr, 0, sizeof(u64));
|
2016-07-28 09:57:42 +07:00
|
|
|
}
|
|
|
|
static int dscr_set(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
const void *kbuf, const void __user *ubuf)
|
|
|
|
{
|
2016-09-06 18:21:50 +07:00
|
|
|
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.dscr, 0, sizeof(u64));
|
2016-07-28 09:57:42 +07:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_PPC_BOOK3S_64
|
|
|
|
static int tar_get(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
void *kbuf, void __user *ubuf)
|
|
|
|
{
|
2016-09-06 18:21:50 +07:00
|
|
|
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.tar, 0, sizeof(u64));
|
2016-07-28 09:57:42 +07:00
|
|
|
}
|
|
|
|
static int tar_set(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
const void *kbuf, const void __user *ubuf)
|
|
|
|
{
|
2016-09-06 18:21:50 +07:00
|
|
|
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.tar, 0, sizeof(u64));
|
2016-07-28 09:57:42 +07:00
|
|
|
}
|
2016-07-28 09:57:43 +07:00
|
|
|
|
|
|
|
static int ebb_active(struct task_struct *target,
|
|
|
|
const struct user_regset *regset)
|
|
|
|
{
|
|
|
|
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
if (target->thread.used_ebb)
|
|
|
|
return regset->n;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ebb_get(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
void *kbuf, void __user *ubuf)
|
|
|
|
{
|
|
|
|
/* Build tests */
|
|
|
|
BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
|
|
|
|
BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
|
|
|
|
|
|
|
|
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
if (!target->thread.used_ebb)
|
|
|
|
return -ENODATA;
|
|
|
|
|
|
|
|
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.ebbrr, 0, 3 * sizeof(unsigned long));
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ebb_set(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
const void *kbuf, const void __user *ubuf)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
/* Build tests */
|
|
|
|
BUILD_BUG_ON(TSO(ebbrr) + sizeof(unsigned long) != TSO(ebbhr));
|
|
|
|
BUILD_BUG_ON(TSO(ebbhr) + sizeof(unsigned long) != TSO(bescr));
|
|
|
|
|
|
|
|
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
if (target->thread.used_ebb)
|
|
|
|
return -ENODATA;
|
|
|
|
|
|
|
|
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.ebbrr, 0, sizeof(unsigned long));
|
|
|
|
|
|
|
|
if (!ret)
|
|
|
|
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.ebbhr, sizeof(unsigned long),
|
|
|
|
2 * sizeof(unsigned long));
|
|
|
|
|
|
|
|
if (!ret)
|
|
|
|
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.bescr,
|
|
|
|
2 * sizeof(unsigned long), 3 * sizeof(unsigned long));
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
2016-07-28 09:57:44 +07:00
|
|
|
static int pmu_active(struct task_struct *target,
|
|
|
|
const struct user_regset *regset)
|
|
|
|
{
|
|
|
|
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
return regset->n;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int pmu_get(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
void *kbuf, void __user *ubuf)
|
|
|
|
{
|
|
|
|
/* Build tests */
|
|
|
|
BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
|
|
|
|
BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
|
|
|
|
BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
|
|
|
|
BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
|
|
|
|
|
|
|
|
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.siar, 0,
|
|
|
|
5 * sizeof(unsigned long));
|
|
|
|
}
|
|
|
|
|
|
|
|
static int pmu_set(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
const void *kbuf, const void __user *ubuf)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
|
|
|
|
/* Build tests */
|
|
|
|
BUILD_BUG_ON(TSO(siar) + sizeof(unsigned long) != TSO(sdar));
|
|
|
|
BUILD_BUG_ON(TSO(sdar) + sizeof(unsigned long) != TSO(sier));
|
|
|
|
BUILD_BUG_ON(TSO(sier) + sizeof(unsigned long) != TSO(mmcr2));
|
|
|
|
BUILD_BUG_ON(TSO(mmcr2) + sizeof(unsigned long) != TSO(mmcr0));
|
|
|
|
|
|
|
|
if (!cpu_has_feature(CPU_FTR_ARCH_207S))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.siar, 0,
|
|
|
|
sizeof(unsigned long));
|
|
|
|
|
|
|
|
if (!ret)
|
|
|
|
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.sdar, sizeof(unsigned long),
|
|
|
|
2 * sizeof(unsigned long));
|
|
|
|
|
|
|
|
if (!ret)
|
|
|
|
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.sier, 2 * sizeof(unsigned long),
|
|
|
|
3 * sizeof(unsigned long));
|
|
|
|
|
|
|
|
if (!ret)
|
|
|
|
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.mmcr2, 3 * sizeof(unsigned long),
|
|
|
|
4 * sizeof(unsigned long));
|
|
|
|
|
|
|
|
if (!ret)
|
|
|
|
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.mmcr0, 4 * sizeof(unsigned long),
|
|
|
|
5 * sizeof(unsigned long));
|
|
|
|
return ret;
|
|
|
|
}
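/*
 * Illustrative userspace sketch (not part of this file): the EBB and
 * PMU regsets above are flat arrays of 64-bit SPRs in the order
 * enforced by the BUILD_BUG_ON() checks -- EBBRR, EBBHR, BESCR for
 * NT_PPC_EBB and SIAR, SDAR, SIER, MMCR2, MMCR0 for NT_PPC_PMU. The
 * struct and helper names are hypothetical; this assumes
 * <sys/ptrace.h>, <sys/uio.h> and an <elf.h> that defines the
 * NT_PPC_* note numbers.
 */
#if 0	/* example only, never compiled as part of the kernel */
#include <stdint.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <elf.h>

struct ppc_ebb_regs { uint64_t ebbrr, ebbhr, bescr; };
struct ppc_pmu_regs { uint64_t siar, sdar, sier, mmcr2, mmcr0; };

static long read_regset(pid_t pid, unsigned long note,
			void *buf, size_t len)
{
	struct iovec iov = { .iov_base = buf, .iov_len = len };

	return ptrace(PTRACE_GETREGSET, pid, (void *)note, &iov);
}

/*
 * Usage (hypothetical):
 *	struct ppc_pmu_regs pmu;
 *	read_regset(pid, NT_PPC_PMU, &pmu, sizeof(pmu));
 */
#endif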
|
2016-07-28 09:57:42 +07:00
|
|
|
#endif
|
2018-01-19 08:50:43 +07:00
|
|
|
|
|
|
|
#ifdef CONFIG_PPC_MEM_KEYS
|
|
|
|
static int pkey_active(struct task_struct *target,
|
|
|
|
const struct user_regset *regset)
|
|
|
|
{
|
|
|
|
if (!arch_pkeys_enabled())
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
return regset->n;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int pkey_get(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
void *kbuf, void __user *ubuf)
|
|
|
|
{
|
|
|
|
BUILD_BUG_ON(TSO(amr) + sizeof(unsigned long) != TSO(iamr));
|
|
|
|
BUILD_BUG_ON(TSO(iamr) + sizeof(unsigned long) != TSO(uamor));
|
|
|
|
|
|
|
|
if (!arch_pkeys_enabled())
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&target->thread.amr, 0,
|
|
|
|
ELF_NPKEY * sizeof(unsigned long));
|
|
|
|
}
|
|
|
|
|
|
|
|
static int pkey_set(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
const void *kbuf, const void __user *ubuf)
|
|
|
|
{
|
|
|
|
u64 new_amr;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!arch_pkeys_enabled())
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
/* Only the AMR can be set from userspace */
|
|
|
|
if (pos != 0 || count != sizeof(new_amr))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
|
|
|
&new_amr, 0, sizeof(new_amr));
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
/* UAMOR determines which bits of the AMR can be set from userspace. */
|
|
|
|
target->thread.amr = (new_amr & target->thread.uamor) |
|
|
|
|
(target->thread.amr & ~target->thread.uamor);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
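/*
 * Worked sketch of the UAMOR masking above (illustrative values, not
 * part of this file): userspace may only change AMR bits whose
 * corresponding UAMOR bits are 1; every other bit keeps its current
 * kernel-managed value.
 */
#if 0	/* example only, never compiled as part of the kernel */
#include <stdint.h>

static uint64_t apply_uamor_mask(uint64_t old_amr, uint64_t new_amr,
				 uint64_t uamor)
{
	/* Same expression pkey_set() applies to thread.amr above. */
	return (new_amr & uamor) | (old_amr & ~uamor);
}

/*
 * e.g. old_amr = 0x0c, new_amr = 0xa5, uamor = 0xf0:
 *	(0xa5 & 0xf0) | (0x0c & ~0xf0) = 0xa0 | 0x0c = 0xac
 * so only the UAMOR-covered (high-nibble) bits were taken from new_amr.
 */
#endif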
|
|
|
|
#endif /* CONFIG_PPC_MEM_KEYS */
|
|
|
|
|
2007-12-20 18:58:00 +07:00
|
|
|
/*
|
|
|
|
* These are our native regset flavors.
|
|
|
|
*/
|
|
|
|
enum powerpc_regset {
|
|
|
|
REGSET_GPR,
|
|
|
|
REGSET_FPR,
|
|
|
|
#ifdef CONFIG_ALTIVEC
|
|
|
|
REGSET_VMX,
|
|
|
|
#endif
|
2008-06-25 11:07:18 +07:00
|
|
|
#ifdef CONFIG_VSX
|
|
|
|
REGSET_VSX,
|
|
|
|
#endif
|
2007-12-20 18:58:00 +07:00
|
|
|
#ifdef CONFIG_SPE
|
|
|
|
REGSET_SPE,
|
|
|
|
#endif
|
2016-07-28 09:57:36 +07:00
|
|
|
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
|
|
|
REGSET_TM_CGPR, /* TM checkpointed GPR registers */
|
2016-07-28 09:57:37 +07:00
|
|
|
REGSET_TM_CFPR, /* TM checkpointed FPR registers */
|
2016-07-28 09:57:38 +07:00
|
|
|
REGSET_TM_CVMX, /* TM checkpointed VMX registers */
|
2016-07-28 09:57:39 +07:00
|
|
|
REGSET_TM_CVSX, /* TM checkpointed VSX registers */
|
2016-07-28 09:57:40 +07:00
|
|
|
REGSET_TM_SPR, /* TM specific SPR registers */
|
2016-07-28 09:57:41 +07:00
|
|
|
REGSET_TM_CTAR, /* TM checkpointed TAR register */
|
|
|
|
REGSET_TM_CPPR, /* TM checkpointed PPR register */
|
|
|
|
REGSET_TM_CDSCR, /* TM checkpointed DSCR register */
|
2016-07-28 09:57:36 +07:00
|
|
|
#endif
|
2016-07-28 09:57:42 +07:00
|
|
|
#ifdef CONFIG_PPC64
|
|
|
|
REGSET_PPR, /* PPR register */
|
|
|
|
REGSET_DSCR, /* DSCR register */
|
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_PPC_BOOK3S_64
|
|
|
|
REGSET_TAR, /* TAR register */
|
2016-07-28 09:57:43 +07:00
|
|
|
REGSET_EBB, /* EBB registers */
|
2016-07-28 09:57:44 +07:00
|
|
|
REGSET_PMR, /* Performance Monitor Registers */
|
2016-07-28 09:57:42 +07:00
|
|
|
#endif
|
2018-01-19 08:50:43 +07:00
|
|
|
#ifdef CONFIG_PPC_MEM_KEYS
|
|
|
|
REGSET_PKEY, /* AMR register */
|
|
|
|
#endif
|
2007-12-20 18:58:00 +07:00
|
|
|
};
|
|
|
|
|
|
|
|
static const struct user_regset native_regsets[] = {
|
|
|
|
[REGSET_GPR] = {
|
|
|
|
.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
|
|
|
|
.size = sizeof(long), .align = sizeof(long),
|
|
|
|
.get = gpr_get, .set = gpr_set
|
|
|
|
},
|
|
|
|
[REGSET_FPR] = {
|
|
|
|
.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
|
|
|
|
.size = sizeof(double), .align = sizeof(double),
|
|
|
|
.get = fpr_get, .set = fpr_set
|
|
|
|
},
|
|
|
|
#ifdef CONFIG_ALTIVEC
|
|
|
|
[REGSET_VMX] = {
|
|
|
|
.core_note_type = NT_PPC_VMX, .n = 34,
|
|
|
|
.size = sizeof(vector128), .align = sizeof(vector128),
|
|
|
|
.active = vr_active, .get = vr_get, .set = vr_set
|
|
|
|
},
|
|
|
|
#endif
|
2008-06-25 11:07:18 +07:00
|
|
|
#ifdef CONFIG_VSX
|
|
|
|
[REGSET_VSX] = {
|
2008-07-01 11:01:39 +07:00
|
|
|
.core_note_type = NT_PPC_VSX, .n = 32,
|
|
|
|
.size = sizeof(double), .align = sizeof(double),
|
2008-06-25 11:07:18 +07:00
|
|
|
.active = vsr_active, .get = vsr_get, .set = vsr_set
|
|
|
|
},
|
|
|
|
#endif
|
2007-12-20 18:58:00 +07:00
|
|
|
#ifdef CONFIG_SPE
|
|
|
|
[REGSET_SPE] = {
|
2013-08-27 14:52:14 +07:00
|
|
|
.core_note_type = NT_PPC_SPE, .n = 35,
|
2007-12-20 18:58:00 +07:00
|
|
|
.size = sizeof(u32), .align = sizeof(u32),
|
|
|
|
.active = evr_active, .get = evr_get, .set = evr_set
|
|
|
|
},
|
|
|
|
#endif
|
2016-07-28 09:57:36 +07:00
|
|
|
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
|
|
|
[REGSET_TM_CGPR] = {
|
|
|
|
.core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
|
|
|
|
.size = sizeof(long), .align = sizeof(long),
|
|
|
|
.active = tm_cgpr_active, .get = tm_cgpr_get, .set = tm_cgpr_set
|
|
|
|
},
|
2016-07-28 09:57:37 +07:00
|
|
|
[REGSET_TM_CFPR] = {
|
|
|
|
.core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
|
|
|
|
.size = sizeof(double), .align = sizeof(double),
|
|
|
|
.active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
|
|
|
|
},
|
2016-07-28 09:57:38 +07:00
|
|
|
[REGSET_TM_CVMX] = {
|
|
|
|
.core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
|
|
|
|
.size = sizeof(vector128), .align = sizeof(vector128),
|
|
|
|
.active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
|
|
|
|
},
|
2016-07-28 09:57:39 +07:00
|
|
|
[REGSET_TM_CVSX] = {
|
|
|
|
.core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
|
|
|
|
.size = sizeof(double), .align = sizeof(double),
|
|
|
|
.active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
|
|
|
|
},
|
2016-07-28 09:57:40 +07:00
|
|
|
[REGSET_TM_SPR] = {
|
|
|
|
.core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
|
|
|
|
.size = sizeof(u64), .align = sizeof(u64),
|
|
|
|
.active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
|
|
|
|
},
|
2016-07-28 09:57:41 +07:00
|
|
|
[REGSET_TM_CTAR] = {
|
|
|
|
.core_note_type = NT_PPC_TM_CTAR, .n = 1,
|
|
|
|
.size = sizeof(u64), .align = sizeof(u64),
|
|
|
|
.active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
|
|
|
|
},
|
|
|
|
[REGSET_TM_CPPR] = {
|
|
|
|
.core_note_type = NT_PPC_TM_CPPR, .n = 1,
|
|
|
|
.size = sizeof(u64), .align = sizeof(u64),
|
|
|
|
.active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
|
|
|
|
},
|
|
|
|
[REGSET_TM_CDSCR] = {
|
|
|
|
.core_note_type = NT_PPC_TM_CDSCR, .n = 1,
|
|
|
|
.size = sizeof(u64), .align = sizeof(u64),
|
|
|
|
.active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
|
|
|
|
},
|
2016-07-28 09:57:36 +07:00
|
|
|
#endif
|
2016-07-28 09:57:42 +07:00
|
|
|
#ifdef CONFIG_PPC64
|
|
|
|
[REGSET_PPR] = {
|
|
|
|
.core_note_type = NT_PPC_PPR, .n = 1,
|
|
|
|
.size = sizeof(u64), .align = sizeof(u64),
|
|
|
|
.get = ppr_get, .set = ppr_set
|
|
|
|
},
|
|
|
|
[REGSET_DSCR] = {
|
|
|
|
.core_note_type = NT_PPC_DSCR, .n = 1,
|
|
|
|
.size = sizeof(u64), .align = sizeof(u64),
|
|
|
|
.get = dscr_get, .set = dscr_set
|
|
|
|
},
|
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_PPC_BOOK3S_64
|
|
|
|
[REGSET_TAR] = {
|
|
|
|
.core_note_type = NT_PPC_TAR, .n = 1,
|
|
|
|
.size = sizeof(u64), .align = sizeof(u64),
|
|
|
|
.get = tar_get, .set = tar_set
|
|
|
|
},
|
2016-07-28 09:57:43 +07:00
|
|
|
[REGSET_EBB] = {
|
|
|
|
.core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
|
|
|
|
.size = sizeof(u64), .align = sizeof(u64),
|
|
|
|
.active = ebb_active, .get = ebb_get, .set = ebb_set
|
|
|
|
},
|
2016-07-28 09:57:44 +07:00
|
|
|
[REGSET_PMR] = {
|
|
|
|
.core_note_type = NT_PPC_PMU, .n = ELF_NPMU,
|
|
|
|
.size = sizeof(u64), .align = sizeof(u64),
|
|
|
|
.active = pmu_active, .get = pmu_get, .set = pmu_set
|
|
|
|
},
|
2016-07-28 09:57:42 +07:00
|
|
|
#endif
|
2018-01-19 08:50:43 +07:00
|
|
|
#ifdef CONFIG_PPC_MEM_KEYS
|
|
|
|
[REGSET_PKEY] = {
|
|
|
|
.core_note_type = NT_PPC_PKEY, .n = ELF_NPKEY,
|
|
|
|
.size = sizeof(u64), .align = sizeof(u64),
|
|
|
|
.active = pkey_active, .get = pkey_get, .set = pkey_set
|
|
|
|
},
|
|
|
|
#endif
|
2007-12-20 18:58:00 +07:00
|
|
|
};
|
|
|
|
|
|
|
|
static const struct user_regset_view user_ppc_native_view = {
|
|
|
|
.name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
|
|
|
|
.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
|
|
|
|
};
|
|
|
|
|
2007-12-20 18:58:08 +07:00
|
|
|
#ifdef CONFIG_PPC64
|
|
|
|
#include <linux/compat.h>
|
|
|
|
|
2016-07-28 09:57:35 +07:00
|
|
|
static int gpr32_get_common(struct task_struct *target,
|
2007-12-20 18:58:08 +07:00
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
2016-09-11 20:44:13 +07:00
|
|
|
void *kbuf, void __user *ubuf,
|
|
|
|
unsigned long *regs)
|
2007-12-20 18:58:08 +07:00
|
|
|
{
|
|
|
|
compat_ulong_t *k = kbuf;
|
|
|
|
compat_ulong_t __user *u = ubuf;
|
|
|
|
compat_ulong_t reg;
|
|
|
|
|
|
|
|
pos /= sizeof(reg);
|
|
|
|
count /= sizeof(reg);
|
|
|
|
|
|
|
|
if (kbuf)
|
|
|
|
for (; count > 0 && pos < PT_MSR; --count)
|
|
|
|
*k++ = regs[pos++];
|
|
|
|
else
|
|
|
|
for (; count > 0 && pos < PT_MSR; --count)
|
|
|
|
if (__put_user((compat_ulong_t) regs[pos++], u++))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
if (count > 0 && pos == PT_MSR) {
|
|
|
|
reg = get_user_msr(target);
|
|
|
|
if (kbuf)
|
|
|
|
*k++ = reg;
|
|
|
|
else if (__put_user(reg, u++))
|
|
|
|
return -EFAULT;
|
|
|
|
++pos;
|
|
|
|
--count;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (kbuf)
|
|
|
|
for (; count > 0 && pos < PT_REGS_COUNT; --count)
|
|
|
|
*k++ = regs[pos++];
|
|
|
|
else
|
|
|
|
for (; count > 0 && pos < PT_REGS_COUNT; --count)
|
|
|
|
if (__put_user((compat_ulong_t) regs[pos++], u++))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
kbuf = k;
|
|
|
|
ubuf = u;
|
|
|
|
pos *= sizeof(reg);
|
|
|
|
count *= sizeof(reg);
|
|
|
|
return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
|
|
|
|
PT_REGS_COUNT * sizeof(reg), -1);
|
|
|
|
}
|
|
|
|
|
2016-07-28 09:57:35 +07:00
|
|
|
static int gpr32_set_common(struct task_struct *target,
|
2007-12-20 18:58:08 +07:00
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
2016-09-11 20:44:13 +07:00
|
|
|
const void *kbuf, const void __user *ubuf,
|
|
|
|
unsigned long *regs)
|
2007-12-20 18:58:08 +07:00
|
|
|
{
|
|
|
|
const compat_ulong_t *k = kbuf;
|
|
|
|
const compat_ulong_t __user *u = ubuf;
|
|
|
|
compat_ulong_t reg;
|
|
|
|
|
|
|
|
pos /= sizeof(reg);
|
|
|
|
count /= sizeof(reg);
|
|
|
|
|
|
|
|
if (kbuf)
|
|
|
|
for (; count > 0 && pos < PT_MSR; --count)
|
|
|
|
regs[pos++] = *k++;
|
|
|
|
else
|
|
|
|
for (; count > 0 && pos < PT_MSR; --count) {
|
|
|
|
if (__get_user(reg, u++))
|
|
|
|
return -EFAULT;
|
|
|
|
regs[pos++] = reg;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
if (count > 0 && pos == PT_MSR) {
|
|
|
|
if (kbuf)
|
|
|
|
reg = *k++;
|
|
|
|
else if (__get_user(reg, u++))
|
|
|
|
return -EFAULT;
|
|
|
|
set_user_msr(target, reg);
|
|
|
|
++pos;
|
|
|
|
--count;
|
|
|
|
}
|
|
|
|
|
2008-03-13 15:25:35 +07:00
|
|
|
if (kbuf) {
|
2007-12-20 18:58:08 +07:00
|
|
|
for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
|
|
|
|
regs[pos++] = *k++;
|
2008-03-13 15:25:35 +07:00
|
|
|
for (; count > 0 && pos < PT_TRAP; --count, ++pos)
|
|
|
|
++k;
|
|
|
|
} else {
|
2007-12-20 18:58:08 +07:00
|
|
|
for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
|
|
|
|
if (__get_user(reg, u++))
|
|
|
|
return -EFAULT;
|
|
|
|
regs[pos++] = reg;
|
|
|
|
}
|
2008-03-13 15:25:35 +07:00
|
|
|
for (; count > 0 && pos < PT_TRAP; --count, ++pos)
|
|
|
|
if (__get_user(reg, u++))
|
|
|
|
return -EFAULT;
|
|
|
|
}
|
2007-12-20 18:58:08 +07:00
|
|
|
|
|
|
|
if (count > 0 && pos == PT_TRAP) {
|
|
|
|
if (kbuf)
|
|
|
|
reg = *k++;
|
|
|
|
else if (__get_user(reg, u++))
|
|
|
|
return -EFAULT;
|
|
|
|
set_user_trap(target, reg);
|
|
|
|
++pos;
|
|
|
|
--count;
|
|
|
|
}
|
|
|
|
|
|
|
|
kbuf = k;
|
|
|
|
ubuf = u;
|
|
|
|
pos *= sizeof(reg);
|
|
|
|
count *= sizeof(reg);
|
|
|
|
return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
|
|
|
|
(PT_TRAP + 1) * sizeof(reg), -1);
|
|
|
|
}
|
|
|
|
|
2016-07-28 09:57:36 +07:00
|
|
|
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
|
|
|
static int tm_cgpr32_get(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
void *kbuf, void __user *ubuf)
|
|
|
|
{
|
2016-09-11 20:44:13 +07:00
|
|
|
return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
|
|
|
|
&target->thread.ckpt_regs.gpr[0]);
|
2016-07-28 09:57:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static int tm_cgpr32_set(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
const void *kbuf, const void __user *ubuf)
|
|
|
|
{
|
2016-09-11 20:44:13 +07:00
|
|
|
return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
|
|
|
|
&target->thread.ckpt_regs.gpr[0]);
|
2016-07-28 09:57:36 +07:00
|
|
|
}
|
|
|
|
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
|
|
|
|
|
2016-07-28 09:57:35 +07:00
|
|
|
static int gpr32_get(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
void *kbuf, void __user *ubuf)
|
|
|
|
{
|
2016-09-11 20:44:13 +07:00
|
|
|
int i;
|
|
|
|
|
|
|
|
if (target->thread.regs == NULL)
|
|
|
|
return -EIO;
|
|
|
|
|
|
|
|
if (!FULL_REGS(target->thread.regs)) {
|
|
|
|
/*
|
|
|
|
* We have a partial register set.
|
|
|
|
* Fill 14-31 with bogus values.
|
|
|
|
*/
|
|
|
|
for (i = 14; i < 32; i++)
|
|
|
|
target->thread.regs->gpr[i] = NV_REG_POISON;
|
|
|
|
}
|
|
|
|
return gpr32_get_common(target, regset, pos, count, kbuf, ubuf,
|
|
|
|
&target->thread.regs->gpr[0]);
|
2016-07-28 09:57:35 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static int gpr32_set(struct task_struct *target,
|
|
|
|
const struct user_regset *regset,
|
|
|
|
unsigned int pos, unsigned int count,
|
|
|
|
const void *kbuf, const void __user *ubuf)
|
|
|
|
{
|
2016-09-11 20:44:13 +07:00
|
|
|
if (target->thread.regs == NULL)
|
|
|
|
return -EIO;
|
|
|
|
|
|
|
|
CHECK_FULL_REGS(target->thread.regs);
|
|
|
|
return gpr32_set_common(target, regset, pos, count, kbuf, ubuf,
|
|
|
|
&target->thread.regs->gpr[0]);
|
2016-07-28 09:57:35 +07:00
|
|
|
}
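/*
 * Illustrative sketch (not part of this file): when tracing a 32-bit
 * task, the NT_PRSTATUS regset goes through the compat path above, so
 * every slot is a 32-bit compat_ulong_t holding the truncated register
 * value, laid out as gpr32_get_common() writes it. The slot count
 * below assumes powerpc's ELF_NGREG of 48; the helper name is
 * hypothetical and assumes <sys/ptrace.h>, <sys/uio.h> and <elf.h>.
 */
#if 0	/* example only, never compiled as part of the kernel */
#include <stdint.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/uio.h>
#include <elf.h>

#define PPC_COMPAT_NGREG	48	/* assumed ELF_NGREG for powerpc */

static long read_compat_gprs(pid_t pid,
			     uint32_t gregs[PPC_COMPAT_NGREG])
{
	struct iovec iov = {
		.iov_base = gregs,
		.iov_len  = PPC_COMPAT_NGREG * sizeof(uint32_t),
	};

	return ptrace(PTRACE_GETREGSET, pid,
		      (void *)(long)NT_PRSTATUS, &iov);
}
#endif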
|
|
|
|
|
2007-12-20 18:58:08 +07:00
|
|
|
/*
|
|
|
|
* These are the regset flavors matching the CONFIG_PPC32 native set.
|
|
|
|
*/
|
|
|
|
static const struct user_regset compat_regsets[] = {
|
|
|
|
[REGSET_GPR] = {
|
|
|
|
.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
|
|
|
|
.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
|
|
|
|
.get = gpr32_get, .set = gpr32_set
|
|
|
|
},
|
|
|
|
[REGSET_FPR] = {
|
|
|
|
.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
|
|
|
|
.size = sizeof(double), .align = sizeof(double),
|
|
|
|
.get = fpr_get, .set = fpr_set
|
|
|
|
},
|
|
|
|
#ifdef CONFIG_ALTIVEC
|
|
|
|
[REGSET_VMX] = {
|
|
|
|
.core_note_type = NT_PPC_VMX, .n = 34,
|
|
|
|
.size = sizeof(vector128), .align = sizeof(vector128),
|
|
|
|
.active = vr_active, .get = vr_get, .set = vr_set
|
|
|
|
},
|
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_SPE
|
|
|
|
[REGSET_SPE] = {
|
2008-01-03 08:05:48 +07:00
|
|
|
.core_note_type = NT_PPC_SPE, .n = 35,
|
2007-12-20 18:58:08 +07:00
|
|
|
.size = sizeof(u32), .align = sizeof(u32),
|
|
|
|
.active = evr_active, .get = evr_get, .set = evr_set
|
|
|
|
},
|
|
|
|
#endif
|
2016-07-28 09:57:36 +07:00
|
|
|
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
|
|
|
[REGSET_TM_CGPR] = {
|
|
|
|
.core_note_type = NT_PPC_TM_CGPR, .n = ELF_NGREG,
|
|
|
|
.size = sizeof(long), .align = sizeof(long),
|
|
|
|
.active = tm_cgpr_active,
|
|
|
|
.get = tm_cgpr32_get, .set = tm_cgpr32_set
|
|
|
|
},
|
2016-07-28 09:57:37 +07:00
|
|
|
[REGSET_TM_CFPR] = {
|
|
|
|
.core_note_type = NT_PPC_TM_CFPR, .n = ELF_NFPREG,
|
|
|
|
.size = sizeof(double), .align = sizeof(double),
|
|
|
|
.active = tm_cfpr_active, .get = tm_cfpr_get, .set = tm_cfpr_set
|
|
|
|
},
|
2016-07-28 09:57:38 +07:00
|
|
|
[REGSET_TM_CVMX] = {
|
|
|
|
.core_note_type = NT_PPC_TM_CVMX, .n = ELF_NVMX,
|
|
|
|
.size = sizeof(vector128), .align = sizeof(vector128),
|
|
|
|
.active = tm_cvmx_active, .get = tm_cvmx_get, .set = tm_cvmx_set
|
|
|
|
},
|
2016-07-28 09:57:39 +07:00
|
|
|
[REGSET_TM_CVSX] = {
|
|
|
|
.core_note_type = NT_PPC_TM_CVSX, .n = ELF_NVSX,
|
|
|
|
.size = sizeof(double), .align = sizeof(double),
|
|
|
|
.active = tm_cvsx_active, .get = tm_cvsx_get, .set = tm_cvsx_set
|
|
|
|
},
|
2016-07-28 09:57:40 +07:00
|
|
|
[REGSET_TM_SPR] = {
|
|
|
|
.core_note_type = NT_PPC_TM_SPR, .n = ELF_NTMSPRREG,
|
|
|
|
.size = sizeof(u64), .align = sizeof(u64),
|
|
|
|
.active = tm_spr_active, .get = tm_spr_get, .set = tm_spr_set
|
|
|
|
},
|
2016-07-28 09:57:41 +07:00
|
|
|
[REGSET_TM_CTAR] = {
|
|
|
|
.core_note_type = NT_PPC_TM_CTAR, .n = 1,
|
|
|
|
.size = sizeof(u64), .align = sizeof(u64),
|
|
|
|
.active = tm_tar_active, .get = tm_tar_get, .set = tm_tar_set
|
|
|
|
},
|
|
|
|
[REGSET_TM_CPPR] = {
|
|
|
|
.core_note_type = NT_PPC_TM_CPPR, .n = 1,
|
|
|
|
.size = sizeof(u64), .align = sizeof(u64),
|
|
|
|
.active = tm_ppr_active, .get = tm_ppr_get, .set = tm_ppr_set
|
|
|
|
},
|
|
|
|
[REGSET_TM_CDSCR] = {
|
|
|
|
.core_note_type = NT_PPC_TM_CDSCR, .n = 1,
|
|
|
|
.size = sizeof(u64), .align = sizeof(u64),
|
|
|
|
.active = tm_dscr_active, .get = tm_dscr_get, .set = tm_dscr_set
|
|
|
|
},
|
2016-07-28 09:57:36 +07:00
|
|
|
#endif
|
2016-07-28 09:57:42 +07:00
|
|
|
#ifdef CONFIG_PPC64
|
|
|
|
[REGSET_PPR] = {
|
|
|
|
.core_note_type = NT_PPC_PPR, .n = 1,
|
|
|
|
.size = sizeof(u64), .align = sizeof(u64),
|
|
|
|
.get = ppr_get, .set = ppr_set
|
|
|
|
},
|
|
|
|
[REGSET_DSCR] = {
|
|
|
|
.core_note_type = NT_PPC_DSCR, .n = 1,
|
|
|
|
.size = sizeof(u64), .align = sizeof(u64),
|
|
|
|
.get = dscr_get, .set = dscr_set
|
|
|
|
},
|
|
|
|
#endif
|
|
|
|
#ifdef CONFIG_PPC_BOOK3S_64
|
|
|
|
[REGSET_TAR] = {
|
|
|
|
.core_note_type = NT_PPC_TAR, .n = 1,
|
|
|
|
.size = sizeof(u64), .align = sizeof(u64),
|
|
|
|
.get = tar_get, .set = tar_set
|
|
|
|
},
|
2016-07-28 09:57:43 +07:00
|
|
|
[REGSET_EBB] = {
|
|
|
|
.core_note_type = NT_PPC_EBB, .n = ELF_NEBB,
|
|
|
|
.size = sizeof(u64), .align = sizeof(u64),
|
|
|
|
.active = ebb_active, .get = ebb_get, .set = ebb_set
|
|
|
|
},
|
powerpc/ptrace: Enable support for NT_PPPC_TAR, NT_PPC_PPR, NT_PPC_DSCR
This patch enables support for running TAR, PPR, DSCR registers
related ELF core notes NT_PPPC_TAR, NT_PPC_PPR, NT_PPC_DSCR based
ptrace requests through PTRACE_GETREGSET, PTRACE_SETREGSET calls.
This is achieved through adding three new register sets REGSET_TAR,
REGSET_PPR, REGSET_DSCR in powerpc corresponding to the ELF core
note sections added in this regad. It implements the get, set and
active functions for all these new register sets added.
Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Signed-off-by: Simon Guo <wei.guo.simon@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
2016-07-28 09:57:42 +07:00
|
|
|
#endif
|
2007-12-20 18:58:08 +07:00
|
|
|
};
|
|
|
|
|
|
|
|
static const struct user_regset_view user_ppc_compat_view = {
|
|
|
|
.name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
|
|
|
|
.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
|
|
|
|
};
|
|
|
|
#endif /* CONFIG_PPC64 */

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_PPC64
	if (test_tsk_thread_flag(task, TIF_32BIT))
		return &user_ppc_compat_view;
#endif
	return &user_ppc_native_view;
}
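
/*
 * The single-stepping helpers below come in two flavours: BookE-style
 * parts (CONFIG_PPC_ADV_DEBUG_REGS) program DBCR0 and rely on MSR[DE],
 * while classic parts simply toggle MSR[SE]/MSR[BE].
 */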

void user_enable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		task->thread.debug.dbcr0 &= ~DBCR0_BT;
		task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
		regs->msr |= MSR_DE;
#else
		regs->msr &= ~MSR_BE;
		regs->msr |= MSR_SE;
#endif
	}
	set_tsk_thread_flag(task, TIF_SINGLESTEP);
}

void user_enable_block_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		task->thread.debug.dbcr0 &= ~DBCR0_IC;
		task->thread.debug.dbcr0 = DBCR0_IDM | DBCR0_BT;
		regs->msr |= MSR_DE;
#else
		regs->msr &= ~MSR_SE;
		regs->msr |= MSR_BE;
#endif
	}
	set_tsk_thread_flag(task, TIF_SINGLESTEP);
}

void user_disable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		/*
		 * The logic to disable single stepping should be as
		 * simple as turning off the Instruction Complete flag.
		 * And, after doing so, if all debug flags are off, turn
		 * off DBCR0(IDM) and MSR(DE) .... Torez
		 */
		task->thread.debug.dbcr0 &= ~(DBCR0_IC|DBCR0_BT);
		/*
		 * Test to see if any of the DBCR_ACTIVE_EVENTS bits are set.
		 */
		if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
					task->thread.debug.dbcr1)) {
			/*
			 * All debug events were off.....
			 */
			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
			regs->msr &= ~MSR_DE;
		}
#else
		regs->msr &= ~(MSR_SE | MSR_BE);
#endif
	}
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}

#ifdef CONFIG_HAVE_HW_BREAKPOINT
void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions in PPC64.
	 * The SIGTRAP signal is generated automatically for us in do_dabr().
	 * We don't have to do anything about that here
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
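
/*
 * ptrace_set_debugreg() backs PTRACE_SET_DEBUGREG: 'data' is a DABR-style
 * value (the data address with read/write/translate flags in the low three
 * bits), and a zero value clears the breakpoint.  Where perf-based hardware
 * breakpoints are available the request is routed through a perf_event;
 * otherwise it is written straight into the thread's hw_brk (or dac1 on
 * CONFIG_PPC_ADV_DEBUG_REGS parts).
 */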

static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
			       unsigned long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret;
	struct thread_struct *thread = &(task->thread);
	struct perf_event *bp;
	struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	bool set_bp = true;
	struct arch_hw_breakpoint hw_brk;
#endif

	/* For ppc64 we support one DABR and no IABRs at the moment.
	 * For embedded processors we support one DAC and no IACs at the
	 * moment.
	 */
	if (addr > 0)
		return -EINVAL;

	/* The bottom 3 bits in dabr are flags */
	if ((data & ~0x7UL) >= TASK_SIZE)
		return -EIO;

#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	/* For processors using DABR (i.e. 970), the bottom 3 bits are flags.
	 *  It was assumed, on previous implementations, that 3 bits were
	 *  passed together with the data address, fitting the design of the
	 *  DABR register, as follows:
	 *
	 *  bit 0: Read flag
	 *  bit 1: Write flag
	 *  bit 2: Breakpoint translation
	 *
	 *  Thus, we use them here as so.
	 */

	/* Ensure breakpoint translation bit is set */
	if (data && !(data & HW_BRK_TYPE_TRANSLATE))
		return -EIO;
	hw_brk.address = data & (~HW_BRK_TYPE_DABR);
	hw_brk.type = (data & HW_BRK_TYPE_DABR) | HW_BRK_TYPE_PRIV_ALL;
	hw_brk.len = 8;
	set_bp = (data) && (hw_brk.type & HW_BRK_TYPE_RDWR);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	bp = thread->ptrace_bps[0];
	if (!set_bp) {
		if (bp) {
			unregister_hw_breakpoint(bp);
			thread->ptrace_bps[0] = NULL;
		}
		return 0;
	}
	if (bp) {
		attr = bp->attr;
		attr.bp_addr = hw_brk.address;
		arch_bp_generic_fields(hw_brk.type, &attr.bp_type);

		/* Enable breakpoint */
		attr.disabled = false;

		ret = modify_user_hw_breakpoint(bp, &attr);
		if (ret) {
			return ret;
		}
		thread->ptrace_bps[0] = bp;
		thread->hw_brk = hw_brk;
		return 0;
	}

	/* Create a new breakpoint request if one doesn't exist already */
	hw_breakpoint_init(&attr);
	attr.bp_addr = hw_brk.address;
	attr.bp_len = 8;
	arch_bp_generic_fields(hw_brk.type,
			       &attr.bp_type);

	thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
					       ptrace_triggered, NULL, task);
	if (IS_ERR(bp)) {
		thread->ptrace_bps[0] = NULL;
		return PTR_ERR(bp);
	}

#else /* !CONFIG_HAVE_HW_BREAKPOINT */
	if (set_bp && (!ppc_breakpoint_available()))
		return -ENODEV;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
	task->thread.hw_brk = hw_brk;
#else /* CONFIG_PPC_ADV_DEBUG_REGS */
	/* As described above, it was assumed 3 bits were passed with the data
	 *  address, but we will assume only the mode bits will be passed
	 *  as to not cause alignment restrictions for DAC-based processors.
	 */

	/* DAC's hold the whole address without any mode flags */
	task->thread.debug.dac1 = data & ~0x3UL;

	if (task->thread.debug.dac1 == 0) {
		dbcr_dac(task) &= ~(DBCR_DAC1R | DBCR_DAC1W);
		if (!DBCR_ACTIVE_EVENTS(task->thread.debug.dbcr0,
					task->thread.debug.dbcr1)) {
			task->thread.regs->msr &= ~MSR_DE;
			task->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}
		return 0;
	}

	/* Read or Write bits must be set */

	if (!(data & 0x3UL))
		return -EINVAL;

	/* Set the Internal Debugging flag (IDM bit 1) for the DBCR0
	   register */
	task->thread.debug.dbcr0 |= DBCR0_IDM;

	/* Check for write and read flags and set DBCR0
	   accordingly */
	dbcr_dac(task) &= ~(DBCR_DAC1R|DBCR_DAC1W);
	if (data & 0x1UL)
		dbcr_dac(task) |= DBCR_DAC1R;
	if (data & 0x2UL)
		dbcr_dac(task) |= DBCR_DAC1W;
	task->thread.regs->msr |= MSR_DE;
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
	return 0;
}

/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure single step bits etc. are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* make sure the single step bit is not set. */
	user_disable_single_step(child);
}
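
/*
 * The helpers below implement PPC_PTRACE_SETHWDEBUG/DELHWDEBUG on
 * CONFIG_PPC_ADV_DEBUG_REGS (BookE) parts: instruction breakpoints go into
 * the IAC1-4 slots, data breakpoints into DAC1/DAC2, optionally with DVC
 * value-compare conditions.  Each setter returns the slot number it
 * allocated, and that slot number is the handle later passed to the
 * delete path.
 */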

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static long set_instruction_bp(struct task_struct *child,
			       struct ppc_hw_breakpoint *bp_info)
{
	int slot;
	int slot1_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC1) != 0);
	int slot2_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC2) != 0);
	int slot3_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC3) != 0);
	int slot4_in_use = ((child->thread.debug.dbcr0 & DBCR0_IAC4) != 0);

	if (dbcr_iac_range(child) & DBCR_IAC12MODE)
		slot2_in_use = 1;
	if (dbcr_iac_range(child) & DBCR_IAC34MODE)
		slot4_in_use = 1;

	if (bp_info->addr >= TASK_SIZE)
		return -EIO;

	if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {

		/* Make sure range is valid. */
		if (bp_info->addr2 >= TASK_SIZE)
			return -EIO;

		/* We need a pair of IAC registers */
		if ((!slot1_in_use) && (!slot2_in_use)) {
			slot = 1;
			child->thread.debug.iac1 = bp_info->addr;
			child->thread.debug.iac2 = bp_info->addr2;
			child->thread.debug.dbcr0 |= DBCR0_IAC1;
			if (bp_info->addr_mode ==
					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
				dbcr_iac_range(child) |= DBCR_IAC12X;
			else
				dbcr_iac_range(child) |= DBCR_IAC12I;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
		} else if ((!slot3_in_use) && (!slot4_in_use)) {
			slot = 3;
			child->thread.debug.iac3 = bp_info->addr;
			child->thread.debug.iac4 = bp_info->addr2;
			child->thread.debug.dbcr0 |= DBCR0_IAC3;
			if (bp_info->addr_mode ==
					PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
				dbcr_iac_range(child) |= DBCR_IAC34X;
			else
				dbcr_iac_range(child) |= DBCR_IAC34I;
#endif
		} else
			return -ENOSPC;
	} else {
		/* We only need one.  If possible leave a pair free in
		 * case a range is needed later
		 */
		if (!slot1_in_use) {
			/*
			 * Don't use iac1 if iac1-iac2 are free and either
			 * iac3 or iac4 (but not both) are free
			 */
			if (slot2_in_use || (slot3_in_use == slot4_in_use)) {
				slot = 1;
				child->thread.debug.iac1 = bp_info->addr;
				child->thread.debug.dbcr0 |= DBCR0_IAC1;
				goto out;
			}
		}
		if (!slot2_in_use) {
			slot = 2;
			child->thread.debug.iac2 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC2;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
		} else if (!slot3_in_use) {
			slot = 3;
			child->thread.debug.iac3 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC3;
		} else if (!slot4_in_use) {
			slot = 4;
			child->thread.debug.iac4 = bp_info->addr;
			child->thread.debug.dbcr0 |= DBCR0_IAC4;
#endif
		} else
			return -ENOSPC;
	}
out:
	child->thread.debug.dbcr0 |= DBCR0_IDM;
	child->thread.regs->msr |= MSR_DE;

	return slot;
}

static int del_instruction_bp(struct task_struct *child, int slot)
{
	switch (slot) {
	case 1:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC1) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC12MODE) {
			/* address range - clear slots 1 & 2 */
			child->thread.debug.iac2 = 0;
			dbcr_iac_range(child) &= ~DBCR_IAC12MODE;
		}
		child->thread.debug.iac1 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC1;
		break;
	case 2:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC2) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC12MODE)
			/* used in a range */
			return -EINVAL;
		child->thread.debug.iac2 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case 3:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC3) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC34MODE) {
			/* address range - clear slots 3 & 4 */
			child->thread.debug.iac4 = 0;
			dbcr_iac_range(child) &= ~DBCR_IAC34MODE;
		}
		child->thread.debug.iac3 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC3;
		break;
	case 4:
		if ((child->thread.debug.dbcr0 & DBCR0_IAC4) == 0)
			return -ENOENT;

		if (dbcr_iac_range(child) & DBCR_IAC34MODE)
			/* Used in a range */
			return -EINVAL;
		child->thread.debug.iac4 = 0;
		child->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		break;
#endif
	default:
		return -EINVAL;
	}
	return 0;
}

static int set_dac(struct task_struct *child, struct ppc_hw_breakpoint *bp_info)
{
	int byte_enable =
		(bp_info->condition_mode >> PPC_BREAKPOINT_CONDITION_BE_SHIFT)
		& 0xf;
	int condition_mode =
		bp_info->condition_mode & PPC_BREAKPOINT_CONDITION_MODE;
	int slot;

	if (byte_enable && (condition_mode == 0))
		return -EINVAL;

	if (bp_info->addr >= TASK_SIZE)
		return -EIO;

	if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0) {
		slot = 1;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
			dbcr_dac(child) |= DBCR_DAC1R;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
			dbcr_dac(child) |= DBCR_DAC1W;
		child->thread.debug.dac1 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		if (byte_enable) {
			child->thread.debug.dvc1 =
				(unsigned long)bp_info->condition_value;
			child->thread.debug.dbcr2 |=
				((byte_enable << DBCR2_DVC1BE_SHIFT) |
				 (condition_mode << DBCR2_DVC1M_SHIFT));
		}
#endif
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
	} else if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
		/* Both dac1 and dac2 are part of a range */
		return -ENOSPC;
#endif
	} else if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0) {
		slot = 2;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
			dbcr_dac(child) |= DBCR_DAC2R;
		if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
			dbcr_dac(child) |= DBCR_DAC2W;
		child->thread.debug.dac2 = (unsigned long)bp_info->addr;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		if (byte_enable) {
			child->thread.debug.dvc2 =
				(unsigned long)bp_info->condition_value;
			child->thread.debug.dbcr2 |=
				((byte_enable << DBCR2_DVC2BE_SHIFT) |
				 (condition_mode << DBCR2_DVC2M_SHIFT));
		}
#endif
	} else
		return -ENOSPC;
	child->thread.debug.dbcr0 |= DBCR0_IDM;
	child->thread.regs->msr |= MSR_DE;

	return slot + 4;
}

static int del_dac(struct task_struct *child, int slot)
{
	if (slot == 1) {
		if ((dbcr_dac(child) & (DBCR_DAC1R | DBCR_DAC1W)) == 0)
			return -ENOENT;

		child->thread.debug.dac1 = 0;
		dbcr_dac(child) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE) {
			child->thread.debug.dac2 = 0;
			child->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
		}
		child->thread.debug.dbcr2 &= ~(DBCR2_DVC1M | DBCR2_DVC1BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		child->thread.debug.dvc1 = 0;
#endif
	} else if (slot == 2) {
		if ((dbcr_dac(child) & (DBCR_DAC2R | DBCR_DAC2W)) == 0)
			return -ENOENT;

#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		if (child->thread.debug.dbcr2 & DBCR2_DAC12MODE)
			/* Part of a range */
			return -EINVAL;
		child->thread.debug.dbcr2 &= ~(DBCR2_DVC2M | DBCR2_DVC2BE);
#endif
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
		child->thread.debug.dvc2 = 0;
#endif
		child->thread.debug.dac2 = 0;
		dbcr_dac(child) &= ~(DBCR_DAC2R | DBCR_DAC2W);
	} else
		return -EINVAL;

	return 0;
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
static int set_dac_range(struct task_struct *child,
			 struct ppc_hw_breakpoint *bp_info)
{
	int mode = bp_info->addr_mode & PPC_BREAKPOINT_MODE_MASK;

	/* We don't allow range watchpoints to be used with DVC */
	if (bp_info->condition_mode)
		return -EINVAL;

	/*
	 * Best effort to verify the address range.  The user/supervisor bits
	 * prevent trapping in kernel space, but let's fail on an obvious bad
	 * range.  The simple test on the mask is not fool-proof, and any
	 * exclusive range will spill over into kernel space.
	 */
	if (bp_info->addr >= TASK_SIZE)
		return -EIO;
	if (mode == PPC_BREAKPOINT_MODE_MASK) {
		/*
		 * dac2 is a bitmask.  Don't allow a mask that makes a
		 * kernel space address from a valid dac1 value
		 */
		if (~((unsigned long)bp_info->addr2) >= TASK_SIZE)
			return -EIO;
	} else {
		/*
		 * For range breakpoints, addr2 must also be a valid address
		 */
		if (bp_info->addr2 >= TASK_SIZE)
			return -EIO;
	}

	if (child->thread.debug.dbcr0 &
	    (DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W))
		return -ENOSPC;

	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
		child->thread.debug.dbcr0 |= (DBCR0_DAC1R | DBCR0_IDM);
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
		child->thread.debug.dbcr0 |= (DBCR0_DAC1W | DBCR0_IDM);
	child->thread.debug.dac1 = bp_info->addr;
	child->thread.debug.dac2 = bp_info->addr2;
	if (mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
		child->thread.debug.dbcr2 |= DBCR2_DAC12M;
	else if (mode == PPC_BREAKPOINT_MODE_RANGE_EXCLUSIVE)
		child->thread.debug.dbcr2 |= DBCR2_DAC12MX;
	else	/* PPC_BREAKPOINT_MODE_MASK */
		child->thread.debug.dbcr2 |= DBCR2_DAC12MM;
	child->thread.regs->msr |= MSR_DE;

	return 5;
}
#endif /* CONFIG_PPC_ADV_DEBUG_DAC_RANGE */
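
/*
 * ppc_set_hwdebug() returns a positive handle on success: the IAC/DAC slot
 * number on CONFIG_PPC_ADV_DEBUG_REGS parts, or 1 on other parts, which
 * only have a single data breakpoint.  ppc_del_hwdebug() takes that same
 * handle back as 'data'.
 */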

static long ppc_set_hwdebug(struct task_struct *child,
			    struct ppc_hw_breakpoint *bp_info)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int len = 0;
	struct thread_struct *thread = &(child->thread);
	struct perf_event *bp;
	struct perf_event_attr attr;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
	struct arch_hw_breakpoint brk;
#endif

	if (bp_info->version != 1)
		return -ENOTSUPP;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	/*
	 * Check for invalid flags and combinations
	 */
	if ((bp_info->trigger_type == 0) ||
	    (bp_info->trigger_type & ~(PPC_BREAKPOINT_TRIGGER_EXECUTE |
				       PPC_BREAKPOINT_TRIGGER_RW)) ||
	    (bp_info->addr_mode & ~PPC_BREAKPOINT_MODE_MASK) ||
	    (bp_info->condition_mode &
	     ~(PPC_BREAKPOINT_CONDITION_MODE |
	       PPC_BREAKPOINT_CONDITION_BE_ALL)))
		return -EINVAL;
#if CONFIG_PPC_ADV_DEBUG_DVCS == 0
	if (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
		return -EINVAL;
#endif

	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_EXECUTE) {
		if ((bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_EXECUTE) ||
		    (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE))
			return -EINVAL;
		return set_instruction_bp(child, bp_info);
	}
	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
		return set_dac(child, bp_info);

#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
	return set_dac_range(child, bp_info);
#else
	return -EINVAL;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_DVCS */
	/*
	 * We only support one data breakpoint
	 */
	if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 ||
	    (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 ||
	    bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)
		return -EINVAL;

	if ((unsigned long)bp_info->addr >= TASK_SIZE)
		return -EIO;

	brk.address = bp_info->addr & ~7UL;
	brk.type = HW_BRK_TYPE_TRANSLATE;
	brk.len = 8;
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
		brk.type |= HW_BRK_TYPE_READ;
	if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
		brk.type |= HW_BRK_TYPE_WRITE;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/*
	 * Check if the request is for 'range' breakpoints. We can
	 * support it if range < 8 bytes.
	 */
	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE)
		len = bp_info->addr2 - bp_info->addr;
	else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT)
		len = 1;
	else
		return -EINVAL;
	bp = thread->ptrace_bps[0];
	if (bp)
		return -ENOSPC;

	/* Create a new breakpoint request if one doesn't exist already */
	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)bp_info->addr & ~HW_BREAKPOINT_ALIGN;
	attr.bp_len = len;
	arch_bp_generic_fields(brk.type, &attr.bp_type);

	thread->ptrace_bps[0] = bp = register_user_hw_breakpoint(&attr,
					       ptrace_triggered, NULL, child);
	if (IS_ERR(bp)) {
		thread->ptrace_bps[0] = NULL;
		return PTR_ERR(bp);
	}

	return 1;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT)
		return -EINVAL;

	if (child->thread.hw_brk.address)
		return -ENOSPC;

	if (!ppc_breakpoint_available())
		return -ENODEV;

	child->thread.hw_brk = brk;

	return 1;
#endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */
}

static long ppc_del_hwdebug(struct task_struct *child, long data)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret = 0;
	struct thread_struct *thread = &(child->thread);
	struct perf_event *bp;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	int rc;

	if (data <= 4)
		rc = del_instruction_bp(child, (int)data);
	else
		rc = del_dac(child, (int)data - 4);

	if (!rc) {
		if (!DBCR_ACTIVE_EVENTS(child->thread.debug.dbcr0,
					child->thread.debug.dbcr1)) {
			child->thread.debug.dbcr0 &= ~DBCR0_IDM;
			child->thread.regs->msr &= ~MSR_DE;
		}
	}
	return rc;
#else
	if (data != 1)
		return -EINVAL;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	bp = thread->ptrace_bps[0];
	if (bp) {
		unregister_hw_breakpoint(bp);
		thread->ptrace_bps[0] = NULL;
	} else
		ret = -ENOENT;
	return ret;
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	if (child->thread.hw_brk.address == 0)
		return -ENOENT;

	child->thread.hw_brk.address = 0;
	child->thread.hw_brk.type = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	return 0;
#endif
}
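
/*
 * arch_ptrace() handles the powerpc-specific requests and falls back to
 * the generic ptrace_request() for everything else.  PTRACE_PEEKUSR and
 * PTRACE_POKEUSR index the USER area in word-sized slots (4 bytes on
 * ppc32, 8 bytes on ppc64), with the FP registers and FPSCR mapped in
 * after the GPRs starting at PT_FPR0.
 */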

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret = -EPERM;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = datavp;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long index, tmp;

		ret = -EIO;
		/* convert to index and check */
#ifdef CONFIG_PPC32
		index = addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			ret = ptrace_get_reg(child, (int) index, &tmp);
			if (ret)
				break;
		} else {
			unsigned int fpidx = index - PT_FPR0;

			flush_fp_to_thread(child);
			if (fpidx < (PT_FPSCR - PT_FPR0))
				memcpy(&tmp, &child->thread.TS_FPR(fpidx),
				       sizeof(long));
			else
				tmp = child->thread.fp_state.fpscr;
		}
		ret = put_user(tmp, datalp);
		break;
	}

	/* write the word at location addr in the USER area */
	case PTRACE_POKEUSR: {
		unsigned long index;

		ret = -EIO;
		/* convert to index and check */
#ifdef CONFIG_PPC32
		index = addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			ret = ptrace_put_reg(child, index, data);
		} else {
			unsigned int fpidx = index - PT_FPR0;

			flush_fp_to_thread(child);
			if (fpidx < (PT_FPSCR - PT_FPR0))
				memcpy(&child->thread.TS_FPR(fpidx), &data,
				       sizeof(long));
			else
				child->thread.fp_state.fpscr = data;
			ret = 0;
		}
		break;
	}

	case PPC_PTRACE_GETHWDBGINFO: {
		struct ppc_debug_info dbginfo;

		dbginfo.version = 1;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		dbginfo.num_instruction_bps = CONFIG_PPC_ADV_DEBUG_IACS;
		dbginfo.num_data_bps = CONFIG_PPC_ADV_DEBUG_DACS;
		dbginfo.num_condition_regs = CONFIG_PPC_ADV_DEBUG_DVCS;
		dbginfo.data_bp_alignment = 4;
		dbginfo.sizeof_condition = 4;
		dbginfo.features = PPC_DEBUG_FEATURE_INSN_BP_RANGE |
				   PPC_DEBUG_FEATURE_INSN_BP_MASK;
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		dbginfo.features |=
			   PPC_DEBUG_FEATURE_DATA_BP_RANGE |
			   PPC_DEBUG_FEATURE_DATA_BP_MASK;
#endif
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
		dbginfo.num_instruction_bps = 0;
		if (ppc_breakpoint_available())
			dbginfo.num_data_bps = 1;
		else
			dbginfo.num_data_bps = 0;
		dbginfo.num_condition_regs = 0;
#ifdef CONFIG_PPC64
		dbginfo.data_bp_alignment = 8;
#else
		dbginfo.data_bp_alignment = 4;
#endif
		dbginfo.sizeof_condition = 0;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
		if (dawr_enabled())
			dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
#else
		dbginfo.features = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

		if (copy_to_user(datavp, &dbginfo,
				 sizeof(struct ppc_debug_info)))
			return -EFAULT;
		return 0;
	}

	case PPC_PTRACE_SETHWDEBUG: {
		struct ppc_hw_breakpoint bp_info;

		if (copy_from_user(&bp_info, datavp,
				   sizeof(struct ppc_hw_breakpoint)))
			return -EFAULT;
		return ppc_set_hwdebug(child, &bp_info);
	}

	case PPC_PTRACE_DELHWDEBUG: {
		ret = ppc_del_hwdebug(child, data);
		break;
	}

	case PTRACE_GET_DEBUGREG: {
#ifndef CONFIG_PPC_ADV_DEBUG_REGS
		unsigned long dabr_fake;
#endif
		ret = -EINVAL;
		/* We only support one DABR and no IABRs at the moment */
		if (addr > 0)
			break;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
		ret = put_user(child->thread.debug.dac1, datalp);
#else
		dabr_fake = ((child->thread.hw_brk.address & (~HW_BRK_TYPE_DABR)) |
			     (child->thread.hw_brk.type & HW_BRK_TYPE_DABR));
		ret = put_user(dabr_fake, datalp);
#endif
		break;
	}

	case PTRACE_SET_DEBUGREG:
		ret = ptrace_set_debugreg(child, addr, data);
		break;

#ifdef CONFIG_PPC64
	case PTRACE_GETREGS64:
#endif
	case PTRACE_GETREGS:	/* Get all pt_regs from the child. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_GPR,
					   0, sizeof(struct user_pt_regs),
					   datavp);

#ifdef CONFIG_PPC64
	case PTRACE_SETREGS64:
#endif
	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_GPR,
					     0, sizeof(struct user_pt_regs),
					     datavp);

	case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_FPR,
					   0, sizeof(elf_fpregset_t),
					   datavp);

	case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_FPR,
					     0, sizeof(elf_fpregset_t),
					     datavp);

#ifdef CONFIG_ALTIVEC
	case PTRACE_GETVRREGS:
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VMX,
					   0, (33 * sizeof(vector128) +
					       sizeof(u32)),
					   datavp);

	case PTRACE_SETVRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VMX,
					     0, (33 * sizeof(vector128) +
						 sizeof(u32)),
					     datavp);
#endif
#ifdef CONFIG_VSX
	case PTRACE_GETVSRREGS:
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VSX,
					   0, 32 * sizeof(double),
					   datavp);

	case PTRACE_SETVSRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VSX,
					     0, 32 * sizeof(double),
					     datavp);
#endif
#ifdef CONFIG_SPE
	case PTRACE_GETEVRREGS:
		/* Get the child spe register state. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_SPE, 0, 35 * sizeof(u32),
					   datavp);

	case PTRACE_SETEVRREGS:
		/* Set the child spe register state. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_SPE, 0, 35 * sizeof(u32),
					     datavp);
#endif

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
	return ret;
}

#ifdef CONFIG_SECCOMP
static int do_seccomp(struct pt_regs *regs)
{
	if (!test_thread_flag(TIF_SECCOMP))
		return 0;

	/*
	 * The ABI we present to seccomp tracers is that r3 contains
	 * the syscall return value and orig_gpr3 contains the first
	 * syscall parameter. This is different to the ptrace ABI where
	 * both r3 and orig_gpr3 contain the first syscall parameter.
	 */
	regs->gpr[3] = -ENOSYS;

	/*
	 * We use the __ version here because we have already checked
	 * TIF_SECCOMP. If this fails, there is nothing left to do, we
	 * have already loaded -ENOSYS into r3, or seccomp has put
	 * something else in r3 (via SECCOMP_RET_ERRNO/TRACE).
	 */
	if (__secure_computing(NULL))
		return -1;

	/*
	 * The syscall was allowed by seccomp, restore the register
	 * state to what audit expects.
	 * Note that we use orig_gpr3, which means a seccomp tracer can
	 * modify the first syscall parameter (in orig_gpr3) and also
	 * allow the syscall to proceed.
	 */
	regs->gpr[3] = regs->orig_gpr3;

	return 0;
}
#else
static inline int do_seccomp(struct pt_regs *regs) { return 0; }
#endif /* CONFIG_SECCOMP */

/**
 * do_syscall_trace_enter() - Do syscall tracing on kernel entry.
 * @regs: the pt_regs of the task to trace (current)
 *
 * Performs various types of tracing on syscall entry. This includes seccomp,
 * ptrace, syscall tracepoints and audit.
 *
 * The pt_regs are potentially visible to userspace via ptrace, so their
 * contents are ABI.
 *
 * One or more of the tracers may modify the contents of pt_regs, in particular
 * to modify arguments or even the syscall number itself.
 *
 * It's also possible that a tracer can choose to reject the system call. In
 * that case this function will return an illegal syscall number, and will put
 * an appropriate return value in regs->r3.
 *
 * Return: the (possibly changed) syscall number.
 */
long do_syscall_trace_enter(struct pt_regs *regs)
{
	u32 flags;

	user_exit();

	flags = READ_ONCE(current_thread_info()->flags) &
		(_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE);

	if (flags) {
		int rc = tracehook_report_syscall_entry(regs);

		if (unlikely(flags & _TIF_SYSCALL_EMU)) {
			/*
			 * A nonzero return code from
			 * tracehook_report_syscall_entry() tells us to prevent
			 * the syscall execution, but we are not going to
			 * execute it anyway.
			 *
			 * Returning -1 will skip the syscall execution. We want
			 * to avoid clobbering any registers, so we don't goto
			 * the skip label below.
			 */
			return -1;
		}

		if (rc) {
			/*
			 * The tracer decided to abort the syscall. Note that
			 * the tracer may also just change regs->gpr[0] to an
			 * invalid syscall number, that is handled below on the
			 * exit path.
			 */
			goto skip;
		}
	}

	/* Run seccomp after ptrace; allow it to set gpr[3]. */
	if (do_seccomp(regs))
		return -1;

	/* Avoid trace and audit when syscall is invalid. */
	if (regs->gpr[0] >= NR_syscalls)
		goto skip;

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gpr[0]);

#ifdef CONFIG_PPC64
	if (!is_32bit_task())
		audit_syscall_entry(regs->gpr[0], regs->gpr[3], regs->gpr[4],
				    regs->gpr[5], regs->gpr[6]);
	else
#endif
		audit_syscall_entry(regs->gpr[0],
				    regs->gpr[3] & 0xffffffff,
				    regs->gpr[4] & 0xffffffff,
				    regs->gpr[5] & 0xffffffff,
				    regs->gpr[6] & 0xffffffff);

	/* Return the possibly modified but valid syscall number */
	return regs->gpr[0];

skip:
	/*
	 * If we are aborting explicitly, or if the syscall number is
	 * now invalid, set the return value to -ENOSYS.
	 */
	regs->gpr[3] = -ENOSYS;
	return -1;
}

void do_syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->result);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);

	user_enter();
}
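
/*
 * pt_regs_check() is a compile-time assertion that the user-visible
 * struct user_pt_regs lines up with (and is no larger than) the kernel's
 * struct pt_regs, since the former is exposed to userspace and is ABI.
 */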

void __init pt_regs_check(void)
{
	BUILD_BUG_ON(offsetof(struct pt_regs, gpr) !=
		     offsetof(struct user_pt_regs, gpr));
	BUILD_BUG_ON(offsetof(struct pt_regs, nip) !=
		     offsetof(struct user_pt_regs, nip));
	BUILD_BUG_ON(offsetof(struct pt_regs, msr) !=
		     offsetof(struct user_pt_regs, msr));
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct user_pt_regs, orig_gpr3));
	BUILD_BUG_ON(offsetof(struct pt_regs, ctr) !=
		     offsetof(struct user_pt_regs, ctr));
	BUILD_BUG_ON(offsetof(struct pt_regs, link) !=
		     offsetof(struct user_pt_regs, link));
	BUILD_BUG_ON(offsetof(struct pt_regs, xer) !=
		     offsetof(struct user_pt_regs, xer));
	BUILD_BUG_ON(offsetof(struct pt_regs, ccr) !=
		     offsetof(struct user_pt_regs, ccr));
#ifdef __powerpc64__
	BUILD_BUG_ON(offsetof(struct pt_regs, softe) !=
		     offsetof(struct user_pt_regs, softe));
#else
	BUILD_BUG_ON(offsetof(struct pt_regs, mq) !=
		     offsetof(struct user_pt_regs, mq));
#endif
	BUILD_BUG_ON(offsetof(struct pt_regs, trap) !=
		     offsetof(struct user_pt_regs, trap));
	BUILD_BUG_ON(offsetof(struct pt_regs, dar) !=
		     offsetof(struct user_pt_regs, dar));
	BUILD_BUG_ON(offsetof(struct pt_regs, dsisr) !=
		     offsetof(struct user_pt_regs, dsisr));
	BUILD_BUG_ON(offsetof(struct pt_regs, result) !=
		     offsetof(struct user_pt_regs, result));

	BUILD_BUG_ON(sizeof(struct user_pt_regs) > sizeof(struct pt_regs));
}