mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-27 20:55:09 +07:00
c7a318ba86
Commit 8d460f6156
("powerpc/process: Add the function flush_tmregs_to_thread") added flush_tmregs_to_thread() and included the assumption that it would only be called for a task which is not current. Although this is correct for ptrace, when generating a core dump, some of the routines which call flush_tmregs_to_thread() are called. This leads to a WARNing such as: Not expecting ptrace on self: TM regs may be incorrect ------------[ cut here ]------------ WARNING: CPU: 123 PID: 7727 at arch/powerpc/kernel/process.c:1088 flush_tmregs_to_thread+0x78/0x80 CPU: 123 PID: 7727 Comm: libvirtd Not tainted 4.8.0-rc1-gcc6x-g61e8a0d #1 task: c000000fe631b600 task.stack: c000000fe63b0000 NIP: c00000000001a1a8 LR: c00000000001a1a4 CTR: c000000000717780 REGS: c000000fe63b3420 TRAP: 0700 Not tainted (4.8.0-rc1-gcc6x-g61e8a0d) MSR: 900000010282b033 <SF,HV,VEC,VSX,EE,FP,ME,IR,DR,RI,LE,TM[E]> CR: 28004222 XER: 20000000 ... NIP [c00000000001a1a8] flush_tmregs_to_thread+0x78/0x80 LR [c00000000001a1a4] flush_tmregs_to_thread+0x74/0x80 Call Trace: flush_tmregs_to_thread+0x74/0x80 (unreliable) vsr_get+0x64/0x1a0 elf_core_dump+0x604/0x1430 do_coredump+0x5fc/0x1200 get_signal+0x398/0x740 do_signal+0x54/0x2b0 do_notify_resume+0x98/0xb0 ret_from_except_lite+0x70/0x74 So fix flush_tmregs_to_thread() to detect the case where it is called on current, and a transaction is active, and in that case flush the TM regs to the thread_struct. This patch also moves flush_tmregs_to_thread() into ptrace.c as it is only called from that file. Fixes:8d460f6156
("powerpc/process: Add the function flush_tmregs_to_thread") Signed-off-by: Cyril Bur <cyrilbur@gmail.com> [mpe: Flesh out change log] Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
95 lines
2.5 KiB
C
95 lines
2.5 KiB
C
/*
 * Context-switch support declarations for powerpc.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_SWITCH_TO_H
#define _ASM_POWERPC_SWITCH_TO_H

#include <asm/reg.h>

/* Forward declarations: only pointers to these types are used below. */
struct thread_struct;
struct task_struct;
struct pt_regs;
/*
 * Core context-switch entry point; switch_to() evaluates to the task
 * that was previously running (assigned to @last).
 */
extern struct task_struct *__switch_to(struct task_struct *,
				       struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))

/* Low-level thread-state swap between two thread_structs. */
extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);

/* Install a new set of Book-E debug registers. */
extern void switch_booke_debug_regs(struct debug_reg *new_debug);

extern int emulate_altivec(struct pt_regs *);

/* Flush/give up all live register state into the thread_struct. */
extern void flush_all_to_thread(struct task_struct *);
extern void giveup_all(struct task_struct *);
#ifdef CONFIG_PPC_FPU
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void giveup_fpu(struct task_struct *);
extern void save_fpu(struct task_struct *);

/* End a kernel FP-use section by clearing MSR_FP in the MSR. */
static inline void disable_kernel_fp(void)
{
	msr_check_and_clear(MSR_FP);
}
#else
/* No-op stubs when the kernel is built without FPU support. */
static inline void __giveup_fpu(struct task_struct *t) { }
static inline void save_fpu(struct task_struct *t) { }
static inline void flush_fp_to_thread(struct task_struct *t) { }
#endif
#ifdef CONFIG_ALTIVEC
extern void enable_kernel_altivec(void);
extern void flush_altivec_to_thread(struct task_struct *);
extern void giveup_altivec(struct task_struct *);
extern void save_altivec(struct task_struct *);

/* End a kernel VMX-use section by clearing MSR_VEC in the MSR. */
static inline void disable_kernel_altivec(void)
{
	msr_check_and_clear(MSR_VEC);
}
#else
/* No-op stubs when the kernel is built without AltiVec support. */
static inline void save_altivec(struct task_struct *t) { }
static inline void __giveup_altivec(struct task_struct *t) { }
#endif
#ifdef CONFIG_VSX
extern void enable_kernel_vsx(void);
extern void flush_vsx_to_thread(struct task_struct *);

/*
 * End a kernel VSX-use section.  VSX use implies FP and VMX, so all
 * three MSR facility bits are cleared together.
 */
static inline void disable_kernel_vsx(void)
{
	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}
#endif
#ifdef CONFIG_SPE
extern void enable_kernel_spe(void);
extern void flush_spe_to_thread(struct task_struct *);
extern void giveup_spe(struct task_struct *);
extern void __giveup_spe(struct task_struct *);

/* End a kernel SPE-use section by clearing MSR_SPE in the MSR. */
static inline void disable_kernel_spe(void)
{
	msr_check_and_clear(MSR_SPE);
}
#else
/* No-op stub when the kernel is built without SPE support. */
static inline void __giveup_spe(struct task_struct *t) { }
#endif
/*
 * Reset all Event-Based Branch (EBB) and related PMU state in @t.
 * Only meaningful on Book3S-64; compiles to a no-op elsewhere.
 */
static inline void clear_task_ebb(struct task_struct *t)
{
#ifdef CONFIG_PPC_BOOK3S_64
	/* EBB perf events are not inherited, so clear all EBB state. */
	t->thread.ebbrr = 0;
	t->thread.ebbhr = 0;
	t->thread.bescr = 0;
	t->thread.mmcr2 = 0;
	t->thread.mmcr0 = 0;
	t->thread.siar = 0;
	t->thread.sdar = 0;
	t->thread.sier = 0;
	t->thread.used_ebb = 0;
#endif
}

#endif /* _ASM_POWERPC_SWITCH_TO_H */