Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-27 12:45:16 +07:00
6cc0c16d82

Implement the bulk of interrupt return logic in C. The asm return code must handle a few cases: restoring full GPRs, and emulating stack store.

The stack store emulation is significantly simplified: rather than creating a new return frame and switching to that before performing the store, it uses the PACA to keep a scratch register around to perform the store.

The asm return code is moved into 64e for now. The new logic has made allowance for 64e, but I don't have a full environment that works well to test it, and even booting in emulated qemu is not great for stress testing. 64e shouldn't be too far off working with this, given a bit more testing and auditing of the logic.

This is slightly faster on a POWER9 (page fault speed increases about 1.1%), probably due to reduced mtmsrd.

mpe: Includes fixes from Nick for _TIF_EMULATE_STACK_STORE handling (including the fast_interrupt_return path), to remove trace_hardirqs_on(), and fixes for the interrupt-return part of the MSR_VSX restore bug caught by the tm-unavailable selftest.

mpe: Incorporate fix from Nick:

The return-to-kernel path has to replay any soft-pending interrupts if it is returning to a context that had interrupts soft-enabled. It has to do this carefully and avoid plainly enabling interrupts if this is an irq context, which can cause multiple nesting of interrupts on the stack and other unexpected issues.

The code which avoided this case got the soft-mask state wrong, and marked interrupts as enabled before going around again to retry. This seems to be mostly harmless, except that with PREEMPT=y it calls preempt_schedule_irq() with irqs apparently enabled and runs into a BUG in kernel/sched/core.c.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michal Suchanek <msuchanek@suse.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200225173541.1549955-29-npiggin@gmail.com
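The replay-on-return behaviour the second mpe note describes is easiest to see in code. Below is a minimal, self-contained userspace model of that loop, not the kernel implementation: the struct, field, and helper names here are invented for the example, and only the soft-mask bookkeeping that the fix concerns is modeled.

/*
 * Illustrative model of the return-to-kernel replay loop described in
 * the commit message. All names are made up for this sketch; the real
 * code lives in the powerpc interrupt exit path.
 */
#include <stdbool.h>
#include <stdio.h>

#define IRQS_ENABLED	0
#define IRQS_DISABLED	1

struct paca_model {
	int soft_mask;		/* current soft-mask state */
	bool irq_pending;	/* a masked interrupt arrived earlier */
	bool in_irq;		/* are we returning from an irq context? */
};

static void replay_soft_interrupts(struct paca_model *p)
{
	/* Handle the queued interrupt with the soft mask still set. */
	p->irq_pending = false;
}

/*
 * Return-to-kernel: replay pending interrupts only if the interrupted
 * context had interrupts soft-enabled.
 */
static void interrupt_exit_kernel(struct paca_model *p, int regs_softe)
{
	if (regs_softe != IRQS_ENABLED)
		return;		/* returning to a soft-disabled context */

again:
	if (p->irq_pending) {
		if (p->in_irq) {
			/*
			 * Must not plainly enable interrupts here, or
			 * further interrupts could nest on this stack.
			 * Replay with the mask still held, then retry.
			 *
			 * The bug fixed above: marking soft_mask as
			 * IRQS_ENABLED before this retry, so that with
			 * PREEMPT=y preempt_schedule_irq() later saw
			 * irqs apparently enabled and hit a BUG.
			 */
			replay_soft_interrupts(p);
			goto again;	/* soft_mask stays DISABLED */
		}
		replay_soft_interrupts(p);
	}
	p->soft_mask = IRQS_ENABLED;	/* only now mark enabled */
}

int main(void)
{
	struct paca_model p = {
		.soft_mask = IRQS_DISABLED,
		.irq_pending = true,
		.in_irq = true,
	};

	interrupt_exit_kernel(&p, IRQS_ENABLED);
	printf("pending=%d soft_mask=%d\n", p.irq_pending, p.soft_mask);
	return 0;
}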
110 lines · 2.8 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_SWITCH_TO_H
#define _ASM_POWERPC_SWITCH_TO_H

#include <linux/sched.h>
#include <asm/reg.h>

struct thread_struct;
struct task_struct;
struct pt_regs;

extern struct task_struct *__switch_to(struct task_struct *,
	struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))

extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);

extern void switch_booke_debug_regs(struct debug_reg *new_debug);

extern int emulate_altivec(struct pt_regs *);

#ifdef CONFIG_PPC_BOOK3S_64
void restore_math(struct pt_regs *regs);
#else
static inline void restore_math(struct pt_regs *regs)
{
}
#endif

void restore_tm_state(struct pt_regs *regs);

extern void flush_all_to_thread(struct task_struct *);
extern void giveup_all(struct task_struct *);

#ifdef CONFIG_PPC_FPU
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void giveup_fpu(struct task_struct *);
extern void save_fpu(struct task_struct *);
static inline void disable_kernel_fp(void)
{
	msr_check_and_clear(MSR_FP);
}
#else
static inline void save_fpu(struct task_struct *t) { }
static inline void flush_fp_to_thread(struct task_struct *t) { }
#endif

#ifdef CONFIG_ALTIVEC
extern void enable_kernel_altivec(void);
extern void flush_altivec_to_thread(struct task_struct *);
extern void giveup_altivec(struct task_struct *);
extern void save_altivec(struct task_struct *);
static inline void disable_kernel_altivec(void)
{
	msr_check_and_clear(MSR_VEC);
}
#else
static inline void save_altivec(struct task_struct *t) { }
static inline void __giveup_altivec(struct task_struct *t) { }
#endif

#ifdef CONFIG_VSX
extern void enable_kernel_vsx(void);
extern void flush_vsx_to_thread(struct task_struct *);
static inline void disable_kernel_vsx(void)
{
	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}
#endif

#ifdef CONFIG_SPE
extern void enable_kernel_spe(void);
extern void flush_spe_to_thread(struct task_struct *);
extern void giveup_spe(struct task_struct *);
extern void __giveup_spe(struct task_struct *);
static inline void disable_kernel_spe(void)
{
	msr_check_and_clear(MSR_SPE);
}
#else
static inline void __giveup_spe(struct task_struct *t) { }
#endif

static inline void clear_task_ebb(struct task_struct *t)
{
#ifdef CONFIG_PPC_BOOK3S_64
	/* EBB perf events are not inherited, so clear all EBB state. */
	t->thread.ebbrr = 0;
	t->thread.ebbhr = 0;
	t->thread.bescr = 0;
	t->thread.mmcr2 = 0;
	t->thread.mmcr0 = 0;
	t->thread.siar = 0;
	t->thread.sdar = 0;
	t->thread.sier = 0;
	t->thread.used_ebb = 0;
#endif
}

extern int set_thread_uses_vas(void);

extern int set_thread_tidr(struct task_struct *t);

#endif /* _ASM_POWERPC_SWITCH_TO_H */
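For context, the enable_kernel_*() / disable_kernel_*() pairs declared above bracket in-kernel use of a floating-point or vector unit. A minimal sketch of the conventional call pattern, assuming kernel context and CONFIG_PPC_FPU; the function name is invented for the example and this compiles only inside a powerpc kernel tree.

/*
 * Illustrative usage of the FP enable/disable pair from switch_to.h.
 * example_use_fp() is a hypothetical caller, not a kernel function.
 */
#include <linux/preempt.h>
#include <asm/switch_to.h>

static void example_use_fp(void)
{
	/*
	 * Kernel FP use must not race with a context switch, so the
	 * region is bracketed with preempt_disable()/preempt_enable().
	 */
	preempt_disable();
	enable_kernel_fp();	/* make MSR_FP usable in the kernel */

	/* ... FP instructions may be used here ... */

	disable_kernel_fp();	/* msr_check_and_clear(MSR_FP) */
	preempt_enable();
}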