Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-23 03:14:42 +07:00
68b34588e2
System call entry and particularly exit code is beyond the limit of what is reasonable to implement in asm.

This conversion moves all conditional branches out of the asm code, except for the case that all GPRs should be restored at exit.

The null syscall test is about 5% faster after this patch, because the exit work is handled under local_irq_disable, and the hard mask and pending interrupt replay is handled after that, which avoids games with MSR.

mpe: Includes subsequent fixes from Nick:

This fixes 4 issues caught by TM selftests.

First was a tm-syscall bug that hit due to tabort_syscall being called after interrupts were reconciled (in a subsequent patch), which led to interrupts being enabled before tabort_syscall was called. Rather than un-reconciling interrupts for the return, I just go back to putting the test early in asm; the C-ification of that wasn't a big win anyway.

Second, the syscall return _TIF_USER_WORK_MASK check would go into an infinite loop if _TIF_RESTORE_TM became set. The asm code uses _TIF_USER_WORK_MASK to branch to the slowpath, which includes restore_tm_state.

Third, system call return was not calling restore_tm_state; I missed this completely (although it's in the return-from-interrupt C conversion, because when the asm syscall code encountered problems it would branch to the interrupt return code).

Fourth, MSR_VEC was missing from restore_math, which was caught by the tm-unavailable selftest taking an unexpected facility unavailable interrupt when testing the VSX unavailable exception with MSR.FP=1 MSR.VEC=1. The fourth case also has a fixup in a subsequent patch.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michal Suchanek <msuchanek@suse.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200225173541.1549955-26-npiggin@gmail.com
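As a rough illustration of the exit path described above (not the actual kernel code), the sketch below shows the shape of an exit-work loop that runs with interrupts soft-disabled and only hard-masks and replays pending interrupts after the work is done. do_exit_work() and hard_mask_and_replay_irqs() are hypothetical placeholders for the real helpers; the thread-info flag names are the real ones the commit message refers to, and the exclusion of _TIF_RESTORE_TM from the loop condition corresponds to the second issue above.

#include <linux/compiler.h>     /* READ_ONCE()                     */
#include <linux/irqflags.h>     /* local_irq_disable()/enable()    */
#include <linux/sched.h>        /* schedule()                      */
#include <linux/thread_info.h>  /* current_thread_info(), _TIF_*   */

/* Hypothetical stand-ins for the real exit-work and irq-replay helpers. */
void do_exit_work(unsigned long ti_flags);   /* signals, TM state restore, ...  */
void hard_mask_and_replay_irqs(void);        /* hard-disable MSR[EE] and replay */

/* Illustrative sketch only: the shape of the syscall exit-work loop. */
void syscall_exit_work_sketch(void)
{
        unsigned long ti_flags;

        local_irq_disable();

        /*
         * Handle reschedule/signal/TM work while soft-disabled, re-reading
         * the flags each pass; _TIF_RESTORE_TM is masked out of the loop
         * condition so that flag alone cannot make the loop spin forever.
         */
        ti_flags = READ_ONCE(current_thread_info()->flags);
        while (ti_flags & (_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM)) {
                local_irq_enable();
                if (ti_flags & _TIF_NEED_RESCHED)
                        schedule();
                else
                        do_exit_work(ti_flags);
                local_irq_disable();
                ti_flags = READ_ONCE(current_thread_info()->flags);
        }

        /*
         * Only after the exit work is finished: hard-mask interrupts and
         * replay anything that became pending, so MSR does not have to be
         * toggled around the work above.
         */
        hard_mask_and_replay_irqs();
}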
62 lines
1.9 KiB
C
/*
 * Copyright (c) 2007 Benjamin Herrenschmidt, IBM Corporation
 * Extracted from signal_32.c and signal_64.c
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file README.legal in the main directory of
 * this archive for more details.
 */

#ifndef _POWERPC_ARCH_SIGNAL_H
#define _POWERPC_ARCH_SIGNAL_H

extern void __user *get_sigframe(struct ksignal *ksig, unsigned long sp,
                                 size_t frame_size, int is_32);

extern int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
                           struct task_struct *tsk);

extern int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
                              struct task_struct *tsk);

extern unsigned long copy_fpr_to_user(void __user *to,
                                      struct task_struct *task);
extern unsigned long copy_ckfpr_to_user(void __user *to,
                                        struct task_struct *task);
extern unsigned long copy_fpr_from_user(struct task_struct *task,
                                        void __user *from);
extern unsigned long copy_ckfpr_from_user(struct task_struct *task,
                                          void __user *from);
extern unsigned long get_tm_stackpointer(struct task_struct *tsk);

#ifdef CONFIG_VSX
extern unsigned long copy_vsx_to_user(void __user *to,
                                      struct task_struct *task);
extern unsigned long copy_ckvsx_to_user(void __user *to,
                                        struct task_struct *task);
extern unsigned long copy_vsx_from_user(struct task_struct *task,
                                        void __user *from);
extern unsigned long copy_ckvsx_from_user(struct task_struct *task,
                                          void __user *from);
#endif

#ifdef CONFIG_PPC64

extern int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
                              struct task_struct *tsk);

#else /* CONFIG_PPC64 */

extern long sys_rt_sigreturn(void);
extern long sys_sigreturn(void);

static inline int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
                                     struct task_struct *tsk)
{
        return -EFAULT;
}

#endif /* !defined(CONFIG_PPC64) */

#endif /* _POWERPC_ARCH_SIGNAL_H */
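One design point worth noting in the header above: on !CONFIG_PPC64 builds, handle_rt_signal64() degrades to a static inline stub that returns -EFAULT, so callers can dispatch between the 32-bit and 64-bit frame setup routines without their own #ifdef. The wrapper below is a hypothetical caller-side sketch of that pattern, not part of signal.h and not the kernel's actual dispatch code (which lives in the powerpc signal handling C files); setup_signal_frame(), is_32 and is_rt are assumptions made for illustration.

/*
 * Hypothetical caller-side sketch (not part of signal.h): pick a frame
 * setup routine from the declarations above.  On !CONFIG_PPC64 builds the
 * handle_rt_signal64() stub simply returns -EFAULT, so this compiles
 * unchanged on both configurations.
 */
static int setup_signal_frame(struct ksignal *ksig, sigset_t *oldset,
                              struct task_struct *tsk, int is_32, int is_rt)
{
        if (is_32)
                return is_rt ? handle_rt_signal32(ksig, oldset, tsk)
                             : handle_signal32(ksig, oldset, tsk);

        /* 64-bit tasks always use rt frames. */
        return handle_rt_signal64(ksig, oldset, tsk);
}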