mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-24 02:18:18 +07:00
09548fdaf3
The load_up_fpu and load_up_altivec functions were never intended to be called from C, and do things like modifying the MSR value in their callers' stack frames, which are assumed to be interrupt frames. In addition, on 32-bit Book S they require the MMU to be off. This makes KVM use the new load_fp_state() and load_vr_state() functions instead of load_up_fpu/altivec. This means we can remove the assembler glue in book3s_rmhandlers.S, and potentially fixes a bug on Book E, where load_up_fpu was called directly from C. Signed-off-by: Paul Mackerras <paulus@samba.org> Signed-off-by: Alexander Graf <agraf@suse.de>
98 lines
2.4 KiB
C
/*
 * Process/thread switching helpers for powerpc.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_SWITCH_TO_H
#define _ASM_POWERPC_SWITCH_TO_H
|
|
|
|
/* Forward declarations: this header only uses pointers to these types. */
struct thread_struct;
struct task_struct;
struct pt_regs;

/*
 * Low-level context-switch entry points, implemented in assembly.
 *
 * __switch_to() switches from @prev to @next and returns the task that
 * was running before the switch ("last"), so the caller can finish any
 * bookkeeping for it after control returns on the new stack.
 */
extern struct task_struct *__switch_to(struct task_struct *prev,
				       struct task_struct *next);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))

extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);
|
|
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Save the Target Address Register into the outgoing thread's state.
 * The TAR SPR exists only on ISA 2.07 (POWER8) and later, so the read
 * is gated on CPU_FTR_ARCH_207S.
 */
static inline void save_tar(struct thread_struct *prev)
{
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		prev->tar = mfspr(SPRN_TAR);
}
#else
/* No TAR outside Book3S-64: nothing to save. */
static inline void save_tar(struct thread_struct *prev) {}
#endif
|
|
|
|
/*
 * Facility enable/giveup helpers implemented in arch code.
 *
 * enable_kernel_*() make the given unit usable from kernel context;
 * giveup_*() save a task's live register state back into its
 * thread_struct so the unit can be handed to someone else.
 */
extern void enable_kernel_fp(void);
extern void enable_kernel_altivec(void);
extern int emulate_altivec(struct pt_regs *);
extern void __giveup_vsx(struct task_struct *);
extern void giveup_vsx(struct task_struct *);
extern void enable_kernel_spe(void);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern void switch_booke_debug_regs(struct thread_struct *new_thread);
|
|
|
|
#ifndef CONFIG_SMP
/* UP only: drop any lazily-held register state for the current CPU. */
extern void discard_lazy_cpu_state(void);
#else
/* SMP: state is flushed at switch time, so there is nothing lazy to drop. */
static inline void discard_lazy_cpu_state(void)
{
}
#endif
|
|
|
|
#ifdef CONFIG_PPC_FPU
/* Save a task's live FP registers into its thread_struct. */
extern void flush_fp_to_thread(struct task_struct *);
extern void giveup_fpu(struct task_struct *);
#else
/* No FPU configured: these are no-ops so callers need no #ifdefs. */
static inline void flush_fp_to_thread(struct task_struct *t) { }
static inline void giveup_fpu(struct task_struct *t) { }
#endif
|
|
|
|
#ifdef CONFIG_ALTIVEC
/* Save a task's live Altivec (VMX) registers into its thread_struct. */
extern void flush_altivec_to_thread(struct task_struct *);
extern void giveup_altivec(struct task_struct *);
extern void giveup_altivec_notask(void);
#else
/* Altivec not configured: stubs keep callers free of #ifdefs. */
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}

static inline void giveup_altivec(struct task_struct *t)
{
}
#endif
|
|
|
|
#ifdef CONFIG_VSX
/* Save a task's live VSX registers into its thread_struct. */
extern void flush_vsx_to_thread(struct task_struct *);
#else
/* VSX not configured: stub keeps callers free of #ifdefs. */
static inline void flush_vsx_to_thread(struct task_struct *t)
{
}
#endif
|
|
|
|
#ifdef CONFIG_SPE
/* Save a task's live SPE registers into its thread_struct. */
extern void flush_spe_to_thread(struct task_struct *);
#else
/* SPE not configured: stub keeps callers free of #ifdefs. */
static inline void flush_spe_to_thread(struct task_struct *t)
{
}
#endif
|
|
|
|
/*
 * Reset a (child) task's Event-Based Branching and related perf SPR
 * state. Called on fork: EBB perf events are not inherited, so the new
 * task must not start with the parent's EBB/PMU register values.
 * Compiles to a no-op outside Book3S-64, where these fields don't exist.
 */
static inline void clear_task_ebb(struct task_struct *t)
{
#ifdef CONFIG_PPC_BOOK3S_64
	/* EBB perf events are not inherited, so clear all EBB state. */
	t->thread.bescr = 0;
	t->thread.mmcr2 = 0;
	t->thread.mmcr0 = 0;
	t->thread.siar = 0;
	t->thread.sdar = 0;
	t->thread.sier = 0;
	t->thread.used_ebb = 0;
#endif
}

#endif /* _ASM_POWERPC_SWITCH_TO_H */
|