mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-07 04:46:40 +07:00
x86/fpu: Use 'struct fpu' in switch_fpu_prepare()
Migrate this function to pure 'struct fpu' usage. Reviewed-by: Borislav Petkov <bp@alien8.de> Cc: Andy Lutomirski <luto@amacapital.net> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: Fenghua Yu <fenghua.yu@intel.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Oleg Nesterov <oleg@redhat.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Signed-off-by: Ingo Molnar <mingo@kernel.org>
This commit is contained in:
parent
af2d94fddc
commit
cb8818b6ac
@@ -402,10 +402,9 @@ static inline void fpu_reset_state(struct fpu *fpu)
  */
 typedef struct { int preload; } fpu_switch_t;
 
-static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
+static inline fpu_switch_t
+switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 {
-	struct fpu *old_fpu = &old->thread.fpu;
-	struct fpu *new_fpu = &new->thread.fpu;
 	fpu_switch_t fpu;
 
 	/*
@@ -413,33 +412,33 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 	 * or if the past 5 consecutive context-switches used math.
 	 */
 	fpu.preload = new_fpu->fpstate_active &&
-		      (use_eager_fpu() || new->thread.fpu.counter > 5);
+		      (use_eager_fpu() || new_fpu->counter > 5);
 
 	if (old_fpu->has_fpu) {
-		if (!fpu_save_init(&old->thread.fpu))
-			old->thread.fpu.last_cpu = -1;
+		if (!fpu_save_init(old_fpu))
+			old_fpu->last_cpu = -1;
 		else
-			old->thread.fpu.last_cpu = cpu;
+			old_fpu->last_cpu = cpu;
 
 		/* But leave fpu_fpregs_owner_ctx! */
-		old->thread.fpu.has_fpu = 0;
+		old_fpu->has_fpu = 0;
 
 		/* Don't change CR0.TS if we just switch! */
 		if (fpu.preload) {
-			new->thread.fpu.counter++;
+			new_fpu->counter++;
 			__thread_set_has_fpu(new_fpu);
-			prefetch(new->thread.fpu.state);
+			prefetch(new_fpu->state);
 		} else if (!use_eager_fpu())
 			stts();
 	} else {
-		old->thread.fpu.counter = 0;
-		old->thread.fpu.last_cpu = -1;
+		old_fpu->counter = 0;
+		old_fpu->last_cpu = -1;
 		if (fpu.preload) {
-			new->thread.fpu.counter++;
+			new_fpu->counter++;
 			if (fpu_want_lazy_restore(new_fpu, cpu))
 				fpu.preload = 0;
 			else
-				prefetch(new->thread.fpu.state);
+				prefetch(new_fpu->state);
 			__thread_fpu_begin(new_fpu);
 		}
 	}
@@ -248,7 +248,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
-	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
+	fpu = switch_fpu_prepare(&prev_p->thread.fpu, &next_p->thread.fpu, cpu);
 
 	/*
 	 * Save away %gs. No need to save %fs, as it was saved on the
@@ -278,7 +278,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	unsigned fsindex, gsindex;
 	fpu_switch_t fpu;
 
-	fpu = switch_fpu_prepare(prev_p, next_p, cpu);
+	fpu = switch_fpu_prepare(&prev_p->thread.fpu, &next_p->thread.fpu, cpu);
 
 	/* We must save %fs and %gs before load_TLS() because
 	 * %fs and %gs may be cleared by load_TLS().
Loading…
Reference in New Issue
Block a user