Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-10 09:56:42 +07:00)
x86/fpu: Remove failure paths from fpstate-alloc low level functions
Now that we always allocate the FPU context as part of task_struct, there's
no need for separate allocations - remove them and their primary failure
handling code.

( Note that there are still secondary error codes that have become
  superfluous; those will be removed in separate patches. )

Move the somewhat misplaced setup_xstate_comp() call to the core.

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 7366ed771f
commit c4d6ee6e2e
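The change follows from the parent patch (7366ed771f), which embedded the FPU register save area in task_struct instead of pointing at a separately allocated buffer. As a rough illustration of why a dedicated allocation step can no longer fail, here is a minimal sketch; the declarations are simplified stand-ins, not the kernel's actual definitions, and only the names union thread_xstate, struct fpu and the state field correspond to identifiers visible in the diff below.

/* Minimal sketch (stand-in declarations): the FPU save area lives inside
 * the task allocation itself, so there is nothing separate to allocate. */
union thread_xstate {
        unsigned char storage[512];     /* stands in for the FXSAVE/XSAVE formats */
};

struct fpu {
        unsigned int fpstate_active;
        union thread_xstate state;      /* embedded save area, not a pointer */
};

struct task_struct {
        /* ... other task state ... */
        struct fpu fpu;                 /* comes along with every task allocation */
};

With the save area embedded like this, fpstate_alloc() had already degenerated into an alignment check that unconditionally returns 0, and fpstate_free() into an empty function - which is exactly what the hunks below delete.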
@@ -558,10 +558,6 @@ static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
 }
 }
 
-extern void fpstate_cache_init(void);
-
-extern int fpstate_alloc(struct fpu *fpu);
-extern void fpstate_free(struct fpu *fpu);
 extern int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
 
 static inline unsigned long
@@ -225,34 +225,6 @@ void fpstate_init(struct fpu *fpu)
 }
 EXPORT_SYMBOL_GPL(fpstate_init);
 
-/*
- * FPU state allocation:
- */
-static struct kmem_cache *task_xstate_cachep;
-
-void fpstate_cache_init(void)
-{
-        task_xstate_cachep =
-                kmem_cache_create("task_xstate", xstate_size,
-                                  __alignof__(union thread_xstate),
-                                  SLAB_PANIC | SLAB_NOTRACK, NULL);
-        setup_xstate_comp();
-}
-
-int fpstate_alloc(struct fpu *fpu)
-{
-        /* The CPU requires the FPU state to be aligned to 16 byte boundaries: */
-        WARN_ON((unsigned long)&fpu->state & 15);
-
-        return 0;
-}
-EXPORT_SYMBOL_GPL(fpstate_alloc);
-
-void fpstate_free(struct fpu *fpu)
-{
-}
-EXPORT_SYMBOL_GPL(fpstate_free);
-
 /*
  * Copy the current task's FPU state to a new task's FPU context.
  *
@@ -280,13 +252,9 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
         dst_fpu->fpregs_active = 0;
         dst_fpu->last_cpu = -1;
 
-        if (src_fpu->fpstate_active) {
-                int err = fpstate_alloc(dst_fpu);
-
-                if (err)
-                        return err;
+        if (src_fpu->fpstate_active)
                 fpu_copy(dst_fpu, src_fpu);
-        }
 
         return 0;
 }
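With the allocation and its error path gone, the copy step can no longer fail. Below is a sketch of the resulting function, reconstructed from the hunk above; the struct fpu fields and the fpu_copy() stub are pared-down stand-ins so the sketch is self-contained, not the kernel's real definitions.

/* Hypothetical, pared-down types and helpers so the sketch stands alone;
 * only fpu__copy()'s shape mirrors the hunk above. */
struct fpu {
        unsigned int fpstate_active;
        unsigned int fpregs_active;
        int last_cpu;
};

static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
        /* stand-in for the real register-image copy */
        (void)dst_fpu;
        (void)src_fpu;
}

/* After this patch the copy path is an unconditional success: */
int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
        dst_fpu->fpregs_active = 0;
        dst_fpu->last_cpu = -1;

        if (src_fpu->fpstate_active)
                fpu_copy(dst_fpu, src_fpu);

        return 0;
}

The int return value is one of the now-superfluous secondary error codes the commit message mentions; it is left for a follow-up patch.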
@@ -305,13 +273,6 @@ int fpstate_alloc_init(struct fpu *fpu)
         if (WARN_ON_ONCE(fpu->fpstate_active))
                 return -EINVAL;
 
-        /*
-         * Memory allocation at the first usage of the FPU and other state.
-         */
-        ret = fpstate_alloc(fpu);
-        if (ret)
-                return ret;
-
         fpstate_init(fpu);
 
         /* Safe to do for the current task: */
@@ -356,13 +317,6 @@ static int fpu__unlazy_stopped(struct fpu *child_fpu)
                 return 0;
         }
 
-        /*
-         * Memory allocation at the first usage of the FPU and other state.
-         */
-        ret = fpstate_alloc(child_fpu);
-        if (ret)
-                return ret;
-
         fpstate_init(child_fpu);
 
         /* Safe to do for stopped child tasks: */
@@ -423,7 +377,6 @@ void fpu__clear(struct task_struct *tsk)
         if (!use_eager_fpu()) {
                 /* FPU state will be reallocated lazily at the first use. */
                 drop_fpu(fpu);
-                fpstate_free(fpu);
         } else {
                 if (!fpu->fpstate_active) {
                         /* kthread execs. TODO: cleanup this horror. */
@@ -265,6 +265,7 @@ void fpu__init_system(struct cpuinfo_x86 *c)
         fpu__init_system_generic();
         fpu__init_system_xstate_size_legacy();
         fpu__init_system_xstate();
+        setup_xstate_comp();
 
         fpu__init_system_ctx_switch();
 }
@@ -86,16 +86,6 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
         return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
 }
 
-void arch_release_task_struct(struct task_struct *tsk)
-{
-        fpstate_free(&tsk->thread.fpu);
-}
-
-void arch_task_cache_init(void)
-{
-        fpstate_cache_init();
-}
-
 /*
  * Free current thread data structures etc..
  */
@@ -7008,10 +7008,6 @@ int fx_init(struct kvm_vcpu *vcpu)
 {
         int err;
 
-        err = fpstate_alloc(&vcpu->arch.guest_fpu);
-        if (err)
-                return err;
-
         fpstate_init(&vcpu->arch.guest_fpu);
         if (cpu_has_xsaves)
                 vcpu->arch.guest_fpu.state.xsave.header.xcomp_bv =
@@ -7028,11 +7024,6 @@ int fx_init(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(fx_init);
 
-static void fx_free(struct kvm_vcpu *vcpu)
-{
-        fpstate_free(&vcpu->arch.guest_fpu);
-}
-
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 {
         if (vcpu->guest_fpu_loaded)
@@ -7070,7 +7061,6 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
         kvmclock_reset(vcpu);
 
         free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
-        fx_free(vcpu);
         kvm_x86_ops->vcpu_free(vcpu);
 }
 
@@ -7126,7 +7116,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
         kvm_mmu_unload(vcpu);
         vcpu_put(vcpu);
 
-        fx_free(vcpu);
         kvm_x86_ops->vcpu_free(vcpu);
 }
 