mutex: preemption fixes
The problem is that dropping the spinlock right before schedule() is a voluntary preemption point: the task can be preempted there and scheduled, only to call schedule() again immediately afterwards. Fix this inefficiency by keeping preemption disabled until we actually schedule; do this by explicitly disabling preemption and providing a __schedule() variant that assumes preemption is already disabled.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 41719b0309
parent 93d81d1aca
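For context, this is the pattern the patch targets in __mutex_lock_common(): a hedged before/after sketch of the sleep step in the mutex slow path, not the literal kernel source.

	/* Before: spin_unlock_mutex() re-enables preemption, so the unlock
	 * itself is a voluntary preemption point.  A waiter can be preempted
	 * right here and scheduled, only to enter schedule() again on return.
	 */
	spin_unlock_mutex(&lock->wait_lock, flags);
	schedule();

	/* After: preemption is disabled once for the whole slow path, so
	 * dropping the spinlock no longer preempts, and __schedule() is
	 * entered with preemption already off, avoiding the double pass
	 * through the scheduler.
	 */
	spin_unlock_mutex(&lock->wait_lock, flags);
	__schedule();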
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -328,6 +328,7 @@ extern signed long schedule_timeout(signed long timeout);
 extern signed long schedule_timeout_interruptible(signed long timeout);
 extern signed long schedule_timeout_killable(signed long timeout);
 extern signed long schedule_timeout_uninterruptible(signed long timeout);
+asmlinkage void __schedule(void);
 asmlinkage void schedule(void);
 
 struct nsproxy;
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -131,6 +131,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	struct mutex_waiter waiter;
 	unsigned long flags;
 
+	preempt_disable();
 	spin_lock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_lock_common(lock, &waiter);
@@ -170,13 +171,14 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			spin_unlock_mutex(&lock->wait_lock, flags);
 
 			debug_mutex_free_waiter(&waiter);
+			preempt_enable();
 			return -EINTR;
 		}
 		__set_task_state(task, state);
 
 		/* didnt get the lock, go to sleep: */
 		spin_unlock_mutex(&lock->wait_lock, flags);
-		schedule();
+		__schedule();
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}
 
@@ -193,6 +195,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	spin_unlock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_free_waiter(&waiter);
+	preempt_enable();
 
 	return 0;
 }
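Taken together, the three kernel/mutex.c hunks give the slow path this shape; a control-flow outline assembled from the hunks above, with the untouched acquisition logic elided, so it is a sketch rather than compilable code.

	preempt_disable();                  /* new: taken once on entry      */
	spin_lock_mutex(&lock->wait_lock, flags);
	for (;;) {
		/* try to acquire; the loop exits once we own the lock.
		 * On a fatal signal: preempt_enable(); return -EINTR;
		 */
		spin_unlock_mutex(&lock->wait_lock, flags);
		__schedule();               /* preemption stays off          */
		spin_lock_mutex(&lock->wait_lock, flags);
	}
	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();                   /* new: matches the entry disable */
	return 0;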
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4538,15 +4538,13 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
 /*
  * schedule() is the main scheduler function.
  */
-asmlinkage void __sched schedule(void)
+asmlinkage void __sched __schedule(void)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
 	struct rq *rq;
 	int cpu;
 
-need_resched:
-	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
 	rcu_qsctr_inc(cpu);
@@ -4603,7 +4601,13 @@ asmlinkage void __sched schedule(void)
 
 	if (unlikely(reacquire_kernel_lock(current) < 0))
 		goto need_resched_nonpreemptible;
+}
 
+asmlinkage void __sched schedule(void)
+{
+need_resched:
+	preempt_disable();
+	__schedule();
 	preempt_enable_no_resched();
 	if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
 		goto need_resched;
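The net effect of the kernel/sched.c hunks is to split the old function in two: __schedule() keeps the scheduler body and now assumes preemption is already disabled, while schedule() becomes a thin wrapper that manages preemption itself. Reassembled from the hunks above (closing brace added for completeness):

	asmlinkage void __sched schedule(void)
	{
	need_resched:
		preempt_disable();              /* __schedule() requires this   */
		__schedule();
		preempt_enable_no_resched();    /* re-enable without an         */
						/* immediate preemption check   */
		if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
			goto need_resched;      /* need_resched was set again   */
	}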