Merge branch 'fixes-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
* 'fixes-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: note the nested NOT_RUNNING test in worker_clr_flags() isn't a noop
  workqueue: relax lockdep annotation on flush_work()
commit 5bf7a6503f
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -514,12 +514,15 @@ static inline void print_irqtrace_events(struct task_struct *curr)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
 #  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
+#  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_)
 # else
 #  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
+#  define lock_map_acquire_read(l)	lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_)
 # endif
 # define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
 #else
 # define lock_map_acquire(l)			do { } while (0)
+# define lock_map_acquire_read(l)		do { } while (0)
 # define lock_map_release(l)			do { } while (0)
 #endif
 
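The only difference between lock_map_acquire() and the new lock_map_acquire_read() is the value passed for lock_acquire()'s read argument: 2 marks a recursive read acquisition, so overlapping read-mode holds of the same pseudo-lock map are tolerated by lockdep, while the write-mode lock_map_acquire() remains fully checked. A minimal sketch of how such a pseudo-lock map is typically annotated, assuming CONFIG_PROVE_LOCKING is enabled; the map name "demo_wq" and both helper functions are made up for illustration and are not part of this patch:

#include <linux/lockdep.h>

/* a pseudo-lock: nothing is really locked, lockdep only tracks dependencies */
static struct lock_class_key demo_wq_key;
static struct lockdep_map demo_wq_map =
	STATIC_LOCKDEP_MAP_INIT("demo_wq", &demo_wq_key);

static void demo_run_item(void (*fn)(void))
{
	/* read (recursive) mode: overlapping runners do not trip lockdep */
	lock_map_acquire_read(&demo_wq_map);
	fn();
	lock_map_release(&demo_wq_map);
}

static void demo_flush(void)
{
	/* write mode: fully checked against everything held around it */
	lock_map_acquire(&demo_wq_map);
	lock_map_release(&demo_wq_map);
}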
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -768,7 +768,11 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 
 	worker->flags &= ~flags;
 
-	/* if transitioning out of NOT_RUNNING, increment nr_running */
+	/*
+	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
+	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
+	 * of multiple flags, not a single flag.
+	 */
 	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
 		if (!(worker->flags & WORKER_NOT_RUNNING))
 			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
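The new comment is worth spelling out: WORKER_NOT_RUNNING is a mask covering several worker flags, so clearing one flag from that mask does not by itself mean the worker left the NOT_RUNNING state; another flag in the mask may still be set, which is exactly what the nested test checks. A self-contained sketch of the same logic, using made-up DEMO_* bit values rather than the kernel's real worker flags:

#include <stdio.h>

/* illustrative bits only -- not the kernel's actual worker flag values */
#define DEMO_FLAG_A		(1 << 0)
#define DEMO_FLAG_B		(1 << 1)
#define DEMO_NOT_RUNNING	(DEMO_FLAG_A | DEMO_FLAG_B)	/* a mask, not one flag */

int main(void)
{
	unsigned int worker_flags = DEMO_FLAG_A | DEMO_FLAG_B;
	unsigned int flags = DEMO_FLAG_A;	/* caller clears only one flag */
	unsigned int oflags = worker_flags;

	worker_flags &= ~flags;

	/* outer test: a NOT_RUNNING flag was indeed cleared ... */
	if ((flags & DEMO_NOT_RUNNING) && (oflags & DEMO_NOT_RUNNING)) {
		/* ... nested test: another NOT_RUNNING flag may still be set */
		if (!(worker_flags & DEMO_NOT_RUNNING))
			printf("transitioned out of NOT_RUNNING\n");
		else
			printf("still NOT_RUNNING: the nested test is not a noop\n");
	}
	return 0;
}

Run as-is it prints the second message, since DEMO_FLAG_B is still set after DEMO_FLAG_A is cleared.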
@@ -1840,7 +1844,7 @@ __acquires(&gcwq->lock)
 	spin_unlock_irq(&gcwq->lock);
 
 	work_clear_pending(work);
-	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	trace_workqueue_execute_start(work);
 	f(work);
@@ -2384,8 +2388,18 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 	insert_wq_barrier(cwq, barr, work, worker);
 	spin_unlock_irq(&gcwq->lock);
 
-	lock_map_acquire(&cwq->wq->lockdep_map);
+	/*
+	 * If @max_active is 1 or rescuer is in use, flushing another work
+	 * item on the same workqueue may lead to deadlock.  Make sure the
+	 * flusher is not running on the same workqueue by verifying write
+	 * access.
+	 */
+	if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
+		lock_map_acquire(&cwq->wq->lockdep_map);
+	else
+		lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_release(&cwq->wq->lockdep_map);
+
 	return true;
 already_gone:
 	spin_unlock_irq(&gcwq->lock);
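Why the conditional matters: on a workqueue with max_active == 1 (or one relying on a rescuer), flushing another work item of the same workqueue from inside a work function can deadlock, because the flushed item may never get a chance to run; taking the workqueue's lockdep map in write mode keeps lockdep reporting that pattern. On ordinary workqueues the same pattern is safe, and the read-mode acquisition avoids the false positives the old unconditional write-mode annotation produced. A hypothetical module sketch of the problematic case, assuming the 2.6.38-era workqueue API; all demo_* names are invented and cleanup is omitted:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;	/* created with max_active == 1 */
static struct work_struct demo_a, demo_b;

static void demo_b_fn(struct work_struct *work) { }

static void demo_a_fn(struct work_struct *work)
{
	/*
	 * demo_b cannot start while demo_a occupies the single slot, so
	 * this flush never completes -- the pattern that the write-mode
	 * lock_map_acquire() in start_flush_work() lets lockdep report.
	 */
	queue_work(demo_wq, &demo_b);
	flush_work(&demo_b);
}

static int __init demo_init(void)
{
	demo_wq = alloc_workqueue("demo_wq", 0, 1);	/* max_active = 1 */
	if (!demo_wq)
		return -ENOMEM;
	INIT_WORK(&demo_a, demo_a_fn);
	INIT_WORK(&demo_b, demo_b_fn);
	queue_work(demo_wq, &demo_a);
	return 0;
}
module_init(demo_init);

MODULE_LICENSE("GPL");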