completion: Use simple wait queues

completion uses a wait_queue_head_t to enqueue waiters.

wait_queue_head_t contains a spinlock_t to protect the list of waiters
which excludes it from being used in truly atomic context on a PREEMPT_RT
enabled kernel.

The spinlock in the wait queue head cannot be replaced by a raw_spinlock
because:

  - wait queues can have custom wakeup callbacks, which acquire other
    spinlock_t locks and have potentially long execution times

  - wake_up() walks an unbounded number of list entries during the wake up
    and may wake an unbounded number of waiters.

For simplicity and performance reasons complete() should be usable on
PREEMPT_RT enabled kernels.

completions do not use custom wakeup callbacks and are usually single
waiter, except for a few corner cases.

Replace the wait queue in the completion with a simple wait queue (swait),
which uses a raw_spinlock_t for protecting the waiter list and therefore is
safe to use inside truly atomic regions on PREEMPT_RT.

There is no semantic or functional change:

  - completions use the exclusive wait mode which is what swait provides

  - complete() wakes one exclusive waiter

  - complete_all() wakes all waiters while holding the lock which protects
    the wait queue against newly incoming waiters. The conversion to swait
    preserves this behaviour.

complete_all() might cause unbounded latencies with a large number of waiters
being woken at once, but most complete_all() usage sites are either in
testing or initialization code or have only a really small number of
concurrent waiters which for now does not cause a latency problem. Keep it
simple for now.

The fixup of the warning check in the USB gadget driver is just a
straightforward conversion of the lockless waiter check from one waitqueue type to
the other.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Davidlohr Bueso <dbueso@suse.de>
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lkml.kernel.org/r/20200321113242.317954042@linutronix.de
This commit is contained in:
Thomas Gleixner 2020-03-21 12:26:00 +01:00 committed by Peter Zijlstra
parent b3212fe2bc
commit a5c6234e10
3 changed files with 24 additions and 22 deletions

View File

@@ -1703,7 +1703,7 @@ static void ffs_data_put(struct ffs_data *ffs)
pr_info("%s(): freeing\n", __func__); pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs); ffs_data_clear(ffs);
BUG_ON(waitqueue_active(&ffs->ev.waitq) || BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
waitqueue_active(&ffs->ep0req_completion.wait) || swait_active(&ffs->ep0req_completion.wait) ||
waitqueue_active(&ffs->wait)); waitqueue_active(&ffs->wait));
destroy_workqueue(ffs->io_completion_wq); destroy_workqueue(ffs->io_completion_wq);
kfree(ffs->dev_name); kfree(ffs->dev_name);

View File

@@ -9,7 +9,7 @@
* See kernel/sched/completion.c for details. * See kernel/sched/completion.c for details.
*/ */
#include <linux/wait.h> #include <linux/swait.h>
/* /*
* struct completion - structure used to maintain state for a "completion" * struct completion - structure used to maintain state for a "completion"
@@ -25,7 +25,7 @@
*/ */
struct completion { struct completion {
unsigned int done; unsigned int done;
wait_queue_head_t wait; struct swait_queue_head wait;
}; };
#define init_completion_map(x, m) __init_completion(x) #define init_completion_map(x, m) __init_completion(x)
@@ -34,7 +34,7 @@ static inline void complete_acquire(struct completion *x) {}
static inline void complete_release(struct completion *x) {} static inline void complete_release(struct completion *x) {}
#define COMPLETION_INITIALIZER(work) \ #define COMPLETION_INITIALIZER(work) \
{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
(*({ init_completion_map(&(work), &(map)); &(work); })) (*({ init_completion_map(&(work), &(map)); &(work); }))
@@ -85,7 +85,7 @@ static inline void complete_release(struct completion *x) {}
static inline void __init_completion(struct completion *x) static inline void __init_completion(struct completion *x)
{ {
x->done = 0; x->done = 0;
init_waitqueue_head(&x->wait); init_swait_queue_head(&x->wait);
} }
/** /**

View File

@@ -29,12 +29,12 @@ void complete(struct completion *x)
{ {
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&x->wait.lock, flags); raw_spin_lock_irqsave(&x->wait.lock, flags);
if (x->done != UINT_MAX) if (x->done != UINT_MAX)
x->done++; x->done++;
__wake_up_locked(&x->wait, TASK_NORMAL, 1); swake_up_locked(&x->wait);
spin_unlock_irqrestore(&x->wait.lock, flags); raw_spin_unlock_irqrestore(&x->wait.lock, flags);
} }
EXPORT_SYMBOL(complete); EXPORT_SYMBOL(complete);
@@ -58,10 +58,12 @@ void complete_all(struct completion *x)
{ {
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&x->wait.lock, flags); WARN_ON(irqs_disabled());
raw_spin_lock_irqsave(&x->wait.lock, flags);
x->done = UINT_MAX; x->done = UINT_MAX;
__wake_up_locked(&x->wait, TASK_NORMAL, 0); swake_up_all_locked(&x->wait);
spin_unlock_irqrestore(&x->wait.lock, flags); raw_spin_unlock_irqrestore(&x->wait.lock, flags);
} }
EXPORT_SYMBOL(complete_all); EXPORT_SYMBOL(complete_all);
@@ -70,20 +72,20 @@ do_wait_for_common(struct completion *x,
long (*action)(long), long timeout, int state) long (*action)(long), long timeout, int state)
{ {
if (!x->done) { if (!x->done) {
DECLARE_WAITQUEUE(wait, current); DECLARE_SWAITQUEUE(wait);
__add_wait_queue_entry_tail_exclusive(&x->wait, &wait);
do { do {
if (signal_pending_state(state, current)) { if (signal_pending_state(state, current)) {
timeout = -ERESTARTSYS; timeout = -ERESTARTSYS;
break; break;
} }
__prepare_to_swait(&x->wait, &wait);
__set_current_state(state); __set_current_state(state);
spin_unlock_irq(&x->wait.lock); raw_spin_unlock_irq(&x->wait.lock);
timeout = action(timeout); timeout = action(timeout);
spin_lock_irq(&x->wait.lock); raw_spin_lock_irq(&x->wait.lock);
} while (!x->done && timeout); } while (!x->done && timeout);
__remove_wait_queue(&x->wait, &wait); __finish_swait(&x->wait, &wait);
if (!x->done) if (!x->done)
return timeout; return timeout;
} }
@@ -100,9 +102,9 @@ __wait_for_common(struct completion *x,
complete_acquire(x); complete_acquire(x);
spin_lock_irq(&x->wait.lock); raw_spin_lock_irq(&x->wait.lock);
timeout = do_wait_for_common(x, action, timeout, state); timeout = do_wait_for_common(x, action, timeout, state);
spin_unlock_irq(&x->wait.lock); raw_spin_unlock_irq(&x->wait.lock);
complete_release(x); complete_release(x);
@@ -291,12 +293,12 @@ bool try_wait_for_completion(struct completion *x)
if (!READ_ONCE(x->done)) if (!READ_ONCE(x->done))
return false; return false;
spin_lock_irqsave(&x->wait.lock, flags); raw_spin_lock_irqsave(&x->wait.lock, flags);
if (!x->done) if (!x->done)
ret = false; ret = false;
else if (x->done != UINT_MAX) else if (x->done != UINT_MAX)
x->done--; x->done--;
spin_unlock_irqrestore(&x->wait.lock, flags); raw_spin_unlock_irqrestore(&x->wait.lock, flags);
return ret; return ret;
} }
EXPORT_SYMBOL(try_wait_for_completion); EXPORT_SYMBOL(try_wait_for_completion);
@@ -322,8 +324,8 @@ bool completion_done(struct completion *x)
* otherwise we can end up freeing the completion before complete() * otherwise we can end up freeing the completion before complete()
* is done referencing it. * is done referencing it.
*/ */
spin_lock_irqsave(&x->wait.lock, flags); raw_spin_lock_irqsave(&x->wait.lock, flags);
spin_unlock_irqrestore(&x->wait.lock, flags); raw_spin_unlock_irqrestore(&x->wait.lock, flags);
return true; return true;
} }
EXPORT_SYMBOL(completion_done); EXPORT_SYMBOL(completion_done);