mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-24 12:00:58 +07:00
Add "run_scheduled_work()" workqueue function
This allows workqueue users to run just their own pending work, rather than wait for the whole workqueue to finish running. This solves the deadlock with networking libphy that was due to other workqueue entries possibly needing a lock that was held by the routine that wanted to flush its own work. It's not wonderful: if you absolutely need to synchronize with the work function having been executed, any user strictly speaking should have its own completion tracking logic, since when we run things explicitly by hand, the generic workqueue layer can no longer help us synchronize. Also, this is strictly only usable for work that has been scheduled without any delayed timers. You can not mix the new interface with schedule_delayed_work(). But it's better than what we had currently. Acked-by: Maciej W. Rozycki <macro@linux-mips.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
parent
2fd8507d14
commit
68380b5813
@ -587,8 +587,7 @@ int phy_stop_interrupts(struct phy_device *phydev)
|
||||
* Finish any pending work; we might have been scheduled
|
||||
* to be called from keventd ourselves, though.
|
||||
*/
|
||||
if (!current_is_keventd())
|
||||
flush_scheduled_work();
|
||||
run_scheduled_work(&phydev->phy_queue);
|
||||
|
||||
free_irq(phydev->irq, phydev);
|
||||
|
||||
|
@ -162,6 +162,7 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
|
||||
extern void FASTCALL(flush_workqueue(struct workqueue_struct *wq));
|
||||
|
||||
extern int FASTCALL(schedule_work(struct work_struct *work));
|
||||
extern int FASTCALL(run_scheduled_work(struct work_struct *work));
|
||||
extern int FASTCALL(schedule_delayed_work(struct delayed_work *work, unsigned long delay));
|
||||
|
||||
extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, unsigned long delay);
|
||||
|
@ -108,6 +108,79 @@ static inline void *get_wq_data(struct work_struct *work)
|
||||
return (void *) (work->management & WORK_STRUCT_WQ_DATA_MASK);
|
||||
}
|
||||
|
||||
/*
 * Try to run one scheduled work item immediately on behalf of the caller.
 * @cwq:  the per-CPU workqueue the work was (apparently) queued on
 * @work: the work item to run
 *
 * Returns 1 if we removed the work from the queue and executed it here,
 * 0 if we lost the race (the work was already taken, run, or re-queued
 * elsewhere) and the caller should re-check and possibly retry.
 */
static int __run_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	/*
	 * We need to re-validate the work info after we've gotten
	 * the cpu_workqueue lock. We can run the work now iff:
	 *
	 * - the wq_data still matches the cpu_workqueue_struct
	 * - AND the work is still marked pending
	 * - AND the work is still on a list (which will be this
	 *   workqueue_struct list)
	 *
	 * All these conditions are important, because we
	 * need to protect against the work being run right
	 * now on another CPU (all but the last one might be
	 * true if it's currently running and has not been
	 * released yet, for example).
	 */
	if (get_wq_data(work) == cwq
			&& work_pending(work)
			&& !list_empty(&work->entry)) {
		/* Snapshot the callback before releasing the work struct. */
		work_func_t f = work->func;
		list_del_init(&work->entry);
		/*
		 * Drop the queue lock before invoking the callback: the
		 * work function may itself take locks or re-queue work,
		 * and must not run under cwq->lock.
		 */
		spin_unlock_irqrestore(&cwq->lock, flags);

		if (!test_bit(WORK_STRUCT_NOAUTOREL, &work->management))
			work_release(work);
		f(work);

		/*
		 * Reacquire the lock to account for the completed item.
		 * NOTE(review): remove_sequence/work_done appear to be the
		 * bookkeeping that flush waiters sleep on — confirm against
		 * flush_workqueue in the rest of this file.
		 */
		spin_lock_irqsave(&cwq->lock, flags);
		cwq->remove_sequence++;
		wake_up(&cwq->work_done);
		ret = 1;
	}
	spin_unlock_irqrestore(&cwq->lock, flags);
	return ret;
}
|
||||
|
||||
/**
|
||||
* run_scheduled_work - run scheduled work synchronously
|
||||
* @work: work to run
|
||||
*
|
||||
* This checks if the work was pending, and runs it
|
||||
* synchronously if so. It returns a boolean to indicate
|
||||
* whether it had any scheduled work to run or not.
|
||||
*
|
||||
* NOTE! This _only_ works for normal work_structs. You
|
||||
* CANNOT use this for delayed work, because the wq data
|
||||
* for delayed work will not point properly to the per-
|
||||
* CPU workqueue struct, but will change!
|
||||
*/
|
||||
int fastcall run_scheduled_work(struct work_struct *work)
|
||||
{
|
||||
for (;;) {
|
||||
struct cpu_workqueue_struct *cwq;
|
||||
|
||||
if (!work_pending(work))
|
||||
return 0;
|
||||
if (list_empty(&work->entry))
|
||||
return 0;
|
||||
/* NOTE! This depends intimately on __queue_work! */
|
||||
cwq = get_wq_data(work);
|
||||
if (!cwq)
|
||||
return 0;
|
||||
if (__run_work(cwq, work))
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(run_scheduled_work);
|
||||
|
||||
/* Preempt must be disabled. */
|
||||
static void __queue_work(struct cpu_workqueue_struct *cwq,
|
||||
struct work_struct *work)
|
||||
|
Loading…
Reference in New Issue
Block a user