Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
workqueue: implement cpu intensive workqueue
This patch implements a cpu intensive workqueue, which can be requested with the WQ_CPU_INTENSIVE flag on creation. Works queued to a cpu intensive workqueue don't participate in concurrency management. IOW, they don't contribute to gcwq->nr_running and thus don't delay execution of other works.

Note that although cpu intensive works won't delay other works, they can be delayed by other works. Combine with WQ_HIGHPRI to avoid being delayed by other works too.

As the name suggests, this is useful when using a workqueue for cpu intensive works. Workers executing cpu intensive works are not considered for workqueue concurrency management and are left for the scheduler to manage.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
commit fb0e7beb5c
parent 649027d73a
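As a usage illustration (not part of this commit), the sketch below shows how a driver module might create and use such a workqueue. It relies on alloc_workqueue(), the creation helper found in later mainline kernels; the workqueue name, crunch_fn() and the other identifiers are made up for the example.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *crunch_wq;

static void crunch_fn(struct work_struct *work)
{
	/* long-running, CPU-bound computation goes here */
}

static DECLARE_WORK(crunch_work, crunch_fn);

static int __init crunch_init(void)
{
	/*
	 * WQ_CPU_INTENSIVE: workers executing these items are left to the
	 * scheduler instead of workqueue concurrency management.  Adding
	 * WQ_HIGHPRI also keeps them from being delayed by other works.
	 */
	crunch_wq = alloc_workqueue("crunch", WQ_HIGHPRI | WQ_CPU_INTENSIVE, 0);
	if (!crunch_wq)
		return -ENOMEM;

	queue_work(crunch_wq, &crunch_work);
	return 0;
}

static void __exit crunch_exit(void)
{
	cancel_work_sync(&crunch_work);
	destroy_workqueue(crunch_wq);
}

module_init(crunch_init);
module_exit(crunch_exit);
MODULE_LICENSE("GPL");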
include/linux/workqueue.h
@@ -232,6 +232,7 @@ enum {
 	WQ_NON_REENTRANT	= 1 << 2,	/* guarantee non-reentrance */
 	WQ_RESCUER		= 1 << 3,	/* has a rescue worker */
 	WQ_HIGHPRI		= 1 << 4,	/* high priority */
+	WQ_CPU_INTENSIVE	= 1 << 5,	/* cpu intensive workqueue */
 
 	WQ_MAX_ACTIVE		= 512,		/* I like 512, better ideas? */
 	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
kernel/workqueue.c
@@ -52,8 +52,10 @@ enum {
 	WORKER_PREP		= 1 << 3,	/* preparing to run works */
 	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */
 	WORKER_REBIND		= 1 << 5,	/* mom is home, come back */
+	WORKER_CPU_INTENSIVE	= 1 << 6,	/* cpu intensive */
 
-	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND,
+	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
+				  WORKER_CPU_INTENSIVE,
 
 	/* gcwq->trustee_state */
 	TRUSTEE_START		= 0,		/* start */
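Folding WORKER_CPU_INTENSIVE into WORKER_NOT_RUNNING is what takes these workers out of concurrency management: the flag helpers adjust the per-cpu nr_running count only on transitions into and out of the NOT_RUNNING set. The sketch below is a simplified illustration of that accounting, modeled on the worker_set_flags()/worker_clr_flags() helpers of this series; it borrows the get_gcwq_nr_running() helper name and omits the wakeup handling of the real code.

/* Simplified sketch -- not the kernel's actual implementation. */
static void sketch_set_flags(struct worker *worker, unsigned int flags)
{
	/* entering the NOT_RUNNING set: stop counting this worker as running */
	if ((flags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING))
		atomic_dec(get_gcwq_nr_running(worker->gcwq->cpu));

	worker->flags |= flags;
}

static void sketch_clr_flags(struct worker *worker, unsigned int flags)
{
	unsigned int oflags = worker->flags;

	worker->flags &= ~flags;

	/* left the NOT_RUNNING set entirely: count this worker as running again */
	if ((oflags & WORKER_NOT_RUNNING) &&
	    !(worker->flags & WORKER_NOT_RUNNING))
		atomic_inc(get_gcwq_nr_running(worker->gcwq->cpu));
}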
@@ -1641,6 +1643,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
 	struct global_cwq *gcwq = cwq->gcwq;
 	struct hlist_head *bwh = busy_worker_head(gcwq, work);
+	bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
 	work_func_t f = work->func;
 	int work_color;
 	struct worker *collision;
@@ -1692,6 +1695,13 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
 			gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
 	}
 
+	/*
+	 * CPU intensive works don't participate in concurrency
+	 * management.  They're the scheduler's responsibility.
+	 */
+	if (unlikely(cpu_intensive))
+		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
+
 	spin_unlock_irq(&gcwq->lock);
 
 	work_clear_pending(work);
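The comment in the hunk above ("they're the scheduler's responsibility") relies on nr_running no longer reflecting CPU intensive workers. Below is a simplified sketch of the effect, modeled on the need_more_worker() check of this series and omitting its high-priority special case: a worker stuck in a long CPU-bound work no longer masks pending works, so another worker can be woken or created to process them.

/* Simplified sketch -- not the kernel's actual implementation. */
static bool sketch_need_more_worker(struct global_cwq *gcwq)
{
	/*
	 * A CPU intensive worker carries WORKER_NOT_RUNNING and hence is
	 * not counted in nr_running, so pending works on the gcwq still
	 * look unserved and trigger waking/creating another worker.
	 */
	return !list_empty(&gcwq->worklist) &&
	       !atomic_read(get_gcwq_nr_running(gcwq->cpu));
}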
@@ -1713,6 +1723,10 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
 
 	spin_lock_irq(&gcwq->lock);
 
+	/* clear cpu intensive status */
+	if (unlikely(cpu_intensive))
+		worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
+
 	/* we're done with it, release */
 	hlist_del_init(&worker->hentry);
 	worker->current_work = NULL;