/*
 * kernel/workqueue_internal.h
 *
 * Workqueue internal header file. Only to be included by workqueue and
 * core kernel subsystems.
 */
#ifndef _KERNEL_WORKQUEUE_INTERNAL_H
#define _KERNEL_WORKQUEUE_INTERNAL_H

#include <linux/workqueue.h>
#include <linux/kthread.h>

struct worker_pool;

/*
 * The poor guys doing the actual heavy lifting. All on-duty workers are
 * either serving the manager role, on idle list or on busy hash. For
 * details on the locking annotation (L, I, X...), refer to workqueue.c.
 *
 * Only to be used in workqueue and async.
 */
struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;		/* L: while idle */
		struct hlist_node	hentry;		/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	work_func_t		current_func;	/* L: current_work's fn */
	struct pool_workqueue	*current_pwq;	/* L: current_work's pwq */
	bool			desc_valid;	/* ->desc is valid */
	struct list_head	scheduled;	/* L: scheduled works */

	/* 64 bytes boundary on 64bit, 32 on 32bit */

	struct task_struct	*task;		/* I: worker task */
	struct worker_pool	*pool;		/* I: the associated pool */
						/* L: for rescuers */
	struct list_head	node;		/* A: anchored at pool->workers */
						/* A: runs through worker->node */

	unsigned long		last_active;	/* L: last active timestamp */
	unsigned int		flags;		/* X: flags */
	int			id;		/* I: worker id */

	/*
	 * Opaque string set with work_set_desc(). Printed out with task
	 * dump for debugging - WARN, BUG, panic or sysrq.
	 */
	char			desc[WORKER_DESC_LEN];

	/* used only by rescuers to point to the target workqueue */
	struct workqueue_struct	*rescue_wq;	/* I: the workqueue to rescue */
};
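/*
 * A minimal sketch of how the annotations above are honoured by
 * callers, assuming only that "L:" fields want pool->lock held and
 * "I:" fields are fixed once the worker is created; the authoritative
 * description of L, I, X and A is at the top of workqueue.c.
 *
 *	spin_lock_irq(&worker->pool->lock);
 *	work = worker->current_work;		(L: needs pool->lock)
 *	spin_unlock_irq(&worker->pool->lock);
 *
 *	task = worker->task;			(I: stable after creation)
 */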

/**
 * current_wq_worker - return struct worker if %current is a workqueue worker
 */
static inline struct worker *current_wq_worker(void)
{
	if (current->flags & PF_WQ_WORKER)
		return kthread_data(current);
	return NULL;
}
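/*
 * Usage sketch, assuming only what this header declares:
 * current_wq_worker() returns NULL for ordinary tasks, so callers can
 * branch on it to special-case code running on a workqueue worker.
 * For example, a rescuer can be recognised through its rescue_wq:
 *
 *	struct worker *worker = current_wq_worker();
 *
 *	if (worker && worker->rescue_wq)
 *		... %current is the rescuer for worker->rescue_wq ...
 *
 * Dereferencing other fields may additionally need pool->lock, as
 * annotated in struct worker above.
 */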

/*
 * Scheduler hooks for concurrency managed workqueue. Only to be used from
 * sched/core.c and workqueue.c.
 */
void wq_worker_waking_up(struct task_struct *task, int cpu);
struct task_struct *wq_worker_sleeping(struct task_struct *task);
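/*
 * Roughly, and assuming the hooks are wired up as the comment above
 * describes (the actual call sites live in sched/core.c):
 *
 *	wq_worker_waking_up() is called when a worker task wakes up, so
 *	its pool can account for one more runnable worker.
 *
 *	wq_worker_sleeping() is called when a worker is about to sleep;
 *	it may return the task of another idle worker of the same pool
 *	that the scheduler should wake up to keep work items flowing.
 */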
#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */