io-wq: remove now redundant struct io_wq_nulls_list

Since we don't iterate these lists anymore after commit:

e61df66c69 ("io-wq: ensure free/busy list browsing see all items")

we don't need to retain the nulls value we use for them. That means it's
pretty pointless to wrap the hlist_nulls_head in a structure, so get rid
of it.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 021d1cdda3 (parent a320e9fa1e)
Author: Jens Axboe
Date:   2019-11-14 08:00:41 -07:00

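Some context on why the stored nulls value is dead weight here: a per-list nulls value only earns its keep when a lockless reader walks the chain and must notice that the node it was standing on got moved to a different list mid-walk. Since e61df66c69, io-wq never traverses free_list or busy_list under RCU; it only pops the first free worker and uses all_list when it needs to visit every worker. A minimal sketch of the traversal-restart pattern that does need the value (illustrative only, not io-wq code; lookup_free_worker() and FREE_NULLS are made-up names):

/*
 * Illustrative only: the classic hlist_nulls lookup pattern from the RCU
 * list_nulls documentation, transplanted onto io-wq types.  The function
 * and FREE_NULLS are hypothetical; io-wq no longer does anything like this.
 */
#define FREE_NULLS	0	/* matches INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0) */

static struct io_worker *lookup_free_worker(struct io_wqe *wqe)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

begin:
	hlist_nulls_for_each_entry_rcu(worker, n, &wqe->free_list, nulls_node) {
		if (io_worker_get(worker))
			return worker;
	}
	/*
	 * The end-of-chain marker encodes which list the walk finished on.
	 * If it is not FREE_NULLS, the node we were standing on was re-added
	 * to another list (e.g. busy_list) and entries may have been
	 * skipped, so restart the walk.
	 */
	if (get_nulls_value(n) != FREE_NULLS)
		goto begin;
	return NULL;
}

Without any such walker left in io-wq, any nulls value works equally well, so there is nothing for the wrapper struct to remember.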
fs/io-wq.c
@@ -59,11 +59,6 @@ struct io_worker {
 	struct files_struct *restore_files;
 };
 
-struct io_wq_nulls_list {
-	struct hlist_nulls_head head;
-	unsigned long nulls;
-};
-
 #if BITS_PER_LONG == 64
 #define IO_WQ_HASH_ORDER	6
 #else
@@ -95,8 +90,8 @@ struct io_wqe {
 	int node;
 	struct io_wqe_acct acct[2];
 
-	struct io_wq_nulls_list free_list;
-	struct io_wq_nulls_list busy_list;
+	struct hlist_nulls_head free_list;
+	struct hlist_nulls_head busy_list;
 	struct list_head all_list;
 
 	struct io_wq *wq;
@@ -249,7 +244,7 @@ static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
 	struct hlist_nulls_node *n;
 	struct io_worker *worker;
 
-	n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list.head));
+	n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
 	if (is_a_nulls(n))
 		return false;
 
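Note that the one nulls-aware check that remains, is_a_nulls() in the hunk above, only asks whether the walk hit an end-of-list marker at all, never which list the marker names. Paraphrased from include/linux/list_nulls.h for illustration (not part of this diff), the helpers are roughly:

/*
 * Paraphrased from include/linux/list_nulls.h; see the header for the real
 * definitions.  A nulls "pointer" has its low bit set, with the per-list
 * value stored in the remaining upper bits.
 */
static inline int is_a_nulls(const struct hlist_nulls_node *ptr)
{
	return ((unsigned long)ptr & 1);
}

static inline unsigned long get_nulls_value(const struct hlist_nulls_node *ptr)
{
	return ((unsigned long)ptr) >> 1;
}

So activating the first free worker only needs an "empty or not" answer, which any nulls value provides.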
@@ -325,8 +320,7 @@ static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
 	if (worker->flags & IO_WORKER_F_FREE) {
 		worker->flags &= ~IO_WORKER_F_FREE;
 		hlist_nulls_del_init_rcu(&worker->nulls_node);
-		hlist_nulls_add_head_rcu(&worker->nulls_node,
-						&wqe->busy_list.head);
+		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->busy_list);
 	}
 
 	/*
@@ -365,8 +359,7 @@ static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
 	if (!(worker->flags & IO_WORKER_F_FREE)) {
 		worker->flags |= IO_WORKER_F_FREE;
 		hlist_nulls_del_init_rcu(&worker->nulls_node);
-		hlist_nulls_add_head_rcu(&worker->nulls_node,
-						&wqe->free_list.head);
+		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
 	}
 
 	return __io_worker_unuse(wqe, worker);
@@ -592,7 +585,7 @@ static void create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
 	}
 
 	spin_lock_irq(&wqe->lock);
-	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list.head);
+	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
 	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
 	worker->flags |= IO_WORKER_F_FREE;
 	if (index == IO_WQ_ACCT_BOUND)
@@ -617,7 +610,7 @@ static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
 	if (index == IO_WQ_ACCT_BOUND && !acct->nr_workers)
 		return true;
 	/* if we have available workers or no work, no need */
-	if (!hlist_nulls_empty(&wqe->free_list.head) || !io_wqe_run_queue(wqe))
+	if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
 		return false;
 	return acct->nr_workers < acct->max_workers;
 }
@@ -665,7 +658,7 @@ static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
 		return true;
 
 	rcu_read_lock();
-	free_worker = !hlist_nulls_empty(&wqe->free_list.head);
+	free_worker = !hlist_nulls_empty(&wqe->free_list);
 	rcu_read_unlock();
 	if (free_worker)
 		return true;
@@ -1009,10 +1002,8 @@ struct io_wq *io_wq_create(unsigned bounded, struct mm_struct *mm,
 		wqe->wq = wq;
 		spin_lock_init(&wqe->lock);
 		INIT_LIST_HEAD(&wqe->work_list);
-		INIT_HLIST_NULLS_HEAD(&wqe->free_list.head, 0);
-		wqe->free_list.nulls = 0;
-		INIT_HLIST_NULLS_HEAD(&wqe->busy_list.head, 1);
-		wqe->busy_list.nulls = 1;
+		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
+		INIT_HLIST_NULLS_HEAD(&wqe->busy_list, 1);
 		INIT_LIST_HEAD(&wqe->all_list);
 
 		i++;
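The io_wq_create() hunk above is where the redundancy was plainest: INIT_HLIST_NULLS_HEAD() already folds the nulls value into the head's first pointer, so the separate free_list.nulls = 0 and busy_list.nulls = 1 assignments stored the same numbers a second time. Roughly, paraphrased from include/linux/list_nulls.h (not part of this diff):

/*
 * Paraphrased from include/linux/list_nulls.h for illustration.  An empty
 * nulls list head is the nulls value itself, shifted up by one and tagged
 * with the low bit -- the same encoding is_a_nulls()/get_nulls_value()
 * read back at the end of a chain.
 */
#define INIT_HLIST_NULLS_HEAD(ptr, nulls) \
	((ptr)->first = (struct hlist_nulls_node *)(1UL | (((long)(nulls)) << 1)))

Since the value lives in the list head itself, keeping a second copy next to it never added information; once nothing iterates these lists, even the encoded value goes unread, and the plain hlist_nulls_head is all io-wq needs.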