Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-11-24 03:50:53 +07:00
blk-mq-sched: add flush insertion into blk_mq_sched_insert_request()
Instead of letting the caller check this and handle the details of inserting a flush request, put the logic in the scheduler insertion function. This fixes direct flush insertion outside of the usual make_request_fn calls, like from dm via blk_insert_cloned_request().

Signed-off-by: Jens Axboe <axboe@fb.com>
This commit is contained in:
parent f73f44eb00
commit bd6737f1ae
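For orientation before the hunks: the scheduler insert helper gains a fifth argument, can_block, and now detects flush/FUA requests itself. Below is a minimal caller-side sketch; the prototype is taken from the diff, while the two example calls and the summary of which callers pass true or false are paraphrased from the hunks that follow, not verbatim excerpts.

```c
/* New prototype (previously: rq, at_head, run_queue, async): */
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
                                 bool run_queue, bool async, bool can_block);

/*
 * can_block says whether the helper may sleep while waiting for a driver
 * tag when a flush/FUA request needs one.  In the hunks below, the requeue
 * worker and the deep-insert branch of the make_request handlers pass true;
 * blk_insert_cloned_request(), blk_execute_rq_nowait(), blk_insert_flush()
 * and blk_mq_try_issue_directly() pass false.
 */
blk_mq_sched_insert_request(rq, false, true, false, false); /* e.g. cloned insert  */
blk_mq_sched_insert_request(rq, true, false, false, true);  /* e.g. requeue worker */
```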
@@ -2129,7 +2129,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 	if (q->mq_ops) {
 		if (blk_queue_io_stat(q))
 			blk_account_io_start(rq, true);
-		blk_mq_sched_insert_request(rq, false, true, false);
+		blk_mq_sched_insert_request(rq, false, true, false, false);
 		return 0;
 	}
 
@@ -66,7 +66,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	 * be reused after dying flag is set
 	 */
 	if (q->mq_ops) {
-		blk_mq_sched_insert_request(rq, at_head, true, false);
+		blk_mq_sched_insert_request(rq, at_head, true, false, false);
 		return;
 	}
 
@@ -456,7 +456,7 @@ void blk_insert_flush(struct request *rq)
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
 		if (q->mq_ops)
-			blk_mq_sched_insert_request(rq, false, true, false);
+			blk_mq_sched_insert_request(rq, false, true, false, false);
 		else
 			list_add_tail(&rq->queuelist, &q->queue_head);
 		return;
@@ -335,6 +335,64 @@ void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx)
 	}
 }
 
+/*
+ * Add flush/fua to the queue. If we fail getting a driver tag, then
+ * punt to the requeue list. Requeue will re-invoke us from a context
+ * that's safe to block from.
+ */
+static void blk_mq_sched_insert_flush(struct blk_mq_hw_ctx *hctx,
+				      struct request *rq, bool can_block)
+{
+	if (blk_mq_get_driver_tag(rq, &hctx, can_block)) {
+		blk_insert_flush(rq);
+		blk_mq_run_hw_queue(hctx, true);
+	} else
+		blk_mq_add_to_requeue_list(rq, true, true);
+}
+
+void blk_mq_sched_insert_request(struct request *rq, bool at_head,
+				 bool run_queue, bool async, bool can_block)
+{
+	struct request_queue *q = rq->q;
+	struct elevator_queue *e = q->elevator;
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+
+	if (rq->tag == -1 && (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))) {
+		blk_mq_sched_insert_flush(hctx, rq, can_block);
+		return;
+	}
+
+	if (e && e->type->ops.mq.insert_requests) {
+		LIST_HEAD(list);
+
+		list_add(&rq->queuelist, &list);
+		e->type->ops.mq.insert_requests(hctx, &list, at_head);
+	} else {
+		spin_lock(&ctx->lock);
+		__blk_mq_insert_request(hctx, rq, at_head);
+		spin_unlock(&ctx->lock);
+	}
+
+	if (run_queue)
+		blk_mq_run_hw_queue(hctx, async);
+}
+
+void blk_mq_sched_insert_requests(struct request_queue *q,
+				  struct blk_mq_ctx *ctx,
+				  struct list_head *list, bool run_queue_async)
+{
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+	struct elevator_queue *e = hctx->queue->elevator;
+
+	if (e && e->type->ops.mq.insert_requests)
+		e->type->ops.mq.insert_requests(hctx, list, false);
+	else
+		blk_mq_insert_requests(hctx, ctx, list);
+
+	blk_mq_run_hw_queue(hctx, run_queue_async);
+}
+
 static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
 				   struct blk_mq_hw_ctx *hctx,
 				   unsigned int hctx_idx)
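A note on the code just added above: the key lines of the new insert path, annotated. The comments are editorial; the statements themselves are copied from the hunk, and the requeue round-trip they describe is spelled out by the requeue-worker hunk further down, which re-inserts with can_block set to true.

```c
/*
 * Every insert now funnels through this check, which is what fixes the dm
 * case from the commit message: blk_insert_cloned_request() previously
 * bypassed the flush special-casing done in the make_request paths.
 */
if (rq->tag == -1 && (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))) {
	/*
	 * blk_mq_sched_insert_flush() tries to grab a driver tag.  If that
	 * fails (can_block == false and no tag is free), the request is
	 * punted to the requeue list; the requeue worker later re-inserts
	 * it with can_block == true, from a context that may sleep.
	 */
	blk_mq_sched_insert_flush(hctx, rq, can_block);
	return;
}
```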
@@ -21,6 +21,12 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
 void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx);
 
+void blk_mq_sched_insert_request(struct request *rq, bool at_head,
+				 bool run_queue, bool async, bool can_block);
+void blk_mq_sched_insert_requests(struct request_queue *q,
+				  struct blk_mq_ctx *ctx,
+				  struct list_head *list, bool run_queue_async);
+
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
 void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
 				   struct list_head *rq_list,
@@ -62,45 +68,6 @@ static inline void blk_mq_sched_put_rq_priv(struct request_queue *q,
 		e->type->ops.mq.put_rq_priv(q, rq);
 }
 
-static inline void
-blk_mq_sched_insert_request(struct request *rq, bool at_head, bool run_queue,
-			    bool async)
-{
-	struct request_queue *q = rq->q;
-	struct elevator_queue *e = q->elevator;
-	struct blk_mq_ctx *ctx = rq->mq_ctx;
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
-
-	if (e && e->type->ops.mq.insert_requests) {
-		LIST_HEAD(list);
-
-		list_add(&rq->queuelist, &list);
-		e->type->ops.mq.insert_requests(hctx, &list, at_head);
-	} else {
-		spin_lock(&ctx->lock);
-		__blk_mq_insert_request(hctx, rq, at_head);
-		spin_unlock(&ctx->lock);
-	}
-
-	if (run_queue)
-		blk_mq_run_hw_queue(hctx, async);
-}
-
-static inline void
-blk_mq_sched_insert_requests(struct request_queue *q, struct blk_mq_ctx *ctx,
-			     struct list_head *list, bool run_queue_async)
-{
-	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
-	struct elevator_queue *e = hctx->queue->elevator;
-
-	if (e && e->type->ops.mq.insert_requests)
-		e->type->ops.mq.insert_requests(hctx, list, false);
-	else
-		blk_mq_insert_requests(hctx, ctx, list);
-
-	blk_mq_run_hw_queue(hctx, run_queue_async);
-}
-
 static inline bool
 blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
 			 struct bio *bio)
@@ -106,6 +106,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 	struct sbq_wait_state *ws;
 	DEFINE_WAIT(wait);
 	unsigned int tag_offset;
+	bool drop_ctx;
 	int tag;
 
 	if (data->flags & BLK_MQ_REQ_RESERVED) {
@@ -128,6 +129,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 		return BLK_MQ_TAG_FAIL;
 
 	ws = bt_wait_ptr(bt, data->hctx);
+	drop_ctx = data->ctx == NULL;
 	do {
 		prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
 
@@ -150,7 +152,8 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 		if (tag != -1)
 			break;
 
-		blk_mq_put_ctx(data->ctx);
+		if (data->ctx)
+			blk_mq_put_ctx(data->ctx);
 
 		io_schedule();
 
@@ -166,6 +169,9 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
 		ws = bt_wait_ptr(bt, data->hctx);
 	} while (1);
 
+	if (drop_ctx && data->ctx)
+		blk_mq_put_ctx(data->ctx);
+
 	finish_wait(&ws->wait, &wait);
 
 found_tag:
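The three blk_mq_get_tag() hunks above share one theme: the function can now be entered without a software queue context attached (the driver-tag allocation in a later hunk drops its .ctx initializer), so it guards its ctx accesses and, via drop_ctx, releases at the end only a context it did not start with. The condensed, annotated view below rearranges the changed lines for readability; the assumption that a ctx may be re-acquired inside the wait loop comes from code outside the hunks shown.

```c
	bool drop_ctx = (data->ctx == NULL);	/* no ctx attached on entry? */

	/* inside the wait loop, before sleeping: */
	if (data->ctx)				/* may be NULL on a driver-tag path */
		blk_mq_put_ctx(data->ctx);
	io_schedule();

	/* after the loop: put only a ctx this function picked up itself */
	if (drop_ctx && data->ctx)
		blk_mq_put_ctx(data->ctx);
```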
@@ -568,13 +568,13 @@ static void blk_mq_requeue_work(struct work_struct *work)
 
 		rq->rq_flags &= ~RQF_SOFTBARRIER;
 		list_del_init(&rq->queuelist);
-		blk_mq_sched_insert_request(rq, true, false, false);
+		blk_mq_sched_insert_request(rq, true, false, false, true);
 	}
 
 	while (!list_empty(&rq_list)) {
 		rq = list_entry(rq_list.next, struct request, queuelist);
 		list_del_init(&rq->queuelist);
-		blk_mq_sched_insert_request(rq, false, false, false);
+		blk_mq_sched_insert_request(rq, false, false, false, true);
 	}
 
 	blk_mq_run_hw_queues(q, false);
@@ -847,12 +847,11 @@ static inline unsigned int queued_to_index(unsigned int queued)
 	return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
 }
 
-static bool blk_mq_get_driver_tag(struct request *rq,
-				  struct blk_mq_hw_ctx **hctx, bool wait)
+bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
+			   bool wait)
 {
 	struct blk_mq_alloc_data data = {
 		.q = rq->q,
-		.ctx = rq->mq_ctx,
 		.hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
 		.flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
 	};
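Related detail: blk_mq_get_driver_tag() loses its static here so that the new flush path in the scheduler insert code can call it, and its allocation data no longer pins a software queue context, which is why the blk_mq_get_tag() hunks earlier have to tolerate a NULL ctx. The matching declaration, added by the final hunk of this diff:

```c
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
			   bool wait);
```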
@@ -1395,7 +1394,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
 	}
 
 insert:
-	blk_mq_sched_insert_request(rq, false, true, true);
+	blk_mq_sched_insert_request(rq, false, true, true, false);
 }
 
 /*
@@ -1446,10 +1445,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	cookie = request_to_qc_t(data.hctx, rq);
 
 	if (unlikely(is_flush_fua)) {
+		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
 		blk_mq_get_driver_tag(rq, NULL, true);
 		blk_insert_flush(rq);
-		goto run_queue;
+		blk_mq_run_hw_queue(data.hctx, true);
+		goto done;
 	}
 
 	plug = current->plug;
@@ -1502,7 +1503,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
 		blk_mq_sched_insert_request(rq, false, true,
-					!is_sync || is_flush_fua);
+					!is_sync || is_flush_fua, true);
 		goto done;
 	}
 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1512,7 +1513,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		 * latter allows for merging opportunities and more efficient
 		 * dispatching.
 		 */
-run_queue:
 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
 	}
 	blk_mq_put_ctx(data.ctx);
@@ -1568,10 +1568,12 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 	cookie = request_to_qc_t(data.hctx, rq);
 
 	if (unlikely(is_flush_fua)) {
+		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
 		blk_mq_get_driver_tag(rq, NULL, true);
 		blk_insert_flush(rq);
-		goto run_queue;
+		blk_mq_run_hw_queue(data.hctx, true);
+		goto done;
 	}
 
 	/*
@@ -1612,7 +1614,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 		blk_mq_put_ctx(data.ctx);
 		blk_mq_bio_to_request(rq, bio);
 		blk_mq_sched_insert_request(rq, false, true,
-					!is_sync || is_flush_fua);
+					!is_sync || is_flush_fua, true);
 		goto done;
 	}
 	if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1622,7 +1624,6 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 		 * latter allows for merging opportunities and more efficient
 		 * dispatching.
 		 */
-run_queue:
 		blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
 	}
 
@@ -34,6 +34,8 @@ void blk_mq_wake_waiters(struct request_queue *q);
 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);
 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
 bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
+bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
+			   bool wait);
 
 /*
  * Internal helpers for allocating/freeing the request map