block: remove non mq parts from the flush code
Reviewed-by: Hannes Reinecke <hare@suse.com>
Tested-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent 7ca0192646
commit 7e992f847a
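The change is mechanical: with the legacy (non-blk-mq) request path gone, every `if (q->mq_ops)` branch in the flush machinery in block/blk-flush.c, plus the blk_get_flush_queue() helper in block/blk.h, collapses to its blk-mq arm, and the legacy-only flush_data_end_io() completion hook is deleted outright. As a before/after illustration, taken from the first hunk below (the same pattern repeats through the rest of the diff):

/* Before: queue the flush request differently for the legacy and blk-mq paths. */
static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
	if (rq->q->mq_ops) {
		blk_mq_add_to_requeue_list(rq, add_front, true);
		return false;
	} else {
		if (add_front)
			list_add(&rq->queuelist, &rq->q->queue_head);
		else
			list_add_tail(&rq->queuelist, &rq->q->queue_head);
		return true;
	}
}

/*
 * After: blk-mq is the only path left, so put the request on the requeue
 * list unconditionally; returning false means the caller does not need to
 * run the queue itself (the requeue list is kicked here).
 */
static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
	blk_mq_add_to_requeue_list(rq, add_front, true);
	return false;
}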
@@ -134,16 +134,8 @@ static void blk_flush_restore_request(struct request *rq)
 
 static bool blk_flush_queue_rq(struct request *rq, bool add_front)
 {
-	if (rq->q->mq_ops) {
-		blk_mq_add_to_requeue_list(rq, add_front, true);
-		return false;
-	} else {
-		if (add_front)
-			list_add(&rq->queuelist, &rq->q->queue_head);
-		else
-			list_add_tail(&rq->queuelist, &rq->q->queue_head);
-		return true;
-	}
+	blk_mq_add_to_requeue_list(rq, add_front, true);
+	return false;
 }
 
 /**
@@ -204,10 +196,7 @@ static bool blk_flush_complete_seq(struct request *rq,
 		BUG_ON(!list_empty(&rq->queuelist));
 		list_del_init(&rq->flush.list);
 		blk_flush_restore_request(rq);
-		if (q->mq_ops)
-			blk_mq_end_request(rq, error);
-		else
-			__blk_end_request_all(rq, error);
+		blk_mq_end_request(rq, error);
 		break;
 
 	default:
@@ -226,20 +215,17 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 	struct request *rq, *n;
 	unsigned long flags = 0;
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
+	struct blk_mq_hw_ctx *hctx;
 
-	if (q->mq_ops) {
-		struct blk_mq_hw_ctx *hctx;
-
-		/* release the tag's ownership to the req cloned from */
-		spin_lock_irqsave(&fq->mq_flush_lock, flags);
-		hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
-		if (!q->elevator) {
-			blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
-			flush_rq->tag = -1;
-		} else {
-			blk_mq_put_driver_tag_hctx(hctx, flush_rq);
-			flush_rq->internal_tag = -1;
-		}
+	/* release the tag's ownership to the req cloned from */
+	spin_lock_irqsave(&fq->mq_flush_lock, flags);
+	hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
+	if (!q->elevator) {
+		blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
+		flush_rq->tag = -1;
+	} else {
+		blk_mq_put_driver_tag_hctx(hctx, flush_rq);
+		flush_rq->internal_tag = -1;
 	}
 
 	running = &fq->flush_queue[fq->flush_running_idx];
@@ -248,9 +234,6 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 	/* account completion of the flush request */
 	fq->flush_running_idx ^= 1;
 
-	if (!q->mq_ops)
-		elv_completed_request(q, flush_rq);
-
 	/* and push the waiting requests to the next stage */
 	list_for_each_entry_safe(rq, n, running, flush.list) {
 		unsigned int seq = blk_flush_cur_seq(rq);
@@ -259,24 +242,8 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
 		queued |= blk_flush_complete_seq(rq, fq, seq, error);
 	}
 
-	/*
-	 * Kick the queue to avoid stall for two cases:
-	 * 1. Moving a request silently to empty queue_head may stall the
-	 * queue.
-	 * 2. When flush request is running in non-queueable queue, the
-	 * queue is hold. Restart the queue after flush request is finished
-	 * to avoid stall.
-	 * This function is called from request completion path and calling
-	 * directly into request_fn may confuse the driver. Always use
-	 * kblockd.
-	 */
-	if (queued || fq->flush_queue_delayed) {
-		WARN_ON(q->mq_ops);
-		blk_run_queue_async(q);
-	}
 	fq->flush_queue_delayed = 0;
-	if (q->mq_ops)
-		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
+	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 }
 
 /**
@@ -301,6 +268,7 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	struct request *first_rq =
 		list_first_entry(pending, struct request, flush.list);
 	struct request *flush_rq = fq->flush_rq;
+	struct blk_mq_hw_ctx *hctx;
 
 	/* C1 described at the top of this file */
 	if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
@@ -334,19 +302,15 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	 * In case of IO scheduler, flush rq need to borrow scheduler tag
 	 * just for cheating put/get driver tag.
 	 */
-	if (q->mq_ops) {
-		struct blk_mq_hw_ctx *hctx;
-
-		flush_rq->mq_ctx = first_rq->mq_ctx;
-
-		if (!q->elevator) {
-			fq->orig_rq = first_rq;
-			flush_rq->tag = first_rq->tag;
-			hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
-			blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
-		} else {
-			flush_rq->internal_tag = first_rq->internal_tag;
-		}
+	flush_rq->mq_ctx = first_rq->mq_ctx;
+
+	if (!q->elevator) {
+		fq->orig_rq = first_rq;
+		flush_rq->tag = first_rq->tag;
+		hctx = blk_mq_map_queue(q, first_rq->mq_ctx->cpu);
+		blk_mq_tag_set_rq(hctx, first_rq->tag, flush_rq);
+	} else {
+		flush_rq->internal_tag = first_rq->internal_tag;
 	}
 
 	flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
@@ -358,49 +322,6 @@ static bool blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
 	return blk_flush_queue_rq(flush_rq, false);
 }
 
-static void flush_data_end_io(struct request *rq, blk_status_t error)
-{
-	struct request_queue *q = rq->q;
-	struct blk_flush_queue *fq = blk_get_flush_queue(q, NULL);
-
-	lockdep_assert_held(q->queue_lock);
-
-	/*
-	 * Updating q->in_flight[] here for making this tag usable
-	 * early. Because in blk_queue_start_tag(),
-	 * q->in_flight[BLK_RW_ASYNC] is used to limit async I/O and
-	 * reserve tags for sync I/O.
-	 *
-	 * More importantly this way can avoid the following I/O
-	 * deadlock:
-	 *
-	 * - suppose there are 40 fua requests comming to flush queue
-	 *   and queue depth is 31
-	 * - 30 rqs are scheduled then blk_queue_start_tag() can't alloc
-	 *   tag for async I/O any more
-	 * - all the 30 rqs are completed before FLUSH_PENDING_TIMEOUT
-	 *   and flush_data_end_io() is called
-	 * - the other rqs still can't go ahead if not updating
-	 *   q->in_flight[BLK_RW_ASYNC] here, meantime these rqs
-	 *   are held in flush data queue and make no progress of
-	 *   handling post flush rq
-	 * - only after the post flush rq is handled, all these rqs
-	 *   can be completed
-	 */
-	elv_completed_request(q, rq);
-
-	/* for avoiding double accounting */
-	rq->rq_flags &= ~RQF_STARTED;
-
-	/*
-	 * After populating an empty queue, kick it to avoid stall. Read
-	 * the comment in flush_end_io().
-	 */
-	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
-		blk_run_queue_async(q);
-}
-
 static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
 {
 	struct request_queue *q = rq->q;
@@ -443,9 +364,6 @@ void blk_insert_flush(struct request *rq)
 	unsigned int policy = blk_flush_policy(fflags, rq);
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
 
-	if (!q->mq_ops)
-		lockdep_assert_held(q->queue_lock);
-
 	/*
 	 * @policy now records what operations need to be done. Adjust
 	 * REQ_PREFLUSH and FUA for the driver.
@@ -468,10 +386,7 @@ void blk_insert_flush(struct request *rq)
 	 * complete the request.
 	 */
 	if (!policy) {
-		if (q->mq_ops)
-			blk_mq_end_request(rq, 0);
-		else
-			__blk_end_request(rq, 0, 0);
+		blk_mq_end_request(rq, 0);
 		return;
 	}
 
@@ -484,10 +399,7 @@ void blk_insert_flush(struct request *rq)
 	 */
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
-		if (q->mq_ops)
-			blk_mq_request_bypass_insert(rq, false);
-		else
-			list_add_tail(&rq->queuelist, &q->queue_head);
+		blk_mq_request_bypass_insert(rq, false);
 		return;
 	}
 
@@ -499,17 +411,12 @@ void blk_insert_flush(struct request *rq)
 	INIT_LIST_HEAD(&rq->flush.list);
 	rq->rq_flags |= RQF_FLUSH_SEQ;
 	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
-	if (q->mq_ops) {
-		rq->end_io = mq_flush_data_end_io;
 
-		spin_lock_irq(&fq->mq_flush_lock);
-		blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
-		spin_unlock_irq(&fq->mq_flush_lock);
-		return;
-	}
-	rq->end_io = flush_data_end_io;
+	rq->end_io = mq_flush_data_end_io;
 
+	spin_lock_irq(&fq->mq_flush_lock);
 	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
+	spin_unlock_irq(&fq->mq_flush_lock);
 }
 
 /**
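Taken together with the removal of flush_data_end_io() above, blk_insert_flush() now always arms mq_flush_data_end_io() and drives the flush state machine under fq->mq_flush_lock. Reassembled from the context and + lines of the preceding hunk, the tail of the function now reads:

	/* arm the blk-mq data-completion hook and start the flush sequence */
	rq->end_io = mq_flush_data_end_io;

	spin_lock_irq(&fq->mq_flush_lock);
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
	spin_unlock_irq(&fq->mq_flush_lock);
}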
@@ -575,8 +482,7 @@ struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
 	if (!fq)
 		goto fail;
 
-	if (q->mq_ops)
-		spin_lock_init(&fq->mq_flush_lock);
+	spin_lock_init(&fq->mq_flush_lock);
 
 	rq_sz = round_up(rq_sz + cmd_size, cache_line_size());
 	fq->flush_rq = kzalloc_node(rq_sz, flags, node);
@@ -114,9 +114,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 static inline struct blk_flush_queue *blk_get_flush_queue(
 	struct request_queue *q, struct blk_mq_ctx *ctx)
 {
-	if (q->mq_ops)
-		return blk_mq_map_queue(q, ctx->cpu)->fq;
-	return q->fq;
+	return blk_mq_map_queue(q, ctx->cpu)->fq;
 }
 
 static inline void __blk_get_queue(struct request_queue *q)
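For reference, after this change the block/blk.h helper that the flush code uses to locate its flush queue reduces to the per-hctx lookup alone (this is simply the surviving lines of the hunk above):

/* Every queue is blk-mq now, so the flush queue always lives on the hctx. */
static inline struct blk_flush_queue *blk_get_flush_queue(
	struct request_queue *q, struct blk_mq_ctx *ctx)
{
	return blk_mq_map_queue(q, ctx->cpu)->fq;
}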