block: return ELEVATOR_DISCARD_MERGE if possible

[ Upstream commit 866663b7b52d2da267b28e12eed89ee781b8fed1 ]

When merging a bio into a request, if both are discard IO and the queue
supports multi-range discard, we need to return ELEVATOR_DISCARD_MERGE,
because neither the block core nor the related drivers (nvme, virtio-blk)
handle mixed discard IO merges (a traditional IO merge combined with a
discard merge) well.

Fix the issue by returning ELEVATOR_DISCARD_MERGE in this situation, so
that both blk-mq and the drivers only need to handle multi-range discard.

Reported-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Tested-by: Oleksandr Natalenko <oleksandr@natalenko.name>
Fixes: 2705dfb20947 ("block: fix discard request merge")
Link: https://lore.kernel.org/r/20210729034226.1591070-1-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Author:    Ming Lei, 2021-07-29 11:42:26 +08:00 (committed by AuxXxilium)
Parent:    16d9591e09
Commit:    acf96f9d84
5 changed files with 24 additions and 16 deletions
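
For orientation before the per-file hunks, here is a minimal standalone sketch (not kernel source; demo_classify_merge is a hypothetical name) of the decision this patch adds to each elevator merge path: when the candidate request is a discard and the queue advertises more than one discard segment, report ELEVATOR_DISCARD_MERGE instead of a positional front/back merge.

#include <linux/blkdev.h>
#include <linux/elevator.h>

/*
 * Minimal sketch, not kernel source: the check this patch adds in
 * bfq_request_merge(), elv_merge() and dd_request_merge().
 */
static enum elv_merge demo_classify_merge(struct request *rq,
					  enum elv_merge positional_merge)
{
	/*
	 * Discard request on a queue with multi-range discard support:
	 * let blk-mq and the driver handle it as a multi-range discard.
	 */
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;

	/*
	 * Otherwise keep the positional result
	 * (ELEVATOR_FRONT_MERGE or ELEVATOR_BACK_MERGE).
	 */
	return positional_merge;
}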


@@ -2251,6 +2251,9 @@ static int bfq_request_merge(struct request_queue *q, struct request **req,
 	__rq = bfq_find_rq_fmerge(bfqd, bio, q);
 	if (__rq && elv_bio_merge_ok(__rq, bio)) {
 		*req = __rq;
+
+		if (blk_discard_mergable(__rq))
+			return ELEVATOR_DISCARD_MERGE;
 		return ELEVATOR_FRONT_MERGE;
 	}


@@ -737,22 +737,6 @@ static void blk_account_io_merge_request(struct request *req)
 	}
 }
 
-/*
- * Two cases of handling DISCARD merge:
- * If max_discard_segments > 1, the driver takes every bio
- * as a range and send them to controller together. The ranges
- * needn't to be contiguous.
- * Otherwise, the bios/requests will be handled as same as
- * others which should be contiguous.
- */
-static inline bool blk_discard_mergable(struct request *req)
-{
-	if (req_op(req) == REQ_OP_DISCARD &&
-	    queue_max_discard_segments(req->q) > 1)
-		return true;
-	return false;
-}
-
 static enum elv_merge blk_try_req_merge(struct request *req,
 					struct request *next)
 {


@@ -339,6 +339,9 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
 	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
 	if (__rq && elv_bio_merge_ok(__rq, bio)) {
 		*req = __rq;
+
+		if (blk_discard_mergable(__rq))
+			return ELEVATOR_DISCARD_MERGE;
 		return ELEVATOR_BACK_MERGE;
 	}


@@ -454,6 +454,8 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
 		if (elv_bio_merge_ok(__rq, bio)) {
 			*rq = __rq;
+			if (blk_discard_mergable(__rq))
+				return ELEVATOR_DISCARD_MERGE;
 			return ELEVATOR_FRONT_MERGE;
 		}
 	}


@@ -1565,6 +1565,22 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
 	return offset << SECTOR_SHIFT;
 }
 
+/*
+ * Two cases of handling DISCARD merge:
+ * If max_discard_segments > 1, the driver takes every bio
+ * as a range and send them to controller together. The ranges
+ * needn't to be contiguous.
+ * Otherwise, the bios/requests will be handled as same as
+ * others which should be contiguous.
+ */
+static inline bool blk_discard_mergable(struct request *req)
+{
+	if (req_op(req) == REQ_OP_DISCARD &&
+	    queue_max_discard_segments(req->q) > 1)
+		return true;
+	return false;
+}
+
 static inline int bdev_discard_alignment(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
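
For background (not part of this patch; demo_build_discard_ranges is a hypothetical name), a sketch of the driver-side behaviour the helper above describes: when max_discard_segments > 1, a merged discard request keeps one bio per range, and the driver turns each bio into one range descriptor for the controller, roughly the pattern nvme and virtio-blk follow.

#include <linux/blkdev.h>

/*
 * Hypothetical sketch, not from this patch: consuming a multi-range
 * discard request on a queue configured with
 * blk_queue_max_discard_segments(q, >1).
 */
static void demo_build_discard_ranges(struct request *req)
{
	struct bio *bio;

	/* One bio per discard range; the ranges need not be contiguous. */
	__rq_for_each_bio(bio, req) {
		sector_t start = bio->bi_iter.bi_sector;	/* range start (sectors) */
		unsigned int len = bio_sectors(bio);		/* range length (sectors) */

		/*
		 * A real driver would fill one hardware range
		 * descriptor from (start, len) here.
		 */
		(void)start;
		(void)len;
	}
}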