block: make blk_crypto_rq_bio_prep() able to fail

blk_crypto_rq_bio_prep() assumes its gfp_mask argument always includes
__GFP_DIRECT_RECLAIM, so that the mempool_alloc() will always succeed.

However, blk_crypto_rq_bio_prep() might be called with GFP_ATOMIC via
setup_clone() in drivers/md/dm-rq.c.

This case isn't currently reachable with a bio that actually has an
encryption context.  However, it's fragile to rely on this.  Just make
blk_crypto_rq_bio_prep() able to fail.
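
As a hedged illustration of the two calling conventions this creates (the
function names come from the patch below; the surrounding fragments are
sketches, not dm-rq's actual code):

	int err;

	/* Sleeping context: GFP_NOIO includes __GFP_DIRECT_RECLAIM, so the
	 * mempool allocation cannot fail and a one-time warning suffices. */
	err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
	WARN_ON_ONCE(err);

	/* Atomic context (e.g. a request-clone path): mempool_alloc() may
	 * return NULL, so the error must be checked and propagated. */
	if (blk_crypto_rq_bio_prep(rq, bio, GFP_ATOMIC) < 0)
		return -ENOMEM;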

Suggested-by: Satya Tangirala <satyat@google.com>
Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Reviewed-by: Satya Tangirala <satyat@google.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 93f221ae08 (parent 07560151db)
Author: Eric Biggers, 2020-09-15 20:53:14 -07:00; committed by Jens Axboe
4 changed files with 34 additions and 20 deletions

diff --git a/block/blk-core.c b/block/blk-core.c

@@ -1617,9 +1617,11 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 		if (rq->bio) {
 			rq->biotail->bi_next = bio;
 			rq->biotail = bio;
-		} else
+		} else {
 			rq->bio = rq->biotail = bio;
+		}
+		bio = NULL;
 	}
 
 	/* Copy attributes of the original request to the clone request. */
 	rq->__sector = blk_rq_pos(rq_src);
@@ -1631,8 +1633,8 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 	rq->nr_phys_segments = rq_src->nr_phys_segments;
 	rq->ioprio = rq_src->ioprio;
 
-	if (rq->bio)
-		blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask);
+	if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
+		goto free_and_out;
 
 	return 0;
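
Why the new "bio = NULL;" matters: the function's failure path frees any
bio that has not yet been linked into the clone, so clearing the local
pointer once the bio is attached keeps the new "goto free_and_out" from
double-freeing it.  A sketch of that label, reconstructed from same-era
blk-core.c (an assumption for context; it is not part of this diff):

	free_and_out:
		if (bio)
			bio_put(bio);
		blk_rq_unprep_clone(rq);

		return -ENOMEM;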

diff --git a/block/blk-crypto-internal.h b/block/blk-crypto-internal.h

@@ -142,13 +142,24 @@ static inline void blk_crypto_free_request(struct request *rq)
 		__blk_crypto_free_request(rq);
 }
 
-void __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
-			      gfp_t gfp_mask);
-static inline void blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
-					  gfp_t gfp_mask)
+int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
+			     gfp_t gfp_mask);
+/**
+ * blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
+ *			    is inserted
+ * @rq: The request to prepare
+ * @bio: The first bio being inserted into the request
+ * @gfp_mask: Memory allocation flags
+ *
+ * Return: 0 on success, -ENOMEM if out of memory.  -ENOMEM is only possible if
+ *	   @gfp_mask doesn't include %__GFP_DIRECT_RECLAIM.
+ */
+static inline int blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
+					 gfp_t gfp_mask)
 {
 	if (bio_has_crypt_ctx(bio))
-		__blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
+		return __blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
+	return 0;
 }
 
 /**
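
A hypothetical caller sketch for the wrapper above (the "requeue" label is
invented for illustration): because the inline short-circuits when the bio
carries no encryption context, only atomic-context callers that see
encrypted bios ever get an error back:

	int err = blk_crypto_rq_bio_prep(rq, bio, GFP_ATOMIC);

	if (err)		/* only possible value is -ENOMEM */
		goto requeue;	/* hypothetical error path */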

diff --git a/block/blk-crypto.c b/block/blk-crypto.c

@@ -283,20 +283,16 @@ bool __blk_crypto_bio_prep(struct bio **bio_ptr)
 	return false;
 }
 
-/**
- * __blk_crypto_rq_bio_prep - Prepare a request's crypt_ctx when its first bio
- *			      is inserted
- *
- * @rq: The request to prepare
- * @bio: The first bio being inserted into the request
- * @gfp_mask: gfp mask
- */
-void __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
-			      gfp_t gfp_mask)
+int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
+			     gfp_t gfp_mask)
 {
-	if (!rq->crypt_ctx)
+	if (!rq->crypt_ctx) {
 		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
+		if (!rq->crypt_ctx)
+			return -ENOMEM;
+	}
 	*rq->crypt_ctx = *bio->bi_crypt_context;
+	return 0;
 }
 
 /**
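
The mempool contract the rewritten function relies on (a sketch of
documented mempool behavior, not code from the patch): with
__GFP_DIRECT_RECLAIM set, mempool_alloc() sleeps until an element is
returned to the pool rather than failing; without it, NULL is a real
possibility:

	struct bio_crypt_ctx *ctx;

	ctx = mempool_alloc(bio_crypt_ctx_pool, GFP_ATOMIC);	/* may be NULL */
	if (!ctx)
		return -ENOMEM;	/* caller must cope, as above */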

diff --git a/block/blk-mq.c b/block/blk-mq.c

@@ -1940,13 +1940,18 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
 		unsigned int nr_segs)
 {
+	int err;
+
 	if (bio->bi_opf & REQ_RAHEAD)
 		rq->cmd_flags |= REQ_FAILFAST_MASK;
 
 	rq->__sector = bio->bi_iter.bi_sector;
 	rq->write_hint = bio->bi_write_hint;
 	blk_rq_bio_prep(rq, bio, nr_segs);
-	blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
+
+	/* This can't fail, since GFP_NOIO includes __GFP_DIRECT_RECLAIM. */
+	err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO);
+	WARN_ON_ONCE(err);
 
 	blk_account_io_start(rq);
 }
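
The "can't fail" comment holds because of how the gfp flags are composed;
from include/linux/gfp.h of the same era (quoted from memory, worth
verifying against the tree):

	#define GFP_ATOMIC	(__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
	#define GFP_NOIO	(__GFP_RECLAIM)
	#define __GFP_RECLAIM	((__force gfp_t)(___GFP_DIRECT_RECLAIM| \
						 ___GFP_KSWAPD_RECLAIM))

GFP_NOIO therefore always carries ___GFP_DIRECT_RECLAIM, while GFP_ATOMIC
never does, which is exactly the distinction the commit message turns on.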