block: Copy a user iovec if it includes gaps
For drivers that don't support gaps in the SG lists handed to them we must
bounce (copy the user buffers) and pass a bio that does not include gaps.
This doesn't matter for any current user, but will help to allow iser, which
can't handle gaps, to use the block virtual boundary instead of driver-local
bounce buffering when handling SG_IO commands.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
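To make the gap condition concrete, here is a minimal userspace sketch (not kernel code) of the arithmetic the patch introduces: two consecutive iovecs "gap" whenever the previous one does not end on the queue's virtual boundary or the current one does not start on it. The 4 KiB mask and the example addresses are assumptions for illustration only.

/*
 * Standalone sketch: the virtual-boundary gap test, assuming a
 * hypothetical 4 KiB boundary mask.  Addresses are made up.
 */
#include <stdio.h>
#include <stdbool.h>
#include <sys/uio.h>

#define VIRT_BOUNDARY_MASK	0xfffUL	/* assumed 4 KiB boundary */

static bool gap_between(const struct iovec *prv, const struct iovec *cur)
{
	unsigned long prev_end = (unsigned long)prv->iov_base + prv->iov_len;

	/* gap if prv doesn't end on the boundary or cur doesn't start on it */
	return ((unsigned long)cur->iov_base & VIRT_BOUNDARY_MASK) ||
	       (prev_end & VIRT_BOUNDARY_MASK);
}

int main(void)
{
	/* ends and starts exactly on page boundaries: no gap */
	struct iovec a = { .iov_base = (void *)0x100000, .iov_len = 0x1000 };
	struct iovec b = { .iov_base = (void *)0x200000, .iov_len = 0x1000 };
	/* starts 0x200 bytes into a page: gap */
	struct iovec c = { .iov_base = (void *)0x300200, .iov_len = 0x0e00 };

	printf("a->b gap: %d\n", gap_between(&a, &b));	/* 0 */
	printf("b->c gap: %d\n", gap_between(&b, &c));	/* 1 */
	return 0;
}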
parent 87a816df53
commit 46348456c1
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -9,6 +9,24 @@
 
 #include "blk.h"
 
+static bool iovec_gap_to_prv(struct request_queue *q,
+			     struct iovec *prv, struct iovec *cur)
+{
+	unsigned long prev_end;
+
+	if (!queue_virt_boundary(q))
+		return false;
+
+	if (prv->iov_base == NULL && prv->iov_len == 0)
+		/* prv is not set - don't check */
+		return false;
+
+	prev_end = (unsigned long)(prv->iov_base + prv->iov_len);
+
+	return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
+		prev_end & queue_virt_boundary(q));
+}
+
 int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		      struct bio *bio)
 {
@@ -67,7 +85,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	struct bio *bio;
 	int unaligned = 0;
 	struct iov_iter i;
-	struct iovec iov;
+	struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
 
 	if (!iter || !iter->count)
 		return -EINVAL;
@@ -81,8 +99,12 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		/*
 		 * Keep going so we check length of all segments
 		 */
-		if (uaddr & queue_dma_alignment(q))
+		if ((uaddr & queue_dma_alignment(q)) ||
+		    iovec_gap_to_prv(q, &prv, &iov))
 			unaligned = 1;
+
+		prv.iov_base = iov.iov_base;
+		prv.iov_len = iov.iov_len;
 	}
 
 	if (unaligned || (q->dma_pad_mask & iter->count) || map_data)
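For context, a standalone sketch of how the patched loop uses the check: walk the user iovecs, remember the previous segment, and mark the request unaligned (which forces the copy/bounce path) as soon as either the DMA-alignment test or the gap test fails. The mask values and the needs_bounce() helper name are illustrative assumptions, not kernel API.

/*
 * Standalone sketch mirroring the patched loop in blk_rq_map_user_iov():
 * masks are assumed values, not taken from a real queue.
 */
#include <stdbool.h>
#include <stddef.h>
#include <sys/uio.h>

#define DMA_ALIGNMENT_MASK	0x3UL	/* assumed 4-byte DMA alignment */
#define VIRT_BOUNDARY_MASK	0xfffUL	/* assumed 4 KiB virtual boundary */

static bool gap_to_prv(const struct iovec *prv, const struct iovec *cur)
{
	unsigned long prev_end;

	if (prv->iov_base == NULL && prv->iov_len == 0)
		return false;	/* no previous segment yet - don't check */

	prev_end = (unsigned long)prv->iov_base + prv->iov_len;
	return ((unsigned long)cur->iov_base & VIRT_BOUNDARY_MASK) ||
	       (prev_end & VIRT_BOUNDARY_MASK);
}

static bool needs_bounce(const struct iovec *iov, int nr_segs)
{
	struct iovec prv = { .iov_base = NULL, .iov_len = 0 };
	bool unaligned = false;
	int i;

	for (i = 0; i < nr_segs; i++) {
		if (((unsigned long)iov[i].iov_base & DMA_ALIGNMENT_MASK) ||
		    gap_to_prv(&prv, &iov[i]))
			unaligned = true;	/* keep going, check every segment */
		prv = iov[i];
	}
	return unaligned;
}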