RDMA/efa: Use API to get contiguous memory blocks aligned to device supported page size
Use the ib_umem_find_best_pgsz() and rdma_for_each_block() APIs when
registering an MR instead of open-coding the logic in the driver.

ib_umem_find_best_pgsz() finds the most suitable page size, replacing
the existing efa_cont_pages() implementation. rdma_for_each_block()
iterates the umem in aligned contiguous memory blocks.

Reviewed-by: Firas JahJah <firasj@amazon.com>
Reviewed-by: Yossi Leybovich <sleybo@amazon.com>
Reviewed-by: Shiraz Saleem <shiraz.saleem@intel.com>
Signed-off-by: Gal Pressman <galpress@amazon.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 4a9ceb7dba
commit 40ddb3f020
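For orientation, here is a condensed sketch of the pattern the diff below moves the driver to. The helper name (example_umem_to_page_list) and its stripped-down signature are hypothetical, and PBL handling and error unwinding are left out; it is a sketch of the two core APIs working together, not the driver's actual function.

#include <linux/errno.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/*
 * Sketch only: let the RDMA core pick the largest supported page size
 * that fits the umem layout, then walk the umem in blocks of that size.
 */
static int example_umem_to_page_list(struct ib_umem *umem, u64 virt_addr,
                                     unsigned long pg_sz_bitmap,
                                     u64 *page_list)
{
        struct ib_block_iter biter;
        unsigned int hp_idx = 0;
        unsigned long pg_sz;

        /* Largest page size in pg_sz_bitmap compatible with the MR layout */
        pg_sz = ib_umem_find_best_pgsz(umem, pg_sz_bitmap, virt_addr);
        if (!pg_sz)
                return -EOPNOTSUPP;

        /* Each iteration yields one pg_sz-aligned contiguous DMA block */
        rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap, pg_sz)
                page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);

        return 0;
}

In efa_reg_mr() itself the chosen size additionally feeds params.page_shift via __ffs(pg_sz) and params.page_num, as the last hunk below shows.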
@@ -1011,21 +1011,15 @@ static int umem_to_page_list(struct efa_dev *dev,
 			     u8 hp_shift)
 {
 	u32 pages_in_hp = BIT(hp_shift - PAGE_SHIFT);
-	struct sg_dma_page_iter sg_iter;
-	unsigned int page_idx = 0;
+	struct ib_block_iter biter;
 	unsigned int hp_idx = 0;
 
 	ibdev_dbg(&dev->ibdev, "hp_cnt[%u], pages_in_hp[%u]\n",
 		  hp_cnt, pages_in_hp);
 
-	for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
-		if (page_idx % pages_in_hp == 0) {
-			page_list[hp_idx] = sg_page_iter_dma_address(&sg_iter);
-			hp_idx++;
-		}
-
-		page_idx++;
-	}
+	rdma_for_each_block(umem->sg_head.sgl, &biter, umem->nmap,
+			    BIT(hp_shift))
+		page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);
 
 	return 0;
 }
@@ -1356,56 +1350,6 @@ static int efa_create_pbl(struct efa_dev *dev,
 	return 0;
 }
 
-static void efa_cont_pages(struct ib_umem *umem, u64 addr,
-			   unsigned long max_page_shift,
-			   int *count, u8 *shift, u32 *ncont)
-{
-	struct scatterlist *sg;
-	u64 base = ~0, p = 0;
-	unsigned long tmp;
-	unsigned long m;
-	u64 len, pfn;
-	int i = 0;
-	int entry;
-
-	addr = addr >> PAGE_SHIFT;
-	tmp = (unsigned long)addr;
-	m = find_first_bit(&tmp, BITS_PER_LONG);
-	if (max_page_shift)
-		m = min_t(unsigned long, max_page_shift - PAGE_SHIFT, m);
-
-	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
-		len = DIV_ROUND_UP(sg_dma_len(sg), PAGE_SIZE);
-		pfn = sg_dma_address(sg) >> PAGE_SHIFT;
-		if (base + p != pfn) {
-			/*
-			 * If either the offset or the new
-			 * base are unaligned update m
-			 */
-			tmp = (unsigned long)(pfn | p);
-			if (!IS_ALIGNED(tmp, 1 << m))
-				m = find_first_bit(&tmp, BITS_PER_LONG);
-
-			base = pfn;
-			p = 0;
-		}
-
-		p += len;
-		i += len;
-	}
-
-	if (i) {
-		m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m);
-		*ncont = DIV_ROUND_UP(i, (1 << m));
-	} else {
-		m = 0;
-		*ncont = 0;
-	}
-
-	*shift = PAGE_SHIFT + m;
-	*count = i;
-}
-
 struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
 			 u64 virt_addr, int access_flags,
 			 struct ib_udata *udata)
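A note on the helper deleted above: efa_cont_pages() derived a single page order by scanning the SG list itself. It seeded the order from the low bits of the page-shifted virtual address, lowered it whenever a segment's pfn or running page offset broke the current alignment, and clamped it to the device's max_page_shift before producing *shift and *ncont. ib_umem_find_best_pgsz() performs the equivalent walk in the RDMA core, but takes the device's supported page sizes as a bitmap (page_size_cap here) rather than a single maximum shift. Below is a small sketch of the alignment reduction being replaced; the function name, addresses, and values are made up, PAGE_SHIFT is taken as 12, and __ffs() stands in for find_first_bit() on a single nonzero word.

#include <linux/bitops.h>
#include <linux/kernel.h>

/*
 * Illustrative only: how one misaligned SG segment lowers the candidate
 * page order in the logic that ib_umem_find_best_pgsz() now replaces.
 */
static unsigned long example_cont_pages_shift(void)
{
        unsigned long virt = 0x7f1200000000UL; /* MR virtual address       */
        unsigned long m = __ffs(virt >> 12);   /* 21: up to 8 GiB blocks   */

        unsigned long pfn = 0x40010;           /* next segment's start pfn */
        unsigned long p = 0x10;                /* pages in the current run */
        unsigned long tmp = pfn | p;

        if (!IS_ALIGNED(tmp, 1UL << m))        /* not aligned to 1 << 21   */
                m = __ffs(tmp);                /* order drops to 4         */

        return 12 + m;                         /* shift 16, i.e. 64 KiB    */
}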
@@ -1413,11 +1357,10 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
 	struct efa_dev *dev = to_edev(ibpd->device);
 	struct efa_com_reg_mr_params params = {};
 	struct efa_com_reg_mr_result result = {};
-	unsigned long max_page_shift;
 	struct pbl_context pbl;
+	unsigned int pg_sz;
 	struct efa_mr *mr;
 	int inline_size;
-	int npages;
 	int err;
 
 	if (udata->inlen &&
@@ -1454,13 +1397,24 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
 	params.iova = virt_addr;
 	params.mr_length_in_bytes = length;
 	params.permissions = access_flags & 0x1;
-	max_page_shift = fls64(dev->dev_attr.page_size_cap);
 
-	efa_cont_pages(mr->umem, start, max_page_shift, &npages,
-		       &params.page_shift, &params.page_num);
+	pg_sz = ib_umem_find_best_pgsz(mr->umem,
+				       dev->dev_attr.page_size_cap,
+				       virt_addr);
+	if (!pg_sz) {
+		err = -EOPNOTSUPP;
+		ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
+			  dev->dev_attr.page_size_cap);
+		goto err_unmap;
+	}
+
+	params.page_shift = __ffs(pg_sz);
+	params.page_num = DIV_ROUND_UP(length + (start & (pg_sz - 1)),
+				       pg_sz);
+
 	ibdev_dbg(&dev->ibdev,
-		  "start %#llx length %#llx npages %d params.page_shift %u params.page_num %u\n",
-		  start, length, npages, params.page_shift, params.page_num);
+		  "start %#llx length %#llx params.page_shift %u params.page_num %u\n",
+		  start, length, params.page_shift, params.page_num);
 
 	inline_size = ARRAY_SIZE(params.pbl.inline_pbl_array);
 	if (params.page_num <= inline_size) {
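One detail in the new efa_reg_mr() code worth spelling out: page_num is rounded up from length plus the offset of start within the first device page, because a registration that begins partway into a block can span one more block than length alone suggests. A worked example of that arithmetic follows; the function name, addresses, and the 2 MiB device page size are made up for illustration.

#include <linux/kernel.h>

/*
 * Illustrative arithmetic only: why the offset of 'start' inside the
 * first device page is added before rounding up.
 */
static u32 example_page_num(void)
{
        u64 pg_sz = 0x200000;              /* hypothetical 2 MiB page   */
        u64 start = 0x7f0000034000ULL;     /* MR start, mid-page        */
        u64 length = 0x400000;             /* 4 MiB registration        */
        u64 offset = start & (pg_sz - 1);  /* 0x34000 into first page   */

        /*
         * DIV_ROUND_UP(0x400000 + 0x34000, 0x200000) = 3 device pages;
         * rounding length alone would give 2 and leave the tail of the
         * region uncovered.
         */
        return DIV_ROUND_UP(length + offset, pg_sz);
}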