RDMA/core: Add weak ordering dma attr to dma mapping
Memory regions registered with IB_ACCESS_RELAXED_ORDERING will be DMA mapped with DMA_ATTR_WEAK_ORDERING. This allows reads and writes to the mapping to be weakly ordered, which can improve performance on architectures that support it.

Link: https://lore.kernel.org/r/20200212073559.684139-1-leon@kernel.org
Signed-off-by: Michael Guralnik <michaelgur@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent ca750d4a9c
commit f03d9fadfe
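Before reading the diff, here is a minimal sketch of the pattern the patch applies, written as a free-standing helper rather than the patched ib_umem_get() itself (the helper name map_umem_sg() is made up for illustration): the caller's access flag is translated into a DMA attribute and passed to the *_attrs variant of the scatterlist mapping call.

/*
 * Sketch only, not the patched ib_umem_get(): map an SG list with weak
 * ordering when the registrant asked for relaxed ordering.
 */
#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>

static int map_umem_sg(struct ib_device *device, struct scatterlist *sgl,
                       int nents, int access)
{
        unsigned long dma_attr = 0;

        /* Only relax ordering when the caller explicitly requested it. */
        if (access & IB_ACCESS_RELAXED_ORDERING)
                dma_attr |= DMA_ATTR_WEAK_ORDERING;

        /* Returns the number of mapped entries; 0 means the mapping failed. */
        return ib_dma_map_sg_attrs(device, sgl, nents, DMA_BIDIRECTIONAL,
                                   dma_attr);
}

ib_dma_map_sg_attrs() is a thin wrapper around dma_map_sg_attrs() on the device's DMA device, so the attribute is honoured or ignored by the platform's DMA ops. The full diff follows.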
@@ -197,6 +197,7 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
 	unsigned long lock_limit;
 	unsigned long new_pinned;
 	unsigned long cur_base;
+	unsigned long dma_attr = 0;
 	struct mm_struct *mm;
 	unsigned long npages;
 	int ret;
@@ -278,10 +279,12 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
 
 	sg_mark_end(sg);
 
-	umem->nmap = ib_dma_map_sg(device,
-				   umem->sg_head.sgl,
-				   umem->sg_nents,
-				   DMA_BIDIRECTIONAL);
+	if (access & IB_ACCESS_RELAXED_ORDERING)
+		dma_attr |= DMA_ATTR_WEAK_ORDERING;
+
+	umem->nmap =
+		ib_dma_map_sg_attrs(device, umem->sg_head.sgl, umem->sg_nents,
+				    DMA_BIDIRECTIONAL, dma_attr);
 
 	if (!umem->nmap) {
 		ret = -ENOMEM;
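For completeness, a hedged userspace sketch of how a consumer ends up on this path: assuming an rdma-core new enough to define IBV_ACCESS_RELAXED_ORDERING, passing that optional flag to ibv_reg_mr() reaches the kernel as IB_ACCESS_RELAXED_ORDERING and takes the DMA_ATTR_WEAK_ORDERING branch added above. The pd/buf/len names are placeholders.

#include <infiniband/verbs.h>

/* Register a buffer with relaxed ordering requested (sketch). */
struct ibv_mr *register_relaxed(struct ibv_pd *pd, void *buf, size_t len)
{
        return ibv_reg_mr(pd, buf, len,
                          IBV_ACCESS_LOCAL_WRITE |
                          IBV_ACCESS_REMOTE_READ |
                          IBV_ACCESS_RELAXED_ORDERING);
}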