dma-mapping: add generic helpers for mapping sgtable objects
struct sg_table is a common structure used for describing a memory buffer. It consists of a scatterlist with memory pages and DMA addresses (sgl entry), as well as the number of scatterlist entries: CPU pages (orig_nents entry) and DMA mapped pages (nents entry).

It turned out that it was a common mistake to misuse the nents and orig_nents entries, calling DMA-mapping functions with a wrong number of entries or ignoring the number of mapped entries returned by the dma_map_sg function.

To avoid such issues, let's introduce common wrappers operating directly on struct sg_table objects, which take care of the proper use of the nents and orig_nents entries.

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
commit d9d200bceb
parent 24085f70a6
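For illustration only, and not part of the patch itself: a minimal sketch, assuming a hypothetical driver function and a DMA_TO_DEVICE transfer, of the nents/orig_nents mistake described above and of how the new wrappers hide that bookkeeping.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Error-prone open-coded pattern: dma_map_sg() must be called with
 * orig_nents, the returned count must be stored for programming the
 * hardware, and dma_unmap_sg() must again use orig_nents.  Mixing the
 * two counters up is the common mistake the commit message refers to.
 */
static int example_map_raw(struct device *dev, struct sg_table *sgt)
{
	int nents;

	nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_TO_DEVICE);
	if (nents <= 0)
		return -EINVAL;
	sgt->nents = nents;	/* easy to forget */
	/* ... program the hardware with sgt->nents mapped entries ... */
	dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, DMA_TO_DEVICE);
	return 0;
}

/* With the wrappers added below, the sg_table bookkeeping is internal. */
static int example_map_sgtable(struct device *dev, struct sg_table *sgt)
{
	int ret;

	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (ret)
		return ret;
	/* ... program the hardware with sgt->nents mapped entries ... */
	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	return 0;
}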
@@ -609,6 +609,86 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}

/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success the
 * ownership for the buffer is transferred to the DMA domain. One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before touching the
 * buffer by the CPU.
 *
 * Returns 0 on success or -EINVAL on error during mapping the buffer.
 */
static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents <= 0)
		return -EINVAL;
	sgt->nents = nents;
	return 0;
}

/**
 * dma_unmap_sgtable - Unmap the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 * @attrs: Optional DMA attributes for the unmap operation
 *
 * Unmaps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After this function
 * the ownership of the buffer is transferred back to the CPU domain.
 */
static inline void dma_unmap_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
}

/**
 * dma_sync_sgtable_for_cpu - Synchronize the given buffer for CPU access
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the CPU domain, so it is safe to perform any access to it
 * by the CPU. Before doing any further DMA operations, one has to transfer
 * the ownership of the buffer back to the DMA domain by calling the
 * dma_sync_sgtable_for_device().
 */
static inline void dma_sync_sgtable_for_cpu(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->orig_nents, dir);
}

/**
 * dma_sync_sgtable_for_device - Synchronize the given buffer for DMA
 * @dev: The device for which to perform the DMA operation
 * @sgt: The sg_table object describing the buffer
 * @dir: DMA direction
 *
 * Performs the needed cache synchronization and moves the ownership of the
 * buffer back to the DMA domain, so it is safe to perform the DMA operation.
 * Once finished, one has to call dma_sync_sgtable_for_cpu() or
 * dma_unmap_sgtable().
 */
static inline void dma_sync_sgtable_for_device(struct device *dev,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	dma_sync_sg_for_device(dev, sgt->sgl, sgt->orig_nents, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
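A second illustrative sketch, again not part of the patch, assuming a hypothetical receive path with a streaming DMA_FROM_DEVICE mapping: it shows the ownership hand-offs described in the kerneldoc above, with the buffer bouncing between the DMA and CPU domains via the new sync helpers.

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int example_rx_cycle(struct device *dev, struct sg_table *sgt)
{
	int ret;

	/* Map once; on success the buffer is owned by the DMA domain. */
	ret = dma_map_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
	if (ret)
		return ret;

	/* ... the device DMAs received data into the buffer ... */

	/* Give ownership back to the CPU before reading the data. */
	dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);
	/* ... the CPU may now safely read the buffer ... */

	/* Hand the buffer back to the device for the next transfer. */
	dma_sync_sgtable_for_device(dev, sgt, DMA_FROM_DEVICE);
	/* ... the device may DMA into the buffer again ... */

	dma_unmap_sgtable(dev, sgt, DMA_FROM_DEVICE, 0);
	return 0;
}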