dma: Introduce dma_max_mapping_size()
The function returns the maximum size that can be mapped using DMA-API
functions. The patch also adds the implementation for direct DMA and a
new dma_map_ops pointer so that other implementations can expose their
limit.

Cc: stable@vger.kernel.org
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
commit 133d624b1c (parent 492366f7b4)
@@ -195,6 +195,14 @@ Requesting the required mask does not alter the current mask. If you
 wish to take advantage of it, you should issue a dma_set_mask()
 call to set the mask to the value returned.
 
+::
+
+	size_t
+	dma_direct_max_mapping_size(struct device *dev);
+
+Returns the maximum size of a mapping for the device. The size parameter
+of the mapping functions like dma_map_single(), dma_map_page() and
+others should not be larger than the returned value.
 
 Part Id - Streaming DMA mappings
 --------------------------------
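A minimal caller-side sketch (not part of the diff; my_driver_map_buf() and
its error handling are hypothetical) of how a driver could honour the limit
described above before setting up a streaming mapping::

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	/* Refuse to map a buffer larger than the device/platform can handle. */
	static int my_driver_map_buf(struct device *dev, void *buf, size_t len,
				     dma_addr_t *handle)
	{
		if (len > dma_max_mapping_size(dev))
			return -EINVAL;	/* caller has to split the transfer */

		*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *handle))
			return -ENOMEM;

		return 0;
	}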
@@ -130,6 +130,7 @@ struct dma_map_ops {
 			enum dma_data_direction direction);
 	int (*dma_supported)(struct device *dev, u64 mask);
 	u64 (*get_required_mask)(struct device *dev);
+	size_t (*max_mapping_size)(struct device *dev);
 };
 
 #define DMA_MAPPING_ERROR	(~(dma_addr_t)0)
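A hedged sketch of how a non-direct dma_map_ops implementation could expose
its limit through the new callback; my_iommu_max_mapping_size(),
MY_IOMMU_MAX_SEG and my_iommu_dma_ops are invented names, and all other
callbacks are omitted::

	#include <linux/dma-mapping.h>
	#include <linux/sizes.h>

	/* Hypothetical per-mapping limit, e.g. the size of one IOMMU window. */
	#define MY_IOMMU_MAX_SEG	SZ_256M

	static size_t my_iommu_max_mapping_size(struct device *dev)
	{
		return MY_IOMMU_MAX_SEG;
	}

	static const struct dma_map_ops my_iommu_dma_ops = {
		/* .map_page, .unmap_page, ... left out of this sketch */
		.max_mapping_size	= my_iommu_max_mapping_size,
	};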
@@ -257,6 +258,8 @@ static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
 }
 #endif
 
+size_t dma_direct_max_mapping_size(struct device *dev);
+
 #ifdef CONFIG_HAS_DMA
 #include <asm/dma-mapping.h>
 
@@ -460,6 +463,7 @@ int dma_supported(struct device *dev, u64 mask);
 int dma_set_mask(struct device *dev, u64 mask);
 int dma_set_coherent_mask(struct device *dev, u64 mask);
 u64 dma_get_required_mask(struct device *dev);
+size_t dma_max_mapping_size(struct device *dev);
 #else /* CONFIG_HAS_DMA */
 static inline dma_addr_t dma_map_page_attrs(struct device *dev,
 		struct page *page, size_t offset, size_t size,
@@ -561,6 +565,10 @@ static inline u64 dma_get_required_mask(struct device *dev)
 {
 	return 0;
 }
+static inline size_t dma_max_mapping_size(struct device *dev)
+{
+	return 0;
+}
 #endif /* CONFIG_HAS_DMA */
 
 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
@@ -380,3 +380,14 @@ int dma_direct_supported(struct device *dev, u64 mask)
 	 */
 	return mask >= __phys_to_dma(dev, min_mask);
 }
+
+size_t dma_direct_max_mapping_size(struct device *dev)
+{
+	size_t size = SIZE_MAX;
+
+	/* If SWIOTLB is active, use its maximum mapping size */
+	if (is_swiotlb_active())
+		size = swiotlb_max_mapping_size(dev);
+
+	return size;
+}
@@ -357,3 +357,17 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		ops->cache_sync(dev, vaddr, size, dir);
 }
 EXPORT_SYMBOL(dma_cache_sync);
+
+size_t dma_max_mapping_size(struct device *dev)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+	size_t size = SIZE_MAX;
+
+	if (dma_is_direct(ops))
+		size = dma_direct_max_mapping_size(dev);
+	else if (ops && ops->max_mapping_size)
+		size = ops->max_mapping_size(dev);
+
+	return size;
+}
+EXPORT_SYMBOL_GPL(dma_max_mapping_size);
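To show the exported helper in use, a hedged sketch of a driver that splits a
large transfer into chunks that respect dma_max_mapping_size(); my_send_buf()
and my_send_chunk() are placeholders invented for this sketch::

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/kernel.h>

	/* Placeholder for whatever the driver does with each mapped chunk. */
	void my_send_chunk(struct device *dev, dma_addr_t addr, size_t len);

	static int my_send_buf(struct device *dev, void *buf, size_t len)
	{
		size_t max = dma_max_mapping_size(dev);

		if (!max)
			return -EINVAL;	/* no usable DMA limit reported */

		while (len) {
			size_t chunk = min_t(size_t, len, max);
			dma_addr_t addr;

			addr = dma_map_single(dev, buf, chunk, DMA_TO_DEVICE);
			if (dma_mapping_error(dev, addr))
				return -ENOMEM;

			my_send_chunk(dev, addr, chunk);

			dma_unmap_single(dev, addr, chunk, DMA_TO_DEVICE);
			buf += chunk;
			len -= chunk;
		}

		return 0;
	}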