mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-29 22:26:44 +07:00
9c5a362142
The DMA Contiguous Memory Allocator support on x86 is disabled when the swiotlb config option is enabled, so DMA CMA is always disabled on x86_64 because swiotlb is always enabled there. This adds support for DMA CMA with the swiotlb config option enabled.

The contiguous memory allocator on x86 is integrated into dma_generic_alloc_coherent(), the .alloc callback in nommu_dma_ops for dma_alloc_coherent(). x86_swiotlb_alloc_coherent(), the .alloc callback in swiotlb_dma_ops, first tries to allocate with dma_generic_alloc_coherent(), then falls back to swiotlb_alloc_coherent().

The main part of supporting DMA CMA with swiotlb is changing x86_swiotlb_free_coherent(), the .free callback in swiotlb_dma_ops for dma_free_coherent(), so that it can distinguish memory allocated by dma_generic_alloc_coherent() from memory allocated by swiotlb_alloc_coherent(), and release the former with dma_generic_free_coherent(), which can handle contiguous memory. This change requires making is_swiotlb_buffer() a global function.

The .free callback in the dma_map_ops for amd_gart and sta2x11 must change as well, because those dma_ops also use dma_generic_alloc_coherent().

Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Don Dutile <ddutile@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
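A minimal sketch of the .free dispatch described above, built only from the functions the commit message names plus dma_to_phys() to convert the DMA address back to a physical address (the exact arch-side body is not shown in this header, so treat this as illustrative rather than the literal patch):

	void x86_swiotlb_free_coherent(struct device *dev, size_t size,
				       void *vaddr, dma_addr_t dma_addr,
				       struct dma_attrs *attrs)
	{
		/* Bounce-buffer memory came from swiotlb_alloc_coherent() */
		if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
			swiotlb_free_coherent(dev, size, vaddr, dma_addr);
		else
			/*
			 * Everything else, including CMA pages, came from
			 * dma_generic_alloc_coherent() and must be released
			 * by its matching free routine.
			 */
			dma_generic_free_coherent(dev, size, vaddr,
						  dma_addr, attrs);
	}

This is exactly why is_swiotlb_buffer() has to become a global function: the arch code needs it to tell the two allocation paths apart at free time.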
122 lines
3.5 KiB
C
#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H

#include <linux/types.h>

struct device;
struct dma_attrs;
struct scatterlist;

extern int swiotlb_force;

/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2.  What is the appropriate value ?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128

/*
 * log of the size of each IO TLB slab.  The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11
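/*
 * With these defaults, each slab is 1 << IO_TLB_SHIFT = 2048 bytes, so a
 * single mapping can span at most IO_TLB_SEGSIZE slabs, i.e.
 * 128 * 2048 bytes = 256 KiB of contiguous bounce space.
 */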

extern void swiotlb_init(int verbose);
int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
extern unsigned long swiotlb_nr_tbl(void);
unsigned long swiotlb_size_or_default(void);
extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);

/*
 * Enumeration for sync targets
 */
enum dma_sync_target {
	SYNC_FOR_CPU = 0,
	SYNC_FOR_DEVICE = 1,
};

/* define the last possible byte of physical address space as a mapping error */
#define SWIOTLB_MAP_ERROR (~(phys_addr_t)0x0)

extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
					  dma_addr_t tbl_dma_addr,
					  phys_addr_t phys, size_t size,
					  enum dma_data_direction dir);
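/*
 * Callers are expected to compare the returned physical address against
 * SWIOTLB_MAP_ERROR.  A sketch of such a caller (the variable names and
 * the DMA_ERROR_CODE error path are illustrative):
 *
 *	phys_addr_t map = swiotlb_tbl_map_single(hwdev, start_dma_addr,
 *						 phys, size, dir);
 *	if (map == SWIOTLB_MAP_ERROR)
 *		return DMA_ERROR_CODE;
 */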

extern void swiotlb_tbl_unmap_single(struct device *hwdev,
				     phys_addr_t tlb_addr,
				     size_t size, enum dma_data_direction dir);

extern void swiotlb_tbl_sync_single(struct device *hwdev,
				    phys_addr_t tlb_addr,
				    size_t size, enum dma_data_direction dir,
				    enum dma_sync_target target);

/* Accessory functions. */
extern void
*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			dma_addr_t *dma_handle, gfp_t flags);

extern void
swiotlb_free_coherent(struct device *hwdev, size_t size,
		      void *vaddr, dma_addr_t dma_handle);

extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   struct dma_attrs *attrs);
extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       struct dma_attrs *attrs);

extern int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	       enum dma_data_direction dir);

extern void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
		 enum dma_data_direction dir);

extern int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir, struct dma_attrs *attrs);

extern void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir,
		       struct dma_attrs *attrs);

extern void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir);

extern void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, enum dma_data_direction dir);

extern void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir);

extern void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, enum dma_data_direction dir);

extern int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);

extern int
swiotlb_dma_supported(struct device *hwdev, u64 mask);

#ifdef CONFIG_SWIOTLB
extern void __init swiotlb_free(void);
#else
static inline void swiotlb_free(void) { }
#endif

extern void swiotlb_print_info(void);
extern int is_swiotlb_buffer(phys_addr_t paddr);

#endif /* __LINUX_SWIOTLB_H */
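These exported routines are consumed as dma_map_ops callbacks; per the commit message above, x86 wires them into swiotlb_dma_ops on the arch side (arch/x86/kernel/pci-swiotlb.c). A minimal sketch of that table, assuming the field selection implied by the commit message and the declarations above (the x86_swiotlb_* helpers are arch code, not part of this header):

	static struct dma_map_ops swiotlb_dma_ops = {
		.mapping_error = swiotlb_dma_mapping_error,
		.alloc = x86_swiotlb_alloc_coherent,	/* tries CMA first */
		.free = x86_swiotlb_free_coherent,	/* see sketch above */
		.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
		.sync_single_for_device = swiotlb_sync_single_for_device,
		.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
		.sync_sg_for_device = swiotlb_sync_sg_for_device,
		.map_sg = swiotlb_map_sg_attrs,
		.unmap_sg = swiotlb_unmap_sg_attrs,
		.map_page = swiotlb_map_page,
		.unmap_page = swiotlb_unmap_page,
	};

Keeping the allocation and free paths paired through this one ops table is what lets the .free callback safely assume that any non-swiotlb buffer it sees came from dma_generic_alloc_coherent().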