Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-24 04:20:53 +07:00)
5740afdb68
The swiotlb_free() function frees all memory allocated for the swiotlb. We need to initialize the swiotlb before IOMMU initialization (x86 and powerpc need to allocate the memory from the bootmem allocator). If IOMMU initialization is successful, we then free the swiotlb resources (we don't want to waste the 64MB).

Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: chrisw@sous-sol.org
Cc: dwmw2@infradead.org
Cc: joerg.roedel@amd.com
Cc: muli@il.ibm.com
LKML-Reference: <1257849980-22640-8-git-send-email-fujita.tomonori@lab.ntt.co.jp>
[ -v2: build fix for the !CONFIG_SWIOTLB case ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
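A minimal sketch of the boot-time ordering the commit message describes. arch_dma_init() and pci_iommu_setup() are hypothetical placeholder names for an architecture's early DMA/IOMMU setup path, not functions from this tree; only swiotlb_init() and swiotlb_free() come from the header below.

/*
 * Sketch only, not code from this tree: arch_dma_init() and
 * pci_iommu_setup() are hypothetical placeholders for an
 * architecture's early DMA/IOMMU setup path.
 */
#include <linux/init.h>
#include <linux/swiotlb.h>

extern int pci_iommu_setup(void);	/* hypothetical: nonzero on success */

void __init arch_dma_init(void)
{
	/* swiotlb first: x86 and powerpc allocate its buffer from bootmem */
	swiotlb_init();

	/* if a hardware IOMMU came up, the 64MB bounce buffer is unused */
	if (pci_iommu_setup())
		swiotlb_free();
}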
98 lines · 2.7 KiB · C
#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H

#include <linux/types.h>

struct device;
struct dma_attrs;
struct scatterlist;

/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2.  What is the appropriate value ?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128

/*
 * log of the size of each IO TLB slab.  The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11

extern void
swiotlb_init(void);

extern void
*swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			dma_addr_t *dma_handle, gfp_t flags);

extern void
swiotlb_free_coherent(struct device *hwdev, size_t size,
		      void *vaddr, dma_addr_t dma_handle);

extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   struct dma_attrs *attrs);
extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       struct dma_attrs *attrs);

extern int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	       int direction);

extern void
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
		 int direction);

extern int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir, struct dma_attrs *attrs);

extern void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
		       int nelems, enum dma_data_direction dir,
		       struct dma_attrs *attrs);

extern void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir);

extern void
swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			int nelems, enum dma_data_direction dir);

extern void
swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir);

extern void
swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			   int nelems, enum dma_data_direction dir);

extern void
swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir);

extern void
swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir);

extern int
swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);

extern int
swiotlb_dma_supported(struct device *hwdev, u64 mask);

#ifdef CONFIG_SWIOTLB
extern void __init swiotlb_free(void);
#else
static inline void swiotlb_free(void) { }
#endif

#endif /* __LINUX_SWIOTLB_H */
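For context, a sketch of how an architecture of that era could route its DMA operations through the entry points declared above. The struct dma_map_ops wiring is an assumption based on the contemporaneous dma_map_ops layout; it is not code taken from this repository.

#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

/* Sketch: field names assume the dma_map_ops layout of this kernel era. */
static struct dma_map_ops swiotlb_dma_ops = {
	.alloc_coherent		= swiotlb_alloc_coherent,
	.free_coherent		= swiotlb_free_coherent,
	.map_page		= swiotlb_map_page,
	.unmap_page		= swiotlb_unmap_page,
	.map_sg			= swiotlb_map_sg_attrs,
	.unmap_sg		= swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu	= swiotlb_sync_single_for_cpu,
	.sync_single_for_device	= swiotlb_sync_single_for_device,
	.sync_sg_for_cpu	= swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device	= swiotlb_sync_sg_for_device,
	.mapping_error		= swiotlb_dma_mapping_error,
	.dma_supported		= swiotlb_dma_supported,
};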