mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-21 11:29:17 +07:00
d484864dd9
Pull CMA and ARM DMA-mapping updates from Marek Szyprowski: "These patches contain two major updates for DMA mapping subsystem (mainly for ARM architecture). First one is Contiguous Memory Allocator (CMA) which makes it possible for device drivers to allocate big contiguous chunks of memory after the system has booted. The main difference from the similar frameworks is the fact that CMA allows to transparently reuse the memory region reserved for the big chunk allocation as a system memory, so no memory is wasted when no big chunk is allocated. Once the alloc request is issued, the framework migrates system pages to create space for the required big chunk of physically contiguous memory. For more information one can refer to nice LWN articles: - 'A reworked contiguous memory allocator': http://lwn.net/Articles/447405/ - 'CMA and ARM': http://lwn.net/Articles/450286/ - 'A deep dive into CMA': http://lwn.net/Articles/486301/ - and the following thread with the patches and links to all previous versions: https://lkml.org/lkml/2012/4/3/204 The main client for this new framework is ARM DMA-mapping subsystem. The second part provides a complete redesign in ARM DMA-mapping subsystem. The core implementation has been changed to use common struct dma_map_ops based infrastructure with the recent updates for new dma attributes merged in v3.4-rc2. This allows to use more than one implementation of dma-mapping calls and change/select them on the struct device basis. The first client of this new infrastructure is dmabounce implementation which has been completely cut out of the core, common code. The last patch of this redesign update introduces a new, experimental implementation of dma-mapping calls on top of generic IOMMU framework. This lets ARM sub-platform to transparently use IOMMU for DMA-mapping calls if one provides required IOMMU hardware.
For more information please refer to the following thread: http://www.spinics.net/lists/arm-kernel/msg175729.html The last patch merges changes from both updates and provides a resolution for the conflicts which cannot be avoided when patches have been applied on the same files (mainly arch/arm/mm/dma-mapping.c)." Acked by Andrew Morton <akpm@linux-foundation.org>: "Yup, this one please. It's had much work, plenty of review and I think even Russell is happy with it." * 'for-linus' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping: (28 commits) ARM: dma-mapping: use PMD size for section unmap cma: fix migration mode ARM: integrate CMA with DMA-mapping subsystem X86: integrate CMA with DMA-mapping subsystem drivers: add Contiguous Memory Allocator mm: trigger page reclaim in alloc_contig_range() to stabilise watermarks mm: extract reclaim code from __alloc_pages_direct_reclaim() mm: Serialize access to min_free_kbytes mm: page_isolation: MIGRATE_CMA isolation functions added mm: mmzone: MIGRATE_CMA migration type added mm: page_alloc: change fallbacks array handling mm: page_alloc: introduce alloc_contig_range() mm: compaction: export some of the functions mm: compaction: introduce isolate_freepages_range() mm: compaction: introduce map_pages() mm: compaction: introduce isolate_migratepages_range() mm: page_alloc: remove trailing whitespace ARM: dma-mapping: add support for IOMMU mapper ARM: dma-mapping: use alloc, mmap, free from dma_ops ARM: dma-mapping: remove redundant code and do the cleanup ... Conflicts: arch/x86/include/asm/dma-mapping.h
176 lines
4.3 KiB
C
#ifndef _ASM_X86_DMA_MAPPING_H
|
|
#define _ASM_X86_DMA_MAPPING_H
|
|
|
|
/*
|
|
* IOMMU interface. See Documentation/DMA-API-HOWTO.txt and
|
|
* Documentation/DMA-API.txt for documentation.
|
|
*/
|
|
|
|
#include <linux/kmemcheck.h>
|
|
#include <linux/scatterlist.h>
|
|
#include <linux/dma-debug.h>
|
|
#include <linux/dma-attrs.h>
|
|
#include <asm/io.h>
|
|
#include <asm/swiotlb.h>
|
|
#include <asm-generic/dma-coherent.h>
|
|
#include <linux/dma-contiguous.h>
|
|
|
|
#ifdef CONFIG_ISA
|
|
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
|
|
#else
|
|
# define ISA_DMA_BIT_MASK DMA_BIT_MASK(32)
|
|
#endif
|
|
|
|
#define DMA_ERROR_CODE 0
|
|
|
|
extern int iommu_merge;
|
|
extern struct device x86_dma_fallback_dev;
|
|
extern int panic_on_overflow;
|
|
|
|
extern struct dma_map_ops *dma_ops;
|
|
|
|
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
|
|
{
|
|
#ifndef CONFIG_X86_DEV_DMA_OPS
|
|
return dma_ops;
|
|
#else
|
|
if (unlikely(!dev) || !dev->archdata.dma_ops)
|
|
return dma_ops;
|
|
else
|
|
return dev->archdata.dma_ops;
|
|
#endif
|
|
}
|
|
|
|
#include <asm-generic/dma-mapping-common.h>
|
|
|
|
/* Make sure we keep the same behaviour */
|
|
/*
 * Report whether a DMA address returned by a mapping call denotes a
 * failed mapping.  Implementations may supply their own mapping_error
 * hook; otherwise a handle equal to DMA_ERROR_CODE signals the error.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	return ops->mapping_error ? ops->mapping_error(dev, dma_addr)
				  : (dma_addr == DMA_ERROR_CODE);
}
|
|
|
|
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
|
|
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
|
|
|
|
extern int dma_supported(struct device *hwdev, u64 mask);
|
|
extern int dma_set_mask(struct device *dev, u64 mask);
|
|
|
|
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
|
|
dma_addr_t *dma_addr, gfp_t flag,
|
|
struct dma_attrs *attrs);
|
|
|
|
extern void dma_generic_free_coherent(struct device *dev, size_t size,
|
|
void *vaddr, dma_addr_t dma_addr,
|
|
struct dma_attrs *attrs);
|
|
|
|
#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
|
|
extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
|
|
extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
|
|
extern phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
|
|
#else
|
|
|
|
/*
 * Check whether @dev can DMA to every byte of [addr, addr + size), i.e.
 * whether the last byte of the range fits under the device's dma_mask.
 * A device without a dma_mask set is treated as incapable of DMA.
 *
 * Fix: return the bool literal false rather than the integer 0 from a
 * bool-returning function, matching the function's declared type.
 */
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;

	return addr + size - 1 <= *dev->dma_mask;
}
|
|
|
|
/*
 * Translate a CPU physical address into a device (bus) address.  Without
 * CONFIG_X86_DMA_REMAP this is the identity mapping: devices see the
 * same address space as the CPU.
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return paddr;
}
|
|
|
|
/*
 * Translate a device (bus) address back into a CPU physical address.
 * Without CONFIG_X86_DMA_REMAP this is the identity mapping, the inverse
 * of phys_to_dma() above.
 */
static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return daddr;
}
|
|
#endif /* CONFIG_X86_DMA_REMAP */
|
|
|
|
/*
 * Make CPU stores to a dma_alloc_noncoherent() buffer visible to the
 * device.  On x86 DMA is cache-coherent, so no cache maintenance is
 * required; only pending CPU write buffers have to be drained.  @vaddr,
 * @size and @dir are therefore unused here.
 */
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	flush_write_buffers();
}
|
|
|
|
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
|
|
gfp_t gfp)
|
|
{
|
|
unsigned long dma_mask = 0;
|
|
|
|
dma_mask = dev->coherent_dma_mask;
|
|
if (!dma_mask)
|
|
dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
|
|
|
|
return dma_mask;
|
|
}
|
|
|
|
/*
 * Constrain allocation flags so that pages come from a memory zone the
 * device's coherent DMA mask can actually address: ZONE_DMA for masks up
 * to 24 bits, and (on 64-bit) ZONE_DMA32 for masks up to 32 bits.
 */
static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
	unsigned long mask = dma_alloc_coherent_mask(dev, gfp);
	gfp_t flags = gfp;

	if (mask <= DMA_BIT_MASK(24))
		flags |= GFP_DMA;
#ifdef CONFIG_X86_64
	if (!(flags & GFP_DMA) && mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA32;
#endif
	return flags;
}
|
|
|
|
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
|
|
|
|
/*
 * Allocate a coherent DMA buffer of @size bytes, storing the device-view
 * address in *@dma_handle.  The device's private coherent memory pool is
 * tried first; otherwise the allocation goes through the dma_map_ops
 * ->alloc hook selected for the device.  Returns the kernel virtual
 * address of the buffer, or NULL on failure.
 *
 * NOTE(review): the pool lookup deliberately happens before the NULL-dev
 * fallback and the capability check — order matters here.
 */
static inline void *
dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *memory;

	/* Zone placement is derived from the DMA masks below, not the caller. */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	/* A hit in the device's dedicated coherent pool short-circuits. */
	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	/* Callers may pass dev == NULL; substitute the x86 fallback device. */
	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc)
		return NULL;

	memory = ops->alloc(dev, size, dma_handle,
			    dma_alloc_coherent_gfp_flags(dev, gfp), attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);

	return memory;
}
|
|
|
|
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
|
|
|
|
/*
 * Free a buffer obtained from dma_alloc_attrs()/dma_alloc_coherent().
 * Buffers carved out of the device's private coherent pool are returned
 * there; everything else goes through the dma_map_ops ->free hook.
 *
 * NOTE(review): the pool release intentionally short-circuits before the
 * DMA-debug hook and ->free are reached — preserve this ordering.
 */
static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *vaddr, dma_addr_t bus,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());       /* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	debug_dma_free_coherent(dev, size, vaddr, bus);
	if (ops->free)
		ops->free(dev, size, vaddr, bus, attrs);
}
|
|
|
|
#endif
|