dma-pool: fix coherent pool allocations for IOMMU mappings
When allocating coherent pool memory for an IOMMU mapping we don't care
about the DMA mask. Move the guess for the initial GFP mask into
dma_direct_alloc_pages and pass dma_coherent_ok as a function pointer
argument so that it doesn't get applied to the IOMMU case.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Amit Pundir <amit.pundir@linaro.org>
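The heart of the change is the new calling convention for dma_alloc_from_pool(): it now returns the backing page, hands the CPU address back through an output pointer, and takes an optional address-check callback. The direct-mapping caller passes dma_coherent_ok(), while the IOMMU caller passes NULL, since the IOMMU remaps the buffer and its physical address does not have to fit the device's DMA mask. Below is a minimal user-space sketch of that optional-callback pattern; fake_dev, coherent_ok() and alloc_from_pool() are illustrative stand-ins, not kernel APIs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for a device with an addressing limit. */
struct fake_dev {
        unsigned long long coherent_dma_mask;
};

/* Stand-in for dma_coherent_ok(): does the buffer fit under the mask? */
static bool coherent_ok(struct fake_dev *dev, uintptr_t phys, size_t size)
{
        return phys + size - 1 <= dev->coherent_dma_mask;
}

/*
 * Shape of the reworked allocator: return the backing allocation, hand the
 * CPU address back through *cpu_addr, and apply the address check only when
 * the caller supplied one.  Passing NULL skips the check, which is what the
 * IOMMU path wants.
 */
static void *alloc_from_pool(struct fake_dev *dev, size_t size, void **cpu_addr,
                bool (*phys_addr_ok)(struct fake_dev *, uintptr_t, size_t))
{
        void *buf = malloc(size);

        if (!buf)
                return NULL;
        if (phys_addr_ok && !phys_addr_ok(dev, (uintptr_t)buf, size)) {
                free(buf);              /* not addressable by this device */
                return NULL;
        }
        memset(buf, 0, size);
        *cpu_addr = buf;
        return buf;                     /* the kernel returns a struct page * here */
}

int main(void)
{
        struct fake_dev dev = { .coherent_dma_mask = ~0ULL };
        void *cpu_addr;
        void *page;

        /* direct-mapping style caller: enforce the device mask */
        page = alloc_from_pool(&dev, 4096, &cpu_addr, coherent_ok);
        printf("direct path: %s\n", page ? "ok" : "failed");
        free(page);

        /* IOMMU style caller: skip the mask check by passing NULL */
        page = alloc_from_pool(&dev, 4096, &cpu_addr, NULL);
        printf("iommu path:  %s\n", page ? "ok" : "failed");
        free(page);
        return 0;
}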
commit 9420139f51
parent a1d21081a6
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -1035,8 +1035,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 
         if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
             !gfpflags_allow_blocking(gfp) && !coherent)
-                cpu_addr = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page,
-                                               gfp);
+                page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
+                                           gfp, NULL);
         else
                 cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
         if (!cpu_addr)
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -73,9 +73,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
 }
 
 u64 dma_direct_get_required_mask(struct device *dev);
-gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
-                                  u64 *phys_mask);
-bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);
 void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
                 gfp_t gfp, unsigned long attrs);
 void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -522,8 +522,9 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
                 pgprot_t prot, const void *caller);
 void dma_common_free_remap(void *cpu_addr, size_t size);
 
-void *dma_alloc_from_pool(struct device *dev, size_t size,
-                          struct page **ret_page, gfp_t flags);
+struct page *dma_alloc_from_pool(struct device *dev, size_t size,
+                void **cpu_addr, gfp_t flags,
+                bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t));
 bool dma_free_from_pool(struct device *dev, void *start, size_t size);
 
 int
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -43,7 +43,7 @@ u64 dma_direct_get_required_mask(struct device *dev)
         return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
 }
 
-gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
+static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
                 u64 *phys_limit)
 {
         u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit);
@@ -68,7 +68,7 @@ gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
         return 0;
 }
 
-bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
+static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
         return phys_to_dma_direct(dev, phys) + size - 1 <=
                 min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
@@ -161,8 +161,13 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
         size = PAGE_ALIGN(size);
 
         if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
-                ret = dma_alloc_from_pool(dev, size, &page, gfp);
-                if (!ret)
+                u64 phys_mask;
+
+                gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
+                                &phys_mask);
+                page = dma_alloc_from_pool(dev, size, &ret, gfp,
+                                dma_coherent_ok);
+                if (!page)
                         return NULL;
                 goto done;
         }
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -196,93 +196,75 @@ static int __init dma_atomic_pool_init(void)
 }
 postcore_initcall(dma_atomic_pool_init);
 
-static inline struct gen_pool *dma_guess_pool_from_device(struct device *dev)
+static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
 {
-        u64 phys_mask;
-        gfp_t gfp;
-
-        gfp = dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
-                                          &phys_mask);
-        if (IS_ENABLED(CONFIG_ZONE_DMA) && gfp == GFP_DMA)
+        if (prev == NULL) {
+                if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
+                        return atomic_pool_dma32;
+                if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
+                        return atomic_pool_dma;
+                return atomic_pool_kernel;
+        }
+        if (prev == atomic_pool_kernel)
+                return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
+        if (prev == atomic_pool_dma32)
                 return atomic_pool_dma;
-        if (IS_ENABLED(CONFIG_ZONE_DMA32) && gfp == GFP_DMA32)
-                return atomic_pool_dma32;
-        return atomic_pool_kernel;
-}
-
-static inline struct gen_pool *dma_get_safer_pool(struct gen_pool *bad_pool)
-{
-        if (bad_pool == atomic_pool_kernel)
-                return atomic_pool_dma32 ? : atomic_pool_dma;
-
-        if (bad_pool == atomic_pool_dma32)
-                return atomic_pool_dma;
-
         return NULL;
 }
 
-static inline struct gen_pool *dma_guess_pool(struct device *dev,
-                                              struct gen_pool *bad_pool)
+static struct page *__dma_alloc_from_pool(struct device *dev, size_t size,
+                struct gen_pool *pool, void **cpu_addr,
+                bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
 {
-        if (bad_pool)
-                return dma_get_safer_pool(bad_pool);
-
-        return dma_guess_pool_from_device(dev);
-}
-
-void *dma_alloc_from_pool(struct device *dev, size_t size,
-                          struct page **ret_page, gfp_t flags)
-{
-        struct gen_pool *pool = NULL;
-        unsigned long val = 0;
-        void *ptr = NULL;
+        unsigned long addr;
         phys_addr_t phys;
 
-        while (1) {
-                pool = dma_guess_pool(dev, pool);
-                if (!pool) {
-                        WARN(1, "Failed to get suitable pool for %s\n",
-                             dev_name(dev));
-                        break;
-                }
-
-                val = gen_pool_alloc(pool, size);
-                if (!val)
-                        continue;
-
-                phys = gen_pool_virt_to_phys(pool, val);
-                if (dma_coherent_ok(dev, phys, size))
-                        break;
-
-                gen_pool_free(pool, val, size);
-                val = 0;
-        }
+        addr = gen_pool_alloc(pool, size);
+        if (!addr)
+                return NULL;
 
-        if (val) {
-                *ret_page = pfn_to_page(__phys_to_pfn(phys));
-                ptr = (void *)val;
-                memset(ptr, 0, size);
+        phys = gen_pool_virt_to_phys(pool, addr);
+        if (phys_addr_ok && !phys_addr_ok(dev, phys, size)) {
+                gen_pool_free(pool, addr, size);
+                return NULL;
         }
 
         if (gen_pool_avail(pool) < atomic_pool_size)
                 schedule_work(&atomic_pool_work);
 
-        return ptr;
+        *cpu_addr = (void *)addr;
+        memset(*cpu_addr, 0, size);
+        return pfn_to_page(__phys_to_pfn(phys));
+}
+
+struct page *dma_alloc_from_pool(struct device *dev, size_t size,
+                void **cpu_addr, gfp_t gfp,
+                bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
+{
+        struct gen_pool *pool = NULL;
+        struct page *page;
+
+        while ((pool = dma_guess_pool(pool, gfp))) {
+                page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
+                                             phys_addr_ok);
+                if (page)
+                        return page;
+        }
+
+        WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
+        return NULL;
 }
 
 bool dma_free_from_pool(struct device *dev, void *start, size_t size)
 {
         struct gen_pool *pool = NULL;
 
-        while (1) {
-                pool = dma_guess_pool(dev, pool);
-                if (!pool)
-                        return false;
-
-                if (gen_pool_has_addr(pool, (unsigned long)start, size)) {
-                        gen_pool_free(pool, (unsigned long)start, size);
-                        return true;
-                }
+        while ((pool = dma_guess_pool(pool, 0))) {
+                if (!gen_pool_has_addr(pool, (unsigned long)start, size))
+                        continue;
+                gen_pool_free(pool, (unsigned long)start, size);
+                return true;
         }
+
+        return false;
 }
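The rework of dma_guess_pool() above also changes how the atomic pools are walked: the gfp flags only pick the starting pool, and each retry falls back to a more restrictive pool (kernel, then DMA32, then DMA) until none is left. A small stand-alone model of that fallback order follows; the enum, next_pool() and the flag values are illustrative stand-ins, and the kernel version additionally handles pools that were never created.

#include <stdio.h>

/* Illustrative stand-ins for the three atomic pools. */
enum pool { POOL_NONE, POOL_KERNEL, POOL_DMA32, POOL_DMA };

#define GFP_DMA   0x1u          /* simplified flag values, not the kernel's */
#define GFP_DMA32 0x2u

/*
 * Mirrors the shape of the reworked dma_guess_pool(): the first call
 * (prev == POOL_NONE) picks a pool from the gfp flags; every later call
 * falls back to the next more restrictive pool, and POOL_NONE ends the walk.
 */
static enum pool next_pool(enum pool prev, unsigned int gfp)
{
        if (prev == POOL_NONE) {
                if (gfp & GFP_DMA32)
                        return POOL_DMA32;
                if (gfp & GFP_DMA)
                        return POOL_DMA;
                return POOL_KERNEL;
        }
        if (prev == POOL_KERNEL)
                return POOL_DMA32;
        if (prev == POOL_DMA32)
                return POOL_DMA;
        return POOL_NONE;
}

static const char *pool_name(enum pool p)
{
        static const char *names[] = { "none", "kernel", "dma32", "dma" };
        return names[p];
}

int main(void)
{
        unsigned int cases[] = { 0, GFP_DMA32, GFP_DMA };

        for (unsigned int i = 0; i < 3; i++) {
                enum pool p = POOL_NONE;

                printf("gfp=%#x:", cases[i]);
                while ((p = next_pool(p, cases[i])) != POOL_NONE)
                        printf(" %s", pool_name(p));    /* order the pools are tried in */
                printf("\n");
        }
        return 0;
}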