Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-05 06:56:52 +07:00)
Commit 58c6d3dfe4

Prevent passing an order to bitmap_find_free_region() that is larger
than the actual bitmap can represent.  These requests can come from
device drivers that have no idea how big the dma region is and need
to rely on dma_alloc_from_coherent() to sort it out for them.

Reported-by: Guennadi Liakhovetski <lg@denx.de>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Dmitry Baryshkov <dbaryshkov@gmail.com>
Cc: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
176 lines
4.5 KiB
C
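The guard in question appears in dma_alloc_from_coherent() below: it bails
out to the generic allocator before an oversized order can ever reach
bitmap_find_free_region():

	if (unlikely(size > mem->size))
		return 0;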
/*
 * Coherent per-device memory handling.
 * Borrowed from i386
 */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>

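/*
 * Note on units: virt_base is the kernel mapping of the pool (from
 * ioremap()), device_base is the base address as seen by the device,
 * and size is the number of pages in the pool, not a byte count.
 */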
struct dma_coherent_mem {
	void		*virt_base;
	u32		device_base;
	int		size;
	int		flags;
	unsigned long	*bitmap;
};

int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem);
 out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

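/*
 * Example caller (hypothetical addresses, not from this file): a
 * platform driver reserving a 64 KiB device-local window for coherent
 * allocations.  A zero return value indicates failure:
 *
 *	int rc = dma_declare_coherent_memory(dev, 0xf0000000, 0xf0000000,
 *					     0x10000, DMA_MEMORY_MAP);
 *	if (!rc)
 *		dev_err(dev, "failed to declare coherent memory\n");
 */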
void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

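/*
 * Reserve a range of the declared memory so that normal allocations
 * will skip it; returns the kernel virtual address of the reserved
 * range, or an ERR_PTR() value on failure.
 */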
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pos, err;

	size += device_addr & ~PAGE_MASK;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

/**
 * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
 *
 * @dev:	device from which we allocate memory
 * @size:	size of requested memory area
 * @dma_handle:	This will be filled with the correct dma handle
 * @ret:	This pointer will be filled with the virtual address
 *		to allocated area.
 *
 * This function should be only called from per-arch dma_alloc_coherent()
 * to support allocation from per-device coherent memory pools.
 *
 * Returns 0 if dma_alloc_coherent should continue with allocating from
 * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
				       dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem;
	int order = get_order(size);
	int pageno;

	if (!dev)
		return 0;
	mem = dev->dma_mem;
	if (!mem)
		return 0;
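	/*
	 * Reject requests larger than the whole pool up front, so that
	 * bitmap_find_free_region() is never asked for an order bigger
	 * than the bitmap can represent (see the commit message above).
	 */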
	if (unlikely(size > mem->size))
		return 0;

	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (pageno >= 0) {
		/*
		 * Memory was found in the per-device arena.
		 */
		*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
		*ret = mem->virt_base + (pageno << PAGE_SHIFT);
		memset(*ret, 0, size);
	} else if (mem->flags & DMA_MEMORY_EXCLUSIVE) {
		/*
		 * The per-device arena is exhausted and we are not
		 * permitted to fall back to generic memory.
		 */
		*ret = NULL;
	} else {
		/*
		 * The per-device arena is exhausted and we are
		 * permitted to fall back to generic memory.
		 */
		return 0;
	}
	return 1;
}
EXPORT_SYMBOL(dma_alloc_from_coherent);

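/*
 * Sketch of the intended call site, per the kerneldoc above.  The
 * generic_alloc_coherent() fallback is illustrative, not a real
 * function in this file:
 *
 *	void *dma_alloc_coherent(struct device *dev, size_t size,
 *				 dma_addr_t *handle, gfp_t gfp)
 *	{
 *		void *ret;
 *
 *		if (dma_alloc_from_coherent(dev, size, handle, &ret))
 *			return ret;
 *		return generic_alloc_coherent(dev, size, handle, gfp);
 *	}
 */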
/**
 * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
 * @dev:	device from which the memory was allocated
 * @order:	the order of pages allocated
 * @vaddr:	virtual address of allocated pages
 *
 * This checks whether the memory was allocated from the per-device
 * coherent memory pool and if so, releases that memory.
 *
 * Returns 1 if we correctly released the memory, or 0 if
 * dma_release_coherent() should proceed with releasing memory from
 * generic pools.
 */
int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(dma_release_from_coherent);