mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-01 17:26:39 +07:00
622a9edd91
dma_cache_(wback|inv|wback_inv) were the earliest attempt at a generalized cache management API for I/O purposes. Originally it was basically the raw MIPS low-level cache API exported to the entire world. The API has suffered from a lack of documentation, was never very widely used, unlike its more modern siblings, and can easily be replaced by dma_cache_sync. So remove it, or rather turn the surviving bits back into an arch-private API, as discussed on linux-arch.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Acked-by: Paul Mackerras <paulus@samba.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Kyle McMartin <kyle@parisc-linux.org>
Acked-by: Haavard Skinnemoen <hskinnemoen@atmel.com>
Cc: <linux-arch@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
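For illustration, a driver that used the old arch-specific calls would now go through the generic helper. The snippet below is a hypothetical before/after sketch, not a hunk from this commit; dev, buf and len are invented names:

        /* before: raw MIPS-style call, no device or direction info */
        dma_cache_wback_inv((unsigned long)buf, len);

        /* after: generic API, the architecture picks the right cache op */
        dma_cache_sync(dev, buf, len, DMA_BIDIRECTIONAL);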
144 lines
3.3 KiB
C
/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-mapping.h>

#include <asm/addrspace.h>
#include <asm/cacheflush.h>

void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
{
        /*
         * No need to sync an uncached area
         */
        if (PXSEG(vaddr) == P2SEG)
                return;

        switch (direction) {
        case DMA_FROM_DEVICE:           /* invalidate only */
                invalidate_dcache_region(vaddr, size);
                break;
        case DMA_TO_DEVICE:             /* writeback only */
                clean_dcache_region(vaddr, size);
                break;
        case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
                flush_dcache_region(vaddr, size);
                break;
        default:
                BUG();
        }
}
EXPORT_SYMBOL(dma_cache_sync);
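/*
 * Hypothetical usage of dma_cache_sync(), for illustration only (dev,
 * buf and len are invented names): sync a buffer the CPU has written
 * before the device reads it, and again after the device has written
 * it, before the CPU inspects the result:
 *
 *      dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
 *      ... start the transfer and wait for it to complete ...
 *      dma_cache_sync(dev, buf, len, DMA_FROM_DEVICE);
 */
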
static struct page *__dma_alloc(struct device *dev, size_t size,
                                dma_addr_t *handle, gfp_t gfp)
{
        struct page *page, *free, *end;
        int order;

        size = PAGE_ALIGN(size);
        order = get_order(size);

        page = alloc_pages(gfp, order);
        if (!page)
                return NULL;
        split_page(page, order);

        /*
         * When accessing physical memory with valid cache data, we
         * get a cache hit even if the virtual memory region is marked
         * as uncached.
         *
         * Since the memory is newly allocated, there is no point in
         * doing a writeback. If the previous owner cares, he should
         * have flushed the cache before releasing the memory.
         */
        invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);

        *handle = page_to_bus(page);
        free = page + (size >> PAGE_SHIFT);
        end = page + (1 << order);

        /*
         * Free any unused pages
         */
        while (free < end) {
                __free_page(free);
                free++;
        }

        return page;
}
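/*
 * Worked example of the tail trimming above (numbers assume 4 KiB
 * pages, purely illustrative): a 12 KiB request stays 12288 bytes
 * after PAGE_ALIGN(), i.e. three pages, but get_order() rounds up to
 * order 2, so alloc_pages() hands back four contiguous pages.
 * split_page() makes each of the four individually refcounted, which
 * lets the loop return the single unused tail page to the allocator
 * while the first three stay allocated.
 */
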
static void __dma_free(struct device *dev, size_t size,
                       struct page *page, dma_addr_t handle)
{
        struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);

        while (page < end)
                __free_page(page++);
}

void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *handle, gfp_t gfp)
{
        struct page *page;
        void *ret = NULL;

        page = __dma_alloc(dev, size, handle, gfp);
        if (page)
                ret = phys_to_uncached(page_to_phys(page));

        return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_coherent(struct device *dev, size_t size,
                       void *cpu_addr, dma_addr_t handle)
{
        void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
        struct page *page;

        pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
                 cpu_addr, (unsigned long)handle, (unsigned)size);

        BUG_ON(!virt_addr_valid(addr));
        page = virt_to_page(addr);
        __dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_coherent);
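/*
 * Hypothetical usage of the coherent pair, for illustration only (dev,
 * RING_BYTES and ring are invented names): since the returned pointer
 * lives in uncached space, the CPU can touch the memory without any
 * further cache maintenance:
 *
 *      dma_addr_t ring_dma;
 *      void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma,
 *                                      GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ... program ring_dma into the device, use "ring" from the CPU ...
 *      dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */
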
void *dma_alloc_writecombine(struct device *dev, size_t size,
                             dma_addr_t *handle, gfp_t gfp)
{
        struct page *page;
        dma_addr_t phys;

        page = __dma_alloc(dev, size, handle, gfp);
        if (!page)
                return NULL;

        phys = page_to_phys(page);
        *handle = phys;

        /* Now, map the page into P3 with write-combining turned on */
        return __ioremap(phys, size, _PAGE_BUFFER);
}
EXPORT_SYMBOL(dma_alloc_writecombine);

void dma_free_writecombine(struct device *dev, size_t size,
                           void *cpu_addr, dma_addr_t handle)
{
        struct page *page;

        iounmap(cpu_addr);

        page = phys_to_page(handle);
        __dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_writecombine);
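
/*
 * Hypothetical usage of the write-combining pair, for illustration
 * only (dev, fb and fb_len are invented names): write-combining suits
 * large, write-mostly areas such as a frame buffer, where gathered
 * writes are cheaper than uncached single accesses:
 *
 *      dma_addr_t fb_dma;
 *      void *fb = dma_alloc_writecombine(dev, fb_len, &fb_dma,
 *                                        GFP_KERNEL);
 *      if (!fb)
 *              return -ENOMEM;
 *      ... point the display controller at fb_dma, render through fb ...
 *      dma_free_writecombine(dev, fb_len, fb, fb_dma);
 */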