linux_dsm_epyc7002/arch/nds32/kernel/dma.c
Christoph Hellwig 6309513c1b nds32: use the generic remapping allocator for coherent DMA allocations
Replace the code that sets up uncached PTEs with the generic vmap based
remapping code.  It also provides an atomic pool for allocations from
non-blocking context, which was not properly supported by the existing
nds32 code.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Greentime Hu <greentime@andestech.com>
Reviewed-by: Greentime Hu <greentime@andestech.com>
2019-06-25 14:28:06 +02:00

89 lines
1.9 KiB
C

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/dma-noncoherent.h>
#include <linux/cache.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>
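
/*
 * Apply a cache maintenance callback to the physical range
 * [paddr, paddr + size).  Highmem pages are walked one page at a time and
 * temporarily mapped with kmap_atomic(); lowmem is handled in one pass
 * through the kernel linear mapping.
 */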
static inline void cache_op(phys_addr_t paddr, size_t size,
		void (*fn)(unsigned long start, unsigned long end))
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned offset = paddr & ~PAGE_MASK;
	size_t left = size;
	unsigned long start;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			start = (unsigned long)(addr + offset);
			fn(start, start + len);
			kunmap_atomic(addr);
		} else {
			start = (unsigned long)phys_to_virt(paddr);
			fn(start, start + size);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}
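
/*
 * Make CPU-written data visible to the device before a DMA transfer:
 * write back dirty cache lines for DMA_TO_DEVICE and DMA_BIDIRECTIONAL;
 * nothing to do for DMA_FROM_DEVICE.
 */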
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		break;
	case DMA_TO_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, cpu_dma_wb_range);
		break;
	default:
		BUG();
	}
}
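
/*
 * Make device-written data visible to the CPU after a DMA transfer:
 * invalidate the cached range for DMA_FROM_DEVICE and DMA_BIDIRECTIONAL;
 * nothing to do for DMA_TO_DEVICE.
 */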
void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, cpu_dma_inval_range);
		break;
	default:
		BUG();
	}
}
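
/*
 * Write back and invalidate the cached alias of a buffer before the generic
 * remapping allocator hands it out through an uncached mapping.
 */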
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	cache_op(page_to_phys(page), size, cpu_dma_wbinval_range);
}
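
/*
 * Set up the atomic pool used to satisfy coherent allocations from
 * non-blocking context, mapped with uncached page protections.
 */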
static int __init atomic_pool_init(void)
{
	return dma_atomic_pool_init(GFP_KERNEL, pgprot_noncached(PAGE_KERNEL));
}
postcore_initcall(atomic_pool_init);
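
For illustration of the allocation path described in the commit message (not part of the file above): once the generic remapping allocator is in use, a coherent allocation requested from non-blocking context is served out of the pool initialized by atomic_pool_init(). A minimal sketch, assuming a hypothetical device pointer and helper name:

#include <linux/dma-mapping.h>

/* Hypothetical helper: grab a coherent buffer while unable to sleep. */
static void *alloc_coherent_in_atomic(struct device *dev, size_t size,
				      dma_addr_t *handle)
{
	/*
	 * GFP_ATOMIC forbids sleeping, so the generic DMA code cannot vmap
	 * a fresh remapping here and instead hands out memory from the
	 * pre-populated atomic pool set up by atomic_pool_init() above.
	 */
	return dma_alloc_coherent(dev, size, handle, GFP_ATOMIC);
}

/* Release with dma_free_coherent(dev, size, cpu_addr, handle) as usual. */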