/*
 * DMA mapping support for platforms lacking IOMMUs.
 *
 * Copyright (C) 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/dma-mapping.h>
#include <linux/io.h>
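
/*
 * Without an IOMMU, the DMA address of a buffer is simply its physical
 * address; dma_cache_sync() pushes the CPU cache out before the device
 * touches the memory.
 */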
static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	dma_addr_t addr = page_to_phys(page) + offset;

	WARN_ON(size == 0);
	dma_cache_sync(dev, page_address(page) + offset, size, dir);

	return addr;
}
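
/*
 * Scatterlists are mapped entry by entry: each segment's DMA address is
 * its physical address, with a cache sync per segment.
 */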
static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		BUG_ON(!sg_page(s));

		dma_cache_sync(dev, sg_virt(s), s->length, dir);

		s->dma_address = sg_phys(s);
		s->dma_length = s->length;
	}

	return nents;
}
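
/*
 * The sync hooks only matter when DMA is non-coherent; coherent
 * platforms need no cache maintenance for device accesses.
 */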
#ifdef CONFIG_DMA_NONCOHERENT
static void nommu_sync_single(struct device *dev, dma_addr_t addr,
			      size_t size, enum dma_data_direction dir)
{
	dma_cache_sync(dev, phys_to_virt(addr), size, dir);
}

static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
			  int nelems, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nelems, i)
		dma_cache_sync(dev, sg_virt(s), s->length, dir);
}
#endif
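
/*
 * dma_map_ops for the IOMMU-less case: coherent buffers come from the
 * generic allocator, and the sync callbacks are only wired up when
 * running non-coherently.
 */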
struct dma_map_ops nommu_dma_ops = {
	.alloc_coherent		= dma_generic_alloc_coherent,
	.free_coherent		= dma_generic_free_coherent,
	.map_page		= nommu_map_page,
	.map_sg			= nommu_map_sg,
#ifdef CONFIG_DMA_NONCOHERENT
	.sync_single_for_device	= nommu_sync_single,
	.sync_sg_for_device	= nommu_sync_sg,
#endif
	.is_phys		= 1,
};
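
/*
 * Install nommu_dma_ops as the default DMA ops, unless another
 * implementation has already been registered.
 */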
void __init no_iommu_init(void)
{
	if (dma_ops)
		return;
	dma_ops = &nommu_dma_ops;
}