Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-18 18:36:41 +07:00
sparc: remove the sparc32_dma_ops indirection
There is no good reason to have a double indirection for the sparc32 dma ops, so remove the sparc32_dma_ops and define separate dma_map_ops instances for the different IOMMU types.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: David S. Miller <davem@davemloft.net>
commit ce65d36f3e
parent 53b7670e57
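Before the diff itself, a minimal sketch of the indirection being removed may help. This is an illustrative, userspace-compilable model with simplified names and signatures, not the kernel code: previously a generic dma_map_ops implementation forwarded every call through the sparc32_dma_ops table (two hops); after this commit each IOMMU type supplies its own dma_map_ops and is called directly (one hop).

/*
 * Illustrative sketch only: a simplified model of the double indirection
 * this commit removes.  The real types live in arch/sparc/include/asm/dma.h
 * and the generic DMA headers; names here are invented for the example.
 */
#include <stdio.h>

struct device { const char *name; };

/* Before: a second ops table sat behind the generic dma_map_ops... */
struct sparc32_dma_ops_sketch {
	unsigned int (*get_scsi_one)(struct device *dev, char *vaddr, unsigned long len);
};

static unsigned int iounit_get_scsi_one_sketch(struct device *dev, char *vaddr, unsigned long len)
{
	printf("%s: mapped %lu bytes via io-unit\n", dev->name, len);
	return 0x1000;
}

static const struct sparc32_dma_ops_sketch iounit_ops = {
	.get_scsi_one = iounit_get_scsi_one_sketch,
};
static const struct sparc32_dma_ops_sketch *sparc32_ops = &iounit_ops;

/* ...so the dma_map_ops callback was only a shim that bounced into it. */
static unsigned long sbus_map_page_before(struct device *dev, char *vaddr, unsigned long len)
{
	return sparc32_ops->get_scsi_one(dev, vaddr, len);	/* hop #2 */
}

/* After: the IOMMU-specific function is the dma_map_ops callback itself. */
struct dma_map_ops_sketch {
	unsigned long (*map_page)(struct device *dev, char *vaddr, unsigned long len);
};

static unsigned long iounit_map_page_after(struct device *dev, char *vaddr, unsigned long len)
{
	printf("%s: mapped %lu bytes via io-unit\n", dev->name, len);
	return 0x1000;
}

static const struct dma_map_ops_sketch iounit_dma_ops_after = {
	.map_page = iounit_map_page_after,
};

int main(void)
{
	struct device dev = { .name = "esp0" };
	char buf[64];

	sbus_map_page_before(&dev, buf, sizeof(buf));		/* old path, two hops */
	iounit_dma_ops_after.map_page(&dev, buf, sizeof(buf));	/* new path, one hop */
	return 0;
}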
@@ -91,54 +91,10 @@ extern int isa_dma_bridge_buggy;
 #endif
 
 #ifdef CONFIG_SPARC32
-
-/* Routines for data transfer buffers. */
 struct device;
-struct scatterlist;
-
-struct sparc32_dma_ops {
-	__u32 (*get_scsi_one)(struct device *, char *, unsigned long);
-	void (*get_scsi_sgl)(struct device *, struct scatterlist *, int);
-	void (*release_scsi_one)(struct device *, __u32, unsigned long);
-	void (*release_scsi_sgl)(struct device *, struct scatterlist *,int);
-#ifdef CONFIG_SBUS
-	int (*map_dma_area)(struct device *, dma_addr_t *, unsigned long, unsigned long, int);
-	void (*unmap_dma_area)(struct device *, unsigned long, int);
-#endif
-};
-extern const struct sparc32_dma_ops *sparc32_dma_ops;
-
-#define mmu_get_scsi_one(dev,vaddr,len) \
-	sparc32_dma_ops->get_scsi_one(dev, vaddr, len)
-#define mmu_get_scsi_sgl(dev,sg,sz) \
-	sparc32_dma_ops->get_scsi_sgl(dev, sg, sz)
-#define mmu_release_scsi_one(dev,vaddr,len) \
-	sparc32_dma_ops->release_scsi_one(dev, vaddr,len)
-#define mmu_release_scsi_sgl(dev,sg,sz) \
-	sparc32_dma_ops->release_scsi_sgl(dev, sg, sz)
-
-#ifdef CONFIG_SBUS
-/*
- * mmu_map/unmap are provided by iommu/iounit; Invalid to call on IIep.
- *
- * The mmu_map_dma_area establishes two mappings in one go.
- * These mappings point to pages normally mapped at 'va' (linear address).
- * First mapping is for CPU visible address at 'a', uncached.
- * This is an alias, but it works because it is an uncached mapping.
- * Second mapping is for device visible address, or "bus" address.
- * The bus address is returned at '*pba'.
- *
- * These functions seem distinct, but are hard to split.
- * On sun4m, page attributes depend on the CPU type, so we have to
- * know if we are mapping RAM or I/O, so it has to be an additional argument
- * to a separate mapping function for CPU visible mappings.
- */
-#define sbus_map_dma_area(dev,pba,va,a,len) \
-	sparc32_dma_ops->map_dma_area(dev, pba, va, a, len)
-#define sbus_unmap_dma_area(dev,ba,len) \
-	sparc32_dma_ops->unmap_dma_area(dev, ba, len)
-#endif /* CONFIG_SBUS */
 
+unsigned long sparc_dma_alloc_resource(struct device *dev, size_t len);
+bool sparc_dma_free_resource(void *cpu_addr, size_t size);
 #endif
 
 #endif /* !(_ASM_SPARC_DMA_H) */
@@ -52,8 +52,6 @@
 #include <asm/io-unit.h>
 #include <asm/leon.h>
 
-const struct sparc32_dma_ops *sparc32_dma_ops;
-
 /* This function must make sure that caches and memory are coherent after DMA
  * On LEON systems without cache snooping it flushes the entire D-CACHE.
  */
@@ -247,7 +245,7 @@ static void _sparc_free_io(struct resource *res)
 	release_resource(res);
 }
 
-static unsigned long sparc_dma_alloc_resource(struct device *dev, size_t len)
+unsigned long sparc_dma_alloc_resource(struct device *dev, size_t len)
 {
 	struct resource *res;
 
@@ -266,7 +264,7 @@ static unsigned long sparc_dma_alloc_resource(struct device *dev, size_t len)
 	return res->start;
 }
 
-static bool sparc_dma_free_resource(void *cpu_addr, size_t size)
+bool sparc_dma_free_resource(void *cpu_addr, size_t size)
 {
 	unsigned long addr = (unsigned long)cpu_addr;
 	struct resource *res;
@@ -302,122 +300,6 @@ void sbus_set_sbus64(struct device *dev, int x)
 }
 EXPORT_SYMBOL(sbus_set_sbus64);
 
-/*
- * Allocate a chunk of memory suitable for DMA.
- * Typically devices use them for control blocks.
- * CPU may access them without any explicit flushing.
- */
-static void *sbus_alloc_coherent(struct device *dev, size_t len,
-		dma_addr_t *dma_addrp, gfp_t gfp,
-		unsigned long attrs)
-{
-	unsigned long len_total = PAGE_ALIGN(len);
-	unsigned long va, addr;
-	int order;
-
-	/* XXX why are some lengths signed, others unsigned? */
-	if (len <= 0) {
-		return NULL;
-	}
-	/* XXX So what is maxphys for us and how do drivers know it? */
-	if (len > 256*1024) {	/* __get_free_pages() limit */
-		return NULL;
-	}
-
-	order = get_order(len_total);
-	va = __get_free_pages(gfp, order);
-	if (va == 0)
-		goto err_nopages;
-
-	addr = sparc_dma_alloc_resource(dev, len_total);
-	if (!addr)
-		goto err_nomem;
-
-	// XXX The sbus_map_dma_area does this for us below, see comments.
-	// srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total);
-	/*
-	 * XXX That's where sdev would be used. Currently we load
-	 * all iommu tables with the same translations.
-	 */
-	if (sbus_map_dma_area(dev, dma_addrp, va, addr, len_total) != 0)
-		goto err_noiommu;
-
-	return (void *)addr;
-
-err_noiommu:
-	sparc_dma_free_resource((void *)addr, len_total);
-err_nomem:
-	free_pages(va, order);
-err_nopages:
-	return NULL;
-}
-
-static void sbus_free_coherent(struct device *dev, size_t n, void *p,
-		dma_addr_t ba, unsigned long attrs)
-{
-	struct page *pgv;
-
-	n = PAGE_ALIGN(n);
-	if (!sparc_dma_free_resource(p, n))
-		return;
-
-	pgv = virt_to_page(p);
-	sbus_unmap_dma_area(dev, ba, n);
-
-	__free_pages(pgv, get_order(n));
-}
-
-/*
- * Map a chunk of memory so that devices can see it.
- * CPU view of this memory may be inconsistent with
- * a device view and explicit flushing is necessary.
- */
-static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t len,
-		enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	void *va = page_address(page) + offset;
-
-	/* XXX why are some lengths signed, others unsigned? */
-	if (len <= 0) {
-		return 0;
-	}
-	/* XXX So what is maxphys for us and how do drivers know it? */
-	if (len > 256*1024) {	/* __get_free_pages() limit */
-		return 0;
-	}
-	return mmu_get_scsi_one(dev, va, len);
-}
-
-static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
-		enum dma_data_direction dir, unsigned long attrs)
-{
-	mmu_release_scsi_one(dev, ba, n);
-}
-
-static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
-		enum dma_data_direction dir, unsigned long attrs)
-{
-	mmu_get_scsi_sgl(dev, sg, n);
-	return n;
-}
-
-static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
-		enum dma_data_direction dir, unsigned long attrs)
-{
-	mmu_release_scsi_sgl(dev, sg, n);
-}
-
-static const struct dma_map_ops sbus_dma_ops = {
-	.alloc = sbus_alloc_coherent,
-	.free = sbus_free_coherent,
-	.map_page = sbus_map_page,
-	.unmap_page = sbus_unmap_page,
-	.map_sg = sbus_map_sg,
-	.unmap_sg = sbus_unmap_sg,
-};
-
 static int __init sparc_register_ioport(void)
 {
 	register_proc_sparc_ioport();
@@ -491,7 +373,7 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
 	dma_make_coherent(paddr, PAGE_ALIGN(size));
 }
 
-const struct dma_map_ops *dma_ops = &sbus_dma_ops;
+const struct dma_map_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
 #ifdef CONFIG_PROC_FS
@@ -12,7 +12,7 @@
 #include <linux/mm.h>
 #include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
 #include <linux/bitops.h>
-#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 
@@ -140,18 +140,26 @@ nexti: scan = find_next_zero_bit(iounit->bmap, limit, scan);
 	return vaddr;
 }
 
-static __u32 iounit_get_scsi_one(struct device *dev, char *vaddr, unsigned long len)
+static dma_addr_t iounit_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t len, enum dma_data_direction dir,
+		unsigned long attrs)
 {
+	void *vaddr = page_address(page) + offset;
 	struct iounit_struct *iounit = dev->archdata.iommu;
 	unsigned long ret, flags;
 
+	/* XXX So what is maxphys for us and how do drivers know it? */
+	if (!len || len > 256 * 1024)
+		return DMA_MAPPING_ERROR;
+
 	spin_lock_irqsave(&iounit->lock, flags);
 	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
 	spin_unlock_irqrestore(&iounit->lock, flags);
 	return ret;
 }
 
-static void iounit_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
+static int iounit_map_sg(struct device *dev, struct scatterlist *sg, int sz,
+		enum dma_data_direction dir, unsigned long attrs)
 {
 	struct iounit_struct *iounit = dev->archdata.iommu;
 	unsigned long flags;
@@ -165,9 +173,11 @@ static void iounit_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
 		sg = sg_next(sg);
 	}
 	spin_unlock_irqrestore(&iounit->lock, flags);
+	return sz;
 }
 
-static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
+static void iounit_unmap_page(struct device *dev, dma_addr_t vaddr, size_t len,
+		enum dma_data_direction dir, unsigned long attrs)
 {
 	struct iounit_struct *iounit = dev->archdata.iommu;
 	unsigned long flags;
@@ -181,7 +191,8 @@ static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
 	spin_unlock_irqrestore(&iounit->lock, flags);
 }
 
-static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
+static void iounit_unmap_sg(struct device *dev, struct scatterlist *sg, int sz,
+		enum dma_data_direction dir, unsigned long attrs)
 {
 	struct iounit_struct *iounit = dev->archdata.iommu;
 	unsigned long flags;
@@ -201,14 +212,27 @@ static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
 }
 
 #ifdef CONFIG_SBUS
-static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, unsigned long addr, int len)
+static void *iounit_alloc(struct device *dev, size_t len,
+		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
 	struct iounit_struct *iounit = dev->archdata.iommu;
-	unsigned long page, end;
+	unsigned long va, addr, page, end, ret;
 	pgprot_t dvma_prot;
 	iopte_t __iomem *iopte;
 
-	*pba = addr;
+	/* XXX So what is maxphys for us and how do drivers know it? */
+	if (!len || len > 256 * 1024)
+		return NULL;
+
+	len = PAGE_ALIGN(len);
+	va = __get_free_pages(gfp, get_order(len));
+	if (!va)
+		return NULL;
+
+	addr = ret = sparc_dma_alloc_resource(dev, len);
+	if (!addr)
+		goto out_free_pages;
+	*dma_handle = addr;
 
 	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
 	end = PAGE_ALIGN((addr + len));
@@ -237,27 +261,32 @@ static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, unsigned long addr, int len)
 	flush_cache_all();
 	flush_tlb_all();
 
-	return 0;
+	return (void *)ret;
+
+out_free_pages:
+	free_pages(va, get_order(len));
+	return NULL;
 }
 
-static void iounit_unmap_dma_area(struct device *dev, unsigned long addr, int len)
+static void iounit_free(struct device *dev, size_t size, void *cpu_addr,
+		dma_addr_t dma_addr, unsigned long attrs)
 {
 	/* XXX Somebody please fill this in */
 }
 #endif
 
-static const struct sparc32_dma_ops iounit_dma_ops = {
-	.get_scsi_one = iounit_get_scsi_one,
-	.get_scsi_sgl = iounit_get_scsi_sgl,
-	.release_scsi_one = iounit_release_scsi_one,
-	.release_scsi_sgl = iounit_release_scsi_sgl,
+static const struct dma_map_ops iounit_dma_ops = {
 #ifdef CONFIG_SBUS
-	.map_dma_area = iounit_map_dma_area,
-	.unmap_dma_area = iounit_unmap_dma_area,
+	.alloc = iounit_alloc,
+	.free = iounit_free,
 #endif
+	.map_page = iounit_map_page,
+	.unmap_page = iounit_unmap_page,
+	.map_sg = iounit_map_sg,
+	.unmap_sg = iounit_unmap_sg,
 };
 
 void __init ld_mmu_iounit(void)
 {
-	sparc32_dma_ops = &iounit_dma_ops;
+	dma_ops = &iounit_dma_ops;
 }
@@ -13,7 +13,7 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
-#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 
@@ -205,38 +205,44 @@ static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
 	return busa0;
 }
 
-static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
+static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t len)
 {
-	unsigned long off;
-	int npages;
-	struct page *page;
-	u32 busa;
+	void *vaddr = page_address(page) + offset;
+	unsigned long off = (unsigned long)vaddr & ~PAGE_MASK;
+	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-	off = (unsigned long)vaddr & ~PAGE_MASK;
-	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
-	page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
-	busa = iommu_get_one(dev, page, npages);
-	return busa + off;
+	/* XXX So what is maxphys for us and how do drivers know it? */
+	if (!len || len > 256 * 1024)
+		return DMA_MAPPING_ERROR;
+	return iommu_get_one(dev, virt_to_page(vaddr), npages) + off;
 }
 
-static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
+static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
+		struct page *page, unsigned long offset, size_t len,
+		enum dma_data_direction dir, unsigned long attrs)
 {
 	flush_page_for_dma(0);
-	return iommu_get_scsi_one(dev, vaddr, len);
+	return __sbus_iommu_map_page(dev, page, offset, len);
 }
 
-static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len)
+static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
+		struct page *page, unsigned long offset, size_t len,
+		enum dma_data_direction dir, unsigned long attrs)
 {
-	unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;
+	void *vaddr = page_address(page) + offset;
+	unsigned long p = ((unsigned long)vaddr) & PAGE_MASK;
 
-	while(page < ((unsigned long)(vaddr + len))) {
-		flush_page_for_dma(page);
-		page += PAGE_SIZE;
+	while (p < (unsigned long)vaddr + len) {
+		flush_page_for_dma(p);
+		p += PAGE_SIZE;
 	}
-	return iommu_get_scsi_one(dev, vaddr, len);
+
+	return __sbus_iommu_map_page(dev, page, offset, len);
 }
 
-static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
+static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sg,
+		int sz, enum dma_data_direction dir, unsigned long attrs)
 {
 	int n;
 
@@ -248,9 +254,12 @@ static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
 		sg->dma_length = sg->length;
 		sg = sg_next(sg);
 	}
+
+	return sz;
 }
 
-static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz)
+static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sg,
+		int sz, enum dma_data_direction dir, unsigned long attrs)
 {
 	unsigned long page, oldpage = 0;
 	int n, i;
@@ -279,6 +288,8 @@ static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz)
 		sg->dma_length = sg->length;
 		sg = sg_next(sg);
 	}
+
+	return sz;
 }
 
 static void iommu_release_one(struct device *dev, u32 busa, int npages)
@@ -297,23 +308,23 @@ static void iommu_release_one(struct device *dev, u32 busa, int npages)
 	bit_map_clear(&iommu->usemap, ioptex, npages);
 }
 
-static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
+static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
+		size_t len, enum dma_data_direction dir, unsigned long attrs)
 {
-	unsigned long off;
+	unsigned long off = dma_addr & ~PAGE_MASK;
 	int npages;
 
-	off = vaddr & ~PAGE_MASK;
 	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
-	iommu_release_one(dev, vaddr & PAGE_MASK, npages);
+	iommu_release_one(dev, dma_addr & PAGE_MASK, npages);
 }
 
-static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
+static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
+		int sz, enum dma_data_direction dir, unsigned long attrs)
 {
 	int n;
 
 	while(sz != 0) {
 		--sz;
 
 		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
 		iommu_release_one(dev, sg->dma_address & PAGE_MASK, n);
 		sg->dma_address = 0x21212121;
@@ -322,15 +333,28 @@ static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
 }
 
 #ifdef CONFIG_SBUS
-static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
-		unsigned long addr, int len)
+static void *sbus_iommu_alloc(struct device *dev, size_t len,
+		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
 	struct iommu_struct *iommu = dev->archdata.iommu;
-	unsigned long page, end;
+	unsigned long va, addr, page, end, ret;
 	iopte_t *iopte = iommu->page_table;
 	iopte_t *first;
 	int ioptex;
 
+	/* XXX So what is maxphys for us and how do drivers know it? */
+	if (!len || len > 256 * 1024)
+		return NULL;
+
+	len = PAGE_ALIGN(len);
+	va = __get_free_pages(gfp, get_order(len));
+	if (va == 0)
+		return NULL;
+
+	addr = ret = sparc_dma_alloc_resource(dev, len);
+	if (!addr)
+		goto out_free_pages;
+
 	BUG_ON((va & ~PAGE_MASK) != 0);
 	BUG_ON((addr & ~PAGE_MASK) != 0);
 	BUG_ON((len & ~PAGE_MASK) != 0);
@@ -385,16 +409,25 @@ static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
 	flush_tlb_all();
 	iommu_invalidate(iommu->regs);
 
-	*pba = iommu->start + (ioptex << PAGE_SHIFT);
-	return 0;
+	*dma_handle = iommu->start + (ioptex << PAGE_SHIFT);
+	return (void *)ret;
+
+out_free_pages:
+	free_pages(va, get_order(len));
+	return NULL;
 }
 
-static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len)
+static void sbus_iommu_free(struct device *dev, size_t len, void *cpu_addr,
+		dma_addr_t busa, unsigned long attrs)
 {
 	struct iommu_struct *iommu = dev->archdata.iommu;
 	iopte_t *iopte = iommu->page_table;
-	unsigned long end;
+	struct page *page = virt_to_page(cpu_addr);
 	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
+	unsigned long end;
+
+	if (!sparc_dma_free_resource(cpu_addr, len))
+		return;
 
 	BUG_ON((busa & ~PAGE_MASK) != 0);
 	BUG_ON((len & ~PAGE_MASK) != 0);
@@ -408,38 +441,40 @@ static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len)
 	flush_tlb_all();
 	iommu_invalidate(iommu->regs);
 	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
+
+	__free_pages(page, get_order(len));
 }
 #endif
 
-static const struct sparc32_dma_ops iommu_dma_gflush_ops = {
-	.get_scsi_one = iommu_get_scsi_one_gflush,
-	.get_scsi_sgl = iommu_get_scsi_sgl_gflush,
-	.release_scsi_one = iommu_release_scsi_one,
-	.release_scsi_sgl = iommu_release_scsi_sgl,
+static const struct dma_map_ops sbus_iommu_dma_gflush_ops = {
 #ifdef CONFIG_SBUS
-	.map_dma_area = iommu_map_dma_area,
-	.unmap_dma_area = iommu_unmap_dma_area,
+	.alloc = sbus_iommu_alloc,
+	.free = sbus_iommu_free,
 #endif
+	.map_page = sbus_iommu_map_page_gflush,
+	.unmap_page = sbus_iommu_unmap_page,
+	.map_sg = sbus_iommu_map_sg_gflush,
+	.unmap_sg = sbus_iommu_unmap_sg,
 };
 
-static const struct sparc32_dma_ops iommu_dma_pflush_ops = {
-	.get_scsi_one = iommu_get_scsi_one_pflush,
-	.get_scsi_sgl = iommu_get_scsi_sgl_pflush,
-	.release_scsi_one = iommu_release_scsi_one,
-	.release_scsi_sgl = iommu_release_scsi_sgl,
+static const struct dma_map_ops sbus_iommu_dma_pflush_ops = {
 #ifdef CONFIG_SBUS
-	.map_dma_area = iommu_map_dma_area,
-	.unmap_dma_area = iommu_unmap_dma_area,
+	.alloc = sbus_iommu_alloc,
+	.free = sbus_iommu_free,
 #endif
+	.map_page = sbus_iommu_map_page_pflush,
+	.unmap_page = sbus_iommu_unmap_page,
+	.map_sg = sbus_iommu_map_sg_pflush,
+	.unmap_sg = sbus_iommu_unmap_sg,
 };
 
 void __init ld_mmu_iommu(void)
 {
 	if (flush_page_for_dma_global) {
 		/* flush_page_for_dma flushes everything, no matter of what page is it */
-		sparc32_dma_ops = &iommu_dma_gflush_ops;
+		dma_ops = &sbus_iommu_dma_gflush_ops;
 	} else {
-		sparc32_dma_ops = &iommu_dma_pflush_ops;
+		dma_ops = &sbus_iommu_dma_pflush_ops;
 	}
 
 	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
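The refactor is invisible to drivers: they keep calling the generic DMA API, which now reaches the per-IOMMU dma_map_ops selected in ld_mmu_iounit()/ld_mmu_iommu() without the extra sparc32_dma_ops hop. A hedged sketch of that unchanged caller-side usage follows; the function, device pointer, and lengths are hypothetical and not taken from this commit.

/*
 * Hypothetical driver-side usage (illustration only): the generic DMA API is
 * unchanged by this commit, it simply dispatches through the per-IOMMU
 * dma_map_ops (iounit_dma_ops or sbus_iommu_dma_*_ops) installed at MMU
 * load time.  "dev", "buf" and the lengths are assumed to be valid.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int example_setup_dma(struct device *dev, void *buf, size_t buf_len,
		size_t ctrl_len)
{
	dma_addr_t ctrl_handle, buf_handle;
	void *ctrl;

	/* coherent allocation: served by the .alloc callback */
	ctrl = dma_alloc_coherent(dev, ctrl_len, &ctrl_handle, GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	/* streaming mapping: served by the .map_page callback */
	buf_handle = dma_map_single(dev, buf, buf_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buf_handle)) {
		dma_free_coherent(dev, ctrl_len, ctrl, ctrl_handle);
		return -ENOMEM;
	}

	/* ... hand ctrl_handle/buf_handle to the device, then tear down ... */
	dma_unmap_single(dev, buf_handle, buf_len, DMA_TO_DEVICE);
	dma_free_coherent(dev, ctrl_len, ctrl, ctrl_handle);
	return 0;
}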