mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-05 08:26:49 +07:00
00085f1efa
The dma-mapping core and the implementations do not change the DMA
attributes passed by pointer. Thus the pointer can point to const data.
However the attributes do not have to be a bitfield. Instead unsigned
long will do fine:

1. This is just simpler. Both in terms of reading the code and setting
   attributes. Instead of initializing local attributes on the stack and
   passing a pointer to it to dma_set_attr(), just set the bits.

2. It brings safety and const-correctness checking because the
   attributes are passed by value.

Semantic patches for this change (at least most of them):

    virtual patch
    virtual context

    @r@
    identifier f, attrs;
    @@
    f(...,
    - struct dma_attrs *attrs
    + unsigned long attrs
    , ...)
    {
    ...
    }

    @@
    identifier r.f;
    @@
    f(...,
    - NULL
    + 0
    )

and

    // Options: --all-includes
    virtual patch
    virtual context

    @r@
    identifier f, attrs;
    type t;
    @@
    t f(..., struct dma_attrs *attrs);

    @@
    identifier r.f;
    @@
    f(...,
    - NULL
    + 0
    )

Link: http://lkml.kernel.org/r/1468399300-5399-2-git-send-email-k.kozlowski@samsung.com
Signed-off-by: Krzysztof Kozlowski <k.kozlowski@samsung.com>
Acked-by: Vineet Gupta <vgupta@synopsys.com>
Acked-by: Robin Murphy <robin.murphy@arm.com>
Acked-by: Hans-Christian Noren Egtvedt <egtvedt@samfundet.no>
Acked-by: Mark Salter <msalter@redhat.com> [c6x]
Acked-by: Jesper Nilsson <jesper.nilsson@axis.com> [cris]
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch> [drm]
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Acked-by: Joerg Roedel <jroedel@suse.de> [iommu]
Acked-by: Fabien Dessenne <fabien.dessenne@st.com> [bdisp]
Reviewed-by: Marek Szyprowski <m.szyprowski@samsung.com> [vb2-core]
Acked-by: David Vrabel <david.vrabel@citrix.com> [xen]
Acked-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> [xen swiotlb]
Acked-by: Joerg Roedel <jroedel@suse.de> [iommu]
Acked-by: Richard Kuo <rkuo@codeaurora.org> [hexagon]
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org> [m68k]
Acked-by: Gerald Schaefer <gerald.schaefer@de.ibm.com> [s390]
Acked-by: Bjorn Andersson <bjorn.andersson@linaro.org>
Acked-by: Hans-Christian Noren Egtvedt <egtvedt@samfundet.no> [avr32]
Acked-by: Vineet Gupta <vgupta@synopsys.com> [arc]
Acked-by: Robin Murphy <robin.murphy@arm.com> [arm64 and dma-iommu]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
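For illustration, a minimal before/after sketch of a caller under this change; dev, buf, size and handle are hypothetical driver locals, not taken from the patch:

    /* Before: attributes were a struct dma_attrs built up on the stack. */
    DEFINE_DMA_ATTRS(attrs);
    dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
    handle = dma_map_single_attrs(dev, buf, size, DMA_TO_DEVICE, &attrs);

    /* After: attributes are a plain unsigned long bitmask, passed by value. */
    handle = dma_map_single_attrs(dev, buf, size, DMA_TO_DEVICE,
                                  DMA_ATTR_SKIP_CPU_SYNC);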
221 lines
5.8 KiB
C
#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/page.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

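/*
 * Pick allocation flags for the swiotlb bounce buffer: if any memblock
 * region starts below the 32-bit boundary, request GFP_DMA pages so the
 * buffer stays reachable by 32-bit-only DMA masters.
 */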
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
        struct memblock_region *reg;
        gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;

        for_each_memblock(memory, reg) {
                if (reg->base < (phys_addr_t)0xffffffff) {
                        flags |= __GFP_DMA;
                        break;
                }
        }
        return __get_free_pages(flags, order);
}

enum dma_cache_op {
        DMA_UNMAP,
        DMA_MAP,
};
static bool hypercall_cflush = false;

/* functions called by SWIOTLB */

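/*
 * Walk the buffer in XEN_PAGE_SIZE chunks, issuing one cache flush
 * hypercall per chunk: invalidate where the device may have written
 * (unmap, or mapping for DMA_FROM_DEVICE), clean where the CPU is
 * handing data to the device.
 */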
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
        size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
        struct gnttab_cache_flush cflush;
        unsigned long xen_pfn;
        size_t left = size;

        xen_pfn = (handle >> XEN_PAGE_SHIFT) + offset / XEN_PAGE_SIZE;
        offset %= XEN_PAGE_SIZE;

        do {
                size_t len = left;

                /* buffers in highmem or foreign pages cannot cross page
                 * boundaries */
                if (len + offset > XEN_PAGE_SIZE)
                        len = XEN_PAGE_SIZE - offset;

                cflush.op = 0;
                cflush.a.dev_bus_addr = xen_pfn << XEN_PAGE_SHIFT;
                cflush.offset = offset;
                cflush.length = len;

                if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
                        cflush.op = GNTTAB_CACHE_INVAL;
                if (op == DMA_MAP) {
                        if (dir == DMA_FROM_DEVICE)
                                cflush.op = GNTTAB_CACHE_INVAL;
                        else
                                cflush.op = GNTTAB_CACHE_CLEAN;
                }
                if (cflush.op)
                        HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

                offset = 0;
                xen_pfn++;
                left -= len;
        } while (left);
}

static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
}

static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}

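/*
 * Arch hooks called from the Xen swiotlb code: cache maintenance is
 * only needed for non-coherent devices, and is skipped when the caller
 * passes DMA_ATTR_SKIP_CPU_SYNC (note the attribute is now tested with
 * a plain bitwise AND on the unsigned long attrs).
 */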
void __xen_dma_map_page(struct device *hwdev, struct page *page,
             dma_addr_t dev_addr, unsigned long offset, size_t size,
             enum dma_data_direction dir, unsigned long attrs)
{
        if (is_device_dma_coherent(hwdev))
                return;
        if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                return;

        __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
}

void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        if (is_device_dma_coherent(hwdev))
                return;
        if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
                return;

        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_cpu(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        if (is_device_dma_coherent(hwdev))
                return;
        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_device(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        if (is_device_dma_coherent(hwdev))
                return;
        __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}

bool xen_arch_need_swiotlb(struct device *dev,
                           phys_addr_t phys,
                           dma_addr_t dev_addr)
{
        unsigned int xen_pfn = XEN_PFN_DOWN(phys);
        unsigned int bfn = XEN_PFN_DOWN(dev_addr);

        /*
         * The swiotlb buffer should be used if
         *      - Xen doesn't have the cache flush hypercall
         *      - The Linux page refers to foreign memory
         *      - The device doesn't support coherent DMA requests
         *
         * The Linux page may span multiple Xen pages, although it's not
         * possible to have a mix of local and foreign Xen pages. Furthermore,
         * range_straddles_page_boundary() already checks whether the buffer
         * is physically contiguous in the host RAM.
         *
         * Therefore we only need to check the first Xen page to know if we
         * require a bounce buffer because the device doesn't support coherent
         * memory and we are not able to flush the cache.
         */
        return (!hypercall_cflush && (xen_pfn != bfn) &&
                !is_device_dma_coherent(dev));
}

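/*
 * On ARM, dom0 is mapped 1:1, so the machine address equals the
 * physical address and no exchange with the hypervisor is needed to
 * obtain a contiguous region.
 */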
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
                                 unsigned int address_bits,
                                 dma_addr_t *dma_handle)
{
        if (!xen_initial_domain())
                return -EINVAL;

        /* we assume that dom0 is mapped 1:1 for now */
        *dma_handle = pstart;
        return 0;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);

void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
        return;
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL(xen_dma_ops);

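/*
 * The ops table points at the common swiotlb-xen implementation, which
 * in turn calls back into the __xen_dma_* helpers above for cache
 * maintenance on non-coherent devices.
 */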
static struct dma_map_ops xen_swiotlb_dma_ops = {
        .mapping_error = xen_swiotlb_dma_mapping_error,
        .alloc = xen_swiotlb_alloc_coherent,
        .free = xen_swiotlb_free_coherent,
        .sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
        .sync_single_for_device = xen_swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
        .map_sg = xen_swiotlb_map_sg_attrs,
        .unmap_sg = xen_swiotlb_unmap_sg_attrs,
        .map_page = xen_swiotlb_map_page,
        .unmap_page = xen_swiotlb_unmap_page,
        .dma_supported = xen_swiotlb_dma_supported,
        .set_dma_mask = xen_swiotlb_set_dma_mask,
};

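/*
 * Probe for the GNTTABOP_cache_flush hypercall by issuing a no-op flush
 * (op == 0): any return value other than -ENOSYS means the hypervisor
 * supports it, so dma_cache_maint() can rely on it later.
 */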
int __init xen_mm_init(void)
{
        struct gnttab_cache_flush cflush;
        if (!xen_initial_domain())
                return 0;
        xen_swiotlb_init(1, false);
        xen_dma_ops = &xen_swiotlb_dma_ops;

        cflush.op = 0;
        cflush.a.dev_bus_addr = 0;
        cflush.offset = 0;
        cflush.length = 0;
        if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
                hypercall_cflush = true;
        return 0;
}
arch_initcall(xen_mm_init);