mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-28 11:18:45 +07:00
52ba0746b3
Currently xen_dma_map_page concludes that DMA to anything other than
the head page of a compound page must be foreign, since the PFN of the
page is that of the head.
Fix the check to instead consider the whole of a compound page to be
local if the PFN of the head passes the 1:1 check.
We can never see a compound page which is a mixture of foreign and
local sub-pages.
The comment already correctly described the intention, but fixup the
spelling and some grammar.
This fixes the various SSH protocol errors which we have been seeing
on the cubietrucks in our automated test infrastructure.
This has been broken since commit 3567258d28 ("xen/arm: use hypercall to flush caches in map_page"), which was in v3.19-rc1.
NB arch/arm64/.../xen/page-coherent.h also includes this file.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Reviewed-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Cc: xen-devel@lists.xenproject.org
Cc: linux-arm-kernel@lists.infradead.org
Cc: stable@vger.kernel.org # v3.19+
103 lines
3.8 KiB
C
103 lines
3.8 KiB
C
#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
|
|
#define _ASM_ARM_XEN_PAGE_COHERENT_H
|
|
|
|
#include <asm/page.h>
|
|
#include <linux/dma-attrs.h>
|
|
#include <linux/dma-mapping.h>
|
|
|
|
void __xen_dma_map_page(struct device *hwdev, struct page *page,
|
|
dma_addr_t dev_addr, unsigned long offset, size_t size,
|
|
enum dma_data_direction dir, struct dma_attrs *attrs);
|
|
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
|
|
size_t size, enum dma_data_direction dir,
|
|
struct dma_attrs *attrs);
|
|
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
|
|
dma_addr_t handle, size_t size, enum dma_data_direction dir);
|
|
|
|
void __xen_dma_sync_single_for_device(struct device *hwdev,
|
|
dma_addr_t handle, size_t size, enum dma_data_direction dir);
|
|
|
|
/*
 * Allocate DMA-coherent memory by delegating to the native (non-Xen)
 * dma_ops of the device.  Returns the CPU virtual address and fills in
 * *dma_handle with the bus address, per the standard ->alloc contract.
 */
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
	     dma_addr_t *dma_handle, gfp_t flags,
	     struct dma_attrs *attrs)
{
	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}
|
|
|
|
/*
 * Free memory previously obtained from xen_alloc_coherent_pages(),
 * again delegating to the device's native dma_ops ->free hook.
 */
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
	     void *cpu_addr, dma_addr_t dma_handle,
	     struct dma_attrs *attrs)
{
	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}
|
|
|
|
/*
 * Map a page for DMA, choosing between the native dma_ops and the
 * Xen-specific path.
 *
 * A page is "local" when its (xen) PFN range covers dev_addr, i.e. the
 * target device address falls inside the compound page starting at
 * page_pfn.  The whole compound page is considered: DMA may target any
 * sub-page, not just the head, and a compound page can never mix local
 * and foreign sub-pages, so checking the head's range is sufficient.
 */
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	unsigned long page_pfn = page_to_xen_pfn(page);
	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
	/* Number of xen-granularity PFNs spanned by this (compound) page. */
	unsigned long compound_pages =
		(1<<compound_order(page)) * XEN_PFN_PER_PAGE;
	bool local = (page_pfn <= dev_pfn) &&
		(dev_pfn - page_pfn < compound_pages);

	/*
	 * Dom0 is mapped 1:1, while the Linux page can span across
	 * multiple Xen pages, it's not possible for it to contain a
	 * mix of local and foreign Xen pages. So if the first xen_pfn
	 * == mfn the page is local otherwise it's a foreign page
	 * grant-mapped in dom0. If the page is local we can safely
	 * call the native dma_ops function, otherwise we call the xen
	 * specific function.
	 */
	if (local)
		__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
	else
		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
}
|
|
|
|
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
|
|
size_t size, enum dma_data_direction dir,
|
|
struct dma_attrs *attrs)
|
|
{
|
|
unsigned long pfn = PFN_DOWN(handle);
|
|
/*
|
|
* Dom0 is mapped 1:1, while the Linux page can be spanned accross
|
|
* multiple Xen page, it's not possible to have a mix of local and
|
|
* foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
|
|
* foreign mfn will always return false. If the page is local we can
|
|
* safely call the native dma_ops function, otherwise we call the xen
|
|
* specific function.
|
|
*/
|
|
if (pfn_valid(pfn)) {
|
|
if (__generic_dma_ops(hwdev)->unmap_page)
|
|
__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
|
|
} else
|
|
__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
|
|
}
|
|
|
|
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
|
|
dma_addr_t handle, size_t size, enum dma_data_direction dir)
|
|
{
|
|
unsigned long pfn = PFN_DOWN(handle);
|
|
if (pfn_valid(pfn)) {
|
|
if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
|
|
__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
|
|
} else
|
|
__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
|
|
}
|
|
|
|
static inline void xen_dma_sync_single_for_device(struct device *hwdev,
|
|
dma_addr_t handle, size_t size, enum dma_data_direction dir)
|
|
{
|
|
unsigned long pfn = PFN_DOWN(handle);
|
|
if (pfn_valid(pfn)) {
|
|
if (__generic_dma_ops(hwdev)->sync_single_for_device)
|
|
__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
|
|
} else
|
|
__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
|
|
}
|
|
|
|
#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
|