Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-27 00:35:19 +07:00
Commit 9087c37584

If a device doesn't support DMA to a physical address that includes the
encryption bit (currently bit 47, so 48-bit DMA), then the DMA must occur
to unencrypted memory. SWIOTLB is used to satisfy that requirement if an
IOMMU is not active (enabled or configured in passthrough mode).

However, commit fafadcd165 ("swiotlb: don't dip into swiotlb pool for
coherent allocations") modified the coherent allocation support in SWIOTLB
to use the DMA direct coherent allocation support. When an IOMMU is not
active, this resulted in dma_alloc_coherent() failing for devices that
didn't support DMA addresses that included the encryption bit.

Addressing this requires changes to the force_dma_unencrypted() function in
kernel/dma/direct.c. Since the function is now non-trivial and SME/SEV
specific, update the DMA direct support to add an arch override for the
force_dma_unencrypted() function. The arch override is selected when
CONFIG_AMD_MEM_ENCRYPT is set. The arch override function resides in the
arch/x86/mm/mem_encrypt.c file and forces unencrypted DMA when either SEV
is active or SME is active and the device does not support DMA to physical
addresses that include the encryption bit.

Fixes: fafadcd165 ("swiotlb: don't dip into swiotlb pool for coherent allocations")
Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
[hch: moved the force_dma_unencrypted declaration to dma-mapping.h,
 fold the s390 fix from Halil Pasic]
Signed-off-by: Christoph Hellwig <hch@lst.de>
74 lines, 2.4 KiB, C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_DIRECT_H
#define _LINUX_DMA_DIRECT_H 1

#include <linux/dma-mapping.h>
#include <linux/mem_encrypt.h>

#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#else
static inline dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	dma_addr_t dev_addr = (dma_addr_t)paddr;

	return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}

static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
	phys_addr_t paddr = (phys_addr_t)dev_addr;

	return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;

	return addr + size - 1 <=
		min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */

#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
bool force_dma_unencrypted(struct device *dev);
#else
static inline bool force_dma_unencrypted(struct device *dev)
{
	return false;
}
#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */

/*
 * If memory encryption is supported, phys_to_dma will set the memory encryption
 * bit in the DMA address, and dma_to_phys will clear it. The raw __phys_to_dma
 * and __dma_to_phys versions should only be used on non-encrypted memory for
 * special occasions like DMA coherent buffers.
 */
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return __sme_set(__phys_to_dma(dev, paddr));
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
{
	return __sme_clr(__dma_to_phys(dev, daddr));
}

u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
void *dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs);
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page);
int dma_direct_supported(struct device *dev, u64 mask);
#endif /* _LINUX_DMA_DIRECT_H */
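The comment above phys_to_dma()/dma_to_phys() explains where the raw __phys_to_dma() is meant to be used: DMA coherent buffers that have been made unencrypted. The sketch below shows how a DMA direct coherent allocation can consume force_dma_unencrypted(), loosely modelled on kernel/dma/direct.c of this era. The name example_dma_direct_alloc is invented for illustration and error handling is trimmed, so this is a simplified sketch rather than the upstream implementation.

/*
 * Illustrative consumer of force_dma_unencrypted(): when it returns true,
 * the coherent buffer is switched to an unencrypted mapping and the raw
 * __phys_to_dma() (no encryption bit) provides the device address.
 */
#include <linux/dma-direct.h>
#include <linux/gfp.h>
#include <linux/set_memory.h>
#include <linux/string.h>

static void *example_dma_direct_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	void *ret;

	page = __dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
	if (!page)
		return NULL;

	ret = page_address(page);
	if (force_dma_unencrypted(dev)) {
		/* Strip the encryption bit from both the mapping and address. */
		set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
	} else {
		*dma_handle = phys_to_dma(dev, page_to_phys(page));
	}

	memset(ret, 0, size);
	return ret;
}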