Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-30 23:26:45 +07:00)
arm64 : Introduce support for ACPI _CCA object
ACPI specification section 6.2.17 (_CCA) states that ARM platforms require the ACPI _CCA object to be specified for DMA-capable devices. Therefore, this patch selects ACPI_CCA_REQUIRED in the arm64 Kconfig.

In addition, to handle the case when _CCA is missing, arm64 assigns dummy_dma_ops to disable the DMA capability of the device.

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Mark Salter <msalter@redhat.com>
Signed-off-by: Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent d056267483
commit b6197b93fa
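For illustration only (not part of this commit): a minimal, hypothetical driver-side sketch of what falling back to dummy_dma_ops means in practice, assuming the DMA API of this kernel generation. The helper name example_check_dma and the 4 KiB test buffer are invented for the example. Because the dummy ops make .map_page() return DMA_ERROR_CODE, .mapping_error() return 1 and .dma_supported() return 0 (so dma_set_mask() is also refused), every mapping attempt on an ACPI device without _CCA fails cleanly and the driver has to fall back to PIO or abort.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>
#include <linux/slab.h>

/* Hypothetical probe-time check; not part of the patch below. */
static int example_check_dma(struct device *dev)
{
        void *buf;
        dma_addr_t handle;

        buf = kmalloc(SZ_4K, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /*
         * With dummy_dma_ops installed (firmware did not provide _CCA),
         * .map_page() returns DMA_ERROR_CODE and .mapping_error() reports
         * the failure, so this branch is taken.
         */
        handle = dma_map_single(dev, buf, SZ_4K, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle)) {
                dev_warn(dev, "DMA unusable, falling back to PIO\n");
                kfree(buf);
                return -EIO;
        }

        /* Real DMA ops were set up; tear the test mapping down again. */
        dma_unmap_single(dev, handle, SZ_4K, DMA_TO_DEVICE);
        kfree(buf);
        return 0;
}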
arch/arm64/Kconfig
@@ -1,5 +1,6 @@
 config ARM64
        def_bool y
+       select ACPI_CCA_REQUIRED if ACPI
        select ACPI_GENERIC_GSI if ACPI
        select ACPI_REDUCED_HARDWARE_ONLY if ACPI
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
arch/arm64/include/asm/dma-mapping.h
@@ -18,6 +18,7 @@
 
 #ifdef __KERNEL__
 
+#include <linux/acpi.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 
@@ -28,13 +29,23 @@
 
 #define DMA_ERROR_CODE (~(dma_addr_t)0)
 extern struct dma_map_ops *dma_ops;
+extern struct dma_map_ops dummy_dma_ops;
 
 static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
-       if (unlikely(!dev) || !dev->archdata.dma_ops)
+       if (unlikely(!dev))
                return dma_ops;
-       else
+       else if (dev->archdata.dma_ops)
                return dev->archdata.dma_ops;
+       else if (acpi_disabled)
+               return dma_ops;
+
+       /*
+        * When ACPI is enabled, if arch_set_dma_ops is not called,
+        * we will disable device DMA capability by setting it
+        * to dummy_dma_ops.
+        */
+       return &dummy_dma_ops;
 }
 
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
@@ -48,6 +59,9 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                       struct iommu_ops *iommu, bool coherent)
 {
+       if (!acpi_disabled && !dev->archdata.dma_ops)
+               dev->archdata.dma_ops = dma_ops;
+
        dev->archdata.dma_coherent = coherent;
 }
 #define arch_setup_dma_ops     arch_setup_dma_ops
arch/arm64/mm/dma-mapping.c
@@ -414,6 +414,98 @@ static int __init atomic_pool_init(void)
        return -ENOMEM;
 }
 
+/********************************************
+ * The following APIs are for dummy DMA ops *
+ ********************************************/
+
+static void *__dummy_alloc(struct device *dev, size_t size,
+                          dma_addr_t *dma_handle, gfp_t flags,
+                          struct dma_attrs *attrs)
+{
+       return NULL;
+}
+
+static void __dummy_free(struct device *dev, size_t size,
+                        void *vaddr, dma_addr_t dma_handle,
+                        struct dma_attrs *attrs)
+{
+}
+
+static int __dummy_mmap(struct device *dev,
+                       struct vm_area_struct *vma,
+                       void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                       struct dma_attrs *attrs)
+{
+       return -ENXIO;
+}
+
+static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
+                                  unsigned long offset, size_t size,
+                                  enum dma_data_direction dir,
+                                  struct dma_attrs *attrs)
+{
+       return DMA_ERROR_CODE;
+}
+
+static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
+                              size_t size, enum dma_data_direction dir,
+                              struct dma_attrs *attrs)
+{
+}
+
+static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
+                         int nelems, enum dma_data_direction dir,
+                         struct dma_attrs *attrs)
+{
+       return 0;
+}
+
+static void __dummy_unmap_sg(struct device *dev,
+                            struct scatterlist *sgl, int nelems,
+                            enum dma_data_direction dir,
+                            struct dma_attrs *attrs)
+{
+}
+
+static void __dummy_sync_single(struct device *dev,
+                               dma_addr_t dev_addr, size_t size,
+                               enum dma_data_direction dir)
+{
+}
+
+static void __dummy_sync_sg(struct device *dev,
+                           struct scatterlist *sgl, int nelems,
+                           enum dma_data_direction dir)
+{
+}
+
+static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
+{
+       return 1;
+}
+
+static int __dummy_dma_supported(struct device *hwdev, u64 mask)
+{
+       return 0;
+}
+
+struct dma_map_ops dummy_dma_ops = {
+       .alloc                  = __dummy_alloc,
+       .free                   = __dummy_free,
+       .mmap                   = __dummy_mmap,
+       .map_page               = __dummy_map_page,
+       .unmap_page             = __dummy_unmap_page,
+       .map_sg                 = __dummy_map_sg,
+       .unmap_sg               = __dummy_unmap_sg,
+       .sync_single_for_cpu    = __dummy_sync_single,
+       .sync_single_for_device = __dummy_sync_single,
+       .sync_sg_for_cpu        = __dummy_sync_sg,
+       .sync_sg_for_device     = __dummy_sync_sg,
+       .mapping_error          = __dummy_mapping_error,
+       .dma_supported          = __dummy_dma_supported,
+};
+EXPORT_SYMBOL(dummy_dma_ops);
+
 static int __init arm64_dma_init(void)
 {
        int ret;