mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-23 22:59:08 +07:00
8eaef922e9
commit 2b8652936f0ca9ca2e6c984ae76c7bfcda1b3f22 upstream We recently introduced a 1 GB sized ZONE_DMA to cater for platforms incorporating masters that can address less than 32 bits of DMA, in particular the Raspberry Pi 4, which has 4 or 8 GB of DRAM, but has peripherals that can only address up to 1 GB (and its PCIe host bridge can only access the bottom 3 GB). Instructing the DMA layer about these limitations is straight-forward, even though we had to fix some issues regarding memory limits set in the IORT for named components, and regarding the handling of ACPI _DMA methods. However, the DMA layer also needs to be able to allocate memory that is guaranteed to meet those DMA constraints, for bounce buffering as well as allocating the backing for consistent mappings. This is why the 1 GB ZONE_DMA was introduced recently. Unfortunately, it turns out that having a 1 GB ZONE_DMA as well as a ZONE_DMA32 causes problems with kdump, and potentially in other places where allocations cannot cross zone boundaries. Therefore, we should avoid having two separate DMA zones when possible. So let's do an early scan of the IORT, and only create the ZONE_DMA if we encounter any devices that need it. This puts the burden on the firmware to describe such limitations in the IORT, which may be redundant (and less precise) if _DMA methods are also being provided. However, it should be noted that this situation is highly unusual for arm64 ACPI machines. Also, the DMA subsystem still gives precedence to the _DMA method if implemented, and so we will not lose the ability to perform streaming DMA outside the ZONE_DMA if the _DMA method permits it. 
[nsaenz: unified implementation with DT's counterpart] Signed-off-by: Ard Biesheuvel <ardb@kernel.org> Signed-off-by: Nicolas Saenz Julienne <nsaenzjulienne@suse.de> Tested-by: Jeremy Linton <jeremy.linton@arm.com> Acked-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> Acked-by: Hanjun Guo <guohanjun@huawei.com> Cc: Jeremy Linton <jeremy.linton@arm.com> Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com> Cc: Nicolas Saenz Julienne <nsaenzjulienne@suse.de> Cc: Rob Herring <robh+dt@kernel.org> Cc: Christoph Hellwig <hch@lst.de> Cc: Robin Murphy <robin.murphy@arm.com> Cc: Hanjun Guo <guohanjun@huawei.com> Cc: Sudeep Holla <sudeep.holla@arm.com> Cc: Anshuman Khandual <anshuman.khandual@arm.com> Link: https://lore.kernel.org/r/20201119175400.9995-7-nsaenzjulienne@suse.de Signed-off-by: Catalin Marinas <catalin.marinas@arm.com> Cc: <stable@vger.kernel.org> Signed-off-by: Jing Xiangfeng <jingxiangfeng@huawei.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
65 lines
2.3 KiB
C
65 lines
2.3 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
|
|
/*
|
|
* Copyright (C) 2016, Semihalf
|
|
* Author: Tomasz Nowicki <tn@semihalf.com>
|
|
*/
|
|
|
|
#ifndef __ACPI_IORT_H__
|
|
#define __ACPI_IORT_H__
|
|
|
|
#include <linux/acpi.h>
|
|
#include <linux/fwnode.h>
|
|
#include <linux/irqdomain.h>
|
|
|
|
#define IORT_IRQ_MASK(irq) (irq & 0xffffffffULL)
|
|
#define IORT_IRQ_TRIGGER_MASK(irq) ((irq >> 32) & 0xffffffffULL)
|
|
|
|
/*
|
|
* PMCG model identifiers for use in smmu pmu driver. Please note
|
|
* that this is purely for the use of software and has nothing to
|
|
* do with hardware or with IORT specification.
|
|
*/
|
|
#define IORT_SMMU_V3_PMCG_GENERIC 0x00000000 /* Generic SMMUv3 PMCG */
|
|
#define IORT_SMMU_V3_PMCG_HISI_HIP08 0x00000001 /* HiSilicon HIP08 PMCG */
|
|
|
|
int iort_register_domain_token(int trans_id, phys_addr_t base,
|
|
struct fwnode_handle *fw_node);
|
|
void iort_deregister_domain_token(int trans_id);
|
|
struct fwnode_handle *iort_find_domain_token(int trans_id);
|
|
#ifdef CONFIG_ACPI_IORT
|
|
void acpi_iort_init(void);
|
|
u32 iort_msi_map_id(struct device *dev, u32 id);
|
|
struct irq_domain *iort_get_device_domain(struct device *dev, u32 id,
|
|
enum irq_domain_bus_token bus_token);
|
|
void acpi_configure_pmsi_domain(struct device *dev);
|
|
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
|
|
/* IOMMU interface */
|
|
void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *size);
|
|
const struct iommu_ops *iort_iommu_configure_id(struct device *dev,
|
|
const u32 *id_in);
|
|
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head);
|
|
phys_addr_t acpi_iort_dma_get_max_cpu_address(void);
|
|
#else
|
|
/* !CONFIG_ACPI_IORT stub: there is no IORT to parse, so init is a no-op. */
static inline void acpi_iort_init(void)
{
}
|
|
/*
 * !CONFIG_ACPI_IORT stub: with no IORT available there is no ID
 * translation to apply, so the MSI input ID is returned unchanged.
 */
static inline u32 iort_msi_map_id(struct device *dev, u32 id)
{
	return id;
}
|
|
/*
 * !CONFIG_ACPI_IORT stub: no MSI irqdomain can be looked up through the
 * IORT, so callers always see "no domain found".
 */
static inline struct irq_domain *iort_get_device_domain(
	struct device *dev, u32 id, enum irq_domain_bus_token bus_token)
{
	return NULL;
}
|
|
/* !CONFIG_ACPI_IORT stub: no platform-MSI domain to attach, no-op. */
static inline void acpi_configure_pmsi_domain(struct device *dev)
{
}
|
|
/* IOMMU interface */
|
|
/*
 * !CONFIG_ACPI_IORT stub: no IORT-described DMA limits exist, so the
 * caller's dma_addr/size are left untouched.
 */
static inline void iort_dma_setup(struct device *dev, u64 *dma_addr,
				  u64 *size)
{
}
|
|
static inline const struct iommu_ops *iort_iommu_configure_id(
|
|
struct device *dev, const u32 *id_in)
|
|
{ return NULL; }
|
|
/*
 * !CONFIG_ACPI_IORT stub: there are no IORT-derived reserved MSI regions
 * to add to @head; report zero regions (success).
 */
static inline
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{
	return 0;
}
|
|
|
|
static inline phys_addr_t acpi_iort_dma_get_max_cpu_address(void)
|
|
{ return PHYS_ADDR_MAX; }
|
|
#endif
|
|
|
|
#endif /* __ACPI_IORT_H__ */
|