diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 767605a5c659..1cb50d21c21d 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -145,6 +145,16 @@ config PCIE_IPROC_BCMA
 	  Say Y here if you want to use the Broadcom iProc PCIe controller
 	  through the BCMA bus interface
 
+config PCIE_IPROC_MSI
+	bool "Broadcom iProc PCIe MSI support"
+	depends on PCIE_IPROC_PLATFORM || PCIE_IPROC_BCMA
+	depends on PCI_MSI
+	select PCI_MSI_IRQ_DOMAIN
+	default ARCH_BCM_IPROC
+	help
+	  Say Y here if you want to enable MSI support for Broadcom's iProc
+	  PCIe controller
+
 config PCIE_ALTERA
 	bool "Altera PCIe controller"
 	depends on ARM || NIOS2
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 9d4d3c6924a1..0e4e95e39c21 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
 obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
 obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
 obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
+obj-$(CONFIG_PCIE_IPROC_MSI) += pcie-iproc-msi.o
 obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o
 obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o
 obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o
diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c
index 96a7d999fd5e..0d7bee4a0d26 100644
--- a/drivers/pci/host/pcie-iproc-bcma.c
+++ b/drivers/pci/host/pcie-iproc-bcma.c
@@ -55,6 +55,7 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
 	bcma_set_drvdata(bdev, pcie);
 
 	pcie->base = bdev->io_addr;
+	pcie->base_addr = bdev->addr;
 
 	res_mem.start = bdev->addr_s[0];
 	res_mem.end = bdev->addr_s[0] + SZ_128M - 1;
diff --git a/drivers/pci/host/pcie-iproc-msi.c b/drivers/pci/host/pcie-iproc-msi.c
new file mode 100644
index 000000000000..9a2973bdc78a
--- /dev/null
+++ b/drivers/pci/host/pcie-iproc-msi.c
@@ -0,0 +1,675 @@
+/*
+ * Copyright (C) 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+
+#include "pcie-iproc.h"
+
+#define IPROC_MSI_INTR_EN_SHIFT		11
+#define IPROC_MSI_INTR_EN		BIT(IPROC_MSI_INTR_EN_SHIFT)
+#define IPROC_MSI_INT_N_EVENT_SHIFT	1
+#define IPROC_MSI_INT_N_EVENT		BIT(IPROC_MSI_INT_N_EVENT_SHIFT)
+#define IPROC_MSI_EQ_EN_SHIFT		0
+#define IPROC_MSI_EQ_EN			BIT(IPROC_MSI_EQ_EN_SHIFT)
+
+#define IPROC_MSI_EQ_MASK		0x3f
+
+/* Max number of GIC interrupts */
+#define NR_HW_IRQS			6
+
+/* Number of entries in each event queue */
+#define EQ_LEN				64
+
+/* Size of each event queue memory region */
+#define EQ_MEM_REGION_SIZE		SZ_4K
+
+/* Size of each MSI address region */
+#define MSI_MEM_REGION_SIZE		SZ_4K
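+
+/*
+ * Each event queue entry holds one 4-byte MSI data word, so a queue of
+ * EQ_LEN entries occupies EQ_LEN * sizeof(u32) = 256 bytes. Controllers
+ * that page out the queues (nr_eq_region > 1) give each queue its own
+ * 4K region instead; the same applies to the MSI posted-write address
+ * regions. See iproc_msi_eq_offset() and iproc_msi_addr_offset() below.
+ */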
+
+enum iproc_msi_reg {
+	IPROC_MSI_EQ_PAGE = 0,
+	IPROC_MSI_EQ_PAGE_UPPER,
+	IPROC_MSI_PAGE,
+	IPROC_MSI_PAGE_UPPER,
+	IPROC_MSI_CTRL,
+	IPROC_MSI_EQ_HEAD,
+	IPROC_MSI_EQ_TAIL,
+	IPROC_MSI_INTS_EN,
+	IPROC_MSI_REG_SIZE,
+};
+
+struct iproc_msi;
+
+/**
+ * iProc MSI group
+ *
+ * One MSI group is allocated per GIC interrupt, serviced by one iProc MSI
+ * event queue.
+ *
+ * @msi: pointer to iProc MSI data
+ * @gic_irq: GIC interrupt
+ * @eq: Event queue number
+ */
+struct iproc_msi_grp {
+	struct iproc_msi *msi;
+	int gic_irq;
+	unsigned int eq;
+};
+
+/**
+ * iProc event queue based MSI
+ *
+ * Only meant to be used on platforms without MSI support integrated into the
+ * GIC.
+ *
+ * @pcie: pointer to iProc PCIe data
+ * @reg_offsets: MSI register offsets
+ * @grps: MSI groups
+ * @nr_irqs: total number of interrupts connected to the GIC
+ * @nr_cpus: total number of CPUs
+ * @has_inten_reg: indicates the MSI interrupt enable register needs to be
+ * set explicitly (required for some legacy platforms)
+ * @bitmap: MSI vector bitmap
+ * @bitmap_lock: lock to protect access to the MSI bitmap
+ * @nr_msi_vecs: total number of MSI vectors
+ * @inner_domain: inner IRQ domain
+ * @msi_domain: MSI IRQ domain
+ * @nr_eq_region: required number of 4K aligned memory regions for MSI event
+ * queues
+ * @nr_msi_region: required number of 4K aligned address regions for MSI
+ * posted writes
+ * @eq_cpu: pointer to allocated memory region for MSI event queues
+ * @eq_dma: DMA address of MSI event queues
+ * @msi_addr: MSI address
+ */
+struct iproc_msi {
+	struct iproc_pcie *pcie;
+	const u16 (*reg_offsets)[IPROC_MSI_REG_SIZE];
+	struct iproc_msi_grp *grps;
+	int nr_irqs;
+	int nr_cpus;
+	bool has_inten_reg;
+	unsigned long *bitmap;
+	struct mutex bitmap_lock;
+	unsigned int nr_msi_vecs;
+	struct irq_domain *inner_domain;
+	struct irq_domain *msi_domain;
+	unsigned int nr_eq_region;
+	unsigned int nr_msi_region;
+	void *eq_cpu;
+	dma_addr_t eq_dma;
+	phys_addr_t msi_addr;
+};
+
+static const u16 iproc_msi_reg_paxb[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
+	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x210, 0x250, 0x254, 0x208 },
+	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x214, 0x258, 0x25c, 0x208 },
+	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x218, 0x260, 0x264, 0x208 },
+	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x21c, 0x268, 0x26c, 0x208 },
+	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x220, 0x270, 0x274, 0x208 },
+	{ 0x200, 0x2c0, 0x204, 0x2c4, 0x224, 0x278, 0x27c, 0x208 },
+};
+
+static const u16 iproc_msi_reg_paxc[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = {
+	{ 0xc00, 0xc04, 0xc08, 0xc0c, 0xc40, 0xc50, 0xc60 },
+	{ 0xc10, 0xc14, 0xc18, 0xc1c, 0xc44, 0xc54, 0xc64 },
+	{ 0xc20, 0xc24, 0xc28, 0xc2c, 0xc48, 0xc58, 0xc68 },
+	{ 0xc30, 0xc34, 0xc38, 0xc3c, 0xc4c, 0xc5c, 0xc6c },
+};
+
+static inline u32 iproc_msi_read_reg(struct iproc_msi *msi,
+				     enum iproc_msi_reg reg,
+				     unsigned int eq)
+{
+	struct iproc_pcie *pcie = msi->pcie;
+
+	return readl_relaxed(pcie->base + msi->reg_offsets[eq][reg]);
+}
+
+static inline void iproc_msi_write_reg(struct iproc_msi *msi,
+				       enum iproc_msi_reg reg,
+				       int eq, u32 val)
+{
+	struct iproc_pcie *pcie = msi->pcie;
+
+	writel_relaxed(val, pcie->base + msi->reg_offsets[eq][reg]);
+}
+
+static inline u32 hwirq_to_group(struct iproc_msi *msi, unsigned long hwirq)
+{
+	return (hwirq % msi->nr_irqs);
+}
+
+static inline unsigned int iproc_msi_addr_offset(struct iproc_msi *msi,
+						 unsigned long hwirq)
+{
+	if (msi->nr_msi_region > 1)
+		return hwirq_to_group(msi, hwirq) * MSI_MEM_REGION_SIZE;
+	else
+		return hwirq_to_group(msi, hwirq) * sizeof(u32);
+}
+
+static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq)
+{
+	if (msi->nr_eq_region > 1)
+		return eq * EQ_MEM_REGION_SIZE;
+	else
+		return eq * EQ_LEN * sizeof(u32);
+}
+
+static struct irq_chip iproc_msi_irq_chip = {
+	.name = "iProc-MSI",
+};
+
+static struct msi_domain_info iproc_msi_domain_info = {
+	.flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		 MSI_FLAG_PCI_MSIX,
+	.chip = &iproc_msi_irq_chip,
+};
+
+/*
+ * In the iProc PCIe core, each MSI group is serviced by a GIC interrupt and
+ * a dedicated event queue. Each MSI group can support up to 64 MSI vectors.
+ *
+ * The number of MSI groups varies between different iProc SoCs. The total
+ * number of CPU cores also varies. To support MSI IRQ affinity, we
+ * distribute GIC interrupts across all available CPUs. An MSI vector is
+ * moved from one GIC interrupt to another to steer it to the target CPU.
+ *
+ * Assuming:
+ * - the number of MSI groups is M
+ * - the number of CPU cores is N
+ * - M is always a multiple of N
+ *
+ * Total number of raw MSI vectors = M * 64
+ * Total number of supported MSI vectors = (M * 64) / N
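+ *
+ * For example, with M = 4 event queues on a 2-core SoC (N = 2), there are
+ * 4 * 64 = 256 raw vectors but only 256 / 2 = 128 allocatable MSI vectors,
+ * because each allocation claims one hwirq per CPU so that the vector can
+ * later be steered by re-targeting it at an event queue whose GIC interrupt
+ * is pinned to the desired CPU.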
+ */
+static inline int hwirq_to_cpu(struct iproc_msi *msi, unsigned long hwirq)
+{
+	return (hwirq % msi->nr_cpus);
+}
+
+static inline unsigned long hwirq_to_canonical_hwirq(struct iproc_msi *msi,
+						     unsigned long hwirq)
+{
+	return (hwirq - hwirq_to_cpu(msi, hwirq));
+}
+
+static int iproc_msi_irq_set_affinity(struct irq_data *data,
+				      const struct cpumask *mask, bool force)
+{
+	struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
+	int target_cpu = cpumask_first(mask);
+	int curr_cpu;
+
+	curr_cpu = hwirq_to_cpu(msi, data->hwirq);
+	if (curr_cpu == target_cpu)
+		return IRQ_SET_MASK_OK_DONE;
+
+	/* steer MSI to the target CPU */
+	data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu;
+
+	return IRQ_SET_MASK_OK;
+}
+
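+/*
+ * The hwirq itself is used as the MSI data payload; the hardware writes it
+ * into the event queue on arrival, which is how the handler recovers the
+ * vector. With a single MSI address region (nr_msi_region == 1), vectors
+ * share one window at a 4-byte stride, e.g. hwirq 5 with 6 groups posts to
+ * msi_addr + (5 % 6) * sizeof(u32).
+ */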
+static void iproc_msi_irq_compose_msi_msg(struct irq_data *data,
+					  struct msi_msg *msg)
+{
+	struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
+	dma_addr_t addr;
+
+	addr = msi->msi_addr + iproc_msi_addr_offset(msi, data->hwirq);
+	msg->address_lo = lower_32_bits(addr);
+	msg->address_hi = upper_32_bits(addr);
+	msg->data = data->hwirq;
+}
+
+static struct irq_chip iproc_msi_bottom_irq_chip = {
+	.name = "MSI",
+	.irq_set_affinity = iproc_msi_irq_set_affinity,
+	.irq_compose_msi_msg = iproc_msi_irq_compose_msi_msg,
+};
+
+static int iproc_msi_irq_domain_alloc(struct irq_domain *domain,
+				      unsigned int virq, unsigned int nr_irqs,
+				      void *args)
+{
+	struct iproc_msi *msi = domain->host_data;
+	int hwirq;
+
+	mutex_lock(&msi->bitmap_lock);
+
+	/* Allocate 'nr_cpus' number of MSI vectors each time */
+	hwirq = bitmap_find_next_zero_area(msi->bitmap, msi->nr_msi_vecs, 0,
+					   msi->nr_cpus, 0);
+	if (hwirq < msi->nr_msi_vecs) {
+		bitmap_set(msi->bitmap, hwirq, msi->nr_cpus);
+	} else {
+		mutex_unlock(&msi->bitmap_lock);
+		return -ENOSPC;
+	}
+
+	mutex_unlock(&msi->bitmap_lock);
+
+	irq_domain_set_info(domain, virq, hwirq, &iproc_msi_bottom_irq_chip,
+			    domain->host_data, handle_simple_irq, NULL, NULL);
+
+	return 0;
+}
+
+static void iproc_msi_irq_domain_free(struct irq_domain *domain,
+				      unsigned int virq, unsigned int nr_irqs)
+{
+	struct irq_data *data = irq_domain_get_irq_data(domain, virq);
+	struct iproc_msi *msi = irq_data_get_irq_chip_data(data);
+	unsigned int hwirq;
+
+	mutex_lock(&msi->bitmap_lock);
+
+	hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq);
+	bitmap_clear(msi->bitmap, hwirq, msi->nr_cpus);
+
+	mutex_unlock(&msi->bitmap_lock);
+
+	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops msi_domain_ops = {
+	.alloc = iproc_msi_irq_domain_alloc,
+	.free = iproc_msi_irq_domain_free,
+};
+
+static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head)
+{
+	u32 *msg, hwirq;
+	unsigned int offs;
+
+	offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32);
+	msg = (u32 *)(msi->eq_cpu + offs);
+	hwirq = *msg & IPROC_MSI_EQ_MASK;
+
+	/*
+	 * Since we have multiple hwirqs mapped to a single MSI vector,
+	 * now we need to derive the hwirq at CPU0. It can then be used to
+	 * map back to the virq.
+	 */
+	return hwirq_to_canonical_hwirq(msi, hwirq);
+}
+
+static void iproc_msi_handler(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct iproc_msi_grp *grp;
+	struct iproc_msi *msi;
+	struct iproc_pcie *pcie;
+	u32 eq, head, tail, nr_events;
+	unsigned long hwirq;
+	int virq;
+
+	chained_irq_enter(chip, desc);
+
+	grp = irq_desc_get_handler_data(desc);
+	msi = grp->msi;
+	pcie = msi->pcie;
+	eq = grp->eq;
+
+	/*
+	 * The iProc MSI event queue is tracked by head and tail pointers.
+	 * The head pointer indicates the next entry (MSI data) to be consumed
+	 * by SW in the queue and needs to be updated by SW. The iProc MSI
+	 * core uses the tail pointer as the next data insertion point.
+	 *
+	 * Entries between head and tail pointers contain valid MSI data. MSI
+	 * data is guaranteed to be in the event queue memory before the tail
+	 * pointer is updated by the iProc MSI core.
+	 */
+	head = iproc_msi_read_reg(msi, IPROC_MSI_EQ_HEAD,
+				  eq) & IPROC_MSI_EQ_MASK;
+	do {
+		tail = iproc_msi_read_reg(msi, IPROC_MSI_EQ_TAIL,
+					  eq) & IPROC_MSI_EQ_MASK;
+
+		/*
+		 * Figure out total number of events (MSI data) to be
+		 * processed.
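+		 *
+		 * The queue is a ring of EQ_LEN entries. When the tail has
+		 * wrapped around (tail < head), e.g. head = 60 and tail = 2,
+		 * nr_events = EQ_LEN - (head - tail) = 64 - 58 = 6, i.e.
+		 * entries 60..63 and 0..1 are pending.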
+		 */
+		nr_events = (tail < head) ?
+			(EQ_LEN - (head - tail)) : (tail - head);
+		if (!nr_events)
+			break;
+
+		/* process all outstanding events */
+		while (nr_events--) {
+			hwirq = decode_msi_hwirq(msi, eq, head);
+			virq = irq_find_mapping(msi->inner_domain, hwirq);
+			generic_handle_irq(virq);
+
+			head++;
+			head %= EQ_LEN;
+		}
+
+		/*
+		 * Now all outstanding events have been processed. Update the
+		 * head pointer.
+		 */
+		iproc_msi_write_reg(msi, IPROC_MSI_EQ_HEAD, eq, head);
+
+		/*
+		 * Now go read the tail pointer again to see if there are new
+		 * outstanding events that came in during the above window.
+		 */
+	} while (true);
+
+	chained_irq_exit(chip, desc);
+}
+
+static void iproc_msi_enable(struct iproc_msi *msi)
+{
+	int i, eq;
+	u32 val;
+
+	/* Program memory region for each event queue */
+	for (i = 0; i < msi->nr_eq_region; i++) {
+		dma_addr_t addr = msi->eq_dma + (i * EQ_MEM_REGION_SIZE);
+
+		iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE, i,
+				    lower_32_bits(addr));
+		iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE_UPPER, i,
+				    upper_32_bits(addr));
+	}
+
+	/* Program address region for MSI posted writes */
+	for (i = 0; i < msi->nr_msi_region; i++) {
+		phys_addr_t addr = msi->msi_addr + (i * MSI_MEM_REGION_SIZE);
+
+		iproc_msi_write_reg(msi, IPROC_MSI_PAGE, i,
+				    lower_32_bits(addr));
+		iproc_msi_write_reg(msi, IPROC_MSI_PAGE_UPPER, i,
+				    upper_32_bits(addr));
+	}
+
+	for (eq = 0; eq < msi->nr_irqs; eq++) {
+		/* Enable MSI event queue */
+		val = IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
+			IPROC_MSI_EQ_EN;
+		iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);
+
+		/*
+		 * Some legacy platforms require the MSI interrupt enable
+		 * register to be set explicitly.
+		 */
+		if (msi->has_inten_reg) {
+			val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
+			val |= BIT(eq);
+			iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
+		}
+	}
+}
+
+static void iproc_msi_disable(struct iproc_msi *msi)
+{
+	u32 eq, val;
+
+	for (eq = 0; eq < msi->nr_irqs; eq++) {
+		if (msi->has_inten_reg) {
+			val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq);
+			val &= ~BIT(eq);
+			iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val);
+		}
+
+		val = iproc_msi_read_reg(msi, IPROC_MSI_CTRL, eq);
+		val &= ~(IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT |
+			 IPROC_MSI_EQ_EN);
+		iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val);
+	}
+}
+
+static int iproc_msi_alloc_domains(struct device_node *node,
+				   struct iproc_msi *msi)
+{
+	msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs,
+						  &msi_domain_ops, msi);
+	if (!msi->inner_domain)
+		return -ENOMEM;
+
+	msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
+						    &iproc_msi_domain_info,
+						    msi->inner_domain);
+	if (!msi->msi_domain) {
+		irq_domain_remove(msi->inner_domain);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void iproc_msi_free_domains(struct iproc_msi *msi)
+{
+	if (msi->msi_domain)
+		irq_domain_remove(msi->msi_domain);
+
+	if (msi->inner_domain)
+		irq_domain_remove(msi->inner_domain);
+}
+
+static void iproc_msi_irq_free(struct iproc_msi *msi, unsigned int cpu)
+{
+	int i;
+
+	for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
+		irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
+						 NULL, NULL);
+	}
+}
+
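+/*
+ * Attach the chained MSI handler to each GIC interrupt assigned to the given
+ * CPU and pin that interrupt to the CPU. Event queues are distributed
+ * round-robin: with 4 GIC interrupts on 2 CPUs, CPU0 services event queues
+ * 0 and 2 while CPU1 services event queues 1 and 3.
+ */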
+static int iproc_msi_irq_setup(struct iproc_msi *msi, unsigned int cpu)
+{
+	int i, ret;
+	cpumask_var_t mask;
+	struct iproc_pcie *pcie = msi->pcie;
+
+	for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) {
+		irq_set_chained_handler_and_data(msi->grps[i].gic_irq,
+						 iproc_msi_handler,
+						 &msi->grps[i]);
+		/* Dedicate GIC interrupt to each CPU core */
+		if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
+			cpumask_clear(mask);
+			cpumask_set_cpu(cpu, mask);
+			ret = irq_set_affinity(msi->grps[i].gic_irq, mask);
+			if (ret)
+				dev_err(pcie->dev,
+					"failed to set affinity for IRQ%d\n",
+					msi->grps[i].gic_irq);
+			free_cpumask_var(mask);
+		} else {
+			dev_err(pcie->dev, "failed to alloc CPU mask\n");
+			ret = -EINVAL;
+		}
+
+		if (ret) {
+			/* Free all configured/unconfigured IRQs */
+			iproc_msi_irq_free(msi, cpu);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
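+/*
+ * The MSI node is found through the PCIe controller's "msi-parent" phandle
+ * (see iproc_pcie_msi_enable()). It must carry the "brcm,iproc-msi"
+ * compatible string and the "msi-controller" property, with one GIC
+ * interrupt per event queue; legacy controllers that need the explicit
+ * interrupt enable register additionally set "brcm,pcie-msi-inten".
+ * An illustrative fragment (node name, unit address, and interrupt
+ * specifiers below are hypothetical and SoC-specific):
+ *
+ *	msi0: msi@18012000 {
+ *		compatible = "brcm,iproc-msi";
+ *		msi-controller;
+ *		interrupt-parent = <&gic>;
+ *		interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>,
+ *			     <GIC_SPI 97 IRQ_TYPE_NONE>,
+ *			     <GIC_SPI 98 IRQ_TYPE_NONE>,
+ *			     <GIC_SPI 99 IRQ_TYPE_NONE>;
+ *	};
+ */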
+int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node)
+{
+	struct iproc_msi *msi;
+	int i, ret;
+	unsigned int cpu;
+
+	if (!of_device_is_compatible(node, "brcm,iproc-msi"))
+		return -ENODEV;
+
+	if (!of_find_property(node, "msi-controller", NULL))
+		return -ENODEV;
+
+	if (pcie->msi)
+		return -EBUSY;
+
+	msi = devm_kzalloc(pcie->dev, sizeof(*msi), GFP_KERNEL);
+	if (!msi)
+		return -ENOMEM;
+
+	msi->pcie = pcie;
+	pcie->msi = msi;
+	msi->msi_addr = pcie->base_addr;
+	mutex_init(&msi->bitmap_lock);
+	msi->nr_cpus = num_possible_cpus();
+
+	msi->nr_irqs = of_irq_count(node);
+	if (!msi->nr_irqs) {
+		dev_err(pcie->dev, "found no MSI GIC interrupt\n");
+		return -ENODEV;
+	}
+
+	if (msi->nr_irqs > NR_HW_IRQS) {
+		dev_warn(pcie->dev, "too many MSI GIC interrupts defined %d\n",
+			 msi->nr_irqs);
+		msi->nr_irqs = NR_HW_IRQS;
+	}
+
+	if (msi->nr_irqs < msi->nr_cpus) {
+		dev_err(pcie->dev,
+			"not enough GIC interrupts for MSI affinity\n");
+		return -EINVAL;
+	}
+
+	if (msi->nr_irqs % msi->nr_cpus != 0) {
+		msi->nr_irqs -= msi->nr_irqs % msi->nr_cpus;
+		dev_warn(pcie->dev, "Reducing number of interrupts to %d\n",
+			 msi->nr_irqs);
+	}
+
+	switch (pcie->type) {
+	case IPROC_PCIE_PAXB:
+		msi->reg_offsets = iproc_msi_reg_paxb;
+		msi->nr_eq_region = 1;
+		msi->nr_msi_region = 1;
+		break;
+	case IPROC_PCIE_PAXC:
+		msi->reg_offsets = iproc_msi_reg_paxc;
+		msi->nr_eq_region = msi->nr_irqs;
+		msi->nr_msi_region = msi->nr_irqs;
+		break;
+	default:
+		dev_err(pcie->dev, "incompatible iProc PCIe interface\n");
+		return -EINVAL;
+	}
+
+	if (of_find_property(node, "brcm,pcie-msi-inten", NULL))
+		msi->has_inten_reg = true;
+
+	msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN;
+	msi->bitmap = devm_kcalloc(pcie->dev, BITS_TO_LONGS(msi->nr_msi_vecs),
+				   sizeof(*msi->bitmap), GFP_KERNEL);
+	if (!msi->bitmap)
+		return -ENOMEM;
+
+	msi->grps = devm_kcalloc(pcie->dev, msi->nr_irqs, sizeof(*msi->grps),
+				 GFP_KERNEL);
+	if (!msi->grps)
+		return -ENOMEM;
+
+	for (i = 0; i < msi->nr_irqs; i++) {
+		unsigned int irq = irq_of_parse_and_map(node, i);
+
+		if (!irq) {
+			dev_err(pcie->dev, "unable to parse/map interrupt\n");
+			ret = -ENODEV;
+			goto free_irqs;
+		}
+		msi->grps[i].gic_irq = irq;
+		msi->grps[i].msi = msi;
+		msi->grps[i].eq = i;
+	}
+
+	/* Reserve memory for the event queues and make sure it is zeroed */
+	msi->eq_cpu = dma_zalloc_coherent(pcie->dev,
+					  msi->nr_eq_region *
+					  EQ_MEM_REGION_SIZE,
+					  &msi->eq_dma, GFP_KERNEL);
+	if (!msi->eq_cpu) {
+		ret = -ENOMEM;
+		goto free_irqs;
+	}
+
+	ret = iproc_msi_alloc_domains(node, msi);
+	if (ret) {
+		dev_err(pcie->dev, "failed to create MSI domains\n");
+		goto free_eq_dma;
+	}
+
+	for_each_online_cpu(cpu) {
+		ret = iproc_msi_irq_setup(msi, cpu);
+		if (ret)
+			goto free_msi_irq;
+	}
+
+	iproc_msi_enable(msi);
+
+	return 0;
+
+free_msi_irq:
+	for_each_online_cpu(cpu)
+		iproc_msi_irq_free(msi, cpu);
+	iproc_msi_free_domains(msi);
+
+free_eq_dma:
+	dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
+			  msi->eq_cpu, msi->eq_dma);
+
+free_irqs:
+	for (i = 0; i < msi->nr_irqs; i++) {
+		if (msi->grps[i].gic_irq)
+			irq_dispose_mapping(msi->grps[i].gic_irq);
+	}
+	pcie->msi = NULL;
+	return ret;
+}
+EXPORT_SYMBOL(iproc_msi_init);
+
+void iproc_msi_exit(struct iproc_pcie *pcie)
+{
+	struct iproc_msi *msi = pcie->msi;
+	unsigned int i, cpu;
+
+	if (!msi)
+		return;
+
+	iproc_msi_disable(msi);
+
+	for_each_online_cpu(cpu)
+		iproc_msi_irq_free(msi, cpu);
+
+	iproc_msi_free_domains(msi);
+
+	dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE,
+			  msi->eq_cpu, msi->eq_dma);
+
+	for (i = 0; i < msi->nr_irqs; i++) {
+		if (msi->grps[i].gic_irq)
+			irq_dispose_mapping(msi->grps[i].gic_irq);
+	}
+}
+EXPORT_SYMBOL(iproc_msi_exit);
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index e8b32d856bd8..1738c5288eb6 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -71,6 +71,7 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
 		dev_err(pcie->dev, "unable to map controller registers\n");
 		return -ENOMEM;
 	}
+	pcie->base_addr = reg.start;
 
 	if (of_property_read_bool(np, "brcm,pcie-ob")) {
 		u32 val;
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index 0e11f85ba7b0..5816bceddb65 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -440,6 +440,26 @@ static int iproc_pcie_map_ranges(struct iproc_pcie *pcie,
 	return 0;
 }
 
+static int iproc_pcie_msi_enable(struct iproc_pcie *pcie)
+{
+	struct device_node *msi_node;
+
+	msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0);
+	if (!msi_node)
+		return -ENODEV;
+
+	/*
+	 * If another MSI controller is being used, the call below should
+	 * fail, but that is okay
+	 */
+	return iproc_msi_init(pcie, msi_node);
+}
+
+static void iproc_pcie_msi_disable(struct iproc_pcie *pcie)
+{
+	iproc_msi_exit(pcie);
+}
+
 int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
 {
 	int ret;
@@ -507,6 +527,10 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
 
 	iproc_pcie_enable(pcie);
 
+	if (IS_ENABLED(CONFIG_PCI_MSI))
+		if (iproc_pcie_msi_enable(pcie))
+			dev_info(pcie->dev, "not using iProc MSI\n");
+
 	pci_scan_child_bus(bus);
 	pci_assign_unassigned_bus_resources(bus);
 	pci_fixup_irqs(pci_common_swizzle, pcie->map_irq);
@@ -531,6 +555,8 @@ int iproc_pcie_remove(struct iproc_pcie *pcie)
 	pci_stop_root_bus(pcie->root_bus);
 	pci_remove_root_bus(pcie->root_bus);
 
+	iproc_pcie_msi_disable(pcie);
+
 	phy_power_off(pcie->phy);
 	phy_exit(pcie->phy);
diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h
index a38e96ae156d..e84d93c53c7b 100644
--- a/drivers/pci/host/pcie-iproc.h
+++ b/drivers/pci/host/pcie-iproc.h
@@ -41,6 +41,8 @@ struct iproc_pcie_ob {
 	resource_size_t window_size;
 };
 
+struct iproc_msi;
+
 /**
  * iProc PCIe device
  *
@@ -48,19 +50,21 @@ struct iproc_pcie_ob {
  * @type: iProc PCIe interface type
  * @reg_offsets: register offsets
  * @base: PCIe host controller I/O register base
+ * @base_addr: PCIe host controller register base physical address
  * @sysdata: Per PCI controller data (ARM-specific)
  * @root_bus: pointer to root bus
  * @phy: optional PHY device that controls the Serdes
- * @irqs: interrupt IDs
  * @map_irq: function callback to map interrupts
- * @need_ob_cfg: indidates SW needs to configure the outbound mapping window
+ * @need_ob_cfg: indicates SW needs to configure the outbound mapping window
 * @ob: outbound mapping parameters
+ * @msi: MSI data
 */
 struct iproc_pcie {
 	struct device *dev;
 	enum iproc_pcie_type type;
 	const u16 *reg_offsets;
 	void __iomem *base;
+	phys_addr_t base_addr;
 #ifdef CONFIG_ARM
 	struct pci_sys_data sysdata;
 #endif
@@ -69,9 +73,24 @@ struct iproc_pcie {
 	int (*map_irq)(const struct pci_dev *, u8, u8);
 	bool need_ob_cfg;
 	struct iproc_pcie_ob ob;
+	struct iproc_msi *msi;
 };
 
 int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res);
 int iproc_pcie_remove(struct iproc_pcie *pcie);
 
+#ifdef CONFIG_PCIE_IPROC_MSI
+int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node);
+void iproc_msi_exit(struct iproc_pcie *pcie);
+#else
+static inline int iproc_msi_init(struct iproc_pcie *pcie,
+				 struct device_node *node)
+{
+	return -ENODEV;
+}
+static inline void iproc_msi_exit(struct iproc_pcie *pcie)
+{
+}
+#endif
+
 #endif /* _PCIE_IPROC_H */