linux_dsm_epyc7002/drivers/pci/controller/dwc/pcie-designware-host.c

// SPDX-License-Identifier: GPL-2.0
/*
* Synopsys DesignWare PCIe host controller driver
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Author: Jingoo Han <jg1.han@samsung.com>
*/
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>
#include "../../pci.h"
#include "pcie-designware.h"
static struct pci_ops dw_pcie_ops;
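
/*
 * "Own" config accesses target the host bridge's own configuration
 * space, which the DWC core exposes through its DBI registers.
 * Platform drivers may override these accessors through pp->ops.
 */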
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
u32 *val)
{
struct dw_pcie *pci;
if (pp->ops->rd_own_conf)
return pp->ops->rd_own_conf(pp, where, size, val);
pci = to_dw_pcie_from_pp(pp);
return dw_pcie_read(pci->dbi_base + where, size, val);
}
static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
u32 val)
{
struct dw_pcie *pci;
if (pp->ops->wr_own_conf)
return pp->ops->wr_own_conf(pp, where, size, val);
pci = to_dw_pcie_from_pp(pp);
return dw_pcie_write(pci->dbi_base + where, size, val);
}
static void dw_msi_ack_irq(struct irq_data *d)
{
irq_chip_ack_parent(d);
}
static void dw_msi_mask_irq(struct irq_data *d)
{
pci_msi_mask_irq(d);
irq_chip_mask_parent(d);
}
static void dw_msi_unmask_irq(struct irq_data *d)
{
pci_msi_unmask_irq(d);
irq_chip_unmask_parent(d);
}
static struct irq_chip dw_pcie_msi_irq_chip = {
.name = "PCI-MSI",
.irq_ack = dw_msi_ack_irq,
.irq_mask = dw_msi_mask_irq,
.irq_unmask = dw_msi_unmask_irq,
};
static struct msi_domain_info dw_pcie_msi_domain_info = {
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
.chip = &dw_pcie_msi_irq_chip,
};
/*
 * MSI interrupt handler: each MSI controller block services up to
 * MAX_MSI_IRQS_PER_CTRL vectors.  Scan every block's status register
 * and dispatch the Linux IRQ mapped to each pending hwirq.
 */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
int i, pos, irq;
u32 val, num_ctrls;
irqreturn_t ret = IRQ_NONE;
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
for (i = 0; i < num_ctrls; i++) {
dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
(i * MSI_REG_CTRL_BLOCK_SIZE),
4, &val);
if (!val)
continue;
ret = IRQ_HANDLED;
pos = 0;
while ((pos = find_next_bit((unsigned long *) &val,
MAX_MSI_IRQS_PER_CTRL,
pos)) != MAX_MSI_IRQS_PER_CTRL) {
irq = irq_find_mapping(pp->irq_domain,
(i * MAX_MSI_IRQS_PER_CTRL) +
pos);
generic_handle_irq(irq);
pos++;
}
}
return ret;
}
/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct pcie_port *pp;
chained_irq_enter(chip, desc);
pp = irq_desc_get_handler_data(desc);
dw_handle_msi_irq(pp);
chained_irq_exit(chip, desc);
}
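
/* Compose the MSI message: the doorbell address plus the hwirq as payload. */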
static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
u64 msi_target;
if (pp->ops->get_msi_addr)
msi_target = pp->ops->get_msi_addr(pp);
else
msi_target = (u64)pp->msi_data;
msg->address_lo = lower_32_bits(msi_target);
msg->address_hi = upper_32_bits(msi_target);
if (pp->ops->get_msi_data)
msg->data = pp->ops->get_msi_data(pp, d->hwirq);
else
msg->data = d->hwirq;
dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
(int)d->hwirq, msg->address_hi, msg->address_lo);
}
static int dw_pci_msi_set_affinity(struct irq_data *d,
const struct cpumask *mask, bool force)
{
return -EINVAL;
}
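
/*
 * Bottom-half irq_chip callbacks.  A set bit in PCIE_MSI_INTR0_MASK
 * masks that vector; pp->irq_mask[] caches the hardware state.  Status
 * bits are write-one-to-clear (see dw_pci_bottom_ack()).
 */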
static void dw_pci_bottom_mask(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
unsigned int res, bit, ctrl;
unsigned long flags;
raw_spin_lock_irqsave(&pp->lock, flags);
if (pp->ops->msi_clear_irq) {
pp->ops->msi_clear_irq(pp, d->hwirq);
} else {
ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
pp->irq_mask[ctrl] |= BIT(bit);
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
pp->irq_mask[ctrl]);
}
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static void dw_pci_bottom_unmask(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
unsigned int res, bit, ctrl;
unsigned long flags;
raw_spin_lock_irqsave(&pp->lock, flags);
if (pp->ops->msi_set_irq) {
pp->ops->msi_set_irq(pp, d->hwirq);
} else {
ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
pp->irq_mask[ctrl] &= ~BIT(bit);
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
pp->irq_mask[ctrl]);
}
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static void dw_pci_bottom_ack(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
unsigned int res, bit, ctrl;
unsigned long flags;
ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
raw_spin_lock_irqsave(&pp->lock, flags);
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit));
if (pp->ops->msi_irq_ack)
pp->ops->msi_irq_ack(d->hwirq, pp);
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
.name = "DWPCI-MSI",
.irq_ack = dw_pci_bottom_ack,
.irq_compose_msi_msg = dw_pci_setup_msi_msg,
.irq_set_affinity = dw_pci_msi_set_affinity,
.irq_mask = dw_pci_bottom_mask,
.irq_unmask = dw_pci_bottom_unmask,
};
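
/*
 * Multi-MSI needs a naturally aligned, power-of-two block of vectors,
 * hence bitmap_find_free_region() with order_base_2(nr_irqs).
 */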
static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs,
void *args)
{
struct pcie_port *pp = domain->host_data;
unsigned long flags;
u32 i;
int bit;
raw_spin_lock_irqsave(&pp->lock, flags);
bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
order_base_2(nr_irqs));
raw_spin_unlock_irqrestore(&pp->lock, flags);
if (bit < 0)
return -ENOSPC;
for (i = 0; i < nr_irqs; i++)
irq_domain_set_info(domain, virq + i, bit + i,
&dw_pci_msi_bottom_irq_chip,
pp, handle_edge_irq,
NULL, NULL);
return 0;
}
static void dw_pcie_irq_domain_free(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
unsigned long flags;
raw_spin_lock_irqsave(&pp->lock, flags);
bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
order_base_2(nr_irqs));
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
.alloc = dw_pcie_irq_domain_alloc,
.free = dw_pcie_irq_domain_free,
};
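
/*
 * Two-level IRQ domain hierarchy: a linear domain translating hwirqs
 * to controller vectors, with a generic PCI/MSI domain stacked on top.
 */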
int dw_pcie_allocate_domains(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);
pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
&dw_pcie_msi_domain_ops, pp);
if (!pp->irq_domain) {
dev_err(pci->dev, "Failed to create IRQ domain\n");
return -ENOMEM;
}
pp->msi_domain = pci_msi_create_irq_domain(fwnode,
&dw_pcie_msi_domain_info,
pp->irq_domain);
if (!pp->msi_domain) {
dev_err(pci->dev, "Failed to create MSI domain\n");
irq_domain_remove(pp->irq_domain);
return -ENOMEM;
}
return 0;
}
void dw_pcie_free_msi(struct pcie_port *pp)
{
irq_set_chained_handler(pp->msi_irq, NULL);
irq_set_handler_data(pp->msi_irq, NULL);
irq_domain_remove(pp->msi_domain);
irq_domain_remove(pp->irq_domain);
}
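
/*
 * Reserve a DMA address to serve as the MSI doorbell.  Endpoint writes
 * to this address never reach memory: the controller matches the
 * address and latches the payload into the MSI_INTR0 status registers,
 * so the page contents themselves are never used.
 */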
void dw_pcie_msi_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
struct page *page;
u64 msi_target;
	page = alloc_page(GFP_KERNEL);
	if (!page) {
		dev_err(dev, "Failed to allocate MSI data page\n");
		return;
	}
pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, pp->msi_data)) {
dev_err(dev, "Failed to map MSI data\n");
__free_page(page);
return;
}
msi_target = (u64)pp->msi_data;
	/* Program the MSI target address into the controller */
dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
lower_32_bits(msi_target));
dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
upper_32_bits(msi_target));
}
int dw_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
struct resource_entry *win, *tmp;
struct pci_bus *bus, *child;
struct pci_host_bridge *bridge;
struct resource *cfg_res;
int ret;
raw_spin_lock_init(&pci->pp.lock);
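
	/*
	 * The "config" window is split in half: the lower half (CFG0)
	 * addresses the device directly below the root port, the upper
	 * half (CFG1) everything further downstream.
	 */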
cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
if (cfg_res) {
pp->cfg0_size = resource_size(cfg_res) >> 1;
pp->cfg1_size = resource_size(cfg_res) >> 1;
pp->cfg0_base = cfg_res->start;
pp->cfg1_base = cfg_res->start + pp->cfg0_size;
} else if (!pp->va_cfg0_base) {
dev_err(dev, "Missing *config* reg space\n");
}
bridge = pci_alloc_host_bridge(0);
if (!bridge)
return -ENOMEM;
ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
&bridge->windows, &pp->io_base);
	if (ret)
		goto error;
ret = devm_request_pci_bus_resources(dev, &bridge->windows);
if (ret)
goto error;
/* Get the I/O and memory ranges from DT */
resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
switch (resource_type(win->res)) {
case IORESOURCE_IO:
			/*
			 * Use the devm_ variant so the mapping is unwound
			 * automatically if probe fails and is retried, e.g.
			 * on -EPROBE_DEFER (see commit "PCI: designware:
			 * Fix I/O space page leak").
			 */
ret = devm_pci_remap_iospace(dev, win->res,
pp->io_base);
if (ret) {
dev_warn(dev, "Error %d: failed to map resource %pR\n",
ret, win->res);
resource_list_destroy_entry(win);
} else {
pp->io = win->res;
pp->io->name = "I/O";
pp->io_size = resource_size(pp->io);
pp->io_bus_addr = pp->io->start - win->offset;
}
break;
case IORESOURCE_MEM:
pp->mem = win->res;
pp->mem->name = "MEM";
pp->mem_size = resource_size(pp->mem);
pp->mem_bus_addr = pp->mem->start - win->offset;
break;
case 0:
pp->cfg = win->res;
pp->cfg0_size = resource_size(pp->cfg) >> 1;
pp->cfg1_size = resource_size(pp->cfg) >> 1;
pp->cfg0_base = pp->cfg->start;
pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
break;
case IORESOURCE_BUS:
pp->busn = win->res;
break;
}
}
if (!pci->dbi_base) {
pci->dbi_base = devm_pci_remap_cfgspace(dev,
pp->cfg->start,
resource_size(pp->cfg));
if (!pci->dbi_base) {
dev_err(dev, "Error with ioremap\n");
ret = -ENOMEM;
goto error;
}
}
pp->mem_base = pp->mem->start;
if (!pp->va_cfg0_base) {
pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
pp->cfg0_base, pp->cfg0_size);
if (!pp->va_cfg0_base) {
dev_err(dev, "Error with ioremap in function\n");
ret = -ENOMEM;
goto error;
}
}
if (!pp->va_cfg1_base) {
pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
pp->cfg1_base,
pp->cfg1_size);
if (!pp->va_cfg1_base) {
dev_err(dev, "Error with ioremap\n");
ret = -ENOMEM;
goto error;
}
}
ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
if (ret)
pci->num_viewport = 2;
if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_enabled()) {
/*
* If a specific SoC driver needs to change the
* default number of vectors, it needs to implement
* the set_num_vectors callback.
*/
if (!pp->ops->set_num_vectors) {
pp->num_vectors = MSI_DEF_NUM_VECTORS;
} else {
pp->ops->set_num_vectors(pp);
if (pp->num_vectors > MAX_MSI_IRQS ||
pp->num_vectors == 0) {
				dev_err(dev,
					"Invalid number of vectors\n");
				ret = -EINVAL;
				goto error;
}
}
if (!pp->ops->msi_host_init) {
ret = dw_pcie_allocate_domains(pp);
if (ret)
goto error;
if (pp->msi_irq)
irq_set_chained_handler_and_data(pp->msi_irq,
dw_chained_msi_isr,
pp);
} else {
ret = pp->ops->msi_host_init(pp);
if (ret < 0)
goto error;
}
}
if (pp->ops->host_init) {
ret = pp->ops->host_init(pp);
if (ret)
goto error;
}
pp->root_bus_nr = pp->busn->start;
bridge->dev.parent = dev;
bridge->sysdata = pp;
bridge->busnr = pp->root_bus_nr;
bridge->ops = &dw_pcie_ops;
bridge->map_irq = of_irq_parse_and_map_pci;
bridge->swizzle_irq = pci_common_swizzle;
ret = pci_scan_root_bus_bridge(bridge);
if (ret)
goto error;
bus = bridge->bus;
if (pp->ops->scan_bus)
pp->ops->scan_bus(pp);
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
list_for_each_entry(child, &bus->children, node)
pcie_bus_configure_settings(child);
pci_bus_add_devices(bus);
return 0;
error:
pci_free_host_bridge(bridge);
return ret;
}
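
/*
 * Config accesses to devices below the root port borrow an outbound
 * iATU viewport: retarget viewport 1 to CFG0 (for the link partner) or
 * CFG1 (for deeper buses), perform the access through the matching
 * window, then hand the viewport back to I/O when only two viewports
 * exist.
 */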
static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus,
u32 devfn, int where, int size, u32 *val,
bool write)
{
int ret, type;
u32 busdev, cfg_size;
u64 cpu_addr;
void __iomem *va_cfg_base;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
PCIE_ATU_FUNC(PCI_FUNC(devfn));
if (bus->parent->number == pp->root_bus_nr) {
type = PCIE_ATU_TYPE_CFG0;
cpu_addr = pp->cfg0_base;
cfg_size = pp->cfg0_size;
va_cfg_base = pp->va_cfg0_base;
} else {
type = PCIE_ATU_TYPE_CFG1;
cpu_addr = pp->cfg1_base;
cfg_size = pp->cfg1_size;
va_cfg_base = pp->va_cfg1_base;
}
dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
type, cpu_addr,
busdev, cfg_size);
if (write)
ret = dw_pcie_write(va_cfg_base + where, size, *val);
else
ret = dw_pcie_read(va_cfg_base + where, size, val);
if (pci->num_viewport <= 2)
dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
PCIE_ATU_TYPE_IO, pp->io_base,
pp->io_bus_addr, pp->io_size);
return ret;
}
static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
u32 devfn, int where, int size, u32 *val)
{
if (pp->ops->rd_other_conf)
return pp->ops->rd_other_conf(pp, bus, devfn, where,
size, val);
return dw_pcie_access_other_conf(pp, bus, devfn, where, size, val,
false);
}
static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
u32 devfn, int where, int size, u32 val)
{
if (pp->ops->wr_other_conf)
return pp->ops->wr_other_conf(pp, bus, devfn, where,
size, val);
return dw_pcie_access_other_conf(pp, bus, devfn, where, size, &val,
true);
}
static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
int dev)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
/* If there is no link, then there is no device */
if (bus->number != pp->root_bus_nr) {
if (!dw_pcie_link_up(pci))
return 0;
}
/* Access only one slot on each root port */
if (bus->number == pp->root_bus_nr && dev > 0)
return 0;
return 1;
}
static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
struct pcie_port *pp = bus->sysdata;
if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
if (bus->number == pp->root_bus_nr)
return dw_pcie_rd_own_conf(pp, where, size, val);
return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
}
static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 val)
{
struct pcie_port *pp = bus->sysdata;
if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
return PCIBIOS_DEVICE_NOT_FOUND;
if (bus->number == pp->root_bus_nr)
return dw_pcie_wr_own_conf(pp, where, size, val);
return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
}
static struct pci_ops dw_pcie_ops = {
.read = dw_pcie_rd_conf,
.write = dw_pcie_wr_conf,
};
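
/*
 * Cores with the "unrolled" iATU have no viewport register; it reads
 * back as all ones, which is how unroll support is detected.
 */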
static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
u32 val;
val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
if (val == 0xffffffff)
return 1;
return 0;
}
void dw_pcie_setup_rc(struct pcie_port *pp)
{
u32 val, ctrl, num_ctrls;
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
dw_pcie_setup(pci);
num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
	/* Initialize the MSI mask and enable registers for each controller */
for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
pp->irq_mask[ctrl] = ~0;
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
4, pp->irq_mask[ctrl]);
dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
(ctrl * MSI_REG_CTRL_BLOCK_SIZE),
4, ~0);
}
/* Setup RC BARs */
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);
	/* Setup interrupt pins: report INTA in the Interrupt Pin register */
dw_pcie_dbi_ro_wr_en(pci);
val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
val &= 0xffff00ff;
val |= 0x00000100;
dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
dw_pcie_dbi_ro_wr_dis(pci);
/* Setup bus numbers */
val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
val &= 0xff000000;
	/*
	 * Leave the subordinate bus number at 0xff so enumeration can
	 * descend the full downstream hierarchy (see commit "PCI: dwc:
	 * Fix enumeration end when reaching root subordinate").
	 */
val |= 0x00ff0100;
dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);
/* Setup command register */
val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
val &= 0xffff0000;
val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
dw_pcie_writel_dbi(pci, PCI_COMMAND, val);
/*
* If the platform provides ->rd_other_conf, it means the platform
* uses its own address translation component rather than ATU, so
* we should not program the ATU here.
*/
if (!pp->ops->rd_other_conf) {
/* Get iATU unroll support */
pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
dev_dbg(pci->dev, "iATU unroll: %s\n",
pci->iatu_unroll_enabled ? "enabled" : "disabled");
		/*
		 * The ATU register offset is implementation-defined; if the
		 * platform did not set atu_base, fall back to the historical
		 * DBI-relative default (see commit "PCI: dwc: Don't
		 * hard-code DBI/ATU offset").
		 */
if (pci->iatu_unroll_enabled && !pci->atu_base)
pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
PCIE_ATU_TYPE_MEM, pp->mem_base,
pp->mem_bus_addr, pp->mem_size);
if (pci->num_viewport > 2)
dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
PCIE_ATU_TYPE_IO, pp->io_base,
pp->io_bus_addr, pp->io_size);
}
dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
/* Enable write permission for the DBI read-only register */
dw_pcie_dbi_ro_wr_en(pci);
/* Program correct class for RC */
dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
/* Better disable write permission right after the update */
dw_pcie_dbi_ro_wr_dis(pci);
dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
val |= PORT_LOGIC_SPEED_CHANGE;
dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}