// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe host controller driver
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Jingoo Han <jg1.han@samsung.com>
 */

#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/pci_regs.h>
#include <linux/platform_device.h>

#include "../../pci.h"
#include "pcie-designware.h"

static struct pci_ops dw_pcie_ops;
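
/*
 * Accessors for the host bridge's own configuration space. They go
 * straight through the DBI interface unless the glue driver installs
 * its own rd_own_conf/wr_own_conf callbacks.
 */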
static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
			       u32 *val)
{
	struct dw_pcie *pci;

	if (pp->ops->rd_own_conf)
		return pp->ops->rd_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_read(pci->dbi_base + where, size, val);
}

static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
			       u32 val)
{
	struct dw_pcie *pci;

	if (pp->ops->wr_own_conf)
		return pp->ops->wr_own_conf(pp, where, size, val);

	pci = to_dw_pcie_from_pp(pp);
	return dw_pcie_write(pci->dbi_base + where, size, val);
}

static void dw_msi_ack_irq(struct irq_data *d)
{
	irq_chip_ack_parent(d);
}

static void dw_msi_mask_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void dw_msi_unmask_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}
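
/*
 * Top-level MSI irqchip: masks/unmasks the vector both at the PCI
 * device (pci_msi_mask_irq()) and in the parent controller domain, so
 * the two mask states never go out of sync.
 */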
static struct irq_chip dw_pcie_msi_irq_chip = {
	.name = "PCI-MSI",
	.irq_ack = dw_msi_ack_irq,
	.irq_mask = dw_msi_mask_irq,
	.irq_unmask = dw_msi_unmask_irq,
};

static struct msi_domain_info dw_pcie_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &dw_pcie_msi_irq_chip,
};

/* MSI int handler */
irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
{
	int i, pos, irq;
	u32 val, num_ctrls;
	irqreturn_t ret = IRQ_NONE;

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	for (i = 0; i < num_ctrls; i++) {
		dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
					(i * MSI_REG_CTRL_BLOCK_SIZE),
				    4, &val);
		if (!val)
			continue;

		ret = IRQ_HANDLED;
		pos = 0;
		while ((pos = find_next_bit((unsigned long *) &val,
					    MAX_MSI_IRQS_PER_CTRL,
					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
			irq = irq_find_mapping(pp->irq_domain,
					       (i * MAX_MSI_IRQS_PER_CTRL) +
					       pos);
			generic_handle_irq(irq);
			pos++;
		}
	}

	return ret;
}
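
/*
 * dw_handle_msi_irq() is also callable directly by glue drivers that
 * demultiplex the MSI interrupt themselves instead of using the
 * chained handler below. A minimal sketch, assuming a hypothetical
 * glue driver with a dedicated MSI IRQ line:
 *
 *	static irqreturn_t foo_pcie_msi_isr(int irq, void *arg)
 *	{
 *		struct pcie_port *pp = arg;
 *
 *		return dw_handle_msi_irq(pp);
 *	}
 */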

/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct pcie_port *pp;

	chained_irq_enter(chip, desc);

	pp = irq_desc_get_handler_data(desc);
	dw_handle_msi_irq(pp);

	chained_irq_exit(chip, desc);
}

static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	u64 msi_target;

	if (pp->ops->get_msi_addr)
		msi_target = pp->ops->get_msi_addr(pp);
	else
		msi_target = (u64)pp->msi_data;

	msg->address_lo = lower_32_bits(msi_target);
	msg->address_hi = upper_32_bits(msi_target);

	if (pp->ops->get_msi_data)
		msg->data = pp->ops->get_msi_data(pp, d->hwirq);
	else
		msg->data = d->hwirq;

	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
		(int)d->hwirq, msg->address_hi, msg->address_lo);
}

static int dw_pci_msi_set_affinity(struct irq_data *d,
				   const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static void dw_pci_bottom_mask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	if (pp->ops->msi_clear_irq) {
		pp->ops->msi_clear_irq(pp, d->hwirq);
	} else {
		ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
		res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
		bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

		pp->irq_mask[ctrl] |= BIT(bit);
		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
				    pp->irq_mask[ctrl]);
	}

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_unmask(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	if (pp->ops->msi_set_irq) {
		pp->ops->msi_set_irq(pp, d->hwirq);
	} else {
		ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
		res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
		bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

		pp->irq_mask[ctrl] &= ~BIT(bit);
		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
				    pp->irq_mask[ctrl]);
	}

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static void dw_pci_bottom_ack(struct irq_data *d)
{
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned int res, bit, ctrl;
	unsigned long flags;

	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;

	raw_spin_lock_irqsave(&pp->lock, flags);

	dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit));

	if (pp->ops->msi_irq_ack)
		pp->ops->msi_irq_ack(d->hwirq, pp);

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}
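
/*
 * Bottom-level MSI irqchip: operates directly on the controller's
 * per-control-block PCIE_MSI_INTR0_MASK/PCIE_MSI_INTR0_STATUS
 * registers, MAX_MSI_IRQS_PER_CTRL vectors per block.
 */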
static struct irq_chip dw_pci_msi_bottom_irq_chip = {
	.name = "DWPCI-MSI",
	.irq_ack = dw_pci_bottom_ack,
	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
	.irq_set_affinity = dw_pci_msi_set_affinity,
	.irq_mask = dw_pci_bottom_mask,
	.irq_unmask = dw_pci_bottom_unmask,
};

static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs,
				    void *args)
{
	struct pcie_port *pp = domain->host_data;
	unsigned long flags;
	u32 i;
	int bit;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
				      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);

	if (bit < 0)
		return -ENOSPC;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_info(domain, virq + i, bit + i,
				    &dw_pci_msi_bottom_irq_chip,
				    pp, handle_edge_irq,
				    NULL, NULL);

	return 0;
}

static void dw_pcie_irq_domain_free(struct irq_domain *domain,
				    unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct pcie_port *pp = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&pp->lock, flags);

	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
			      order_base_2(nr_irqs));

	raw_spin_unlock_irqrestore(&pp->lock, flags);
}

static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
	.alloc	= dw_pcie_irq_domain_alloc,
	.free	= dw_pcie_irq_domain_free,
};
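
/*
 * Two-level domain setup: a linear IRQ domain models the controller's
 * num_vectors MSI vectors, and a PCI MSI domain is stacked on top so
 * the PCI core can allocate vectors through the generic MSI layer.
 */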
int dw_pcie_allocate_domains(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node);

	pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
						  &dw_pcie_msi_domain_ops, pp);
	if (!pp->irq_domain) {
		dev_err(pci->dev, "Failed to create IRQ domain\n");
		return -ENOMEM;
	}

	pp->msi_domain = pci_msi_create_irq_domain(fwnode,
						   &dw_pcie_msi_domain_info,
						   pp->irq_domain);
	if (!pp->msi_domain) {
		dev_err(pci->dev, "Failed to create MSI domain\n");
		irq_domain_remove(pp->irq_domain);
		return -ENOMEM;
	}

	return 0;
}

void dw_pcie_free_msi(struct pcie_port *pp)
{
	irq_set_chained_handler(pp->msi_irq, NULL);
	irq_set_handler_data(pp->msi_irq, NULL);

	irq_domain_remove(pp->msi_domain);
	irq_domain_remove(pp->irq_domain);
}
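
/*
 * The MSI target is an ordinary page, DMA-mapped only so that a valid
 * bus address can be programmed into PCIE_MSI_ADDR_LO/HI below; the
 * controller matches inbound writes against that address and raises
 * the MSI interrupt rather than forwarding them to memory.
 */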
void dw_pcie_msi_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct page *page;
	u64 msi_target;

	page = alloc_page(GFP_KERNEL);
	pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, pp->msi_data)) {
		dev_err(dev, "Failed to map MSI data\n");
		__free_page(page);
		return;
	}
	msi_target = (u64)pp->msi_data;

	/* Program the msi_data */
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
			    lower_32_bits(msi_target));
	dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
			    upper_32_bits(msi_target));
}
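
/*
 * Common host bringup: map the "config" region, collect the bridge
 * windows (I/O, memory, config, bus range) from DT, set up MSI
 * handling, run the glue driver's host_init callback and finally
 * enumerate the bus.
 */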
int dw_pcie_host_init(struct pcie_port *pp)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource_entry *win, *tmp;
	struct pci_bus *bus, *child;
	struct pci_host_bridge *bridge;
	struct resource *cfg_res;
	int ret;

	raw_spin_lock_init(&pci->pp.lock);

	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
	if (cfg_res) {
		pp->cfg0_size = resource_size(cfg_res) >> 1;
		pp->cfg1_size = resource_size(cfg_res) >> 1;
		pp->cfg0_base = cfg_res->start;
		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
	} else if (!pp->va_cfg0_base) {
		dev_err(dev, "Missing *config* reg space\n");
	}

	bridge = pci_alloc_host_bridge(0);
	if (!bridge)
		return -ENOMEM;

	ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff,
					&bridge->windows, &pp->io_base);
	if (ret)
		return ret;

	ret = devm_request_pci_bus_resources(dev, &bridge->windows);
	if (ret)
		goto error;

	/* Get the I/O and memory ranges from DT */
	resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
		switch (resource_type(win->res)) {
		case IORESOURCE_IO:
			ret = devm_pci_remap_iospace(dev, win->res,
						     pp->io_base);
			if (ret) {
				dev_warn(dev, "Error %d: failed to map resource %pR\n",
					 ret, win->res);
				resource_list_destroy_entry(win);
			} else {
				pp->io = win->res;
				pp->io->name = "I/O";
				pp->io_size = resource_size(pp->io);
				pp->io_bus_addr = pp->io->start - win->offset;
			}
			break;
		case IORESOURCE_MEM:
			pp->mem = win->res;
			pp->mem->name = "MEM";
			pp->mem_size = resource_size(pp->mem);
			pp->mem_bus_addr = pp->mem->start - win->offset;
			break;
		case 0:
			pp->cfg = win->res;
			pp->cfg0_size = resource_size(pp->cfg) >> 1;
			pp->cfg1_size = resource_size(pp->cfg) >> 1;
			pp->cfg0_base = pp->cfg->start;
			pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
			break;
		case IORESOURCE_BUS:
			pp->busn = win->res;
			break;
		}
	}

	if (!pci->dbi_base) {
		pci->dbi_base = devm_pci_remap_cfgspace(dev,
						pp->cfg->start,
						resource_size(pp->cfg));
		if (!pci->dbi_base) {
			dev_err(dev, "Error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	pp->mem_base = pp->mem->start;

	if (!pp->va_cfg0_base) {
		pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
					pp->cfg0_base, pp->cfg0_size);
		if (!pp->va_cfg0_base) {
			dev_err(dev, "Error with ioremap in function\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	if (!pp->va_cfg1_base) {
		pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
					pp->cfg1_base,
					pp->cfg1_size);
		if (!pp->va_cfg1_base) {
			dev_err(dev, "Error with ioremap\n");
			ret = -ENOMEM;
			goto error;
		}
	}

	ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport);
	if (ret)
		pci->num_viewport = 2;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		/*
		 * If a specific SoC driver needs to change the
		 * default number of vectors, it needs to implement
		 * the set_num_vectors callback (an illustrative
		 * sketch follows this function).
		 */
		if (!pp->ops->set_num_vectors) {
			pp->num_vectors = MSI_DEF_NUM_VECTORS;
		} else {
			pp->ops->set_num_vectors(pp);

			if (pp->num_vectors > MAX_MSI_IRQS ||
			    pp->num_vectors == 0) {
				dev_err(dev,
					"Invalid number of vectors\n");
				ret = -EINVAL;
				goto error;
			}
		}

		if (!pp->ops->msi_host_init) {
			ret = dw_pcie_allocate_domains(pp);
			if (ret)
				goto error;

			if (pp->msi_irq)
				irq_set_chained_handler_and_data(pp->msi_irq,
							    dw_chained_msi_isr,
							    pp);
		} else {
			ret = pp->ops->msi_host_init(pp);
			if (ret < 0)
				goto error;
		}
	}

	if (pp->ops->host_init) {
		ret = pp->ops->host_init(pp);
		if (ret)
			goto error;
	}

	pp->root_bus_nr = pp->busn->start;

	bridge->dev.parent = dev;
	bridge->sysdata = pp;
	bridge->busnr = pp->root_bus_nr;
	bridge->ops = &dw_pcie_ops;
	bridge->map_irq = of_irq_parse_and_map_pci;
	bridge->swizzle_irq = pci_common_swizzle;

	ret = pci_scan_root_bus_bridge(bridge);
	if (ret)
		goto error;

	bus = bridge->bus;

	if (pp->ops->scan_bus)
		pp->ops->scan_bus(pp);

	pci_bus_size_bridges(bus);
	pci_bus_assign_resources(bus);

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(bus);
	return 0;

error:
	pci_free_host_bridge(bridge);
	return ret;
}
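
/*
 * Illustrative sketch only (not part of this driver): a hypothetical
 * glue driver whose controller implements a single MSI control block
 * could wire up the set_num_vectors callback mentioned above like so:
 *
 *	static void foo_pcie_set_num_vectors(struct pcie_port *pp)
 *	{
 *		pp->num_vectors = MAX_MSI_IRQS_PER_CTRL;
 *	}
 *
 *	static const struct dw_pcie_host_ops foo_pcie_host_ops = {
 *		.set_num_vectors = foo_pcie_set_num_vectors,
 *	};
 */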
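
/*
 * Accesses to devices below the root port go through an iATU config
 * viewport: CFG0 for the root port's immediate child bus, CFG1 for
 * buses further downstream. With only two viewports available, index 1
 * is shared with the I/O window, which is why it is re-programmed for
 * I/O after each config access.
 */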
static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				     u32 devfn, int where, int size, u32 *val,
				     bool write)
{
	int ret, type;
	u32 busdev, cfg_size;
	u64 cpu_addr;
	void __iomem *va_cfg_base;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) |
		 PCIE_ATU_FUNC(PCI_FUNC(devfn));

	if (bus->parent->number == pp->root_bus_nr) {
		type = PCIE_ATU_TYPE_CFG0;
		cpu_addr = pp->cfg0_base;
		cfg_size = pp->cfg0_size;
		va_cfg_base = pp->va_cfg0_base;
	} else {
		type = PCIE_ATU_TYPE_CFG1;
		cpu_addr = pp->cfg1_base;
		cfg_size = pp->cfg1_size;
		va_cfg_base = pp->va_cfg1_base;
	}

	dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
				  type, cpu_addr,
				  busdev, cfg_size);
	if (write)
		ret = dw_pcie_write(va_cfg_base + where, size, *val);
	else
		ret = dw_pcie_read(va_cfg_base + where, size, val);

	if (pci->num_viewport <= 2)
		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1,
					  PCIE_ATU_TYPE_IO, pp->io_base,
					  pp->io_bus_addr, pp->io_size);

	return ret;
}

static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	if (pp->ops->rd_other_conf)
		return pp->ops->rd_other_conf(pp, bus, devfn, where,
					      size, val);

	return dw_pcie_access_other_conf(pp, bus, devfn, where, size, val,
					 false);
}

static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	if (pp->ops->wr_other_conf)
		return pp->ops->wr_other_conf(pp, bus, devfn, where,
					      size, val);

	return dw_pcie_access_other_conf(pp, bus, devfn, where, size, &val,
					 true);
}

static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
				int dev)
{
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	/* If there is no link, then there is no device */
	if (bus->number != pp->root_bus_nr) {
		if (!dw_pcie_link_up(pci))
			return 0;
	}

	/* Access only one slot on each root port */
	if (bus->number == pp->root_bus_nr && dev > 0)
		return 0;

	return 1;
}

static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			   int size, u32 *val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_rd_own_conf(pp, where, size, val);

	return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
}

static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			   int where, int size, u32 val)
{
	struct pcie_port *pp = bus->sysdata;

	if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (bus->number == pp->root_bus_nr)
		return dw_pcie_wr_own_conf(pp, where, size, val);

	return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
}

static struct pci_ops dw_pcie_ops = {
	.read = dw_pcie_rd_conf,
	.write = dw_pcie_wr_conf,
};
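
/*
 * Controllers with the "unrolled" iATU have no viewport register;
 * reading PCIE_ATU_VIEWPORT there returns all ones, which is used to
 * detect the register layout at run time.
 */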
static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci)
{
	u32 val;

	val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT);
	if (val == 0xffffffff)
		return 1;

	return 0;
}
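
/*
 * Program the RC: mask and enable the MSI controls, set up the BARs,
 * bus numbers, interrupt pin and command register, and (unless the
 * platform does its own address translation) the outbound iATU
 * windows.
 */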
void dw_pcie_setup_rc(struct pcie_port *pp)
{
	u32 val, ctrl, num_ctrls;
	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);

	dw_pcie_setup(pci);

	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;

	/* Initialize IRQ Status array */
	for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
		pp->irq_mask[ctrl] = ~0;
		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
				    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
				    4, pp->irq_mask[ctrl]);
		dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
				    (ctrl * MSI_REG_CTRL_BLOCK_SIZE),
				    4, ~0);
	}

	/* Setup RC BARs */
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004);
	dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000);

	/* Setup interrupt pins */
	dw_pcie_dbi_ro_wr_en(pci);
	val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE);
	val &= 0xffff00ff;
	val |= 0x00000100;
	dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val);
	dw_pcie_dbi_ro_wr_dis(pci);

	/* Setup bus numbers */
	val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS);
	val &= 0xff000000;
	val |= 0x00ff0100;
	dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val);

	/* Setup command register */
	val = dw_pcie_readl_dbi(pci, PCI_COMMAND);
	val &= 0xffff0000;
	val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
	       PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
	dw_pcie_writel_dbi(pci, PCI_COMMAND, val);

	/*
	 * If the platform provides ->rd_other_conf, it means the platform
	 * uses its own address translation component rather than ATU, so
	 * we should not program the ATU here.
	 */
	if (!pp->ops->rd_other_conf) {
		/* Get iATU unroll support */
		pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci);
		dev_dbg(pci->dev, "iATU unroll: %s\n",
			pci->iatu_unroll_enabled ? "enabled" : "disabled");

		if (pci->iatu_unroll_enabled && !pci->atu_base)
			pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;

		dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0,
					  PCIE_ATU_TYPE_MEM, pp->mem_base,
					  pp->mem_bus_addr, pp->mem_size);
		if (pci->num_viewport > 2)
			dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2,
						  PCIE_ATU_TYPE_IO, pp->io_base,
						  pp->io_bus_addr, pp->io_size);
	}

	dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);

	/* Enable write permission for the DBI read-only register */
	dw_pcie_dbi_ro_wr_en(pci);
	/* Program correct class for RC */
	dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
	/* Better disable write permission right after the update */
	dw_pcie_dbi_ro_wr_dis(pci);

	dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
	val |= PORT_LOGIC_SPEED_CHANGE;
	dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
}
|