2011-09-20 00:45:05 +07:00
|
|
|
/*
|
|
|
|
* Support PCI/PCIe on PowerNV platforms
|
|
|
|
*
|
|
|
|
* Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License
|
|
|
|
* as published by the Free Software Foundation; either version
|
|
|
|
* 2 of the License, or (at your option) any later version.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/pci.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/string.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/irq.h>
|
|
|
|
#include <linux/io.h>
|
2011-09-20 00:45:06 +07:00
|
|
|
#include <linux/msi.h>
|
2013-05-21 10:33:09 +07:00
|
|
|
#include <linux/iommu.h>
|
2011-09-20 00:45:05 +07:00
|
|
|
|
|
|
|
#include <asm/sections.h>
|
|
|
|
#include <asm/io.h>
|
|
|
|
#include <asm/prom.h>
|
|
|
|
#include <asm/pci-bridge.h>
|
|
|
|
#include <asm/machdep.h>
|
2013-03-06 04:12:37 +07:00
|
|
|
#include <asm/msi_bitmap.h>
|
2011-09-20 00:45:05 +07:00
|
|
|
#include <asm/ppc-pci.h>
|
2016-05-20 13:41:40 +07:00
|
|
|
#include <asm/pnv-pci.h>
|
2011-09-20 00:45:05 +07:00
|
|
|
#include <asm/opal.h>
|
|
|
|
#include <asm/iommu.h>
|
|
|
|
#include <asm/tce.h>
|
2012-03-16 01:18:00 +07:00
|
|
|
#include <asm/firmware.h>
|
2013-06-20 12:21:15 +07:00
|
|
|
#include <asm/eeh_event.h>
|
|
|
|
#include <asm/eeh.h>
|
2011-09-20 00:45:05 +07:00
|
|
|
|
|
|
|
#include "powernv.h"
|
|
|
|
#include "pci.h"
|
|
|
|
|
2016-05-20 13:41:40 +07:00
|
|
|
/*
 * Resolve the OPAL slot identifier for a PCI device node.
 *
 * Walks up the device tree from @np until the enclosing IODA2 PHB node
 * is found, then combines that PHB's "ibm,opal-phbid" with the device's
 * bus/devfn (extracted from its "reg" property) into a slot ID.
 *
 * Returns 0 on success with *@id filled in, -ENXIO when a required
 * property is missing, or -ENODEV when no IODA2 PHB ancestor exists.
 */
int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
{
	struct device_node *parent = np;
	u32 bdfn;
	u64 phbid;
	int ret;

	ret = of_property_read_u32(np, "reg", &bdfn);
	if (ret)
		return -ENXIO;

	/* "reg" encodes bus/dev/fn in bits 23:8 */
	bdfn = ((bdfn & 0x00ffff00) >> 8);
	while ((parent = of_get_parent(parent))) {
		/* Past the PCI topology: no PHB found on this path */
		if (!PCI_DN(parent)) {
			of_node_put(parent);
			break;
		}

		/* Keep climbing until we reach the IODA2 PHB node */
		if (!of_device_is_compatible(parent, "ibm,ioda2-phb")) {
			of_node_put(parent);
			continue;
		}

		ret = of_property_read_u64(parent, "ibm,opal-phbid", &phbid);
		if (ret) {
			of_node_put(parent);
			return -ENXIO;
		}

		*id = PCI_SLOT_ID(phbid, bdfn);
		/*
		 * Fix: drop the reference taken by of_get_parent() on the
		 * success path as well; previously it leaked here.
		 */
		of_node_put(parent);
		return 0;
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_slot_id);
|
|
|
|
|
2016-05-20 13:41:41 +07:00
|
|
|
/*
 * Retrieve a device-tree blob from firmware for the given phandle.
 *
 * Thin wrapper around the OPAL_GET_DEVICE_TREE call. Returns the
 * firmware's (non-negative) result on success, -ENXIO when the call is
 * not implemented by this firmware, or -EIO on any firmware failure.
 */
int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len)
{
	int64_t ret;

	/* Bail out early when this firmware lacks the call. */
	if (!opal_check_token(OPAL_GET_DEVICE_TREE))
		return -ENXIO;

	ret = opal_get_device_tree(phandle, (uint64_t)buf, len);

	return (ret < OPAL_SUCCESS) ? -EIO : ret;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_device_tree);
|
|
|
|
|
|
|
|
/*
 * Query the presence state of the PCI slot identified by @id.
 *
 * Writes the firmware-reported state through @state. Returns 0 on
 * success, -ENXIO when the firmware does not implement the call, or
 * -EIO on firmware failure.
 */
int pnv_pci_get_presence_state(uint64_t id, uint8_t *state)
{
	int64_t ret;

	if (!opal_check_token(OPAL_PCI_GET_PRESENCE_STATE))
		return -ENXIO;

	ret = opal_pci_get_presence_state(id, (uint64_t)state);

	return (ret == OPAL_SUCCESS) ? 0 : -EIO;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_presence_state);
|
|
|
|
|
|
|
|
/*
 * Query the power state of the PCI slot identified by @id.
 *
 * Writes the firmware-reported state through @state. Returns 0 on
 * success, -ENXIO when the firmware does not implement the call, or
 * -EIO on firmware failure.
 */
int pnv_pci_get_power_state(uint64_t id, uint8_t *state)
{
	int64_t ret;

	if (!opal_check_token(OPAL_PCI_GET_POWER_STATE))
		return -ENXIO;

	ret = opal_pci_get_power_state(id, (uint64_t)state);

	return (ret == OPAL_SUCCESS) ? 0 : -EIO;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_power_state);
|
|
|
|
|
|
|
|
/*
 * Request a slot power state change through firmware.
 *
 * The OPAL call may complete synchronously or asynchronously. For the
 * async case we grab an async token, wait for the completion message,
 * and optionally copy it out to @msg so the caller can inspect it.
 *
 * Returns 0 when the change completed synchronously, 1 when an async
 * completion message was delivered to @msg, a negative errno on
 * failure (-ENXIO if the call is unimplemented, -EIO on firmware
 * error), or a negative value from the token/wait helpers.
 */
int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg)
{
	struct opal_msg m;
	int token, ret;
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_SET_POWER_STATE))
		return -ENXIO;

	/* May sleep or be interrupted; propagate the error as-is */
	token = opal_async_get_token_interruptible();
	if (unlikely(token < 0))
		return token;

	rc = opal_pci_set_power_state(token, id, (uint64_t)&state);
	if (rc == OPAL_SUCCESS) {
		/* Completed synchronously; nothing to wait for */
		ret = 0;
		goto exit;
	} else if (rc != OPAL_ASYNC_COMPLETION) {
		ret = -EIO;
		goto exit;
	}

	/* Async path: block until firmware posts the completion message */
	ret = opal_async_wait_response(token, &m);
	if (ret < 0)
		goto exit;

	/* Hand the completion message to the caller when requested */
	if (msg) {
		ret = 1;
		memcpy(msg, &m, sizeof(m));
	}

exit:
	/* Token must be released on every path once acquired */
	opal_async_release_token(token);
	return ret;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_power_state);
|
|
|
|
|
2011-09-20 00:45:06 +07:00
|
|
|
#ifdef CONFIG_PCI_MSI
|
2015-04-28 12:12:05 +07:00
|
|
|
/*
 * Allocate and wire up MSIs for @pdev on a PowerNV PHB.
 *
 * For each MSI descriptor: allocate a hardware IRQ from the PHB's MSI
 * bitmap, map it to a Linux virq, let the PHB backend program the MSI
 * address/data, then attach the descriptor and write the message.
 * On any per-entry failure the resources acquired for that entry are
 * rolled back in reverse order before returning the error.
 */
int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	struct msi_msg msg;
	int hwirq;
	unsigned int virq;
	int rc;

	/* No PHB or no MSI bitmap set up: MSIs unavailable */
	if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
		return -ENODEV;

	/* Device limited to 32-bit MSI but PHB only does 64-bit */
	if (pdev->no_64bit_msi && !phb->msi32_support)
		return -ENODEV;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
			pr_warn("%s: Supports only 64-bit MSIs\n",
				pci_name(pdev));
			return -ENXIO;
		}
		/* Grab one hardware IRQ number from the PHB's pool */
		hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
		if (hwirq < 0) {
			pr_warn("%s: Failed to find a free MSI\n",
				pci_name(pdev));
			return -ENOSPC;
		}
		virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
		if (!virq) {
			pr_warn("%s: Failed to map MSI to linux irq\n",
				pci_name(pdev));
			/* Roll back the bitmap allocation */
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return -ENOMEM;
		}
		/* PHB backend computes the MSI address/data into msg */
		rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
				    virq, entry->msi_attrib.is_64, &msg);
		if (rc) {
			pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
			/* Roll back mapping then bitmap, in reverse order */
			irq_dispose_mapping(virq);
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return rc;
		}
		irq_set_msi_desc(virq, entry);
		pci_write_msi_msg(virq, &msg);
	}
	return 0;
}
|
|
|
|
|
2015-04-28 12:12:05 +07:00
|
|
|
/*
 * Tear down all MSIs previously set up for @pdev.
 *
 * For each mapped entry: detach the MSI descriptor, dispose of the
 * Linux virq mapping, then release the hardware IRQ back to the PHB's
 * MSI bitmap.
 */
void pnv_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	irq_hw_number_t hwirq;

	if (WARN_ON(!phb))
		return;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->irq)
			continue;
		/*
		 * Capture the hw IRQ number before disposing of the
		 * mapping: virq_to_hw() is invalid afterwards.
		 */
		hwirq = virq_to_hw(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
		/*
		 * Free the bitmap slot only AFTER the virq mapping is
		 * gone. Freeing it first opens a race where a concurrent
		 * pnv_setup_msi_irqs() re-allocates the same hwirq and
		 * gets handed this still-live virq, which we then
		 * dispose of under it (see the race fix moving this
		 * call below irq_dispose_mapping()).
		 */
		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
	}
}
|
|
|
|
#endif /* CONFIG_PCI_MSI */
|
2011-09-20 00:45:05 +07:00
|
|
|
|
2013-11-22 15:28:45 +07:00
|
|
|
/*
 * Dump P7IOC PHB diagnostic data to the kernel log.
 *
 * @common points at an OpalIoP7IOCPhbErrorData blob returned by
 * firmware (big-endian fields). Only registers that latched a
 * non-zero value are printed, to keep the log concise.
 */
static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
					 struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoP7IOCPhbErrorData *data;
	int i;

	data = (struct OpalIoP7IOCPhbErrorData *)common;
	pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));

	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->p7iocPlssr || data->p7iocCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->p7iocPlssr),
			be64_to_cpu(data->p7iocCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	/* Print only PEST entries whose A or B valid bit (MSB) is set */
	for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
		if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
		    (be64_to_cpu(data->pestB[i]) >> 63) == 0)
			continue;

		pr_info("PE[%3d] A/B: %016llx %016llx\n",
			i, be64_to_cpu(data->pestA[i]),
			be64_to_cpu(data->pestB[i]));
	}
}
|
|
|
|
|
2013-11-22 15:28:45 +07:00
|
|
|
/*
 * Dump PHB3 diagnostic data to the kernel log.
 *
 * @common points at an OpalIoPhb3ErrorData blob returned by firmware
 * (big-endian fields). Only registers that latched a non-zero value
 * are printed, to keep the log concise.
 */
static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb3ErrorData *data;
	int i;

	data = (struct OpalIoPhb3ErrorData*)common;
	pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->nFir)
		pr_info("nFir: %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	/* Print only PEST entries whose A or B valid bit (MSB) is set */
	for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
		if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
		    (be64_to_cpu(data->pestB[i]) >> 63) == 0)
			continue;

		pr_info("PE[%3d] A/B: %016llx %016llx\n",
			i, be64_to_cpu(data->pestA[i]),
			be64_to_cpu(data->pestB[i]));
	}
}
|
|
|
|
|
|
|
|
void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
|
|
|
|
unsigned char *log_buff)
|
|
|
|
{
|
|
|
|
struct OpalIoPhbErrorCommon *common;
|
|
|
|
|
|
|
|
if (!hose || !log_buff)
|
|
|
|
return;
|
|
|
|
|
|
|
|
common = (struct OpalIoPhbErrorCommon *)log_buff;
|
2014-06-09 15:58:51 +07:00
|
|
|
switch (be32_to_cpu(common->ioType)) {
|
2013-11-22 15:28:45 +07:00
|
|
|
case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
|
|
|
|
pnv_pci_dump_p7ioc_diag_data(hose, common);
|
|
|
|
break;
|
|
|
|
case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
|
|
|
|
pnv_pci_dump_phb3_diag_data(hose, common);
|
2011-11-30 01:22:53 +07:00
|
|
|
break;
|
|
|
|
default:
|
2013-11-22 15:28:45 +07:00
|
|
|
pr_warn("%s: Unrecognized ioType %d\n",
|
2014-06-09 15:58:51 +07:00
|
|
|
__func__, be32_to_cpu(common->ioType));
|
2011-11-30 01:22:53 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Handle a frozen PE detected during config-space access.
 *
 * Under phb->lock: fetch the PHB diag-data from firmware, attempt to
 * clear the freeze (via the PHB's compound-PE hook when present,
 * otherwise the plain OPAL call), and dump the diag-data only when
 * the unfreeze failed.
 */
static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
{
	unsigned long flags, rc;
	int has_diag, ret = 0;

	spin_lock_irqsave(&phb->lock, flags);

	/* Fetch PHB diag-data */
	rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
					 PNV_PCI_DIAG_BUF_SIZE);
	has_diag = (rc == OPAL_SUCCESS);

	/* If PHB supports compound PE, to handle it */
	if (phb->unfreeze_pe) {
		ret = phb->unfreeze_pe(phb,
				       pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	} else {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       pe_no,
					       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (rc) {
			pr_warn("%s: Failure %ld clearing frozen "
				"PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				pe_no);
			ret = -EIO;
		}
	}

	/*
	 * For now, let's only display the diag buffer when we fail to clear
	 * the EEH status. We'll do more sensible things later when we have
	 * proper EEH support. We need to make sure we don't pollute ourselves
	 * with the normal errors generated when probing empty slots
	 */
	if (has_diag && ret)
		pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob);

	spin_unlock_irqrestore(&phb->lock, flags);
}
|
|
|
|
|
2015-03-17 12:15:03 +07:00
|
|
|
/*
 * Check (and clear) the EEH frozen state of @pdn's PE after a config
 * access.
 *
 * Resolves the PE number (falling back to the PHB's reserved PE while
 * the device is still being probed), queries the freeze state from
 * firmware or the compound-PE hook, and for any MMIO/DMA freeze
 * freezes the compound PE for consistency and hands off to
 * pnv_pci_handle_eeh_config() to clear it.
 */
static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u8 fstate;
	__be16 pcierr;
	unsigned int pe_no;
	s64 rc;

	/*
	 * Get the PE#. During the PCI probe stage, we might not
	 * setup that yet. So all ER errors should be mapped to
	 * reserved PE.
	 */
	pe_no = pdn->pe_number;
	if (pe_no == IODA_INVALID_PE) {
		pe_no = phb->ioda.reserved_pe_idx;
	}

	/*
	 * Fetch frozen state. If the PHB support compound PE,
	 * we need handle that case.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe_no);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe_no,
						&fstate,
						&pcierr,
						NULL);
		if (rc) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc, phb->hose->global_number, pe_no);
			return;
		}
	}

	pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n",
		 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);

	/* Clear the frozen state if applicable */
	if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_DMA_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
		/*
		 * If PHB supports compound PE, freeze it for
		 * consistency.
		 */
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe_no);

		pnv_pci_handle_eeh_config(phb, pe_no);
	}
}
|
|
|
|
|
2015-03-17 12:15:03 +07:00
|
|
|
/*
 * Read @size bytes of config space at @where for the device @pdn,
 * via the OPAL firmware config accessors.
 *
 * On OPAL failure the result is all-ones (PCI's "master abort" value)
 * rather than an error, matching PCI config-read conventions. Half
 * and full words come back big-endian from firmware and are converted.
 *
 * Returns PCIBIOS_SUCCESSFUL, or PCIBIOS_FUNC_NOT_SUPPORTED for an
 * unsupported access size.
 */
int pnv_pci_cfg_read(struct pci_dn *pdn,
		     int where, int size, u32 *val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;
	s64 rc;

	switch (size) {
	case 1: {
		u8 v8;
		rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
		*val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
		break;
	}
	case 2: {
		__be16 v16;
		rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
						    &v16);
		*val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
		break;
	}
	case 4: {
		__be32 v32;
		rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
		*val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
		break;
	}
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, *val);
	return PCIBIOS_SUCCESSFUL;
}
|
|
|
|
|
2015-03-17 12:15:03 +07:00
|
|
|
/*
 * Write @size bytes of config space at @where for the device @pdn,
 * via the OPAL firmware config accessors.
 *
 * NOTE(review): the OPAL call results are deliberately not checked
 * here — config writes are fire-and-forget, consistent with the read
 * path returning all-ones on failure.
 *
 * Returns PCIBIOS_SUCCESSFUL, or PCIBIOS_FUNC_NOT_SUPPORTED for an
 * unsupported access size.
 */
int pnv_pci_cfg_write(struct pci_dn *pdn,
		      int where, int size, u32 val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, val);
	switch (size) {
	case 1:
		opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
		break;
	case 2:
		opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
		break;
	case 4:
		opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return PCIBIOS_SUCCESSFUL;
}
|
|
|
|
|
|
|
|
#if CONFIG_EEH
|
2015-03-17 12:15:03 +07:00
|
|
|
static bool pnv_pci_cfg_check(struct pci_dn *pdn)
|
2014-04-24 15:00:12 +07:00
|
|
|
{
|
|
|
|
struct eeh_dev *edev = NULL;
|
2015-03-17 12:15:03 +07:00
|
|
|
struct pnv_phb *phb = pdn->phb->private_data;
|
2014-04-24 15:00:12 +07:00
|
|
|
|
|
|
|
/* EEH not enabled ? */
|
2014-04-24 15:00:09 +07:00
|
|
|
if (!(phb->flags & PNV_PHB_FLAG_EEH))
|
2014-04-24 15:00:12 +07:00
|
|
|
return true;
|
2011-09-20 00:45:05 +07:00
|
|
|
|
powerpc/eeh: No hotplug on permanently removed dev
The issue was detected in a bit complicated test case where
we have multiple hierarchical PEs shown as following figure:
+-----------------+
| PE#3 p2p#0 |
| p2p#1 |
+-----------------+
|
+-----------------+
| PE#4 pdev#0 |
| pdev#1 |
+-----------------+
PE#4 (have 2 PCI devices) is the child of PE#3, which has 2 p2p
bridges. We accidentally had less-known scenario: PE#4 was removed
permanently from the system because of permanent failure (e.g.
exceeding the max allowd failure times in last hour), then we detects
EEH errors on PE#3 and tried to recover it. However, eeh_dev instances
for pdev#0/1 were not detached from PE#4, which was still connected to
PE#3. All of that was because of the fact that we rely on count-based
pcibios_release_device(), which isn't reliable enough. When doing
recovery for PE#3, we still apply hotplug on PE#4 and pdev#0/1, which
are not valid any more. Eventually, we run into kernel crash.
The patch fixes above issue from two aspects. For unplug, we simply
skip those permanently removed PE, whose state is (EEH_PE_STATE_ISOLATED
&& !EEH_PE_STATE_RECOVERING) and its frozen count should be greater
than EEH_MAX_ALLOWED_FREEZES. For plug, we marked all permanently
removed EEH devices with EEH_DEV_REMOVED and return 0xFF's on read
its PCI config so that PCI core will omit them.
Signed-off-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2014-04-24 15:00:19 +07:00
|
|
|
/* PE reset or device removed ? */
|
2015-03-17 12:15:03 +07:00
|
|
|
edev = pdn->edev;
|
powerpc/eeh: No hotplug on permanently removed dev
The issue was detected in a bit complicated test case where
we have multiple hierarchical PEs shown as following figure:
+-----------------+
| PE#3 p2p#0 |
| p2p#1 |
+-----------------+
|
+-----------------+
| PE#4 pdev#0 |
| pdev#1 |
+-----------------+
PE#4 (have 2 PCI devices) is the child of PE#3, which has 2 p2p
bridges. We accidentally had less-known scenario: PE#4 was removed
permanently from the system because of permanent failure (e.g.
exceeding the max allowd failure times in last hour), then we detects
EEH errors on PE#3 and tried to recover it. However, eeh_dev instances
for pdev#0/1 were not detached from PE#4, which was still connected to
PE#3. All of that was because of the fact that we rely on count-based
pcibios_release_device(), which isn't reliable enough. When doing
recovery for PE#3, we still apply hotplug on PE#4 and pdev#0/1, which
are not valid any more. Eventually, we run into kernel crash.
The patch fixes above issue from two aspects. For unplug, we simply
skip those permanently removed PE, whose state is (EEH_PE_STATE_ISOLATED
&& !EEH_PE_STATE_RECOVERING) and its frozen count should be greater
than EEH_MAX_ALLOWED_FREEZES. For plug, we marked all permanently
removed EEH devices with EEH_DEV_REMOVED and return 0xFF's on read
its PCI config so that PCI core will omit them.
Signed-off-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2014-04-24 15:00:19 +07:00
|
|
|
if (edev) {
|
|
|
|
if (edev->pe &&
|
2014-10-01 14:07:50 +07:00
|
|
|
(edev->pe->state & EEH_PE_CFG_BLOCKED))
|
powerpc/eeh: No hotplug on permanently removed dev
The issue was detected in a bit complicated test case where
we have multiple hierarchical PEs shown as following figure:
+-----------------+
| PE#3 p2p#0 |
| p2p#1 |
+-----------------+
|
+-----------------+
| PE#4 pdev#0 |
| pdev#1 |
+-----------------+
PE#4 (have 2 PCI devices) is the child of PE#3, which has 2 p2p
bridges. We accidentally had less-known scenario: PE#4 was removed
permanently from the system because of permanent failure (e.g.
exceeding the max allowd failure times in last hour), then we detects
EEH errors on PE#3 and tried to recover it. However, eeh_dev instances
for pdev#0/1 were not detached from PE#4, which was still connected to
PE#3. All of that was because of the fact that we rely on count-based
pcibios_release_device(), which isn't reliable enough. When doing
recovery for PE#3, we still apply hotplug on PE#4 and pdev#0/1, which
are not valid any more. Eventually, we run into kernel crash.
The patch fixes above issue from two aspects. For unplug, we simply
skip those permanently removed PE, whose state is (EEH_PE_STATE_ISOLATED
&& !EEH_PE_STATE_RECOVERING) and its frozen count should be greater
than EEH_MAX_ALLOWED_FREEZES. For plug, we marked all permanently
removed EEH devices with EEH_DEV_REMOVED and return 0xFF's on read
its PCI config so that PCI core will omit them.
Signed-off-by: Gavin Shan <gwshan@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
2014-04-24 15:00:19 +07:00
|
|
|
return false;
|
|
|
|
|
|
|
|
if (edev->mode & EEH_DEV_REMOVED)
|
|
|
|
return false;
|
|
|
|
}
|
2014-04-24 15:00:12 +07:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
#else
|
2015-03-17 12:15:03 +07:00
|
|
|
static inline pnv_pci_cfg_check(struct pci_dn *pdn)
|
2014-04-24 15:00:12 +07:00
|
|
|
{
|
|
|
|
return true;
|
2011-09-20 00:45:05 +07:00
|
|
|
}
|
2014-04-24 15:00:12 +07:00
|
|
|
#endif /* CONFIG_EEH */
|
2011-09-20 00:45:05 +07:00
|
|
|
|
2013-06-27 12:46:48 +07:00
|
|
|
/*
 * pci_ops.read hook: read @size bytes at @where from the device
 * @devfn on @bus into *@val, with EEH failure detection.
 */
static int pnv_pci_read_config(struct pci_bus *bus,
			       unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	int rc;

	/* Default to all-ones so a rejected access looks like no device */
	*val = 0xFFFFFFFF;

	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn || !pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	rc = pnv_pci_cfg_read(pdn, where, size, val);

	phb = pdn->phb->private_data;
	if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
		/* All-ones may indicate a frozen PE: probe for EEH failure */
		if (*val == EEH_IO_ERROR_VALUE(size) &&
		    eeh_dev_check_failure(pdn->edev))
			return PCIBIOS_DEVICE_NOT_FOUND;
	} else {
		pnv_pci_config_check_eeh(pdn);
	}

	return rc;
}
|
|
|
|
|
|
|
|
/*
 * pci_ops.write hook: write @size bytes of @val at @where to the
 * device @devfn on @bus, then poke EEH state when EEH is not handling
 * this PHB.
 */
static int pnv_pci_write_config(struct pci_bus *bus,
				unsigned int devfn,
				int where, int size, u32 val)
{
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	int rc;

	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn || !pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	rc = pnv_pci_cfg_write(pdn, where, size, val);

	phb = pdn->phb->private_data;
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		pnv_pci_config_check_eeh(pdn);

	return rc;
}
|
|
|
|
|
2011-09-20 00:45:05 +07:00
|
|
|
struct pci_ops pnv_pci_ops = {
|
2013-06-27 12:46:48 +07:00
|
|
|
.read = pnv_pci_read_config,
|
2011-09-20 00:45:05 +07:00
|
|
|
.write = pnv_pci_write_config,
|
|
|
|
};
|
|
|
|
|
2015-06-05 13:35:14 +07:00
|
|
|
static __be64 *pnv_tce(struct iommu_table *tbl, long idx)
|
|
|
|
{
|
|
|
|
__be64 *tmp = ((__be64 *)tbl->it_base);
|
2015-06-05 13:35:19 +07:00
|
|
|
int level = tbl->it_indirect_levels;
|
|
|
|
const long shift = ilog2(tbl->it_level_size);
|
|
|
|
unsigned long mask = (tbl->it_level_size - 1) << (level * shift);
|
|
|
|
|
|
|
|
while (level) {
|
|
|
|
int n = (idx & mask) >> (level * shift);
|
|
|
|
unsigned long tce = be64_to_cpu(tmp[n]);
|
|
|
|
|
|
|
|
tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
|
|
|
|
idx &= ~mask;
|
|
|
|
mask >>= shift;
|
|
|
|
--level;
|
|
|
|
}
|
2015-06-05 13:35:14 +07:00
|
|
|
|
|
|
|
return tmp + idx;
|
|
|
|
}
|
|
|
|
|
2015-06-05 13:35:06 +07:00
|
|
|
int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
|
|
|
|
unsigned long uaddr, enum dma_data_direction direction,
|
2016-08-04 03:46:00 +07:00
|
|
|
unsigned long attrs)
|
2011-09-20 00:45:05 +07:00
|
|
|
{
|
2015-06-05 13:35:05 +07:00
|
|
|
u64 proto_tce = iommu_direction_to_tce_perm(direction);
|
2015-06-05 13:35:14 +07:00
|
|
|
u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
|
|
|
|
long i;
|
2011-09-20 00:45:05 +07:00
|
|
|
|
2016-02-17 14:26:31 +07:00
|
|
|
if (proto_tce & TCE_PCI_WRITE)
|
|
|
|
proto_tce |= TCE_PCI_READ;
|
|
|
|
|
2015-06-05 13:35:14 +07:00
|
|
|
for (i = 0; i < npages; i++) {
|
|
|
|
unsigned long newtce = proto_tce |
|
|
|
|
((rpn + i) << tbl->it_page_shift);
|
|
|
|
unsigned long idx = index - tbl->it_offset + i;
|
2011-11-07 01:55:59 +07:00
|
|
|
|
2015-06-05 13:35:14 +07:00
|
|
|
*(pnv_tce(tbl, idx)) = cpu_to_be64(newtce);
|
|
|
|
}
|
2011-09-20 00:45:05 +07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-06-05 13:35:15 +07:00
|
|
|
#ifdef CONFIG_IOMMU_API
/*
 * Atomically replace the TCE at @index with one mapping *@hpa for
 * *@direction, returning the previous host address and direction
 * through the same pointers.  Always returns 0.
 */
int pnv_tce_xchg(struct iommu_table *tbl, long index,
		 unsigned long *hpa, enum dma_data_direction *direction)
{
	unsigned long offset = index - tbl->it_offset;
	unsigned long new_tce;
	unsigned long old_tce;

	BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));

	new_tce = *hpa | iommu_direction_to_tce_perm(*direction);
	/* Write permission implies read permission on this hardware */
	if (new_tce & TCE_PCI_WRITE)
		new_tce |= TCE_PCI_READ;

	old_tce = be64_to_cpu(xchg(pnv_tce(tbl, offset),
				   cpu_to_be64(new_tce)));

	*hpa = old_tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	*direction = iommu_tce_direction(old_tce);

	return 0;
}
#endif
|
|
|
|
|
2015-06-05 13:35:06 +07:00
|
|
|
void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
|
2011-09-20 00:45:05 +07:00
|
|
|
{
|
2015-06-05 13:35:14 +07:00
|
|
|
long i;
|
2011-11-07 01:55:59 +07:00
|
|
|
|
2015-06-05 13:35:14 +07:00
|
|
|
for (i = 0; i < npages; i++) {
|
|
|
|
unsigned long idx = index - tbl->it_offset + i;
|
2011-09-20 00:45:05 +07:00
|
|
|
|
2015-06-05 13:35:14 +07:00
|
|
|
*(pnv_tce(tbl, idx)) = cpu_to_be64(0);
|
|
|
|
}
|
2011-09-20 00:45:05 +07:00
|
|
|
}
|
|
|
|
|
2015-06-05 13:35:06 +07:00
|
|
|
unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
|
2012-09-04 22:19:35 +07:00
|
|
|
{
|
2015-06-05 13:35:14 +07:00
|
|
|
return *(pnv_tce(tbl, index - tbl->it_offset));
|
2012-09-04 22:19:35 +07:00
|
|
|
}
|
|
|
|
|
2015-06-05 13:35:09 +07:00
|
|
|
struct iommu_table *pnv_pci_table_alloc(int nid)
|
|
|
|
{
|
|
|
|
struct iommu_table *tbl;
|
|
|
|
|
|
|
|
tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
|
|
|
|
INIT_LIST_HEAD_RCU(&tbl->it_group_list);
|
|
|
|
|
|
|
|
return tbl;
|
|
|
|
}
|
|
|
|
|
|
|
|
long pnv_pci_link_table_and_group(int node, int num,
|
|
|
|
struct iommu_table *tbl,
|
|
|
|
struct iommu_table_group *table_group)
|
|
|
|
{
|
|
|
|
struct iommu_table_group_link *tgl = NULL;
|
|
|
|
|
|
|
|
if (WARN_ON(!tbl || !table_group))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
|
|
|
|
node);
|
|
|
|
if (!tgl)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
tgl->table_group = table_group;
|
|
|
|
list_add_rcu(&tgl->next, &tbl->it_group_list);
|
|
|
|
|
|
|
|
table_group->tables[num] = tbl;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void pnv_iommu_table_group_link_free(struct rcu_head *head)
|
|
|
|
{
|
|
|
|
struct iommu_table_group_link *tgl = container_of(head,
|
|
|
|
struct iommu_table_group_link, rcu);
|
|
|
|
|
|
|
|
kfree(tgl);
|
|
|
|
}
|
|
|
|
|
|
|
|
void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
|
|
|
|
struct iommu_table_group *table_group)
|
|
|
|
{
|
|
|
|
long i;
|
|
|
|
bool found;
|
|
|
|
struct iommu_table_group_link *tgl;
|
|
|
|
|
|
|
|
if (!tbl || !table_group)
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* Remove link to a group from table's list of attached groups */
|
|
|
|
found = false;
|
|
|
|
list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
|
|
|
|
if (tgl->table_group == table_group) {
|
|
|
|
list_del_rcu(&tgl->next);
|
|
|
|
call_rcu(&tgl->rcu, pnv_iommu_table_group_link_free);
|
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (WARN_ON(!found))
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* Clean a pointer to iommu_table in iommu_table_group::tables[] */
|
|
|
|
found = false;
|
|
|
|
for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
|
|
|
|
if (table_group->tables[i] == tbl) {
|
|
|
|
table_group->tables[i] = NULL;
|
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
WARN_ON(!found);
|
|
|
|
}
|
|
|
|
|
2011-09-20 00:45:05 +07:00
|
|
|
/*
 * Initialise @tbl to describe a TCE table backed by @tce_mem
 * (@tce_size bytes), mapping DMA space starting at @dma_offset with
 * IOMMU pages of 1 << @page_shift bytes.
 */
void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
			       void *tce_mem, u64 tce_size,
			       u64 dma_offset, unsigned page_shift)
{
	tbl->it_type = TCE_PCI;
	tbl->it_blocksize = 16;
	tbl->it_busno = 0;
	tbl->it_index = 0;
	tbl->it_base = (unsigned long)tce_mem;
	tbl->it_page_shift = page_shift;
	/* First IOMMU page number covered by this table */
	tbl->it_offset = dma_offset >> tbl->it_page_shift;
	/* Each TCE is 8 bytes */
	tbl->it_size = tce_size >> 3;
}
|
|
|
|
|
2015-04-28 12:12:05 +07:00
|
|
|
void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
|
2011-09-20 00:45:05 +07:00
|
|
|
{
|
|
|
|
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
|
|
|
|
struct pnv_phb *phb = hose->private_data;
|
2015-03-25 15:23:57 +07:00
|
|
|
#ifdef CONFIG_PCI_IOV
|
|
|
|
struct pnv_ioda_pe *pe;
|
|
|
|
struct pci_dn *pdn;
|
|
|
|
|
|
|
|
/* Fix the VF pdn PE number */
|
|
|
|
if (pdev->is_virtfn) {
|
|
|
|
pdn = pci_get_pdn(pdev);
|
|
|
|
WARN_ON(pdn->pe_number != IODA_INVALID_PE);
|
|
|
|
list_for_each_entry(pe, &phb->ioda.pe_list, list) {
|
|
|
|
if (pe->rid == ((pdev->bus->number << 8) |
|
|
|
|
(pdev->devfn & 0xff))) {
|
|
|
|
pdn->pe_number = pe->pe_number;
|
|
|
|
pe->pdev = pdev;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif /* CONFIG_PCI_IOV */
|
2011-09-20 00:45:05 +07:00
|
|
|
|
|
|
|
if (phb && phb->dma_dev_setup)
|
|
|
|
phb->dma_dev_setup(phb, pdev);
|
|
|
|
}
|
|
|
|
|
2016-02-09 11:50:22 +07:00
|
|
|
void pnv_pci_dma_bus_setup(struct pci_bus *bus)
|
|
|
|
{
|
|
|
|
struct pci_controller *hose = bus->sysdata;
|
|
|
|
struct pnv_phb *phb = hose->private_data;
|
|
|
|
struct pnv_ioda_pe *pe;
|
|
|
|
|
|
|
|
list_for_each_entry(pe, &phb->ioda.pe_list, list) {
|
|
|
|
if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (!pe->pbus)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (bus->number == ((pe->rid >> 8) & 0xFF)) {
|
|
|
|
pe->pbus = bus;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-05-10 13:59:18 +07:00
|
|
|
void pnv_pci_shutdown(void)
|
|
|
|
{
|
|
|
|
struct pci_controller *hose;
|
|
|
|
|
2015-05-27 13:06:59 +07:00
|
|
|
list_for_each_entry(hose, &hose_list, list_node)
|
|
|
|
if (hose->controller_ops.shutdown)
|
|
|
|
hose->controller_ops.shutdown(hose);
|
2013-05-10 13:59:18 +07:00
|
|
|
}
|
|
|
|
|
2013-04-26 02:20:57 +07:00
|
|
|
/* Fixup wrong class code in p7ioc and p8 root complex */
|
2012-12-22 05:04:10 +07:00
|
|
|
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
|
2011-11-07 01:56:00 +07:00
|
|
|
{
|
|
|
|
dev->class = PCI_CLASS_BRIDGE_PCI << 8;
|
|
|
|
}
|
|
|
|
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);
|
|
|
|
|
2011-09-20 00:45:05 +07:00
|
|
|
/*
 * Boot-time PCI initialisation: probe the device tree for every
 * supported PHB flavour and install the IOMMU DMA ops.
 */
void __init pnv_pci_init(void)
{
	struct device_node *np;

	pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);

	/* Without OPAL (e.g. in a simulator) there is nothing to probe */
	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

	/* Look for IODA IO-Hubs */
	for_each_compatible_node(np, NULL, "ibm,ioda-hub")
		pnv_pci_init_ioda_hub(np);

	/* Look for ioda2 built-in PHB3's */
	for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for ioda3 built-in PHB4's, we treat them as IODA2 */
	for_each_compatible_node(np, NULL, "ibm,ioda3-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for NPU PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
		pnv_pci_init_npu_phb(np);

	/*
	 * NPU2 PHBs are treated mostly like NPU PHBs, except that
	 * TCE kill requires an OPAL call.
	 */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-phb")
		pnv_pci_init_npu_phb(np);

	/* Configure IOMMU DMA hooks */
	set_pci_dma_ops(&dma_iommu_ops);
}
|
2013-11-21 13:43:14 +07:00
|
|
|
|
2014-07-15 19:22:24 +07:00
|
|
|
/* Register tce_iommu_bus_notifier_init as a subsys-sync initcall on powernv */
machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);
|