Merge branch 'pci/misc'
- Mark expected switch fall-throughs (Gustavo A. R. Silva)
- Remove unused pci_request_region_exclusive() (Johannes Thumshirn)
- Fix x86 PCI IRQ routing table memory leak (Wenwen Wang)
- Reset Lenovo ThinkPad P50 if firmware didn't do it on reboot (Lyude Paul)
- Add and use pci_dev_id() helper to simplify PCI_DEVID() usage (touches
  several places outside drivers/pci/) (Heiner Kallweit)
- Transition Mobiveil PCI maintenance to Karthikeyan M and Hou Zhiqiang
  (Subrahmanya Lingappa)

* pci/misc:
  MAINTAINERS: Add Karthikeyan Mitran and Hou Zhiqiang for Mobiveil PCI
  platform/chrome: chromeos_laptop: use pci_dev_id() helper
  stmmac: pci: Use pci_dev_id() helper
  iommu/vt-d: Use pci_dev_id() helper
  iommu/amd: Use pci_dev_id() helper
  drm/amdkfd: Use pci_dev_id() helper
  powerpc/powernv/npu: Use pci_dev_id() helper
  r8169: use pci_dev_id() helper
  PCI: Add pci_dev_id() helper
  PCI: Reset Lenovo ThinkPad P50 nvgpu at boot if necessary
  x86/PCI: Fix PCI IRQ routing table memory leak
  PCI: Remove unused pci_request_region_exclusive()
  PCI: Mark expected switch fall-throughs
commit 09fdd75c18
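
The recurring change in this branch is the pci_dev_id() helper, which wraps the PCI_DEVID() macro so callers no longer open-code the bus-number/devfn combination. Below is a minimal userspace sketch of that pattern, not code from the tree: struct pci_bus and struct pci_dev are stripped-down stand-ins kept only so the example compiles on its own, while PCI_DEVID() and pci_dev_id() mirror what this series adds to include/linux/pci.h.

/*
 * Illustrative sketch only.  The structs below are simplified stand-ins
 * for the kernel types; PCI_DEVID() is the macro from include/linux/pci.h
 * and pci_dev_id() is the helper introduced by this series.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint16_t u16;

#define PCI_DEVID(bus, devfn)	((((u16)(bus)) << 8) | (devfn))

struct pci_bus {
	unsigned char number;		/* bus number */
};

struct pci_dev {
	struct pci_bus *bus;		/* bus this device is on */
	unsigned int devfn;		/* encoded device and function index */
};

/* The helper added by "PCI: Add pci_dev_id() helper". */
static inline u16 pci_dev_id(struct pci_dev *dev)
{
	return PCI_DEVID(dev->bus->number, dev->devfn);
}

int main(void)
{
	struct pci_bus bus = { .number = 0x03 };
	struct pci_dev dev = { .bus = &bus, .devfn = 0x28 };	/* device 05.0 */

	/* Callers such as r8169 or stmmac previously open-coded this: */
	u16 old_style = PCI_DEVID(dev.bus->number, dev.devfn);

	/* ...and after this series simply call the helper: */
	u16 new_style = pci_dev_id(&dev);

	printf("old=%#06x new=%#06x\n", old_style, new_style);
	return old_style == new_style ? 0 : 1;
}

Both forms print 0x0328 for this example; that equivalence is all the per-driver conversions in the diff below rely on.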
@@ -11880,7 +11880,8 @@ F:	include/linux/switchtec.h
 F:	drivers/ntb/hw/mscc/
 
 PCI DRIVER FOR MOBIVEIL PCIE IP
-M:	Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+M:	Karthikeyan Mitran <m.karthikeyan@mobiveil.co.in>
+M:	Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
 L:	linux-pci@vger.kernel.org
 S:	Supported
 F:	Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
@@ -1213,9 +1213,8 @@ int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid,
 	 * Currently we only support radix and non-zero LPCR only makes sense
 	 * for hash tables so skiboot expects the LPCR parameter to be a zero.
 	 */
-	ret = opal_npu_map_lpar(nphb->opal_id,
-			PCI_DEVID(gpdev->bus->number, gpdev->devfn), lparid,
-			0 /* LPCR bits */);
+	ret = opal_npu_map_lpar(nphb->opal_id, pci_dev_id(gpdev), lparid,
+			0 /* LPCR bits */);
 	if (ret) {
 		dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret);
 		return ret;
@@ -1224,7 +1223,7 @@ int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid,
 	dev_dbg(&gpdev->dev, "init context opalid=%llu msr=%lx\n",
 			nphb->opal_id, msr);
 	ret = opal_npu_init_context(nphb->opal_id, 0/*__unused*/, msr,
-			PCI_DEVID(gpdev->bus->number, gpdev->devfn));
+			pci_dev_id(gpdev));
 	if (ret < 0)
 		dev_err(&gpdev->dev, "Failed to init context: %d\n", ret);
 	else
@@ -1258,7 +1257,7 @@ int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev)
 	dev_dbg(&gpdev->dev, "destroy context opalid=%llu\n",
 			nphb->opal_id);
 	ret = opal_npu_destroy_context(nphb->opal_id, 0/*__unused*/,
-			PCI_DEVID(gpdev->bus->number, gpdev->devfn));
+			pci_dev_id(gpdev));
 	if (ret < 0) {
 		dev_err(&gpdev->dev, "Failed to destroy context: %d\n", ret);
 		return ret;
@@ -1266,9 +1265,8 @@ int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev)
 
 	/* Set LPID to 0 anyway, just to be safe */
 	dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=0\n", nphb->opal_id);
-	ret = opal_npu_map_lpar(nphb->opal_id,
-			PCI_DEVID(gpdev->bus->number, gpdev->devfn), 0 /*LPID*/,
-			0 /* LPCR bits */);
+	ret = opal_npu_map_lpar(nphb->opal_id, pci_dev_id(gpdev), 0 /*LPID*/,
+			0 /* LPCR bits */);
 	if (ret)
 		dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret);
 
@@ -1119,6 +1119,8 @@ static const struct dmi_system_id pciirq_dmi_table[] __initconst = {
 
 void __init pcibios_irq_init(void)
 {
+	struct irq_routing_table *rtable = NULL;
+
 	DBG(KERN_DEBUG "PCI: IRQ init\n");
 
 	if (raw_pci_ops == NULL)
@@ -1129,8 +1131,10 @@ void __init pcibios_irq_init(void)
 	pirq_table = pirq_find_routing_table();
 
 #ifdef CONFIG_PCI_BIOS
-	if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN))
+	if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) {
 		pirq_table = pcibios_get_irq_routing_table();
+		rtable = pirq_table;
+	}
 #endif
 	if (pirq_table) {
 		pirq_peer_trick();
@@ -1145,8 +1149,10 @@ void __init pcibios_irq_init(void)
 		 * If we're using the I/O APIC, avoid using the PCI IRQ
 		 * routing table
 		 */
-		if (io_apic_assign_pci_irqs)
+		if (io_apic_assign_pci_irqs) {
+			kfree(rtable);
 			pirq_table = NULL;
+		}
 	}
 
 	x86_init.pci.fixup_irqs();
@@ -1270,8 +1270,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
 
 	dev->node_props.vendor_id = gpu->pdev->vendor;
 	dev->node_props.device_id = gpu->pdev->device;
-	dev->node_props.location_id = PCI_DEVID(gpu->pdev->bus->number,
-			gpu->pdev->devfn);
+	dev->node_props.location_id = pci_dev_id(gpu->pdev);
 	dev->node_props.max_engine_clk_fcompute =
 		amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd);
 	dev->node_props.max_engine_clk_ccompute =
@@ -165,7 +165,7 @@ static inline u16 get_pci_device_id(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 
-	return PCI_DEVID(pdev->bus->number, pdev->devfn);
+	return pci_dev_id(pdev);
 }
 
 static inline int get_acpihid_device_id(struct device *dev,
@@ -1391,7 +1391,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
 
 		/* pdev will be returned if device is not a vf */
 		pf_pdev = pci_physfn(pdev);
-		info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
+		info->pfsid = pci_dev_id(pf_pdev);
 	}
 
 #ifdef CONFIG_INTEL_IOMMU_SVM
@@ -424,7 +424,7 @@ static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
 		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias);
 	else
 		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
-			     PCI_DEVID(dev->bus->number, dev->devfn));
+			     pci_dev_id(dev));
 
 	return 0;
 }
@@ -7182,8 +7182,7 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
 	new_bus->priv = tp;
 	new_bus->parent = &pdev->dev;
 	new_bus->irq[0] = PHY_IGNORE_INTERRUPT;
-	snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x",
-		 PCI_DEVID(pdev->bus->number, pdev->devfn));
+	snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev));
 
 	new_bus->read = r8169_mdio_read_reg;
 	new_bus->write = r8169_mdio_write_reg;
@@ -204,7 +204,7 @@ static int quark_default_data(struct pci_dev *pdev,
 		ret = 1;
 	}
 
-	plat->bus_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
+	plat->bus_id = pci_dev_id(pdev);
 	plat->phy_addr = ret;
 	plat->interface = PHY_INTERFACE_MODE_RMII;
 
@@ -1338,7 +1338,7 @@ irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev,
 					  struct msi_desc *desc)
 {
 	return (irq_hw_number_t)desc->msi_attrib.entry_nr |
-		PCI_DEVID(dev->bus->number, dev->devfn) << 11 |
+		pci_dev_id(dev) << 11 |
 		(pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
 }
 
@@ -1508,7 +1508,7 @@ static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data)
 u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
 {
 	struct device_node *of_node;
-	u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+	u32 rid = pci_dev_id(pdev);
 
 	pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
 
@@ -1531,7 +1531,7 @@ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
 struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
 {
 	struct irq_domain *dom;
-	u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
+	u32 rid = pci_dev_id(pdev);
 
 	pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
 	dom = of_msi_map_get_device_domain(&pdev->dev, rid);
@@ -3706,31 +3706,6 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
 }
 EXPORT_SYMBOL(pci_request_region);
 
-/**
- * pci_request_region_exclusive - Reserved PCI I/O and memory resource
- * @pdev: PCI device whose resources are to be reserved
- * @bar: BAR to be reserved
- * @res_name: Name to be associated with resource.
- *
- * Mark the PCI region associated with PCI device @pdev BR @bar as
- * being reserved by owner @res_name. Do not access any
- * address inside the PCI regions unless this call returns
- * successfully.
- *
- * Returns 0 on success, or %EBUSY on error. A warning
- * message is also printed on failure.
- *
- * The key difference that _exclusive makes it that userspace is
- * explicitly not allowed to map the resource via /dev/mem or
- * sysfs.
- */
-int pci_request_region_exclusive(struct pci_dev *pdev, int bar,
-				 const char *res_name)
-{
-	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
-}
-EXPORT_SYMBOL(pci_request_region_exclusive);
-
 /**
  * pci_release_selected_regions - Release selected PCI I/O and memory resources
  * @pdev: PCI device whose resources were previously reserved
@@ -222,6 +222,7 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
 		}
 		/* If arch decided it can't, fall through... */
 #endif /* HAVE_PCI_MMAP */
+		/* fall through */
 	default:
 		ret = -EINVAL;
 		break;
@@ -5137,3 +5137,61 @@ SWITCHTEC_QUIRK(0x8573); /* PFXI 48XG3 */
 SWITCHTEC_QUIRK(0x8574); /* PFXI 64XG3 */
 SWITCHTEC_QUIRK(0x8575); /* PFXI 80XG3 */
 SWITCHTEC_QUIRK(0x8576); /* PFXI 96XG3 */
+
+/*
+ * On Lenovo Thinkpad P50 SKUs with a Nvidia Quadro M1000M, the BIOS does
+ * not always reset the secondary Nvidia GPU between reboots if the system
+ * is configured to use Hybrid Graphics mode. This results in the GPU
+ * being left in whatever state it was in during the *previous* boot, which
+ * causes spurious interrupts from the GPU, which in turn causes us to
+ * disable the wrong IRQ and end up breaking the touchpad. Unsurprisingly,
+ * this also completely breaks nouveau.
+ *
+ * Luckily, it seems a simple reset of the Nvidia GPU brings it back to a
+ * clean state and fixes all these issues.
+ *
+ * When the machine is configured in Dedicated display mode, the issue
+ * doesn't occur. Fortunately the GPU advertises NoReset+ when in this
+ * mode, so we can detect that and avoid resetting it.
+ */
+static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
+{
+	void __iomem *map;
+	int ret;
+
+	if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO ||
+	    pdev->subsystem_device != 0x222e ||
+	    !pdev->reset_fn)
+		return;
+
+	if (pci_enable_device_mem(pdev))
+		return;
+
+	/*
+	 * Based on nvkm_device_ctor() in
+	 * drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+	 */
+	map = pci_iomap(pdev, 0, 0x23000);
+	if (!map) {
+		pci_err(pdev, "Can't map MMIO space\n");
+		goto out_disable;
+	}
+
+	/*
+	 * Make sure the GPU looks like it's been POSTed before resetting
+	 * it.
+	 */
+	if (ioread32(map + 0x2240c) & 0x2) {
+		pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
+		ret = pci_reset_function(pdev);
+		if (ret < 0)
+			pci_err(pdev, "Failed to reset GPU: %d\n", ret);
+	}
+
+	iounmap(map);
+out_disable:
+	pci_disable_device(pdev);
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
+			      PCI_CLASS_DISPLAY_VGA, 8,
+			      quirk_reset_lenovo_thinkpad_p50_nvgpu);
@@ -33,7 +33,7 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
 	struct pci_bus *bus;
 	int ret;
 
-	ret = fn(pdev, PCI_DEVID(pdev->bus->number, pdev->devfn), data);
+	ret = fn(pdev, pci_dev_id(pdev), data);
 	if (ret)
 		return ret;
 
@@ -88,9 +88,7 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
 				return ret;
 			continue;
 		case PCI_EXP_TYPE_PCIE_BRIDGE:
-			ret = fn(tmp,
-				 PCI_DEVID(tmp->bus->number,
-					   tmp->devfn), data);
+			ret = fn(tmp, pci_dev_id(tmp), data);
 			if (ret)
 				return ret;
 			continue;
@@ -101,9 +99,7 @@ int pci_for_each_dma_alias(struct pci_dev *pdev,
 					 PCI_DEVID(tmp->subordinate->number,
 						   PCI_DEVFN(0, 0)), data);
 			else
-				ret = fn(tmp,
-					 PCI_DEVID(tmp->bus->number,
-						   tmp->devfn), data);
+				ret = fn(tmp, pci_dev_id(tmp), data);
 			if (ret)
 				return ret;
 		}
@@ -1104,7 +1104,7 @@ static void __ref pcifront_backend_changed(struct xenbus_device *xdev,
 	case XenbusStateClosed:
 		if (xdev->state == XenbusStateClosed)
 			break;
-		/* Missed the backend's CLOSING state -- fallthrough */
+		/* fall through - Missed the backend's CLOSING state. */
 	case XenbusStateClosing:
 		dev_warn(&xdev->dev, "backend going away!\n");
 		pcifront_try_disconnect(pdev);
@@ -125,7 +125,7 @@ static bool chromeos_laptop_match_adapter_devid(struct device *dev, u32 devid)
 		return false;
 
 	pdev = to_pci_dev(dev);
-	return devid == PCI_DEVID(pdev->bus->number, pdev->devfn);
+	return devid == pci_dev_id(pdev);
 }
 
 static void chromeos_laptop_check_adapter(struct i2c_adapter *adapter)
@@ -598,6 +598,11 @@ struct pci_bus {
 
 #define to_pci_bus(n)	container_of(n, struct pci_bus, dev)
 
+static inline u16 pci_dev_id(struct pci_dev *dev)
+{
+	return PCI_DEVID(dev->bus->number, dev->devfn);
+}
+
 /*
  * Returns true if the PCI bus is root (behind host-PCI bridge),
  * false otherwise
@@ -1235,7 +1240,6 @@ int __must_check pci_request_regions(struct pci_dev *, const char *);
 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
 void pci_release_regions(struct pci_dev *);
 int __must_check pci_request_region(struct pci_dev *, int, const char *);
-int __must_check pci_request_region_exclusive(struct pci_dev *, int, const char *);
 void pci_release_region(struct pci_dev *, int);
 int pci_request_selected_regions(struct pci_dev *, int, const char *);
 int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *);