commit 2bedea8f26
This patch removes the bcma_core_pci_power_save() call from
the bcma_core_pci_{up,down}() functions, because it tries to
schedule and must therefore be called from non-atomic context.
The function bcma_core_pci_power_save() is now exported so the
calling module can invoke it explicitly from non-atomic context.
This fixes the 'scheduling while atomic' issue reported by
Tod Jackson and Joe Perches.
[ 13.210710] BUG: scheduling while atomic: dhcpcd/1800/0x00000202
[ 13.210718] Modules linked in: brcmsmac nouveau coretemp kvm_intel kvm cordic brcmutil bcma dell_wmi atl1c ttm mxm_wmi wmi
[ 13.210756] CPU: 2 PID: 1800 Comm: dhcpcd Not tainted 3.11.0-wl #1
[ 13.210762] Hardware name: Alienware M11x R2/M11x R2, BIOS A04 11/23/2010
[ 13.210767] ffff880177c92c40 ffff880170fd1948 ffffffff8169af5b 0000000000000007
[ 13.210777] ffff880170fd1ab0 ffff880170fd1958 ffffffff81697ee2 ffff880170fd19d8
[ 13.210785] ffffffff816a19f5 00000000000f4240 000000000000d080 ffff880170fd1fd8
[ 13.210794] Call Trace:
[ 13.210813] [<ffffffff8169af5b>] dump_stack+0x4f/0x84
[ 13.210826] [<ffffffff81697ee2>] __schedule_bug+0x43/0x51
[ 13.210837] [<ffffffff816a19f5>] __schedule+0x6e5/0x810
[ 13.210845] [<ffffffff816a1c34>] schedule+0x24/0x70
[ 13.210855] [<ffffffff816a04fc>] schedule_hrtimeout_range_clock+0x10c/0x150
[ 13.210867] [<ffffffff810684e0>] ? update_rmtp+0x60/0x60
[ 13.210877] [<ffffffff8106915f>] ? hrtimer_start_range_ns+0xf/0x20
[ 13.210887] [<ffffffff816a054e>] schedule_hrtimeout_range+0xe/0x10
[ 13.210897] [<ffffffff8104f6fb>] usleep_range+0x3b/0x40
[ 13.210910] [<ffffffffa00371af>] bcma_pcie_mdio_set_phy.isra.3+0x4f/0x80 [bcma]
[ 13.210921] [<ffffffffa003729f>] bcma_pcie_mdio_write.isra.4+0xbf/0xd0 [bcma]
[ 13.210932] [<ffffffffa0037498>] bcma_pcie_mdio_writeread.isra.6.constprop.13+0x18/0x30 [bcma]
[ 13.210942] [<ffffffffa00374ee>] bcma_core_pci_power_save+0x3e/0x80 [bcma]
[ 13.210953] [<ffffffffa003765d>] bcma_core_pci_up+0x2d/0x60 [bcma]
[ 13.210975] [<ffffffffa03dc17c>] brcms_c_up+0xfc/0x430 [brcmsmac]
[ 13.210989] [<ffffffffa03d1a7d>] brcms_up+0x1d/0x20 [brcmsmac]
[ 13.211003] [<ffffffffa03d2498>] brcms_ops_start+0x298/0x340 [brcmsmac]
[ 13.211020] [<ffffffff81600a12>] ? cfg80211_netdev_notifier_call+0xd2/0x5f0
[ 13.211030] [<ffffffff815fa53d>] ? packet_notifier+0xad/0x1d0
[ 13.211064] [<ffffffff81656e75>] ieee80211_do_open+0x325/0xf80
[ 13.211076] [<ffffffff8106ac09>] ? __raw_notifier_call_chain+0x9/0x10
[ 13.211086] [<ffffffff81657b41>] ieee80211_open+0x71/0x80
[ 13.211101] [<ffffffff81526267>] __dev_open+0x87/0xe0
[ 13.211109] [<ffffffff8152650c>] __dev_change_flags+0x9c/0x180
[ 13.211117] [<ffffffff815266a3>] dev_change_flags+0x23/0x70
[ 13.211127] [<ffffffff8158cd68>] devinet_ioctl+0x5b8/0x6a0
[ 13.211136] [<ffffffff8158d5c5>] inet_ioctl+0x75/0x90
[ 13.211147] [<ffffffff8150b38b>] sock_do_ioctl+0x2b/0x70
[ 13.211155] [<ffffffff8150b681>] sock_ioctl+0x71/0x2a0
[ 13.211169] [<ffffffff8114ed47>] do_vfs_ioctl+0x87/0x520
[ 13.211180] [<ffffffff8113f159>] ? ____fput+0x9/0x10
[ 13.211198] [<ffffffff8106228c>] ? task_work_run+0x9c/0xd0
[ 13.211202] [<ffffffff8114f271>] SyS_ioctl+0x91/0xb0
[ 13.211208] [<ffffffff816aa252>] system_call_fastpath+0x16/0x1b
[ 13.211217] NOHZ: local_softirq_pending 202
The issue was introduced in the v3.11 kernel by the following commit:
commit aa51e598d0
Author: Hauke Mehrtens <hauke@hauke-m.de>
Date: Sat Aug 24 00:32:31 2013 +0200
brcmsmac: use bcma PCIe up and down functions
replace the calls to bcma_core_pci_extend_L1timer() by calls to the
newly introduced bcma_core_pci_up() and bcma_core_pci_down()
Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Cc: Arend van Spriel <arend@broadcom.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
This fix has been discussed with Hauke Mehrtens [1], selecting
option 3), and is intended for v3.12.
Ref:
[1] http://mid.gmane.org/5239B12D.3040206@hauke-m.de
Cc: <stable@vger.kernel.org> # 3.11.x
Cc: Tod Jackson <tod.jackson@gmail.com>
Cc: Joe Perches <joe@perches.com>
Cc: Rafal Milecki <zajec5@gmail.com>
Cc: Hauke Mehrtens <hauke@hauke-m.de>
Reviewed-by: Hante Meuleman <meuleman@broadcom.com>
Signed-off-by: Arend van Spriel <arend@broadcom.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
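
For reference, the calling pattern this change enables looks roughly like the sketch below: bcma_core_pci_up()/bcma_core_pci_down() stay callable under a spinlock, while the now-exported bcma_core_pci_power_save() is called separately from process context where sleeping is allowed. The struct wl_info, wl_up()/wl_down() names and the locking shown here are illustrative assumptions, not code from this patch.

#include <linux/spinlock.h>
#include <linux/bcma/bcma.h>

/* Hypothetical driver state, defined only for this example. */
struct wl_info {
	struct bcma_device *core;	/* bcma core this driver is bound to */
	spinlock_t lock;		/* protects the hardware up/down path */
};

static int wl_up(struct wl_info *wl)
{
	/* Process context: may sleep, so do the SERDES power-save
	 * programming here via the exported helper. */
	bcma_core_pci_power_save(wl->core->bus, true);

	spin_lock_bh(&wl->lock);
	bcma_core_pci_up(wl->core->bus);	/* register writes only, atomic-safe */
	/* ... bring the MAC/PHY up while holding the lock ... */
	spin_unlock_bh(&wl->lock);

	return 0;
}

static void wl_down(struct wl_info *wl)
{
	spin_lock_bh(&wl->lock);
	/* ... take the MAC/PHY down while holding the lock ... */
	bcma_core_pci_down(wl->core->bus);
	spin_unlock_bh(&wl->lock);

	/* Back in sleepable context: drop the SERDES into power save. */
	bcma_core_pci_power_save(wl->core->bus, false);
}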
336 lines
8.9 KiB
C
/*
 * Broadcom specific AMBA
 * PCI Core
 *
 * Copyright 2005, 2011, Broadcom Corporation
 * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
 * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/export.h>
#include <linux/bcma/bcma.h>

/**************************************************
 * R/W ops.
 **************************************************/

u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address)
{
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);
	return pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_DATA);
}

static void bcma_pcie_write(struct bcma_drv_pci *pc, u32 address, u32 data)
{
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);
	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_DATA, data);
}

static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u16 phy)
{
	u32 v;
	int i;

	v = BCMA_CORE_PCI_MDIODATA_START;
	v |= BCMA_CORE_PCI_MDIODATA_WRITE;
	v |= (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
	      BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
	v |= (BCMA_CORE_PCI_MDIODATA_BLK_ADDR <<
	      BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
	v |= BCMA_CORE_PCI_MDIODATA_TA;
	v |= (phy << 4);
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);

	udelay(10);
	for (i = 0; i < 200; i++) {
		v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
		if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
			break;
		usleep_range(1000, 2000);
	}
}

static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u16 device, u8 address)
{
	int max_retries = 10;
	u16 ret = 0;
	u32 v;
	int i;

	/* enable mdio access to SERDES */
	v = BCMA_CORE_PCI_MDIOCTL_PREAM_EN;
	v |= BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL;
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, v);

	if (pc->core->id.rev >= 10) {
		max_retries = 200;
		bcma_pcie_mdio_set_phy(pc, device);
		v = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
		     BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
	} else {
		v = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD);
		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
	}

	v |= BCMA_CORE_PCI_MDIODATA_START;
	v |= BCMA_CORE_PCI_MDIODATA_READ;
	v |= BCMA_CORE_PCI_MDIODATA_TA;

	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);
	/* Wait for the device to complete the transaction */
	udelay(10);
	for (i = 0; i < max_retries; i++) {
		v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
		if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE) {
			udelay(10);
			ret = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_DATA);
			break;
		}
		usleep_range(1000, 2000);
	}
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
	return ret;
}

static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u16 device,
				 u8 address, u16 data)
{
	int max_retries = 10;
	u32 v;
	int i;

	/* enable mdio access to SERDES */
	v = BCMA_CORE_PCI_MDIOCTL_PREAM_EN;
	v |= BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL;
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, v);

	if (pc->core->id.rev >= 10) {
		max_retries = 200;
		bcma_pcie_mdio_set_phy(pc, device);
		v = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
		     BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
	} else {
		v = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD);
		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
	}

	v |= BCMA_CORE_PCI_MDIODATA_START;
	v |= BCMA_CORE_PCI_MDIODATA_WRITE;
	v |= BCMA_CORE_PCI_MDIODATA_TA;
	v |= data;
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);
	/* Wait for the device to complete the transaction */
	udelay(10);
	for (i = 0; i < max_retries; i++) {
		v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
		if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
			break;
		usleep_range(1000, 2000);
	}
	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
}

static u16 bcma_pcie_mdio_writeread(struct bcma_drv_pci *pc, u16 device,
				    u8 address, u16 data)
{
	bcma_pcie_mdio_write(pc, device, address, data);
	return bcma_pcie_mdio_read(pc, device, address);
}

/**************************************************
 * Workarounds.
 **************************************************/

static u8 bcma_pcicore_polarity_workaround(struct bcma_drv_pci *pc)
{
	u32 tmp;

	tmp = bcma_pcie_read(pc, BCMA_CORE_PCI_PLP_STATUSREG);
	if (tmp & BCMA_CORE_PCI_PLP_POLARITYINV_STAT)
		return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE |
		       BCMA_CORE_PCI_SERDES_RX_CTRL_POLARITY;
	else
		return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE;
}

static void bcma_pcicore_serdes_workaround(struct bcma_drv_pci *pc)
{
	u16 tmp;

	bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_RX,
			     BCMA_CORE_PCI_SERDES_RX_CTRL,
			     bcma_pcicore_polarity_workaround(pc));
	tmp = bcma_pcie_mdio_read(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
				  BCMA_CORE_PCI_SERDES_PLL_CTRL);
	if (tmp & BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN)
		bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
				     BCMA_CORE_PCI_SERDES_PLL_CTRL,
				     tmp & ~BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN);
}

static void bcma_core_pci_fixcfg(struct bcma_drv_pci *pc)
{
	struct bcma_device *core = pc->core;
	u16 val16, core_index;
	uint regoff;

	regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_PI_OFFSET);
	core_index = (u16)core->core_index;

	val16 = pcicore_read16(pc, regoff);
	if (((val16 & BCMA_CORE_PCI_SPROM_PI_MASK) >> BCMA_CORE_PCI_SPROM_PI_SHIFT)
	     != core_index) {
		val16 = (core_index << BCMA_CORE_PCI_SPROM_PI_SHIFT) |
			(val16 & ~BCMA_CORE_PCI_SPROM_PI_MASK);
		pcicore_write16(pc, regoff, val16);
	}
}

/* Fix MISC config to allow coming out of L2/L3-Ready state w/o PRST */
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
{
	u16 val16;
	uint regoff;

	regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_MISC_CONFIG);

	val16 = pcicore_read16(pc, regoff);

	if (!(val16 & BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST)) {
		val16 |= BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST;
		pcicore_write16(pc, regoff, val16);
	}
}

/**************************************************
 * Init.
 **************************************************/

static void bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
{
	bcma_core_pci_fixcfg(pc);
	bcma_pcicore_serdes_workaround(pc);
	bcma_core_pci_config_fixup(pc);
}

void bcma_core_pci_init(struct bcma_drv_pci *pc)
{
	if (pc->setup_done)
		return;

#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
	pc->hostmode = bcma_core_pci_is_in_hostmode(pc);
	if (pc->hostmode)
		bcma_core_pci_hostmode_init(pc);
#endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */

	if (!pc->hostmode)
		bcma_core_pci_clientmode_init(pc);
}

void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
{
	struct bcma_drv_pci *pc;
	u16 data;

	if (bus->hosttype != BCMA_HOSTTYPE_PCI)
		return;

	pc = &bus->drv_pci[0];

	if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
		data = up ? 0x74 : 0x7C;
		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
					 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
					 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
	} else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
		data = up ? 0x75 : 0x7D;
		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
					 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
					 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
	}
}
EXPORT_SYMBOL_GPL(bcma_core_pci_power_save);

int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
			  bool enable)
{
	struct pci_dev *pdev;
	u32 coremask, tmp;
	int err = 0;

	if (!pc || core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
		/* This bcma device is not on a PCI host-bus. So the IRQs are
		 * not routed through the PCI core.
		 * So we must not enable routing through the PCI core. */
		goto out;
	}

	pdev = pc->core->bus->host_pci;

	err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
	if (err)
		goto out;

	coremask = BIT(core->core_index) << 8;
	if (enable)
		tmp |= coremask;
	else
		tmp &= ~coremask;

	err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);

out:
	return err;
}
EXPORT_SYMBOL_GPL(bcma_core_pci_irq_ctl);

static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
{
	u32 w;

	w = bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
	if (extend)
		w |= BCMA_CORE_PCI_ASPMTIMER_EXTEND;
	else
		w &= ~BCMA_CORE_PCI_ASPMTIMER_EXTEND;
	bcma_pcie_write(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG, w);
	bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
}

void bcma_core_pci_up(struct bcma_bus *bus)
{
	struct bcma_drv_pci *pc;

	if (bus->hosttype != BCMA_HOSTTYPE_PCI)
		return;

	pc = &bus->drv_pci[0];

	bcma_core_pci_extend_L1timer(pc, true);
}
EXPORT_SYMBOL_GPL(bcma_core_pci_up);

void bcma_core_pci_down(struct bcma_bus *bus)
{
	struct bcma_drv_pci *pc;

	if (bus->hosttype != BCMA_HOSTTYPE_PCI)
		return;

	pc = &bus->drv_pci[0];

	bcma_core_pci_extend_L1timer(pc, false);
}
EXPORT_SYMBOL_GPL(bcma_core_pci_down);
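
For completeness, here is a minimal sketch of how a core driver might use the exported bcma_core_pci_irq_ctl() shown above to route its interrupt through the PCI core; the wrapper name and the assumption that the caller holds a struct bcma_device pointer are illustrative only.

/* Enable (or disable) routing of 'core's IRQ through the PCI core. */
static int example_core_irq_routing(struct bcma_device *core, bool enable)
{
	return bcma_core_pci_irq_ctl(&core->bus->drv_pci[0], core, enable);
}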