Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next

John Linville says:

====================
Several drivers see updates: mwifiex, ath9k, iwlwifi, brcmsmac,
wlcore/wl12xx/wl18xx, and a handful of others.  The bcma bus got a
lot of attention from Hauke Mehrtens.  The cfg80211 component gets
a flurry of patches for multi-channel support, and the mac80211
component gets the first few VHT (11ac) and 60GHz (11ad) patches.
This also includes the removal of the iwmc3200 drivers, since the
hardware never became available to normal people.

Additionally, the NFC subsystem gets a series of updates.  According to
Samuel, "Here are the interesting bits:

- Better error management for the HCI stack.
- An LLCP "late" binding implementation for better NFC SAP usage. SAPs are
  now reserved only when there is a client for them.
- Support for the Sony RC-S360 (a.k.a. PaSoRi) pn533-based dongle. We can read
  and write NFC tags and also establish a p2p link with this dongle now.
- A few LLCP fixes."

Finally, this includes another pull of the fixes from the wireless
tree in order to resolve some merge issues.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2012-07-13 23:02:28 -07:00
commit 921a678cb6
198 changed files with 4711 additions and 15838 deletions

View File

@ -178,3 +178,36 @@ ANY_GET_PARAMETER to the reader A gate to get information on the target
that was discovered).
Typically, such an event will be propagated to NFC Core from MSGRXWQ context.
Error management
----------------
Errors that occur synchronously with the execution of an NFC Core request are
simply returned as the execution result of the request. These are easy.
Errors that occur asynchronously (e.g. in a background protocol handling thread)
must be reported so that upper layers know that something went wrong below and
that expected events will probably never happen.
Handling of these errors is done as follows:
- driver (pn544) fails to deliver an incoming frame: it stores the error so
that any subsequent call into the driver will return this error. It then calls
the standard nfc_shdlc_recv_frame() with a NULL argument to report the problem
to the layer above. shdlc stores an EREMOTEIO sticky status, which in turn
triggers SMW to report upward.
- SMW is basically a background thread to handle incoming and outgoing shdlc
frames. This thread will also check the shdlc sticky status and report to HCI
when it discovers it is not able to run anymore because of an unrecoverable
error that happened within shdlc or below. If the problem occurs during shdlc
connection, the error is reported through the connect completion.
- HCI: if an internal HCI error happens (a frame is lost), or a lower layer
reports an error to HCI, HCI will either complete the currently executing
command with that error, or notify NFC Core directly if no command is executing.
- NFC Core: when NFC Core is notified of an error from below and polling is
active, it sends a tag-discovered event with an empty tag list to user space to
signal that the poll operation will never be able to detect a tag. If polling
is not active and the error was sticky, lower levels will return it at the next
invocation.
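
As a rough illustration of the driver-side pattern described above (store a
sticky error, then push a NULL frame upward), here is a minimal sketch. Only
nfc_shdlc_recv_frame() comes from the scheme above; the struct, field and
function names, and the header path, are assumptions for illustration.

#include <net/nfc/shdlc.h>		/* assumed location of struct nfc_shdlc */

struct example_dev {			/* hypothetical driver context */
	struct nfc_shdlc *shdlc;
	int hw_error;			/* sticky error, checked at every driver entry point */
};

static void example_rx_failed(struct example_dev *dev, int err)
{
	/* Remember the error so that any later call into the driver fails too. */
	dev->hw_error = err;

	/* Push a NULL frame upward: shdlc records its EREMOTEIO sticky status
	 * and SMW then reports the problem to HCI. */
	nfc_shdlc_recv_frame(dev->shdlc, NULL);
}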

View File

@ -3661,14 +3661,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git
S: Supported
F: drivers/net/wireless/iwlwifi/
INTEL WIRELESS MULTICOMM 3200 WIFI (iwmc3200wifi)
M: Samuel Ortiz <samuel.ortiz@intel.com>
M: Intel Linux Wireless <ilw@linux.intel.com>
L: linux-wireless@vger.kernel.org
S: Supported
W: http://wireless.kernel.org/en/users/Drivers/iwmc3200wifi
F: drivers/net/wireless/iwmc3200wifi/
INTEL MANAGEMENT ENGINE (mei)
M: Tomas Winkler <tomas.winkler@intel.com>
L: linux-kernel@vger.kernel.org

View File

@ -10,6 +10,15 @@
#define BCMA_CORE_SIZE 0x1000
#define bcma_err(bus, fmt, ...) \
pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
#define bcma_warn(bus, fmt, ...) \
pr_warn("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
#define bcma_info(bus, fmt, ...) \
pr_info("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
#define bcma_debug(bus, fmt, ...) \
pr_debug("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
struct bcma_bus;
/* main.c */
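
As a usage sketch (not part of this commit), the new macros take the bus as
their first argument so every message is prefixed with the bus number, e.g.
"bus0: ...". The snippet below is hypothetical; it only relies on fields
already used elsewhere in these diffs (core->bus, core->id.id).

/* Hypothetical snippet: report through the bus-aware helpers instead of
 * bare pr_err()/pr_debug(). */
static void example_report(struct bcma_device *core)
{
	bcma_err(core->bus, "core 0x%03X failed to respond\n", core->id.id);
	bcma_debug(core->bus, "retrying core 0x%03X\n", core->id.id);
}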

View File

@ -75,7 +75,7 @@ void bcma_core_set_clockmode(struct bcma_device *core,
udelay(10);
}
if (i)
pr_err("HT force timeout\n");
bcma_err(core->bus, "HT force timeout\n");
break;
case BCMA_CLKMODE_DYNAMIC:
bcma_set32(core, BCMA_CLKCTLST, ~BCMA_CLKCTLST_FORCEHT);
@ -102,9 +102,9 @@ void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, bool on)
udelay(10);
}
if (i)
pr_err("PLL enable timeout\n");
bcma_err(core->bus, "PLL enable timeout\n");
} else {
pr_warn("Disabling PLL not supported yet!\n");
bcma_warn(core->bus, "Disabling PLL not supported yet!\n");
}
}
EXPORT_SYMBOL_GPL(bcma_core_pll_ctl);
@ -120,8 +120,8 @@ u32 bcma_core_dma_translation(struct bcma_device *core)
else
return BCMA_DMA_TRANSLATION_DMA32_CMT;
default:
pr_err("DMA translation unknown for host %d\n",
core->bus->hosttype);
bcma_err(core->bus, "DMA translation unknown for host %d\n",
core->bus->hosttype);
}
return BCMA_DMA_TRANSLATION_NONE;
}

View File

@ -44,7 +44,7 @@ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
if (cc->capabilities & BCMA_CC_CAP_PMU)
bcma_pmu_init(cc);
if (cc->capabilities & BCMA_CC_CAP_PCTL)
pr_err("Power control not implemented!\n");
bcma_err(cc->core->bus, "Power control not implemented!\n");
if (cc->core->id.rev >= 16) {
if (cc->core->bus->sprom.leddc_on_time &&
@ -137,8 +137,7 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
| BCMA_CC_CORECTL_UARTCLKEN);
}
} else {
pr_err("serial not supported on this device ccrev: 0x%x\n",
ccrev);
bcma_err(cc->core->bus, "serial not supported on this device ccrev: 0x%x\n", ccrev);
return;
}

View File

@ -3,7 +3,8 @@
* ChipCommon Power Management Unit driver
*
* Copyright 2009, Michael Buesch <m@bues.ch>
* Copyright 2007, Broadcom Corporation
* Copyright 2007, 2011, Broadcom Corporation
* Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
*
* Licensed under the GNU/GPL. See COPYING for details.
*/
@ -54,39 +55,19 @@ void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
}
EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset);
static void bcma_pmu_pll_init(struct bcma_drv_cc *cc)
{
struct bcma_bus *bus = cc->core->bus;
switch (bus->chipinfo.id) {
case 0x4313:
case 0x4331:
case 43224:
case 43225:
break;
default:
pr_err("PLL init unknown for device 0x%04X\n",
bus->chipinfo.id);
}
}
static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
{
struct bcma_bus *bus = cc->core->bus;
u32 min_msk = 0, max_msk = 0;
switch (bus->chipinfo.id) {
case 0x4313:
case BCMA_CHIP_ID_BCM4313:
min_msk = 0x200D;
max_msk = 0xFFFF;
break;
case 0x4331:
case 43224:
case 43225:
break;
default:
pr_err("PMU resource config unknown for device 0x%04X\n",
bus->chipinfo.id);
bcma_debug(bus, "PMU resource config unknown or not needed for device 0x%04X\n",
bus->chipinfo.id);
}
/* Set the resource masks. */
@ -94,22 +75,9 @@ static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
bcma_cc_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk);
if (max_msk)
bcma_cc_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk);
}
void bcma_pmu_swreg_init(struct bcma_drv_cc *cc)
{
struct bcma_bus *bus = cc->core->bus;
switch (bus->chipinfo.id) {
case 0x4313:
case 0x4331:
case 43224:
case 43225:
break;
default:
pr_err("PMU switch/regulators init unknown for device "
"0x%04X\n", bus->chipinfo.id);
}
/* Add some delay; allow resources to come up and settle. */
mdelay(2);
}
/* Disable to allow reading SPROM. Don't know the advantages of enabling it. */
@ -123,8 +91,11 @@ void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable)
val |= BCMA_CHIPCTL_4331_EXTPA_EN;
if (bus->chipinfo.pkg == 9 || bus->chipinfo.pkg == 11)
val |= BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5;
else if (bus->chipinfo.rev > 0)
val |= BCMA_CHIPCTL_4331_EXTPA_EN2;
} else {
val &= ~BCMA_CHIPCTL_4331_EXTPA_EN;
val &= ~BCMA_CHIPCTL_4331_EXTPA_EN2;
val &= ~BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5;
}
bcma_cc_write32(cc, BCMA_CC_CHIPCTL, val);
@ -135,28 +106,38 @@ void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
struct bcma_bus *bus = cc->core->bus;
switch (bus->chipinfo.id) {
case 0x4313:
bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7);
case BCMA_CHIP_ID_BCM4313:
/* enable 12 mA drive strength for 4313 and set chipControl
register bit 1 */
bcma_chipco_chipctl_maskset(cc, 0,
BCMA_CCTRL_4313_12MA_LED_DRIVE,
BCMA_CCTRL_4313_12MA_LED_DRIVE);
break;
case 0x4331:
case 43431:
case BCMA_CHIP_ID_BCM4331:
case BCMA_CHIP_ID_BCM43431:
/* Ext PA lines must be enabled for tx on BCM4331 */
bcma_chipco_bcm4331_ext_pa_lines_ctl(cc, true);
break;
case 43224:
case BCMA_CHIP_ID_BCM43224:
case BCMA_CHIP_ID_BCM43421:
/* enable 12 mA drive strength for 43224 and set chipControl
register bit 15 */
if (bus->chipinfo.rev == 0) {
pr_err("Workarounds for 43224 rev 0 not fully "
"implemented\n");
bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x00F000F0);
bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL,
BCMA_CCTRL_43224_GPIO_TOGGLE,
BCMA_CCTRL_43224_GPIO_TOGGLE);
bcma_chipco_chipctl_maskset(cc, 0,
BCMA_CCTRL_43224A0_12MA_LED_DRIVE,
BCMA_CCTRL_43224A0_12MA_LED_DRIVE);
} else {
bcma_chipco_chipctl_maskset(cc, 0, ~0, 0xF0);
bcma_chipco_chipctl_maskset(cc, 0,
BCMA_CCTRL_43224B0_12MA_LED_DRIVE,
BCMA_CCTRL_43224B0_12MA_LED_DRIVE);
}
break;
case 43225:
break;
default:
pr_err("Workarounds unknown for device 0x%04X\n",
bus->chipinfo.id);
bcma_debug(bus, "Workarounds unknown or not needed for device 0x%04X\n",
bus->chipinfo.id);
}
}
@ -167,8 +148,8 @@ void bcma_pmu_init(struct bcma_drv_cc *cc)
pmucap = bcma_cc_read32(cc, BCMA_CC_PMU_CAP);
cc->pmu.rev = (pmucap & BCMA_CC_PMU_CAP_REVISION);
pr_debug("Found rev %u PMU (capabilities 0x%08X)\n", cc->pmu.rev,
pmucap);
bcma_debug(cc->core->bus, "Found rev %u PMU (capabilities 0x%08X)\n",
cc->pmu.rev, pmucap);
if (cc->pmu.rev == 1)
bcma_cc_mask32(cc, BCMA_CC_PMU_CTL,
@ -177,12 +158,7 @@ void bcma_pmu_init(struct bcma_drv_cc *cc)
bcma_cc_set32(cc, BCMA_CC_PMU_CTL,
BCMA_CC_PMU_CTL_NOILPONW);
if (cc->core->id.id == 0x4329 && cc->core->id.rev == 2)
pr_err("Fix for 4329b0 bad LPOM state not implemented!\n");
bcma_pmu_pll_init(cc);
bcma_pmu_resources_init(cc);
bcma_pmu_swreg_init(cc);
bcma_pmu_workarounds(cc);
}
@ -191,23 +167,22 @@ u32 bcma_pmu_alp_clock(struct bcma_drv_cc *cc)
struct bcma_bus *bus = cc->core->bus;
switch (bus->chipinfo.id) {
case 0x4716:
case 0x4748:
case 47162:
case 0x4313:
case 0x5357:
case 0x4749:
case 53572:
case BCMA_CHIP_ID_BCM4716:
case BCMA_CHIP_ID_BCM4748:
case BCMA_CHIP_ID_BCM47162:
case BCMA_CHIP_ID_BCM4313:
case BCMA_CHIP_ID_BCM5357:
case BCMA_CHIP_ID_BCM4749:
case BCMA_CHIP_ID_BCM53572:
/* always 20Mhz */
return 20000 * 1000;
case 0x5356:
case 0x5300:
case BCMA_CHIP_ID_BCM5356:
case BCMA_CHIP_ID_BCM4706:
/* always 25Mhz */
return 25000 * 1000;
default:
pr_warn("No ALP clock specified for %04X device, "
"pmu rev. %d, using default %d Hz\n",
bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_ALP_CLOCK);
bcma_warn(bus, "No ALP clock specified for %04X device, pmu rev. %d, using default %d Hz\n",
bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_ALP_CLOCK);
}
return BCMA_CC_PMU_ALP_CLOCK;
}
@ -224,7 +199,8 @@ static u32 bcma_pmu_clock(struct bcma_drv_cc *cc, u32 pll0, u32 m)
BUG_ON(!m || m > 4);
if (bus->chipinfo.id == 0x5357 || bus->chipinfo.id == 0x4749) {
if (bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
bus->chipinfo.id == BCMA_CHIP_ID_BCM4749) {
/* Detect failure in clock setting */
tmp = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
if (tmp & 0x40000)
@ -250,33 +226,62 @@ static u32 bcma_pmu_clock(struct bcma_drv_cc *cc, u32 pll0, u32 m)
return (fc / div) * 1000000;
}
static u32 bcma_pmu_clock_bcm4706(struct bcma_drv_cc *cc, u32 pll0, u32 m)
{
u32 tmp, ndiv, p1div, p2div;
u32 clock;
BUG_ON(!m || m > 4);
/* Get N, P1 and P2 dividers to determine CPU clock */
tmp = bcma_chipco_pll_read(cc, pll0 + BCMA_CC_PMU6_4706_PROCPLL_OFF);
ndiv = (tmp & BCMA_CC_PMU6_4706_PROC_NDIV_INT_MASK)
>> BCMA_CC_PMU6_4706_PROC_NDIV_INT_SHIFT;
p1div = (tmp & BCMA_CC_PMU6_4706_PROC_P1DIV_MASK)
>> BCMA_CC_PMU6_4706_PROC_P1DIV_SHIFT;
p2div = (tmp & BCMA_CC_PMU6_4706_PROC_P2DIV_MASK)
>> BCMA_CC_PMU6_4706_PROC_P2DIV_SHIFT;
tmp = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
if (tmp & BCMA_CC_CHIPST_4706_PKG_OPTION)
/* Low cost bonding: Fixed reference clock 25MHz and m = 4 */
clock = (25000000 / 4) * ndiv * p2div / p1div;
else
/* Fixed reference clock 25MHz and m = 2 */
clock = (25000000 / 2) * ndiv * p2div / p1div;
if (m == BCMA_CC_PMU5_MAINPLL_SSB)
clock = clock / 4;
return clock;
}
/* query bus clock frequency for PMU-enabled chipcommon */
u32 bcma_pmu_get_clockcontrol(struct bcma_drv_cc *cc)
{
struct bcma_bus *bus = cc->core->bus;
switch (bus->chipinfo.id) {
case 0x4716:
case 0x4748:
case 47162:
case BCMA_CHIP_ID_BCM4716:
case BCMA_CHIP_ID_BCM4748:
case BCMA_CHIP_ID_BCM47162:
return bcma_pmu_clock(cc, BCMA_CC_PMU4716_MAINPLL_PLL0,
BCMA_CC_PMU5_MAINPLL_SSB);
case 0x5356:
case BCMA_CHIP_ID_BCM5356:
return bcma_pmu_clock(cc, BCMA_CC_PMU5356_MAINPLL_PLL0,
BCMA_CC_PMU5_MAINPLL_SSB);
case 0x5357:
case 0x4749:
case BCMA_CHIP_ID_BCM5357:
case BCMA_CHIP_ID_BCM4749:
return bcma_pmu_clock(cc, BCMA_CC_PMU5357_MAINPLL_PLL0,
BCMA_CC_PMU5_MAINPLL_SSB);
case 0x5300:
return bcma_pmu_clock(cc, BCMA_CC_PMU4706_MAINPLL_PLL0,
BCMA_CC_PMU5_MAINPLL_SSB);
case 53572:
case BCMA_CHIP_ID_BCM4706:
return bcma_pmu_clock_bcm4706(cc, BCMA_CC_PMU4706_MAINPLL_PLL0,
BCMA_CC_PMU5_MAINPLL_SSB);
case BCMA_CHIP_ID_BCM53572:
return 75000000;
default:
pr_warn("No backplane clock specified for %04X device, "
"pmu rev. %d, using default %d Hz\n",
bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_HT_CLOCK);
bcma_warn(bus, "No backplane clock specified for %04X device, pmu rev. %d, using default %d Hz\n",
bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_HT_CLOCK);
}
return BCMA_CC_PMU_HT_CLOCK;
}
@ -286,17 +291,21 @@ u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc)
{
struct bcma_bus *bus = cc->core->bus;
if (bus->chipinfo.id == 53572)
if (bus->chipinfo.id == BCMA_CHIP_ID_BCM53572)
return 300000000;
if (cc->pmu.rev >= 5) {
u32 pll;
switch (bus->chipinfo.id) {
case 0x5356:
case BCMA_CHIP_ID_BCM4706:
return bcma_pmu_clock_bcm4706(cc,
BCMA_CC_PMU4706_MAINPLL_PLL0,
BCMA_CC_PMU5_MAINPLL_CPU);
case BCMA_CHIP_ID_BCM5356:
pll = BCMA_CC_PMU5356_MAINPLL_PLL0;
break;
case 0x5357:
case 0x4749:
case BCMA_CHIP_ID_BCM5357:
case BCMA_CHIP_ID_BCM4749:
pll = BCMA_CC_PMU5357_MAINPLL_PLL0;
break;
default:
@ -304,10 +313,188 @@ u32 bcma_pmu_get_clockcpu(struct bcma_drv_cc *cc)
break;
}
/* TODO: if (bus->chipinfo.id == 0x5300)
return si_4706_pmu_clock(sih, osh, cc, PMU4706_MAINPLL_PLL0, PMU5_MAINPLL_CPU); */
return bcma_pmu_clock(cc, pll, BCMA_CC_PMU5_MAINPLL_CPU);
}
return bcma_pmu_get_clockcontrol(cc);
}
static void bcma_pmu_spuravoid_pll_write(struct bcma_drv_cc *cc, u32 offset,
u32 value)
{
bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value);
}
void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
{
u32 tmp = 0;
u8 phypll_offset = 0;
u8 bcm5357_bcm43236_p1div[] = {0x1, 0x5, 0x5};
u8 bcm5357_bcm43236_ndiv[] = {0x30, 0xf6, 0xfc};
struct bcma_bus *bus = cc->core->bus;
switch (bus->chipinfo.id) {
case BCMA_CHIP_ID_BCM5357:
case BCMA_CHIP_ID_BCM4749:
case BCMA_CHIP_ID_BCM53572:
/* 5357[ab]0, 43236[ab]0, and 6362b0 */
/* BCM5357 needs to touch PLL1_PLLCTL[02],
so offset PLL0_PLLCTL[02] by 6 */
phypll_offset = (bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
bus->chipinfo.id == BCMA_CHIP_ID_BCM4749 ||
bus->chipinfo.id == BCMA_CHIP_ID_BCM53572) ? 6 : 0;
/* RMW only the P1 divider */
bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR,
BCMA_CC_PMU_PLL_CTL0 + phypll_offset);
tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
tmp &= (~(BCMA_CC_PMU1_PLL0_PC0_P1DIV_MASK));
tmp |= (bcm5357_bcm43236_p1div[spuravoid] << BCMA_CC_PMU1_PLL0_PC0_P1DIV_SHIFT);
bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp);
/* RMW only the int feedback divider */
bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR,
BCMA_CC_PMU_PLL_CTL2 + phypll_offset);
tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
tmp &= ~(BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_MASK);
tmp |= (bcm5357_bcm43236_ndiv[spuravoid]) << BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT;
bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp);
tmp = 1 << 10;
break;
case BCMA_CHIP_ID_BCM4331:
case BCMA_CHIP_ID_BCM43431:
if (spuravoid == 2) {
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
0x11500014);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
0x0FC00a08);
} else if (spuravoid == 1) {
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
0x11500014);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
0x0F600a08);
} else {
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
0x11100014);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
0x03000a08);
}
tmp = 1 << 10;
break;
case BCMA_CHIP_ID_BCM43224:
case BCMA_CHIP_ID_BCM43225:
case BCMA_CHIP_ID_BCM43421:
if (spuravoid == 1) {
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
0x11500010);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
0x000C0C06);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
0x0F600a08);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
0x00000000);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
0x2001E920);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
0x88888815);
} else {
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
0x11100010);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
0x000c0c06);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
0x03000a08);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
0x00000000);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
0x200005c0);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
0x88888815);
}
tmp = 1 << 10;
break;
case BCMA_CHIP_ID_BCM4716:
case BCMA_CHIP_ID_BCM4748:
case BCMA_CHIP_ID_BCM47162:
if (spuravoid == 1) {
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
0x11500060);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
0x080C0C06);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
0x0F600000);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
0x00000000);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
0x2001E924);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
0x88888815);
} else {
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
0x11100060);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
0x080c0c06);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
0x03000000);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
0x00000000);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
0x200005c0);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
0x88888815);
}
tmp = 3 << 9;
break;
case BCMA_CHIP_ID_BCM43227:
case BCMA_CHIP_ID_BCM43228:
case BCMA_CHIP_ID_BCM43428:
/* LCNXN */
/* PLL Settings for spur avoidance on/off mode,
no on2 support for 43228A0 */
if (spuravoid == 1) {
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
0x01100014);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
0x040C0C06);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
0x03140A08);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
0x00333333);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
0x202C2820);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
0x88888815);
} else {
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
0x11100014);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
0x040c0c06);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
0x03000a08);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
0x00000000);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
0x200005c0);
bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
0x88888815);
}
tmp = 1 << 10;
break;
default:
bcma_err(bus, "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n",
bus->chipinfo.id);
break;
}
tmp |= bcma_cc_read32(cc, BCMA_CC_PMU_CTL);
bcma_cc_write32(cc, BCMA_CC_PMU_CTL, tmp);
}
EXPORT_SYMBOL_GPL(bcma_pmu_spuravoid_pllupdate);
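
For context, the function is exported for use outside the bcma core. A caller
would select one of the spur-avoidance modes handled above roughly as in the
hypothetical sketch below (not taken from this commit; it assumes the
function's declaration is visible via the bcma headers).

/* Illustrative only: switch the PMU of the bus a core sits on to
 * spur-avoidance mode 1. */
static void example_set_spuravoid(struct bcma_device *core)
{
	bcma_pmu_spuravoid_pllupdate(&core->bus->drv_cc, 1);
}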

View File

@ -22,15 +22,15 @@
/* The 47162a0 hangs when reading MIPS DMP registers */
static inline bool bcma_core_mips_bcm47162a0_quirk(struct bcma_device *dev)
{
return dev->bus->chipinfo.id == 47162 && dev->bus->chipinfo.rev == 0 &&
dev->id.id == BCMA_CORE_MIPS_74K;
return dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM47162 &&
dev->bus->chipinfo.rev == 0 && dev->id.id == BCMA_CORE_MIPS_74K;
}
/* The 5357b0 hangs when reading USB20H DMP registers */
static inline bool bcma_core_mips_bcm5357b0_quirk(struct bcma_device *dev)
{
return (dev->bus->chipinfo.id == 0x5357 ||
dev->bus->chipinfo.id == 0x4749) &&
return (dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM4749) &&
dev->bus->chipinfo.pkg == 11 &&
dev->id.id == BCMA_CORE_USB20_HOST;
}
@ -143,8 +143,8 @@ static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
1 << irqflag);
}
pr_info("set_irq: core 0x%04x, irq %d => %d\n",
dev->id.id, oldirq + 2, irq + 2);
bcma_info(bus, "set_irq: core 0x%04x, irq %d => %d\n",
dev->id.id, oldirq + 2, irq + 2);
}
static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq)
@ -173,7 +173,7 @@ u32 bcma_cpu_clock(struct bcma_drv_mips *mcore)
if (bus->drv_cc.capabilities & BCMA_CC_CAP_PMU)
return bcma_pmu_get_clockcpu(&bus->drv_cc);
pr_err("No PMU available, need this to get the cpu clock\n");
bcma_err(bus, "No PMU available, need this to get the cpu clock\n");
return 0;
}
EXPORT_SYMBOL(bcma_cpu_clock);
@ -185,10 +185,10 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
switch (bus->drv_cc.capabilities & BCMA_CC_CAP_FLASHT) {
case BCMA_CC_FLASHT_STSER:
case BCMA_CC_FLASHT_ATSER:
pr_err("Serial flash not supported.\n");
bcma_err(bus, "Serial flash not supported.\n");
break;
case BCMA_CC_FLASHT_PARA:
pr_info("found parallel flash.\n");
bcma_info(bus, "found parallel flash.\n");
bus->drv_cc.pflash.window = 0x1c000000;
bus->drv_cc.pflash.window_size = 0x02000000;
@ -199,7 +199,7 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
bus->drv_cc.pflash.buswidth = 2;
break;
default:
pr_err("flash not supported.\n");
bcma_err(bus, "flash not supported.\n");
}
}
@ -209,7 +209,7 @@ void bcma_core_mips_init(struct bcma_drv_mips *mcore)
struct bcma_device *core;
bus = mcore->core->bus;
pr_info("Initializing MIPS core...\n");
bcma_info(bus, "Initializing MIPS core...\n");
if (!mcore->setup_done)
mcore->assigned_irqs = 1;
@ -244,7 +244,7 @@ void bcma_core_mips_init(struct bcma_drv_mips *mcore)
break;
}
}
pr_info("IRQ reconfiguration done\n");
bcma_info(bus, "IRQ reconfiguration done\n");
bcma_core_mips_dump_irq(bus);
if (mcore->setup_done)

View File

@ -36,7 +36,7 @@ bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
return false;
if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
pr_info("This PCI core is disabled and not working\n");
bcma_info(bus, "This PCI core is disabled and not working\n");
return false;
}
@ -215,7 +215,8 @@ static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
} else {
writel(val, mmio);
if (chipid == 0x4716 || chipid == 0x4748)
if (chipid == BCMA_CHIP_ID_BCM4716 ||
chipid == BCMA_CHIP_ID_BCM4748)
readl(mmio);
}
@ -340,6 +341,7 @@ static u8 __devinit bcma_find_pci_capability(struct bcma_drv_pci *pc,
*/
static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
{
struct bcma_bus *bus = pc->core->bus;
u8 cap_ptr, root_ctrl, root_cap, dev;
u16 val16;
int i;
@ -378,7 +380,8 @@ static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
udelay(10);
}
if (val16 == 0x1)
pr_err("PCI: Broken device in slot %d\n", dev);
bcma_err(bus, "PCI: Broken device in slot %d\n",
dev);
}
}
}
@ -391,11 +394,11 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
u32 pci_membase_1G;
unsigned long io_map_base;
pr_info("PCIEcore in host mode found\n");
bcma_info(bus, "PCIEcore in host mode found\n");
pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL);
if (!pc_host) {
pr_err("can not allocate memory");
bcma_err(bus, "can not allocate memory");
return;
}
@ -434,13 +437,14 @@ void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
* as mips can't generate 64-bit address on the
* backplane.
*/
if (bus->chipinfo.id == 0x4716 || bus->chipinfo.id == 0x4748) {
if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4716 ||
bus->chipinfo.id == BCMA_CHIP_ID_BCM4748) {
pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
BCMA_SOC_PCI_MEM_SZ - 1;
pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
BCMA_CORE_PCI_SBTOPCI_MEM | BCMA_SOC_PCI_MEM);
} else if (bus->chipinfo.id == 0x5300) {
} else if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
tmp = BCMA_CORE_PCI_SBTOPCI_MEM;
tmp |= BCMA_CORE_PCI_SBTOPCI_PREF;
tmp |= BCMA_CORE_PCI_SBTOPCI_BURST;

View File

@ -18,7 +18,7 @@ static void bcma_host_pci_switch_core(struct bcma_device *core)
pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN2,
core->wrap);
core->bus->mapped_core = core;
pr_debug("Switched to core: 0x%X\n", core->id.id);
bcma_debug(core->bus, "Switched to core: 0x%X\n", core->id.id);
}
/* Provides access to the requested core. Returns base offset that has to be
@ -188,7 +188,7 @@ static int __devinit bcma_host_pci_probe(struct pci_dev *dev,
/* SSB needed additional powering up, do we have any AMBA PCI cards? */
if (!pci_is_pcie(dev))
pr_err("PCI card detected, report problems.\n");
bcma_err(bus, "PCI card detected, report problems.\n");
/* Map MMIO */
err = -ENOMEM;
@ -268,6 +268,7 @@ static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },

View File

@ -118,8 +118,9 @@ static int bcma_register_cores(struct bcma_bus *bus)
err = device_register(&core->dev);
if (err) {
pr_err("Could not register dev for core 0x%03X\n",
core->id.id);
bcma_err(bus,
"Could not register dev for core 0x%03X\n",
core->id.id);
continue;
}
core->dev_registered = true;
@ -151,7 +152,7 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
/* Scan for devices (cores) */
err = bcma_bus_scan(bus);
if (err) {
pr_err("Failed to scan: %d\n", err);
bcma_err(bus, "Failed to scan: %d\n", err);
return -1;
}
@ -179,14 +180,14 @@ int __devinit bcma_bus_register(struct bcma_bus *bus)
/* Try to get SPROM */
err = bcma_sprom_get(bus);
if (err == -ENOENT) {
pr_err("No SPROM available\n");
bcma_err(bus, "No SPROM available\n");
} else if (err)
pr_err("Failed to get SPROM: %d\n", err);
bcma_err(bus, "Failed to get SPROM: %d\n", err);
/* Register found cores */
bcma_register_cores(bus);
pr_info("Bus registered\n");
bcma_info(bus, "Bus registered\n");
return 0;
}
@ -214,7 +215,7 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
/* Scan for chip common core */
err = bcma_bus_scan_early(bus, &match, core_cc);
if (err) {
pr_err("Failed to scan for common core: %d\n", err);
bcma_err(bus, "Failed to scan for common core: %d\n", err);
return -1;
}
@ -226,7 +227,7 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
/* Scan for mips core */
err = bcma_bus_scan_early(bus, &match, core_mips);
if (err) {
pr_err("Failed to scan for mips core: %d\n", err);
bcma_err(bus, "Failed to scan for mips core: %d\n", err);
return -1;
}
@ -244,7 +245,7 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
bcma_core_mips_init(&bus->drv_mips);
}
pr_info("Early bus registered\n");
bcma_info(bus, "Early bus registered\n");
return 0;
}

View File

@ -340,7 +340,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
if (tmp <= 0) {
return -EILSEQ;
} else {
pr_info("Bridge found\n");
bcma_info(bus, "Bridge found\n");
return -ENXIO;
}
}
@ -427,8 +427,8 @@ void bcma_init_bus(struct bcma_bus *bus)
chipinfo->id = (tmp & BCMA_CC_ID_ID) >> BCMA_CC_ID_ID_SHIFT;
chipinfo->rev = (tmp & BCMA_CC_ID_REV) >> BCMA_CC_ID_REV_SHIFT;
chipinfo->pkg = (tmp & BCMA_CC_ID_PKG) >> BCMA_CC_ID_PKG_SHIFT;
pr_info("Found chip with id 0x%04X, rev 0x%02X and package 0x%02X\n",
chipinfo->id, chipinfo->rev, chipinfo->pkg);
bcma_info(bus, "Found chip with id 0x%04X, rev 0x%02X and package 0x%02X\n",
chipinfo->id, chipinfo->rev, chipinfo->pkg);
bus->init_done = true;
}
@ -482,11 +482,10 @@ int bcma_bus_scan(struct bcma_bus *bus)
other_core = bcma_find_core_reverse(bus, core->id.id);
core->core_unit = (other_core == NULL) ? 0 : other_core->core_unit + 1;
pr_info("Core %d found: %s "
"(manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
core->core_index, bcma_device_name(&core->id),
core->id.manuf, core->id.id, core->id.rev,
core->id.class);
bcma_info(bus, "Core %d found: %s (manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
core->core_index, bcma_device_name(&core->id),
core->id.manuf, core->id.id, core->id.rev,
core->id.class);
list_add(&core->list, &bus->cores);
}
@ -538,11 +537,10 @@ int __init bcma_bus_scan_early(struct bcma_bus *bus,
core->core_index = core_num++;
bus->nr_cores++;
pr_info("Core %d found: %s "
"(manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
core->core_index, bcma_device_name(&core->id),
core->id.manuf, core->id.id, core->id.rev,
core->id.class);
bcma_info(bus, "Core %d found: %s (manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
core->core_index, bcma_device_name(&core->id),
core->id.manuf, core->id.id, core->id.rev,
core->id.class);
list_add(&core->list, &bus->cores);
err = 0;

View File

@ -60,11 +60,11 @@ static int bcma_fill_sprom_with_fallback(struct bcma_bus *bus,
if (err)
goto fail;
pr_debug("Using SPROM revision %d provided by"
" platform.\n", bus->sprom.revision);
bcma_debug(bus, "Using SPROM revision %d provided by platform.\n",
bus->sprom.revision);
return 0;
fail:
pr_warn("Using fallback SPROM failed (err %d)\n", err);
bcma_warn(bus, "Using fallback SPROM failed (err %d)\n", err);
return err;
}
@ -468,11 +468,11 @@ static bool bcma_sprom_ext_available(struct bcma_bus *bus)
/* older chipcommon revisions use chip status register */
chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT);
switch (bus->chipinfo.id) {
case 0x4313:
case BCMA_CHIP_ID_BCM4313:
present_mask = BCMA_CC_CHIPST_4313_SPROM_PRESENT;
break;
case 0x4331:
case BCMA_CHIP_ID_BCM4331:
present_mask = BCMA_CC_CHIPST_4331_SPROM_PRESENT;
break;
@ -494,16 +494,16 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus)
chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT);
switch (bus->chipinfo.id) {
case 0x4313:
case BCMA_CHIP_ID_BCM4313:
present = chip_status & BCMA_CC_CHIPST_4313_OTP_PRESENT;
break;
case 0x4331:
case BCMA_CHIP_ID_BCM4331:
present = chip_status & BCMA_CC_CHIPST_4331_OTP_PRESENT;
break;
case 43224:
case 43225:
case BCMA_CHIP_ID_BCM43224:
case BCMA_CHIP_ID_BCM43225:
/* for these chips OTP is always available */
present = true;
break;
@ -579,13 +579,15 @@ int bcma_sprom_get(struct bcma_bus *bus)
if (!sprom)
return -ENOMEM;
if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431)
if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 ||
bus->chipinfo.id == BCMA_CHIP_ID_BCM43431)
bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false);
pr_debug("SPROM offset 0x%x\n", offset);
bcma_debug(bus, "SPROM offset 0x%x\n", offset);
bcma_sprom_read(bus, offset, sprom);
if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431)
if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 ||
bus->chipinfo.id == BCMA_CHIP_ID_BCM43431)
bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true);
err = bcma_sprom_valid(sprom);

View File

@ -511,7 +511,6 @@ config USB_SWITCH_FSA9480
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
source "drivers/misc/iwmc3200top/Kconfig"
source "drivers/misc/ti-st/Kconfig"
source "drivers/misc/lis3lv02d/Kconfig"
source "drivers/misc/carma/Kconfig"

View File

@ -36,7 +36,6 @@ obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
obj-$(CONFIG_DS1682) += ds1682.o
obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
obj-$(CONFIG_C2PORT) += c2port/
obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
obj-$(CONFIG_HMC6352) += hmc6352.o
obj-y += eeprom/
obj-y += cb710/

View File

@ -1,20 +0,0 @@
config IWMC3200TOP
tristate "Intel Wireless MultiCom Top Driver"
depends on MMC && EXPERIMENTAL
select FW_LOADER
---help---
Intel Wireless MultiCom 3200 Top driver is responsible for
for firmware load and enabled coms enumeration
config IWMC3200TOP_DEBUG
bool "Enable full debug output of iwmc3200top Driver"
depends on IWMC3200TOP
---help---
Enable full debug output of iwmc3200top Driver
config IWMC3200TOP_DEBUGFS
bool "Enable Debugfs debugging interface for iwmc3200top"
depends on IWMC3200TOP
---help---
Enable creation of debugfs files for iwmc3200top

View File

@ -1,29 +0,0 @@
# iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
# drivers/misc/iwmc3200top/Makefile
#
# Copyright (C) 2009 Intel Corporation. All rights reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License version
# 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
#
# Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
# -
#
#
obj-$(CONFIG_IWMC3200TOP) += iwmc3200top.o
iwmc3200top-objs := main.o fw-download.o
iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUG) += log.o
iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUGFS) += debugfs.o

View File

@ -1,137 +0,0 @@
/*
* iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
* drivers/misc/iwmc3200top/debufs.c
*
* Copyright (C) 2009 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*
* Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
* -
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio.h>
#include <linux/debugfs.h>
#include "iwmc3200top.h"
#include "fw-msg.h"
#include "log.h"
#include "debugfs.h"
/* Constants definition */
#define HEXADECIMAL_RADIX 16
/* Functions definition */
#define DEBUGFS_ADD(name, parent) do { \
dbgfs->dbgfs_##parent##_files.file_##name = \
debugfs_create_file(#name, 0644, dbgfs->dir_##parent, priv, \
&iwmct_dbgfs_##name##_ops); \
} while (0)
#define DEBUGFS_RM(name) do { \
debugfs_remove(name); \
name = NULL; \
} while (0)
#define DEBUGFS_READ_FUNC(name) \
ssize_t iwmct_dbgfs_##name##_read(struct file *file, \
char __user *user_buf, \
size_t count, loff_t *ppos);
#define DEBUGFS_WRITE_FUNC(name) \
ssize_t iwmct_dbgfs_##name##_write(struct file *file, \
const char __user *user_buf, \
size_t count, loff_t *ppos);
#define DEBUGFS_READ_FILE_OPS(name) \
DEBUGFS_READ_FUNC(name) \
static const struct file_operations iwmct_dbgfs_##name##_ops = { \
.read = iwmct_dbgfs_##name##_read, \
.open = iwmct_dbgfs_open_file_generic, \
.llseek = generic_file_llseek, \
};
#define DEBUGFS_WRITE_FILE_OPS(name) \
DEBUGFS_WRITE_FUNC(name) \
static const struct file_operations iwmct_dbgfs_##name##_ops = { \
.write = iwmct_dbgfs_##name##_write, \
.open = iwmct_dbgfs_open_file_generic, \
.llseek = generic_file_llseek, \
};
#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
DEBUGFS_READ_FUNC(name) \
DEBUGFS_WRITE_FUNC(name) \
static const struct file_operations iwmct_dbgfs_##name##_ops = {\
.write = iwmct_dbgfs_##name##_write, \
.read = iwmct_dbgfs_##name##_read, \
.open = iwmct_dbgfs_open_file_generic, \
.llseek = generic_file_llseek, \
};
/* Debugfs file ops definitions */
/*
* Create the debugfs files and directories
*
*/
void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name)
{
struct iwmct_debugfs *dbgfs;
dbgfs = kzalloc(sizeof(struct iwmct_debugfs), GFP_KERNEL);
if (!dbgfs) {
LOG_ERROR(priv, DEBUGFS, "failed to allocate %zd bytes\n",
sizeof(struct iwmct_debugfs));
return;
}
priv->dbgfs = dbgfs;
dbgfs->name = name;
dbgfs->dir_drv = debugfs_create_dir(name, NULL);
if (!dbgfs->dir_drv) {
LOG_ERROR(priv, DEBUGFS, "failed to create debugfs dir\n");
return;
}
return;
}
/**
* Remove the debugfs files and directories
*
*/
void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs)
{
if (!dbgfs)
return;
DEBUGFS_RM(dbgfs->dir_drv);
kfree(dbgfs);
dbgfs = NULL;
}

View File

@ -1,58 +0,0 @@
/*
* iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
* drivers/misc/iwmc3200top/debufs.h
*
* Copyright (C) 2009 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*
* Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
* -
*
*/
#ifndef __DEBUGFS_H__
#define __DEBUGFS_H__
#ifdef CONFIG_IWMC3200TOP_DEBUGFS
struct iwmct_debugfs {
const char *name;
struct dentry *dir_drv;
struct dir_drv_files {
} dbgfs_drv_files;
};
void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name);
void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs);
#else /* CONFIG_IWMC3200TOP_DEBUGFS */
struct iwmct_debugfs;
static inline void
iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name)
{}
static inline void
iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs)
{}
#endif /* CONFIG_IWMC3200TOP_DEBUGFS */
#endif /* __DEBUGFS_H__ */

View File

@ -1,358 +0,0 @@
/*
* iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
* drivers/misc/iwmc3200top/fw-download.c
*
* Copyright (C) 2009 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*
* Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
* -
*
*/
#include <linux/firmware.h>
#include <linux/mmc/sdio_func.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
#include "iwmc3200top.h"
#include "log.h"
#include "fw-msg.h"
#define CHECKSUM_BYTES_NUM sizeof(u32)
/**
init parser struct with file
*/
static int iwmct_fw_parser_init(struct iwmct_priv *priv, const u8 *file,
size_t file_size, size_t block_size)
{
struct iwmct_parser *parser = &priv->parser;
struct iwmct_fw_hdr *fw_hdr = &parser->versions;
LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
LOG_INFO(priv, FW_DOWNLOAD, "file_size=%zd\n", file_size);
parser->file = file;
parser->file_size = file_size;
parser->cur_pos = 0;
parser->entry_point = 0;
parser->buf = kzalloc(block_size, GFP_KERNEL);
if (!parser->buf) {
LOG_ERROR(priv, FW_DOWNLOAD, "kzalloc error\n");
return -ENOMEM;
}
parser->buf_size = block_size;
/* extract fw versions */
memcpy(fw_hdr, parser->file, sizeof(struct iwmct_fw_hdr));
LOG_INFO(priv, FW_DOWNLOAD, "fw versions are:\n"
"top %u.%u.%u gps %u.%u.%u bt %u.%u.%u tic %s\n",
fw_hdr->top_major, fw_hdr->top_minor, fw_hdr->top_revision,
fw_hdr->gps_major, fw_hdr->gps_minor, fw_hdr->gps_revision,
fw_hdr->bt_major, fw_hdr->bt_minor, fw_hdr->bt_revision,
fw_hdr->tic_name);
parser->cur_pos += sizeof(struct iwmct_fw_hdr);
LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
return 0;
}
static bool iwmct_checksum(struct iwmct_priv *priv)
{
struct iwmct_parser *parser = &priv->parser;
__le32 *file = (__le32 *)parser->file;
int i, pad, steps;
u32 accum = 0;
u32 checksum;
u32 mask = 0xffffffff;
pad = (parser->file_size - CHECKSUM_BYTES_NUM) % 4;
steps = (parser->file_size - CHECKSUM_BYTES_NUM) / 4;
LOG_INFO(priv, FW_DOWNLOAD, "pad=%d steps=%d\n", pad, steps);
for (i = 0; i < steps; i++)
accum += le32_to_cpu(file[i]);
if (pad) {
mask <<= 8 * (4 - pad);
accum += le32_to_cpu(file[steps]) & mask;
}
checksum = get_unaligned_le32((__le32 *)(parser->file +
parser->file_size - CHECKSUM_BYTES_NUM));
LOG_INFO(priv, FW_DOWNLOAD,
"compare checksum accum=0x%x to checksum=0x%x\n",
accum, checksum);
return checksum == accum;
}
static int iwmct_parse_next_section(struct iwmct_priv *priv, const u8 **p_sec,
size_t *sec_size, __le32 *sec_addr)
{
struct iwmct_parser *parser = &priv->parser;
struct iwmct_dbg *dbg = &priv->dbg;
struct iwmct_fw_sec_hdr *sec_hdr;
LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
while (parser->cur_pos + sizeof(struct iwmct_fw_sec_hdr)
<= parser->file_size) {
sec_hdr = (struct iwmct_fw_sec_hdr *)
(parser->file + parser->cur_pos);
parser->cur_pos += sizeof(struct iwmct_fw_sec_hdr);
LOG_INFO(priv, FW_DOWNLOAD,
"sec hdr: type=%s addr=0x%x size=%d\n",
sec_hdr->type, sec_hdr->target_addr,
sec_hdr->data_size);
if (strcmp(sec_hdr->type, "ENT") == 0)
parser->entry_point = le32_to_cpu(sec_hdr->target_addr);
else if (strcmp(sec_hdr->type, "LBL") == 0)
strcpy(dbg->label_fw, parser->file + parser->cur_pos);
else if (((strcmp(sec_hdr->type, "TOP") == 0) &&
(priv->barker & BARKER_DNLOAD_TOP_MSK)) ||
((strcmp(sec_hdr->type, "GPS") == 0) &&
(priv->barker & BARKER_DNLOAD_GPS_MSK)) ||
((strcmp(sec_hdr->type, "BTH") == 0) &&
(priv->barker & BARKER_DNLOAD_BT_MSK))) {
*sec_addr = sec_hdr->target_addr;
*sec_size = le32_to_cpu(sec_hdr->data_size);
*p_sec = parser->file + parser->cur_pos;
parser->cur_pos += le32_to_cpu(sec_hdr->data_size);
return 1;
} else if (strcmp(sec_hdr->type, "LOG") != 0)
LOG_WARNING(priv, FW_DOWNLOAD,
"skipping section type %s\n",
sec_hdr->type);
parser->cur_pos += le32_to_cpu(sec_hdr->data_size);
LOG_INFO(priv, FW_DOWNLOAD,
"finished with section cur_pos=%zd\n", parser->cur_pos);
}
LOG_TRACE(priv, INIT, "<--\n");
return 0;
}
static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
size_t sec_size, __le32 addr)
{
struct iwmct_parser *parser = &priv->parser;
struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf;
const u8 *cur_block = p_sec;
size_t sent = 0;
int cnt = 0;
int ret = 0;
u32 cmd = 0;
LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
LOG_INFO(priv, FW_DOWNLOAD, "Download address 0x%x size 0x%zx\n",
addr, sec_size);
while (sent < sec_size) {
int i;
u32 chksm = 0;
u32 reset = atomic_read(&priv->reset);
/* actual FW data */
u32 data_size = min(parser->buf_size - sizeof(*hdr),
sec_size - sent);
/* Pad to block size */
u32 trans_size = (data_size + sizeof(*hdr) +
IWMC_SDIO_BLK_SIZE - 1) &
~(IWMC_SDIO_BLK_SIZE - 1);
++cnt;
/* in case of reset, interrupt FW DOWNLAOD */
if (reset) {
LOG_INFO(priv, FW_DOWNLOAD,
"Reset detected. Abort FW download!!!");
ret = -ECANCELED;
goto exit;
}
memset(parser->buf, 0, parser->buf_size);
cmd |= IWMC_OPCODE_WRITE << CMD_HDR_OPCODE_POS;
cmd |= IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
cmd |= (priv->dbg.direct ? 1 : 0) << CMD_HDR_DIRECT_ACCESS_POS;
cmd |= (priv->dbg.checksum ? 1 : 0) << CMD_HDR_USE_CHECKSUM_POS;
hdr->data_size = cpu_to_le32(data_size);
hdr->target_addr = addr;
/* checksum is allowed for sizes divisible by 4 */
if (data_size & 0x3)
cmd &= ~CMD_HDR_USE_CHECKSUM_MSK;
memcpy(hdr->data, cur_block, data_size);
if (cmd & CMD_HDR_USE_CHECKSUM_MSK) {
chksm = data_size + le32_to_cpu(addr) + cmd;
for (i = 0; i < data_size >> 2; i++)
chksm += ((u32 *)cur_block)[i];
hdr->block_chksm = cpu_to_le32(chksm);
LOG_INFO(priv, FW_DOWNLOAD, "Checksum = 0x%X\n",
hdr->block_chksm);
}
LOG_INFO(priv, FW_DOWNLOAD, "trans#%d, len=%d, sent=%zd, "
"sec_size=%zd, startAddress 0x%X\n",
cnt, trans_size, sent, sec_size, addr);
if (priv->dbg.dump)
LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, trans_size);
hdr->cmd = cpu_to_le32(cmd);
/* send it down */
/* TODO: add more proper sending and error checking */
ret = iwmct_tx(priv, parser->buf, trans_size);
if (ret != 0) {
LOG_INFO(priv, FW_DOWNLOAD,
"iwmct_tx returned %d\n", ret);
goto exit;
}
addr = cpu_to_le32(le32_to_cpu(addr) + data_size);
sent += data_size;
cur_block = p_sec + sent;
if (priv->dbg.blocks && (cnt + 1) >= priv->dbg.blocks) {
LOG_INFO(priv, FW_DOWNLOAD,
"Block number limit is reached [%d]\n",
priv->dbg.blocks);
break;
}
}
if (sent < sec_size)
ret = -EINVAL;
exit:
LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
return ret;
}
static int iwmct_kick_fw(struct iwmct_priv *priv, bool jump)
{
struct iwmct_parser *parser = &priv->parser;
struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf;
int ret;
u32 cmd;
LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
memset(parser->buf, 0, parser->buf_size);
cmd = IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
if (jump) {
cmd |= IWMC_OPCODE_JUMP << CMD_HDR_OPCODE_POS;
hdr->target_addr = cpu_to_le32(parser->entry_point);
LOG_INFO(priv, FW_DOWNLOAD, "jump address 0x%x\n",
parser->entry_point);
} else {
cmd |= IWMC_OPCODE_LAST_COMMAND << CMD_HDR_OPCODE_POS;
LOG_INFO(priv, FW_DOWNLOAD, "last command\n");
}
hdr->cmd = cpu_to_le32(cmd);
LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, sizeof(*hdr));
/* send it down */
/* TODO: add more proper sending and error checking */
ret = iwmct_tx(priv, parser->buf, IWMC_SDIO_BLK_SIZE);
if (ret)
LOG_INFO(priv, FW_DOWNLOAD, "iwmct_tx returned %d", ret);
LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
return 0;
}
int iwmct_fw_load(struct iwmct_priv *priv)
{
const u8 *fw_name = FW_NAME(FW_API_VER);
const struct firmware *raw;
const u8 *pdata;
size_t len;
__le32 addr;
int ret;
LOG_INFO(priv, FW_DOWNLOAD, "barker download request 0x%x is:\n",
priv->barker);
LOG_INFO(priv, FW_DOWNLOAD, "******* Top FW %s requested ********\n",
(priv->barker & BARKER_DNLOAD_TOP_MSK) ? "was" : "not");
LOG_INFO(priv, FW_DOWNLOAD, "******* GPS FW %s requested ********\n",
(priv->barker & BARKER_DNLOAD_GPS_MSK) ? "was" : "not");
LOG_INFO(priv, FW_DOWNLOAD, "******* BT FW %s requested ********\n",
(priv->barker & BARKER_DNLOAD_BT_MSK) ? "was" : "not");
/* get the firmware */
ret = request_firmware(&raw, fw_name, &priv->func->dev);
if (ret < 0) {
LOG_ERROR(priv, FW_DOWNLOAD, "%s request_firmware failed %d\n",
fw_name, ret);
goto exit;
}
if (raw->size < sizeof(struct iwmct_fw_sec_hdr)) {
LOG_ERROR(priv, FW_DOWNLOAD, "%s smaller then (%zd) (%zd)\n",
fw_name, sizeof(struct iwmct_fw_sec_hdr), raw->size);
goto exit;
}
LOG_INFO(priv, FW_DOWNLOAD, "Read firmware '%s'\n", fw_name);
/* clear parser struct */
ret = iwmct_fw_parser_init(priv, raw->data, raw->size, priv->trans_len);
if (ret < 0) {
LOG_ERROR(priv, FW_DOWNLOAD,
"iwmct_parser_init failed: Reason %d\n", ret);
goto exit;
}
if (!iwmct_checksum(priv)) {
LOG_ERROR(priv, FW_DOWNLOAD, "checksum error\n");
ret = -EINVAL;
goto exit;
}
/* download firmware to device */
while (iwmct_parse_next_section(priv, &pdata, &len, &addr)) {
ret = iwmct_download_section(priv, pdata, len, addr);
if (ret) {
LOG_ERROR(priv, FW_DOWNLOAD,
"%s download section failed\n", fw_name);
goto exit;
}
}
ret = iwmct_kick_fw(priv, !!(priv->barker & BARKER_DNLOAD_JUMP_MSK));
exit:
kfree(priv->parser.buf);
release_firmware(raw);
return ret;
}

View File

@ -1,113 +0,0 @@
/*
* iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
* drivers/misc/iwmc3200top/fw-msg.h
*
* Copyright (C) 2009 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*
* Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
* -
*
*/
#ifndef __FWMSG_H__
#define __FWMSG_H__
#define COMM_TYPE_D2H 0xFF
#define COMM_TYPE_H2D 0xEE
#define COMM_CATEGORY_OPERATIONAL 0x00
#define COMM_CATEGORY_DEBUG 0x01
#define COMM_CATEGORY_TESTABILITY 0x02
#define COMM_CATEGORY_DIAGNOSTICS 0x03
#define OP_DBG_ZSTR_MSG cpu_to_le16(0x1A)
#define FW_LOG_SRC_MAX 32
#define FW_LOG_SRC_ALL 255
#define FW_STRING_TABLE_ADDR cpu_to_le32(0x0C000000)
#define CMD_DBG_LOG_LEVEL cpu_to_le16(0x0001)
#define CMD_TST_DEV_RESET cpu_to_le16(0x0060)
#define CMD_TST_FUNC_RESET cpu_to_le16(0x0062)
#define CMD_TST_IFACE_RESET cpu_to_le16(0x0064)
#define CMD_TST_CPU_UTILIZATION cpu_to_le16(0x0065)
#define CMD_TST_TOP_DEEP_SLEEP cpu_to_le16(0x0080)
#define CMD_TST_WAKEUP cpu_to_le16(0x0081)
#define CMD_TST_FUNC_WAKEUP cpu_to_le16(0x0082)
#define CMD_TST_FUNC_DEEP_SLEEP_REQUEST cpu_to_le16(0x0083)
#define CMD_TST_GET_MEM_DUMP cpu_to_le16(0x0096)
#define OP_OPR_ALIVE cpu_to_le16(0x0010)
#define OP_OPR_CMD_ACK cpu_to_le16(0x001F)
#define OP_OPR_CMD_NACK cpu_to_le16(0x0020)
#define OP_TST_MEM_DUMP cpu_to_le16(0x0043)
#define CMD_FLAG_PADDING_256 0x80
#define FW_HCMD_BLOCK_SIZE 256
struct msg_hdr {
u8 type;
u8 category;
__le16 opcode;
u8 seqnum;
u8 flags;
__le16 length;
} __attribute__((__packed__));
struct log_hdr {
__le32 timestamp;
u8 severity;
u8 logsource;
__le16 reserved;
} __attribute__((__packed__));
struct mdump_hdr {
u8 dmpid;
u8 frag;
__le16 size;
__le32 addr;
} __attribute__((__packed__));
struct top_msg {
struct msg_hdr hdr;
union {
/* D2H messages */
struct {
struct log_hdr log_hdr;
u8 data[1];
} __attribute__((__packed__)) log;
struct {
struct log_hdr log_hdr;
struct mdump_hdr md_hdr;
u8 data[1];
} __attribute__((__packed__)) mdump;
/* H2D messages */
struct {
u8 logsource;
u8 sevmask;
} __attribute__((__packed__)) logdefs[FW_LOG_SRC_MAX];
struct mdump_hdr mdump_req;
} u;
} __attribute__((__packed__));
#endif /* __FWMSG_H__ */

View File

@ -1,205 +0,0 @@
/*
* iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
* drivers/misc/iwmc3200top/iwmc3200top.h
*
* Copyright (C) 2009 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*
* Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
* -
*
*/
#ifndef __IWMC3200TOP_H__
#define __IWMC3200TOP_H__
#include <linux/workqueue.h>
#define DRV_NAME "iwmc3200top"
#define FW_API_VER 1
#define _FW_NAME(api) DRV_NAME "." #api ".fw"
#define FW_NAME(api) _FW_NAME(api)
#define IWMC_SDIO_BLK_SIZE 256
#define IWMC_DEFAULT_TR_BLK 64
#define IWMC_SDIO_DATA_ADDR 0x0
#define IWMC_SDIO_INTR_ENABLE_ADDR 0x14
#define IWMC_SDIO_INTR_STATUS_ADDR 0x13
#define IWMC_SDIO_INTR_CLEAR_ADDR 0x13
#define IWMC_SDIO_INTR_GET_SIZE_ADDR 0x2C
#define COMM_HUB_HEADER_LENGTH 16
#define LOGGER_HEADER_LENGTH 10
#define BARKER_DNLOAD_BT_POS 0
#define BARKER_DNLOAD_BT_MSK BIT(BARKER_DNLOAD_BT_POS)
#define BARKER_DNLOAD_GPS_POS 1
#define BARKER_DNLOAD_GPS_MSK BIT(BARKER_DNLOAD_GPS_POS)
#define BARKER_DNLOAD_TOP_POS 2
#define BARKER_DNLOAD_TOP_MSK BIT(BARKER_DNLOAD_TOP_POS)
#define BARKER_DNLOAD_RESERVED1_POS 3
#define BARKER_DNLOAD_RESERVED1_MSK BIT(BARKER_DNLOAD_RESERVED1_POS)
#define BARKER_DNLOAD_JUMP_POS 4
#define BARKER_DNLOAD_JUMP_MSK BIT(BARKER_DNLOAD_JUMP_POS)
#define BARKER_DNLOAD_SYNC_POS 5
#define BARKER_DNLOAD_SYNC_MSK BIT(BARKER_DNLOAD_SYNC_POS)
#define BARKER_DNLOAD_RESERVED2_POS 6
#define BARKER_DNLOAD_RESERVED2_MSK (0x3 << BARKER_DNLOAD_RESERVED2_POS)
#define BARKER_DNLOAD_BARKER_POS 8
#define BARKER_DNLOAD_BARKER_MSK (0xffffff << BARKER_DNLOAD_BARKER_POS)
#define IWMC_BARKER_REBOOT (0xdeadbe << BARKER_DNLOAD_BARKER_POS)
/* whole field barker */
#define IWMC_BARKER_ACK 0xfeedbabe
#define IWMC_CMD_SIGNATURE 0xcbbc
#define CMD_HDR_OPCODE_POS 0
#define CMD_HDR_OPCODE_MSK_MSK (0xf << CMD_HDR_OPCODE_MSK_POS)
#define CMD_HDR_RESPONSE_CODE_POS 4
#define CMD_HDR_RESPONSE_CODE_MSK (0xf << CMD_HDR_RESPONSE_CODE_POS)
#define CMD_HDR_USE_CHECKSUM_POS 8
#define CMD_HDR_USE_CHECKSUM_MSK BIT(CMD_HDR_USE_CHECKSUM_POS)
#define CMD_HDR_RESPONSE_REQUIRED_POS 9
#define CMD_HDR_RESPONSE_REQUIRED_MSK BIT(CMD_HDR_RESPONSE_REQUIRED_POS)
#define CMD_HDR_DIRECT_ACCESS_POS 10
#define CMD_HDR_DIRECT_ACCESS_MSK BIT(CMD_HDR_DIRECT_ACCESS_POS)
#define CMD_HDR_RESERVED_POS 11
#define CMD_HDR_RESERVED_MSK BIT(0x1f << CMD_HDR_RESERVED_POS)
#define CMD_HDR_SIGNATURE_POS 16
#define CMD_HDR_SIGNATURE_MSK BIT(0xffff << CMD_HDR_SIGNATURE_POS)
enum {
IWMC_OPCODE_PING = 0,
IWMC_OPCODE_READ = 1,
IWMC_OPCODE_WRITE = 2,
IWMC_OPCODE_JUMP = 3,
IWMC_OPCODE_REBOOT = 4,
IWMC_OPCODE_PERSISTENT_WRITE = 5,
IWMC_OPCODE_PERSISTENT_READ = 6,
IWMC_OPCODE_READ_MODIFY_WRITE = 7,
IWMC_OPCODE_LAST_COMMAND = 15
};
struct iwmct_fw_load_hdr {
__le32 cmd;
__le32 target_addr;
__le32 data_size;
__le32 block_chksm;
u8 data[0];
};
/**
* struct iwmct_fw_hdr
* holds all sw components versions
*/
struct iwmct_fw_hdr {
u8 top_major;
u8 top_minor;
u8 top_revision;
u8 gps_major;
u8 gps_minor;
u8 gps_revision;
u8 bt_major;
u8 bt_minor;
u8 bt_revision;
u8 tic_name[31];
};
/**
* struct iwmct_fw_sec_hdr
* @type: function type
* @data_size: section's data size
* @target_addr: download address
*/
struct iwmct_fw_sec_hdr {
u8 type[4];
__le32 data_size;
__le32 target_addr;
};
/**
* struct iwmct_parser
* @file: fw image
* @file_size: fw size
* @cur_pos: position in file
* @buf: temp buf for download
* @buf_size: size of buf
* @entry_point: address to jump in fw kick-off
*/
struct iwmct_parser {
const u8 *file;
size_t file_size;
size_t cur_pos;
u8 *buf;
size_t buf_size;
u32 entry_point;
struct iwmct_fw_hdr versions;
};
struct iwmct_work_struct {
struct list_head list;
ssize_t iosize;
};
struct iwmct_dbg {
int blocks;
bool dump;
bool jump;
bool direct;
bool checksum;
bool fw_download;
int block_size;
int download_trans_blks;
char label_fw[256];
};
struct iwmct_debugfs;
struct iwmct_priv {
struct sdio_func *func;
struct iwmct_debugfs *dbgfs;
struct iwmct_parser parser;
atomic_t reset;
atomic_t dev_sync;
u32 trans_len;
u32 barker;
struct iwmct_dbg dbg;
/* drivers work items */
struct work_struct bus_rescan_worker;
struct work_struct isr_worker;
/* drivers wait queue */
wait_queue_head_t wait_q;
/* rx request list */
struct list_head read_req_list;
};
extern int iwmct_tx(struct iwmct_priv *priv, void *src, int count);
extern int iwmct_fw_load(struct iwmct_priv *priv);
extern void iwmct_dbg_init_params(struct iwmct_priv *drv);
extern void iwmct_dbg_init_drv_attrs(struct device_driver *drv);
extern void iwmct_dbg_remove_drv_attrs(struct device_driver *drv);
extern int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len);
#endif /* __IWMC3200TOP_H__ */

View File

@ -1,348 +0,0 @@
/*
* iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
* drivers/misc/iwmc3200top/log.c
*
* Copyright (C) 2009 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*
* Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
* -
*
*/
#include <linux/kernel.h>
#include <linux/mmc/sdio_func.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include "fw-msg.h"
#include "iwmc3200top.h"
#include "log.h"
/* Maximal hexadecimal string size of the FW memdump message */
#define LOG_MSG_SIZE_MAX 12400
/* iwmct_logdefs is a global used by log macros */
u8 iwmct_logdefs[LOG_SRC_MAX];
static u8 iwmct_fw_logdefs[FW_LOG_SRC_MAX];
static int _log_set_log_filter(u8 *logdefs, int size, u8 src, u8 logmask)
{
int i;
if (src < size)
logdefs[src] = logmask;
else if (src == LOG_SRC_ALL)
for (i = 0; i < size; i++)
logdefs[i] = logmask;
else
return -1;
return 0;
}
int iwmct_log_set_filter(u8 src, u8 logmask)
{
return _log_set_log_filter(iwmct_logdefs, LOG_SRC_MAX, src, logmask);
}
int iwmct_log_set_fw_filter(u8 src, u8 logmask)
{
return _log_set_log_filter(iwmct_fw_logdefs,
FW_LOG_SRC_MAX, src, logmask);
}
static int log_msg_format_hex(char *str, int slen, u8 *ibuf,
int ilen, char *pref)
{
int pos = 0;
int i;
int len;
for (pos = 0, i = 0; pos < slen - 2 && pref[i] != '\0'; i++, pos++)
str[pos] = pref[i];
for (i = 0; pos < slen - 2 && i < ilen; pos += len, i++)
len = snprintf(&str[pos], slen - pos - 1, " %2.2X", ibuf[i]);
if (i < ilen)
return -1;
return 0;
}
/* NOTE: This function is not thread safe.
Currently it's called only from sdio rx worker - no race there
*/
void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len)
{
struct top_msg *msg;
static char logbuf[LOG_MSG_SIZE_MAX];
msg = (struct top_msg *)buf;
if (len < sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr)) {
LOG_ERROR(priv, FW_MSG, "Log message from TOP "
"is too short %d (expected %zd)\n",
len, sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr));
return;
}
if (!(iwmct_fw_logdefs[msg->u.log.log_hdr.logsource] &
BIT(msg->u.log.log_hdr.severity)) ||
!(iwmct_logdefs[LOG_SRC_FW_MSG] & BIT(msg->u.log.log_hdr.severity)))
return;
switch (msg->hdr.category) {
case COMM_CATEGORY_TESTABILITY:
if (!(iwmct_logdefs[LOG_SRC_TST] &
BIT(msg->u.log.log_hdr.severity)))
return;
if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
le16_to_cpu(msg->hdr.length) +
sizeof(msg->hdr), "<TST>"))
LOG_WARNING(priv, TST,
"TOP TST message is too long, truncating...");
LOG_WARNING(priv, TST, "%s\n", logbuf);
break;
case COMM_CATEGORY_DEBUG:
if (msg->hdr.opcode == OP_DBG_ZSTR_MSG)
LOG_INFO(priv, FW_MSG, "%s %s", "<DBG>",
((u8 *)msg) + sizeof(msg->hdr)
+ sizeof(msg->u.log.log_hdr));
else {
if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
le16_to_cpu(msg->hdr.length)
+ sizeof(msg->hdr),
"<DBG>"))
LOG_WARNING(priv, FW_MSG,
"TOP DBG message is too long,"
"truncating...");
LOG_WARNING(priv, FW_MSG, "%s\n", logbuf);
}
break;
default:
break;
}
}
static int _log_get_filter_str(u8 *logdefs, int logdefsz, char *buf, int size)
{
int i, pos, len;
for (i = 0, pos = 0; (pos < size-1) && (i < logdefsz); i++) {
len = snprintf(&buf[pos], size - pos - 1, "0x%02X%02X,",
i, logdefs[i]);
pos += len;
}
buf[pos-1] = '\n';
buf[pos] = '\0';
if (i < logdefsz)
return -1;
return 0;
}
int log_get_filter_str(char *buf, int size)
{
return _log_get_filter_str(iwmct_logdefs, LOG_SRC_MAX, buf, size);
}
int log_get_fw_filter_str(char *buf, int size)
{
return _log_get_filter_str(iwmct_fw_logdefs, FW_LOG_SRC_MAX, buf, size);
}
#define HEXADECIMAL_RADIX 16
#define LOG_SRC_FORMAT 7 /* log level is in format of "0xXXXX," */
ssize_t show_iwmct_log_level(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwmct_priv *priv = dev_get_drvdata(d);
char *str_buf;
int buf_size;
ssize_t ret;
buf_size = (LOG_SRC_FORMAT * LOG_SRC_MAX) + 1;
str_buf = kzalloc(buf_size, GFP_KERNEL);
if (!str_buf) {
LOG_ERROR(priv, DEBUGFS,
"failed to allocate %d bytes\n", buf_size);
ret = -ENOMEM;
goto exit;
}
if (log_get_filter_str(str_buf, buf_size) < 0) {
ret = -EINVAL;
goto exit;
}
ret = sprintf(buf, "%s", str_buf);
exit:
kfree(str_buf);
return ret;
}
ssize_t store_iwmct_log_level(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct iwmct_priv *priv = dev_get_drvdata(d);
char *token, *str_buf = NULL;
long val;
ssize_t ret = count;
u8 src, mask;
if (!count)
goto exit;
str_buf = kzalloc(count, GFP_KERNEL);
if (!str_buf) {
LOG_ERROR(priv, DEBUGFS,
"failed to allocate %zd bytes\n", count);
ret = -ENOMEM;
goto exit;
}
memcpy(str_buf, buf, count);
while ((token = strsep(&str_buf, ",")) != NULL) {
while (isspace(*token))
++token;
if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) {
LOG_ERROR(priv, DEBUGFS,
"failed to convert string to long %s\n",
token);
ret = -EINVAL;
goto exit;
}
mask = val & 0xFF;
src = (val & 0XFF00) >> 8;
iwmct_log_set_filter(src, mask);
}
exit:
kfree(str_buf);
return ret;
}
ssize_t show_iwmct_log_level_fw(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwmct_priv *priv = dev_get_drvdata(d);
char *str_buf;
int buf_size;
ssize_t ret;
buf_size = (LOG_SRC_FORMAT * FW_LOG_SRC_MAX) + 2;
str_buf = kzalloc(buf_size, GFP_KERNEL);
if (!str_buf) {
LOG_ERROR(priv, DEBUGFS,
"failed to allocate %d bytes\n", buf_size);
ret = -ENOMEM;
goto exit;
}
if (log_get_fw_filter_str(str_buf, buf_size) < 0) {
ret = -EINVAL;
goto exit;
}
ret = sprintf(buf, "%s", str_buf);
exit:
kfree(str_buf);
return ret;
}
ssize_t store_iwmct_log_level_fw(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct iwmct_priv *priv = dev_get_drvdata(d);
struct top_msg cmd;
char *token, *str_buf = NULL;
ssize_t ret = count;
u16 cmdlen = 0;
int i;
long val;
u8 src, mask;
if (!count)
goto exit;
str_buf = kzalloc(count, GFP_KERNEL);
if (!str_buf) {
LOG_ERROR(priv, DEBUGFS,
"failed to allocate %zd bytes\n", count);
ret = -ENOMEM;
goto exit;
}
memcpy(str_buf, buf, count);
cmd.hdr.type = COMM_TYPE_H2D;
cmd.hdr.category = COMM_CATEGORY_DEBUG;
cmd.hdr.opcode = CMD_DBG_LOG_LEVEL;
for (i = 0; ((token = strsep(&str_buf, ",")) != NULL) &&
(i < FW_LOG_SRC_MAX); i++) {
while (isspace(*token))
++token;
if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) {
LOG_ERROR(priv, DEBUGFS,
"failed to convert string to long %s\n",
token);
ret = -EINVAL;
goto exit;
}
mask = val & 0xFF; /* LSB */
src = (val & 0XFF00) >> 8; /* 2nd least significant byte. */
iwmct_log_set_fw_filter(src, mask);
cmd.u.logdefs[i].logsource = src;
cmd.u.logdefs[i].sevmask = mask;
}
cmd.hdr.length = cpu_to_le16(i * sizeof(cmd.u.logdefs[0]));
cmdlen = (i * sizeof(cmd.u.logdefs[0]) + sizeof(cmd.hdr));
ret = iwmct_send_hcmd(priv, (u8 *)&cmd, cmdlen);
if (ret) {
LOG_ERROR(priv, DEBUGFS,
"Failed to send %d bytes of fwcmd, ret=%zd\n",
cmdlen, ret);
goto exit;
} else
LOG_INFO(priv, DEBUGFS, "fwcmd sent (%d bytes)\n", cmdlen);
ret = count;
exit:
kfree(str_buf);
return ret;
}
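Both store handlers above parse a comma-separated list of hexadecimal words in which the high byte selects the log source and the low byte carries the severity bitmask. A small sketch of that packing, with hypothetical helper names, mirroring the `val & 0xFF` / `(val & 0XFF00) >> 8` split used above:

/* Illustrative only: pack/unpack the "0xSSMM" word accepted by the
 * log_level and log_level_fw attributes (SS = source, MM = severity mask). */
static inline u16 pack_loglevel(u8 src, u8 sevmask)
{
	return (src << 8) | sevmask;
}

static inline void unpack_loglevel(u16 val, u8 *src, u8 *sevmask)
{
	*sevmask = val & 0xFF;		/* LSB: severity bitmask */
	*src = (val & 0xFF00) >> 8;	/* next byte: log source */
}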

View File

@ -1,171 +0,0 @@
/*
* iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
* drivers/misc/iwmc3200top/log.h
*
* Copyright (C) 2009 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*
* Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
* -
*
*/
#ifndef __LOG_H__
#define __LOG_H__
/* log severity:
* The log levels here match FW log levels
* so values need to stay as is */
#define LOG_SEV_CRITICAL 0
#define LOG_SEV_ERROR 1
#define LOG_SEV_WARNING 2
#define LOG_SEV_INFO 3
#define LOG_SEV_INFOEX 4
/* Log levels not defined for FW */
#define LOG_SEV_TRACE 5
#define LOG_SEV_DUMP 6
#define LOG_SEV_FW_FILTER_ALL \
(BIT(LOG_SEV_CRITICAL) | \
BIT(LOG_SEV_ERROR) | \
BIT(LOG_SEV_WARNING) | \
BIT(LOG_SEV_INFO) | \
BIT(LOG_SEV_INFOEX))
#define LOG_SEV_FILTER_ALL \
(BIT(LOG_SEV_CRITICAL) | \
BIT(LOG_SEV_ERROR) | \
BIT(LOG_SEV_WARNING) | \
BIT(LOG_SEV_INFO) | \
BIT(LOG_SEV_INFOEX) | \
BIT(LOG_SEV_TRACE) | \
BIT(LOG_SEV_DUMP))
/* log source */
#define LOG_SRC_INIT 0
#define LOG_SRC_DEBUGFS 1
#define LOG_SRC_FW_DOWNLOAD 2
#define LOG_SRC_FW_MSG 3
#define LOG_SRC_TST 4
#define LOG_SRC_IRQ 5
#define LOG_SRC_MAX 6
#define LOG_SRC_ALL 0xFF
/**
* Default initialization runtime log level
*/
#ifndef LOG_SEV_FILTER_RUNTIME
#define LOG_SEV_FILTER_RUNTIME \
(BIT(LOG_SEV_CRITICAL) | \
BIT(LOG_SEV_ERROR) | \
BIT(LOG_SEV_WARNING))
#endif
#ifndef FW_LOG_SEV_FILTER_RUNTIME
#define FW_LOG_SEV_FILTER_RUNTIME LOG_SEV_FILTER_ALL
#endif
#ifdef CONFIG_IWMC3200TOP_DEBUG
/**
* Log macros
*/
#define priv2dev(priv) (&(priv->func)->dev)
#define LOG_CRITICAL(priv, src, fmt, args...) \
do { \
if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_CRITICAL)) \
dev_crit(priv2dev(priv), "%s %d: " fmt, \
__func__, __LINE__, ##args); \
} while (0)
#define LOG_ERROR(priv, src, fmt, args...) \
do { \
if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_ERROR)) \
dev_err(priv2dev(priv), "%s %d: " fmt, \
__func__, __LINE__, ##args); \
} while (0)
#define LOG_WARNING(priv, src, fmt, args...) \
do { \
if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_WARNING)) \
dev_warn(priv2dev(priv), "%s %d: " fmt, \
__func__, __LINE__, ##args); \
} while (0)
#define LOG_INFO(priv, src, fmt, args...) \
do { \
if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFO)) \
dev_info(priv2dev(priv), "%s %d: " fmt, \
__func__, __LINE__, ##args); \
} while (0)
#define LOG_TRACE(priv, src, fmt, args...) \
do { \
if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_TRACE)) \
dev_dbg(priv2dev(priv), "%s %d: " fmt, \
__func__, __LINE__, ##args); \
} while (0)
#define LOG_HEXDUMP(src, ptr, len) \
do { \
if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_DUMP)) \
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, \
16, 1, ptr, len, false); \
} while (0)
void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len);
extern u8 iwmct_logdefs[];
int iwmct_log_set_filter(u8 src, u8 logmask);
int iwmct_log_set_fw_filter(u8 src, u8 logmask);
ssize_t show_iwmct_log_level(struct device *d,
struct device_attribute *attr, char *buf);
ssize_t store_iwmct_log_level(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count);
ssize_t show_iwmct_log_level_fw(struct device *d,
struct device_attribute *attr, char *buf);
ssize_t store_iwmct_log_level_fw(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count);
#else
#define LOG_CRITICAL(priv, src, fmt, args...)
#define LOG_ERROR(priv, src, fmt, args...)
#define LOG_WARNING(priv, src, fmt, args...)
#define LOG_INFO(priv, src, fmt, args...)
#define LOG_TRACE(priv, src, fmt, args...)
#define LOG_HEXDUMP(src, ptr, len)
static inline void iwmct_log_top_message(struct iwmct_priv *priv,
u8 *buf, int len) {}
static inline int iwmct_log_set_filter(u8 src, u8 logmask) { return 0; }
static inline int iwmct_log_set_fw_filter(u8 src, u8 logmask) { return 0; }
#endif /* CONFIG_IWMC3200TOP_DEBUG */
int log_get_filter_str(char *buf, int size);
int log_get_fw_filter_str(char *buf, int size);
#endif /* __LOG_H__ */
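As a usage sketch (not taken from the driver), a caller that only wants critical and error messages from the IRQ source, or the module's default runtime filter for every source, would combine the definitions above like this:

/* Illustrative only: install log filters built from the macros above. */
iwmct_log_set_filter(LOG_SRC_IRQ,
		     BIT(LOG_SEV_CRITICAL) | BIT(LOG_SEV_ERROR));
iwmct_log_set_filter(LOG_SRC_ALL, LOG_SEV_FILTER_RUNTIME);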

View File

@ -1,662 +0,0 @@
/*
* iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
* drivers/misc/iwmc3200top/main.c
*
* Copyright (C) 2009 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*
* Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
* -
*
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio.h>
#include "iwmc3200top.h"
#include "log.h"
#include "fw-msg.h"
#include "debugfs.h"
#define DRIVER_DESCRIPTION "Intel(R) IWMC 3200 Top Driver"
#define DRIVER_COPYRIGHT "Copyright (c) 2008 Intel Corporation."
#define DRIVER_VERSION "0.1.62"
MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRIVER_COPYRIGHT);
MODULE_FIRMWARE(FW_NAME(FW_API_VER));
static inline int __iwmct_tx(struct iwmct_priv *priv, void *src, int count)
{
return sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, src, count);
}
int iwmct_tx(struct iwmct_priv *priv, void *src, int count)
{
int ret;
sdio_claim_host(priv->func);
ret = __iwmct_tx(priv, src, count);
sdio_release_host(priv->func);
return ret;
}
/*
* This worker's main task is to wait for OP_OPR_ALIVE
* from the TOP FW until the ALIVE_MSG_TIMOUT timeout elapses.
* When OP_OPR_ALIVE is received, it issues
* a call to "bus_rescan_devices".
*/
static void iwmct_rescan_worker(struct work_struct *ws)
{
struct iwmct_priv *priv;
int ret;
priv = container_of(ws, struct iwmct_priv, bus_rescan_worker);
LOG_INFO(priv, FW_MSG, "Calling bus_rescan\n");
ret = bus_rescan_devices(priv->func->dev.bus);
if (ret < 0)
LOG_INFO(priv, INIT, "bus_rescan_devices FAILED!!!\n");
}
static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg)
{
switch (msg->hdr.opcode) {
case OP_OPR_ALIVE:
LOG_INFO(priv, FW_MSG, "Got ALIVE from device, wake rescan\n");
schedule_work(&priv->bus_rescan_worker);
break;
default:
LOG_INFO(priv, FW_MSG, "Received msg opcode 0x%X\n",
msg->hdr.opcode);
break;
}
}
static void handle_top_message(struct iwmct_priv *priv, u8 *buf, int len)
{
struct top_msg *msg;
msg = (struct top_msg *)buf;
if (msg->hdr.type != COMM_TYPE_D2H) {
LOG_ERROR(priv, FW_MSG,
"Message from TOP with invalid message type 0x%X\n",
msg->hdr.type);
return;
}
if (len < sizeof(msg->hdr)) {
LOG_ERROR(priv, FW_MSG,
"Message from TOP is too short for message header "
"received %d bytes, expected at least %zd bytes\n",
len, sizeof(msg->hdr));
return;
}
if (len < le16_to_cpu(msg->hdr.length) + sizeof(msg->hdr)) {
LOG_ERROR(priv, FW_MSG,
"Message length (%d bytes) is shorter than "
"in header (%d bytes)\n",
len, le16_to_cpu(msg->hdr.length));
return;
}
switch (msg->hdr.category) {
case COMM_CATEGORY_OPERATIONAL:
op_top_message(priv, (struct top_msg *)buf);
break;
case COMM_CATEGORY_DEBUG:
case COMM_CATEGORY_TESTABILITY:
case COMM_CATEGORY_DIAGNOSTICS:
iwmct_log_top_message(priv, buf, len);
break;
default:
LOG_ERROR(priv, FW_MSG,
"Message from TOP with unknown category 0x%X\n",
msg->hdr.category);
break;
}
}
int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len)
{
int ret;
u8 *buf;
LOG_TRACE(priv, FW_MSG, "Sending hcmd:\n");
/* add padding to 256 for IWMC */
((struct top_msg *)cmd)->hdr.flags |= CMD_FLAG_PADDING_256;
LOG_HEXDUMP(FW_MSG, cmd, len);
if (len > FW_HCMD_BLOCK_SIZE) {
LOG_ERROR(priv, FW_MSG, "size %d exceeded hcmd max size %d\n",
len, FW_HCMD_BLOCK_SIZE);
return -1;
}
buf = kzalloc(FW_HCMD_BLOCK_SIZE, GFP_KERNEL);
if (!buf) {
LOG_ERROR(priv, FW_MSG, "kzalloc error, buf size %d\n",
FW_HCMD_BLOCK_SIZE);
return -1;
}
memcpy(buf, cmd, len);
ret = iwmct_tx(priv, buf, FW_HCMD_BLOCK_SIZE);
kfree(buf);
return ret;
}
static void iwmct_irq_read_worker(struct work_struct *ws)
{
struct iwmct_priv *priv;
struct iwmct_work_struct *read_req;
__le32 *buf = NULL;
int ret;
int iosize;
u32 barker;
bool is_barker;
priv = container_of(ws, struct iwmct_priv, isr_worker);
LOG_TRACE(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws);
/* --------------------- Handshake with device -------------------- */
sdio_claim_host(priv->func);
/* all list manipulations have to be protected by
* sdio_claim_host/sdio_release_host */
if (list_empty(&priv->read_req_list)) {
LOG_ERROR(priv, IRQ, "read_req_list empty in read worker\n");
goto exit_release;
}
read_req = list_entry(priv->read_req_list.next,
struct iwmct_work_struct, list);
list_del(&read_req->list);
iosize = read_req->iosize;
kfree(read_req);
buf = kzalloc(iosize, GFP_KERNEL);
if (!buf) {
LOG_ERROR(priv, IRQ, "kzalloc error, buf size %d\n", iosize);
goto exit_release;
}
LOG_INFO(priv, IRQ, "iosize=%d, buf=%p, func=%d\n",
iosize, buf, priv->func->num);
/* read from device */
ret = sdio_memcpy_fromio(priv->func, buf, IWMC_SDIO_DATA_ADDR, iosize);
if (ret) {
LOG_ERROR(priv, IRQ, "error %d reading buffer\n", ret);
goto exit_release;
}
LOG_HEXDUMP(IRQ, (u8 *)buf, iosize);
barker = le32_to_cpu(buf[0]);
/* Verify whether it's a barker and if not - treat as regular Rx */
if (barker == IWMC_BARKER_ACK ||
(barker & BARKER_DNLOAD_BARKER_MSK) == IWMC_BARKER_REBOOT) {
/* Valid Barker is equal on first 4 dwords */
is_barker = (buf[1] == buf[0]) &&
(buf[2] == buf[0]) &&
(buf[3] == buf[0]);
if (!is_barker) {
LOG_WARNING(priv, IRQ,
"Potentially inconsistent barker "
"%08X_%08X_%08X_%08X\n",
le32_to_cpu(buf[0]), le32_to_cpu(buf[1]),
le32_to_cpu(buf[2]), le32_to_cpu(buf[3]));
}
} else {
is_barker = false;
}
/* Handle Top CommHub message */
if (!is_barker) {
sdio_release_host(priv->func);
handle_top_message(priv, (u8 *)buf, iosize);
goto exit;
} else if (barker == IWMC_BARKER_ACK) { /* Handle barkers */
if (atomic_read(&priv->dev_sync) == 0) {
LOG_ERROR(priv, IRQ,
"ACK barker arrived out-of-sync\n");
goto exit_release;
}
/* Continuing to FW download (after Sync is completed)*/
atomic_set(&priv->dev_sync, 0);
LOG_INFO(priv, IRQ, "ACK barker arrived "
"- starting FW download\n");
} else { /* REBOOT barker */
LOG_INFO(priv, IRQ, "Received reboot barker: %x\n", barker);
priv->barker = barker;
if (barker & BARKER_DNLOAD_SYNC_MSK) {
/* Send the same barker back */
ret = __iwmct_tx(priv, buf, iosize);
if (ret) {
LOG_ERROR(priv, IRQ,
"error %d echoing barker\n", ret);
goto exit_release;
}
LOG_INFO(priv, IRQ, "Echoing barker to device\n");
atomic_set(&priv->dev_sync, 1);
goto exit_release;
}
/* Continuing to FW download (without Sync) */
LOG_INFO(priv, IRQ, "No sync requested "
"- starting FW download\n");
}
sdio_release_host(priv->func);
if (priv->dbg.fw_download)
iwmct_fw_load(priv);
else
LOG_ERROR(priv, IRQ, "FW download not allowed\n");
goto exit;
exit_release:
sdio_release_host(priv->func);
exit:
kfree(buf);
LOG_TRACE(priv, IRQ, "exit iwmct_irq_read_worker\n");
}
static void iwmct_irq(struct sdio_func *func)
{
struct iwmct_priv *priv;
int val, ret;
int iosize;
int addr = IWMC_SDIO_INTR_GET_SIZE_ADDR;
struct iwmct_work_struct *read_req;
priv = sdio_get_drvdata(func);
LOG_TRACE(priv, IRQ, "enter iwmct_irq\n");
/* read the function's status register */
val = sdio_readb(func, IWMC_SDIO_INTR_STATUS_ADDR, &ret);
LOG_TRACE(priv, IRQ, "iir value = %d, ret=%d\n", val, ret);
if (!val) {
LOG_ERROR(priv, IRQ, "iir = 0, exiting ISR\n");
goto exit_clear_intr;
}
/*
* read 2 bytes of the transaction size
* IMPORTANT: sdio transaction size has to be read before clearing
* sdio interrupt!!!
*/
val = sdio_readb(priv->func, addr++, &ret);
iosize = val;
val = sdio_readb(priv->func, addr++, &ret);
iosize += val << 8;
LOG_INFO(priv, IRQ, "READ size %d\n", iosize);
if (iosize == 0) {
LOG_ERROR(priv, IRQ, "READ size %d, exiting ISR\n", iosize);
goto exit_clear_intr;
}
/* allocate a work structure to pass iosize to the worker */
read_req = kzalloc(sizeof(struct iwmct_work_struct), GFP_KERNEL);
if (!read_req) {
LOG_ERROR(priv, IRQ, "failed to allocate read_req, exit ISR\n");
goto exit_clear_intr;
}
INIT_LIST_HEAD(&read_req->list);
read_req->iosize = iosize;
list_add_tail(&read_req->list, &priv->read_req_list);
/* clear the function's interrupt request bit (write 1 to clear) */
sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
schedule_work(&priv->isr_worker);
LOG_TRACE(priv, IRQ, "exit iwmct_irq\n");
return;
exit_clear_intr:
/* clear the function's interrupt request bit (write 1 to clear) */
sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
}
static int blocks;
module_param(blocks, int, 0604);
MODULE_PARM_DESC(blocks, "max_blocks_to_send");
static bool dump;
module_param(dump, bool, 0604);
MODULE_PARM_DESC(dump, "dump_hex_content");
static bool jump = 1;
module_param(jump, bool, 0604);
static bool direct = 1;
module_param(direct, bool, 0604);
static bool checksum = 1;
module_param(checksum, bool, 0604);
static bool fw_download = 1;
module_param(fw_download, bool, 0604);
static int block_size = IWMC_SDIO_BLK_SIZE;
module_param(block_size, int, 0404);
static int download_trans_blks = IWMC_DEFAULT_TR_BLK;
module_param(download_trans_blks, int, 0604);
static bool rubbish_barker;
module_param(rubbish_barker, bool, 0604);
#ifdef CONFIG_IWMC3200TOP_DEBUG
static int log_level[LOG_SRC_MAX];
static unsigned int log_level_argc;
module_param_array(log_level, int, &log_level_argc, 0604);
MODULE_PARM_DESC(log_level, "log_level");
static int log_level_fw[FW_LOG_SRC_MAX];
static unsigned int log_level_fw_argc;
module_param_array(log_level_fw, int, &log_level_fw_argc, 0604);
MODULE_PARM_DESC(log_level_fw, "log_level_fw");
#endif
void iwmct_dbg_init_params(struct iwmct_priv *priv)
{
#ifdef CONFIG_IWMC3200TOP_DEBUG
int i;
for (i = 0; i < log_level_argc; i++) {
dev_notice(&priv->func->dev, "log_level[%d]=0x%X\n",
i, log_level[i]);
iwmct_log_set_filter((log_level[i] >> 8) & 0xFF,
log_level[i] & 0xFF);
}
for (i = 0; i < log_level_fw_argc; i++) {
dev_notice(&priv->func->dev, "log_level_fw[%d]=0x%X\n",
i, log_level_fw[i]);
iwmct_log_set_fw_filter((log_level_fw[i] >> 8) & 0xFF,
log_level_fw[i] & 0xFF);
}
#endif
priv->dbg.blocks = blocks;
LOG_INFO(priv, INIT, "blocks=%d\n", blocks);
priv->dbg.dump = (bool)dump;
LOG_INFO(priv, INIT, "dump=%d\n", dump);
priv->dbg.jump = (bool)jump;
LOG_INFO(priv, INIT, "jump=%d\n", jump);
priv->dbg.direct = (bool)direct;
LOG_INFO(priv, INIT, "direct=%d\n", direct);
priv->dbg.checksum = (bool)checksum;
LOG_INFO(priv, INIT, "checksum=%d\n", checksum);
priv->dbg.fw_download = (bool)fw_download;
LOG_INFO(priv, INIT, "fw_download=%d\n", fw_download);
priv->dbg.block_size = block_size;
LOG_INFO(priv, INIT, "block_size=%d\n", block_size);
priv->dbg.download_trans_blks = download_trans_blks;
LOG_INFO(priv, INIT, "download_trans_blks=%d\n", download_trans_blks);
}
/*****************************************************************************
*
* sysfs attributes
*
*****************************************************************************/
static ssize_t show_iwmct_fw_version(struct device *d,
struct device_attribute *attr, char *buf)
{
struct iwmct_priv *priv = dev_get_drvdata(d);
return sprintf(buf, "%s\n", priv->dbg.label_fw);
}
static DEVICE_ATTR(cc_label_fw, S_IRUGO, show_iwmct_fw_version, NULL);
#ifdef CONFIG_IWMC3200TOP_DEBUG
static DEVICE_ATTR(log_level, S_IWUSR | S_IRUGO,
show_iwmct_log_level, store_iwmct_log_level);
static DEVICE_ATTR(log_level_fw, S_IWUSR | S_IRUGO,
show_iwmct_log_level_fw, store_iwmct_log_level_fw);
#endif
static struct attribute *iwmct_sysfs_entries[] = {
&dev_attr_cc_label_fw.attr,
#ifdef CONFIG_IWMC3200TOP_DEBUG
&dev_attr_log_level.attr,
&dev_attr_log_level_fw.attr,
#endif
NULL
};
static struct attribute_group iwmct_attribute_group = {
.name = NULL, /* put in device directory */
.attrs = iwmct_sysfs_entries,
};
static int iwmct_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
struct iwmct_priv *priv;
int ret;
int val = 1;
int addr = IWMC_SDIO_INTR_ENABLE_ADDR;
dev_dbg(&func->dev, "enter iwmct_probe\n");
dev_dbg(&func->dev, "IRQ polling period id %u msecs, HZ is %d\n",
jiffies_to_msecs(2147483647), HZ);
priv = kzalloc(sizeof(struct iwmct_priv), GFP_KERNEL);
if (!priv) {
dev_err(&func->dev, "kzalloc error\n");
return -ENOMEM;
}
priv->func = func;
sdio_set_drvdata(func, priv);
INIT_WORK(&priv->bus_rescan_worker, iwmct_rescan_worker);
INIT_WORK(&priv->isr_worker, iwmct_irq_read_worker);
init_waitqueue_head(&priv->wait_q);
sdio_claim_host(func);
/* FIXME: Remove after it is fixed in the Boot ROM upgrade */
func->enable_timeout = 10;
/* In our HW, setting the block size also wakes up the boot rom. */
ret = sdio_set_block_size(func, priv->dbg.block_size);
if (ret) {
LOG_ERROR(priv, INIT,
"sdio_set_block_size() failure: %d\n", ret);
goto error_sdio_enable;
}
ret = sdio_enable_func(func);
if (ret) {
LOG_ERROR(priv, INIT, "sdio_enable_func() failure: %d\n", ret);
goto error_sdio_enable;
}
/* init reset and dev_sync states */
atomic_set(&priv->reset, 0);
atomic_set(&priv->dev_sync, 0);
/* init read req queue */
INIT_LIST_HEAD(&priv->read_req_list);
/* process configurable parameters */
iwmct_dbg_init_params(priv);
ret = sysfs_create_group(&func->dev.kobj, &iwmct_attribute_group);
if (ret) {
LOG_ERROR(priv, INIT, "Failed to register attributes and "
"initialize module_params\n");
goto error_dev_attrs;
}
iwmct_dbgfs_register(priv, DRV_NAME);
if (!priv->dbg.direct && priv->dbg.download_trans_blks > 8) {
LOG_INFO(priv, INIT,
"Reducing transaction to 8 blocks = 2K (from %d)\n",
priv->dbg.download_trans_blks);
priv->dbg.download_trans_blks = 8;
}
priv->trans_len = priv->dbg.download_trans_blks * priv->dbg.block_size;
LOG_INFO(priv, INIT, "Transaction length = %d\n", priv->trans_len);
ret = sdio_claim_irq(func, iwmct_irq);
if (ret) {
LOG_ERROR(priv, INIT, "sdio_claim_irq() failure: %d\n", ret);
goto error_claim_irq;
}
/* Enable function's interrupt */
sdio_writeb(priv->func, val, addr, &ret);
if (ret) {
LOG_ERROR(priv, INIT, "Failure writing to "
"Interrupt Enable Register (%d): %d\n", addr, ret);
goto error_enable_int;
}
sdio_release_host(func);
LOG_INFO(priv, INIT, "exit iwmct_probe\n");
return ret;
error_enable_int:
sdio_release_irq(func);
error_claim_irq:
sdio_disable_func(func);
error_dev_attrs:
iwmct_dbgfs_unregister(priv->dbgfs);
sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group);
error_sdio_enable:
sdio_release_host(func);
return ret;
}
static void iwmct_remove(struct sdio_func *func)
{
struct iwmct_work_struct *read_req;
struct iwmct_priv *priv = sdio_get_drvdata(func);
LOG_INFO(priv, INIT, "enter\n");
sdio_claim_host(func);
sdio_release_irq(func);
sdio_release_host(func);
/* Make sure works are finished */
flush_work_sync(&priv->bus_rescan_worker);
flush_work_sync(&priv->isr_worker);
sdio_claim_host(func);
sdio_disable_func(func);
sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group);
iwmct_dbgfs_unregister(priv->dbgfs);
sdio_release_host(func);
/* free read requests */
while (!list_empty(&priv->read_req_list)) {
read_req = list_entry(priv->read_req_list.next,
struct iwmct_work_struct, list);
list_del(&read_req->list);
kfree(read_req);
}
kfree(priv);
}
static const struct sdio_device_id iwmct_ids[] = {
/* Intel Wireless MultiCom 3200 Top Driver */
{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1404)},
{ }, /* Terminating entry */
};
MODULE_DEVICE_TABLE(sdio, iwmct_ids);
static struct sdio_driver iwmct_driver = {
.probe = iwmct_probe,
.remove = iwmct_remove,
.name = DRV_NAME,
.id_table = iwmct_ids,
};
static int __init iwmct_init(void)
{
int rc;
/* Default log filter settings */
iwmct_log_set_filter(LOG_SRC_ALL, LOG_SEV_FILTER_RUNTIME);
iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FW_FILTER_ALL);
iwmct_log_set_fw_filter(LOG_SRC_ALL, FW_LOG_SEV_FILTER_RUNTIME);
rc = sdio_register_driver(&iwmct_driver);
return rc;
}
static void __exit iwmct_exit(void)
{
sdio_unregister_driver(&iwmct_driver);
}
module_init(iwmct_init);
module_exit(iwmct_exit);
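One detail worth calling out from iwmct_irq() above: the two-byte transaction size must be read before the interrupt is cleared, and it is assembled low byte first. A condensed sketch of just that step, under the same register layout:

/* Illustrative only: read the pending transfer size, LSB first, before
 * clearing the interrupt (clearing it first would lose the size). */
int addr = IWMC_SDIO_INTR_GET_SIZE_ADDR;
int iosize = sdio_readb(priv->func, addr++, &ret);
iosize += sdio_readb(priv->func, addr++, &ret) << 8;
/* e.g. register bytes 0x00, 0x02 give iosize = 512 */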

View File

@ -7,9 +7,6 @@ config WIMAX_I2400M
comment "Enable USB support to see WiMAX USB drivers"
depends on USB = n
comment "Enable MMC support to see WiMAX SDIO drivers"
depends on MMC = n
config WIMAX_I2400M_USB
tristate "Intel Wireless WiMAX Connection 2400 over USB (including 5x50)"
depends on WIMAX && USB
@ -21,25 +18,6 @@ config WIMAX_I2400M_USB
If unsure, it is safe to select M (module).
config WIMAX_I2400M_SDIO
tristate "Intel Wireless WiMAX Connection 2400 over SDIO"
depends on WIMAX && MMC
select WIMAX_I2400M
help
Select if you have a device based on the Intel WiMAX
Connection 2400 over SDIO.
If unsure, it is safe to select M (module).
config WIMAX_IWMC3200_SDIO
bool "Intel Wireless Multicom WiMAX Connection 3200 over SDIO (EXPERIMENTAL)"
depends on WIMAX_I2400M_SDIO
depends on EXPERIMENTAL
select IWMC3200TOP
help
Select if you have a device based on the Intel Multicom WiMAX
Connection 3200 over SDIO.
config WIMAX_I2400M_DEBUG_LEVEL
int "WiMAX i2400m debug level"
depends on WIMAX_I2400M

View File

@ -1,7 +1,6 @@
obj-$(CONFIG_WIMAX_I2400M) += i2400m.o
obj-$(CONFIG_WIMAX_I2400M_USB) += i2400m-usb.o
obj-$(CONFIG_WIMAX_I2400M_SDIO) += i2400m-sdio.o
i2400m-y := \
control.o \
@ -21,10 +20,3 @@ i2400m-usb-y := \
usb-tx.o \
usb-rx.o \
usb.o
i2400m-sdio-y := \
sdio.o \
sdio-tx.o \
sdio-fw.o \
sdio-rx.o

View File

@ -754,8 +754,7 @@ EXPORT_SYMBOL_GPL(i2400m_error_recovery);
/*
* Alloc the command and ack buffers for boot mode
*
* Get the buffers needed to deal with boot mode messages. These
* buffers need to be allocated before the sdio receive irq is setup.
* Get the buffers needed to deal with boot mode messages.
*/
static
int i2400m_bm_buf_alloc(struct i2400m *i2400m)

View File

@ -51,8 +51,7 @@
* firmware. Normal hardware takes only signed firmware.
*
* In boot mode, in USB, we write to the device using the bulk out
* endpoint and read from it in the notification endpoint. In SDIO we
* talk to it via the write address and read from the read address.
* endpoint and read from it in the notification endpoint.
*
* Upon entrance to boot mode, the device sends (preceded with a few
* zero length packets (ZLPs) on the notification endpoint in USB) a

View File

@ -1,157 +0,0 @@
/*
* Intel Wireless WiMAX Connection 2400m
* SDIO-specific i2400m driver definitions
*
*
* Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* Intel Corporation <linux-wimax@intel.com>
* Brian Bian <brian.bian@intel.com>
* Dirk Brandewie <dirk.j.brandewie@intel.com>
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
* Yanir Lubetkin <yanirx.lubetkin@intel.com>
* - Initial implementation
*
*
* This driver implements the bus-specific part of the i2400m for
* SDIO. Check i2400m.h for a generic driver description.
*
* ARCHITECTURE
*
* This driver sits under the bus-generic i2400m driver, providing the
* connection to the device.
*
* When probed, all the function pointers are setup and then the
* bus-generic code called. The generic driver will then use the
* provided pointers for uploading firmware (i2400ms_bus_bm*() in
* sdio-fw.c) and then setting up the device (i2400ms_dev_*() in
* sdio.c).
*
* Once firmware is uploaded, TX functions (sdio-tx.c) are called when
* data is ready for transmission in the TX FIFO; when the SDIO IRQ
* fires and data is available (sdio-rx.c), it is passed to the generic
* driver for processing with i2400m_rx().
*/
#ifndef __I2400M_SDIO_H__
#define __I2400M_SDIO_H__
#include "i2400m.h"
/* Host-Device interface for SDIO */
enum {
I2400M_SDIO_BOOT_RETRIES = 3,
I2400MS_BLK_SIZE = 256,
I2400MS_PL_SIZE_MAX = 0x3E00,
I2400MS_DATA_ADDR = 0x0,
I2400MS_INTR_STATUS_ADDR = 0x13,
I2400MS_INTR_CLEAR_ADDR = 0x13,
I2400MS_INTR_ENABLE_ADDR = 0x14,
I2400MS_INTR_GET_SIZE_ADDR = 0x2C,
/* The number of ticks to wait for the device to signal that
* it is ready */
I2400MS_INIT_SLEEP_INTERVAL = 100,
/* How long to wait for the device to settle after reset */
I2400MS_SETTLE_TIME = 40,
/* The number of msec to wait for IOR after sending IOE */
IWMC3200_IOR_TIMEOUT = 10,
};
/**
* struct i2400ms - descriptor for a SDIO connected i2400m
*
* @i2400m: bus-generic i2400m implementation; has to be first (see
* it's documentation in i2400m.h).
*
* @func: pointer to our SDIO function
*
* @tx_worker: workqueue struct used to TX data when the bus-generic
* code signals packets are pending for transmission to the device.
*
* @tx_workqueue: workqueue used for data TX; we don't use the
* system's workqueue as that might cause deadlocks with code in
* the bus-generic driver. The read/write operation to the queue
* is protected with a spinlock (tx_lock in struct i2400m) to avoid
* the queue being destroyed in the middle of a queue read/write
* operation.
*
* @debugfs_dentry: dentry for the SDIO specific debugfs files
*
* Note this value is set to NULL upon destruction; this is
* because some routines use it to determine if we are inside the
* probe() path or some other path. When debugfs is disabled,
* creation sets the dentry to '(void*) -ENODEV', which is valid
* for the test.
*/
struct i2400ms {
struct i2400m i2400m; /* FIRST! See doc */
struct sdio_func *func;
struct work_struct tx_worker;
struct workqueue_struct *tx_workqueue;
char tx_wq_name[32];
struct dentry *debugfs_dentry;
wait_queue_head_t bm_wfa_wq;
int bm_wait_result;
size_t bm_ack_size;
/* Device is any of the iwmc3200 SKUs */
unsigned iwmc3200:1;
};
static inline
void i2400ms_init(struct i2400ms *i2400ms)
{
i2400m_init(&i2400ms->i2400m);
}
extern int i2400ms_rx_setup(struct i2400ms *);
extern void i2400ms_rx_release(struct i2400ms *);
extern int i2400ms_tx_setup(struct i2400ms *);
extern void i2400ms_tx_release(struct i2400ms *);
extern void i2400ms_bus_tx_kick(struct i2400m *);
extern ssize_t i2400ms_bus_bm_cmd_send(struct i2400m *,
const struct i2400m_bootrom_header *,
size_t, int);
extern ssize_t i2400ms_bus_bm_wait_for_ack(struct i2400m *,
struct i2400m_bootrom_header *,
size_t);
extern void i2400ms_bus_bm_release(struct i2400m *);
extern int i2400ms_bus_bm_setup(struct i2400m *);
#endif /* #ifndef __I2400M_SDIO_H__ */

View File

@ -46,7 +46,7 @@
* - bus generic driver (this part)
*
* The bus specific driver sets up stuff specific to the bus the
* device is connected to (USB, SDIO, PCI, tam-tam...non-authoritative
* device is connected to (USB, PCI, tam-tam...non-authoritative
* nor binding list) which is basically the device-model management
* (probe/disconnect, etc), moving data from device to kernel and
* back, doing the power saving details and resetting the device.
@ -238,14 +238,13 @@ struct i2400m_barker_db;
* amount needed for loading firmware, where us dev_start/stop setup
* the rest needed to do full data/control traffic.
*
* @bus_tx_block_size: [fill] SDIO imposes a 256 block size, USB 16,
* so we have a tx_blk_size variable that the bus layer sets to
* tell the engine how much of that we need.
* @bus_tx_block_size: [fill] USB imposes a 16 block size, but other
* busses will differ. So we have a tx_blk_size variable that the
* bus layer sets to tell the engine how much of that we need.
*
* @bus_tx_room_min: [fill] Minimum room required while allocating
* TX queue's buffer space for message header. SDIO requires
* 224 bytes and USB 16 bytes. Refer bus specific driver code
* for details.
* TX queue's buffer space for message header. USB requires
* 16 bytes. Refer to bus specific driver code for details.
*
* @bus_pl_size_max: [fill] Maximum payload size.
*

View File

@ -1,22 +0,0 @@
/*
* debug levels control file for the i2400m module's
*/
#ifndef __debug_levels__h__
#define __debug_levels__h__
/* Maximum compile and run time debug level for all submodules */
#define D_MODULENAME i2400m_sdio
#define D_MASTER CONFIG_WIMAX_I2400M_DEBUG_LEVEL
#include <linux/wimax/debug.h>
/* List of all the enabled modules */
enum d_module {
D_SUBMODULE_DECLARE(main),
D_SUBMODULE_DECLARE(tx),
D_SUBMODULE_DECLARE(rx),
D_SUBMODULE_DECLARE(fw)
};
#endif /* #ifndef __debug_levels__h__ */

View File

@ -1,210 +0,0 @@
/*
* Intel Wireless WiMAX Connection 2400m
* Firmware uploader's SDIO specifics
*
*
* Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* Intel Corporation <linux-wimax@intel.com>
* Yanir Lubetkin <yanirx.lubetkin@intel.com>
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
* - Initial implementation
*
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
* - Bus generic/specific split for USB
*
* Dirk Brandewie <dirk.j.brandewie@intel.com>
* - Initial implementation for SDIO
*
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
* - SDIO rehash for changes in the bus-driver model
*
* Dirk Brandewie <dirk.j.brandewie@intel.com>
* - Make it IRQ based, not polling
*
* THE PROCEDURE
*
* See fw.c for the generic description of this procedure.
*
* This file implements only the SDIO specifics. It boils down to how
* to send a command and waiting for an acknowledgement from the
* device.
*
* All this code is sequential -- all i2400ms_bus_bm_*() functions are
* executed in the same thread, except i2400ms_bm_irq() [on its own by
* the SDIO driver]. This makes it possible to avoid locking.
*
* COMMAND EXECUTION
*
* The generic firmware upload code will call i2400m_bus_bm_cmd_send()
* to send commands.
*
* The SDIO device expects things in 256-byte blocks, so it will pad
* it, compute the checksum (if needed) and pass it to SDIO.
*
* ACK RECEPTION
*
* This works in IRQ mode -- the fw loader says when to wait for data
* and for that it calls i2400ms_bus_bm_wait_for_ack().
*
* This checks if there is any data available (RX size > 0); if not,
* waits for the IRQ handler to notify about it. Once there is data,
* it is read and passed to the caller. Doing it this way we don't
* need much coordination/locking, and it makes it much more difficult
* for an interrupt to be lost and the wait_for_ack() function getting
* stuck even when data is pending.
*/
#include <linux/mmc/sdio_func.h>
#include "i2400m-sdio.h"
#define D_SUBMODULE fw
#include "sdio-debug-levels.h"
/*
* Send a boot-mode command to the SDIO function
*
* We use a bounce buffer (i2400m->bm_cmd_buf) because we need to
* touch the header if the RAW flag is not set.
*
* @flags: pass thru from i2400m_bm_cmd()
* @return: cmd_size if ok, < 0 errno code on error.
*
* Note the command is padded to the SDIO block size for the device.
*/
ssize_t i2400ms_bus_bm_cmd_send(struct i2400m *i2400m,
const struct i2400m_bootrom_header *_cmd,
size_t cmd_size, int flags)
{
ssize_t result;
struct device *dev = i2400m_dev(i2400m);
struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
int opcode = _cmd == NULL ? -1 : i2400m_brh_get_opcode(_cmd);
struct i2400m_bootrom_header *cmd;
/* SDIO restriction */
size_t cmd_size_a = ALIGN(cmd_size, I2400MS_BLK_SIZE);
d_fnstart(5, dev, "(i2400m %p cmd %p size %zu)\n",
i2400m, _cmd, cmd_size);
result = -E2BIG;
if (cmd_size > I2400M_BM_CMD_BUF_SIZE)
goto error_too_big;
if (_cmd != i2400m->bm_cmd_buf)
memmove(i2400m->bm_cmd_buf, _cmd, cmd_size);
cmd = i2400m->bm_cmd_buf;
if (cmd_size_a > cmd_size) /* Zero pad space */
memset(i2400m->bm_cmd_buf + cmd_size, 0, cmd_size_a - cmd_size);
if ((flags & I2400M_BM_CMD_RAW) == 0) {
if (WARN_ON(i2400m_brh_get_response_required(cmd) == 0))
dev_warn(dev, "SW BUG: response_required == 0\n");
i2400m_bm_cmd_prepare(cmd);
}
d_printf(4, dev, "BM cmd %d: %zu bytes (%zu padded)\n",
opcode, cmd_size, cmd_size_a);
d_dump(5, dev, cmd, cmd_size);
sdio_claim_host(i2400ms->func); /* Send & check */
result = sdio_memcpy_toio(i2400ms->func, I2400MS_DATA_ADDR,
i2400m->bm_cmd_buf, cmd_size_a);
sdio_release_host(i2400ms->func);
if (result < 0) {
dev_err(dev, "BM cmd %d: cannot send: %ld\n",
opcode, (long) result);
goto error_cmd_send;
}
result = cmd_size;
error_cmd_send:
error_too_big:
d_fnend(5, dev, "(i2400m %p cmd %p size %zu) = %d\n",
i2400m, _cmd, cmd_size, (int) result);
return result;
}
/*
* Read an ack from the device's boot-mode
*
* @i2400m:
* @_ack: pointer to where to store the read data
* @ack_size: how many bytes we should read
*
* Returns: < 0 errno code on error; otherwise, amount of received bytes.
*
* The ACK for a BM command is always at least sizeof(*ack) bytes, so
* check for that. We don't need to check for device reboots
*
*/
ssize_t i2400ms_bus_bm_wait_for_ack(struct i2400m *i2400m,
struct i2400m_bootrom_header *ack,
size_t ack_size)
{
ssize_t result;
struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
struct sdio_func *func = i2400ms->func;
struct device *dev = &func->dev;
int size;
BUG_ON(sizeof(*ack) > ack_size);
d_fnstart(5, dev, "(i2400m %p ack %p size %zu)\n",
i2400m, ack, ack_size);
result = wait_event_timeout(i2400ms->bm_wfa_wq,
i2400ms->bm_ack_size != -EINPROGRESS,
2 * HZ);
if (result == 0) {
result = -ETIMEDOUT;
dev_err(dev, "BM: error waiting for an ack\n");
goto error_timeout;
}
spin_lock(&i2400m->rx_lock);
result = i2400ms->bm_ack_size;
BUG_ON(result == -EINPROGRESS);
if (result < 0) /* so we exit when rx_release() is called */
dev_err(dev, "BM: %s failed: %zd\n", __func__, result);
else {
size = min(ack_size, i2400ms->bm_ack_size);
memcpy(ack, i2400m->bm_ack_buf, size);
}
/*
* Remember always to clear the bm_ack_size to -EINPROGRESS
* after the RX data is processed
*/
i2400ms->bm_ack_size = -EINPROGRESS;
spin_unlock(&i2400m->rx_lock);
error_timeout:
d_fnend(5, dev, "(i2400m %p ack %p size %zu) = %zd\n",
i2400m, ack, ack_size, result);
return result;
}
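The send path above pads every boot-mode command up to the 256-byte SDIO block size before the sdio_memcpy_toio() transfer. A small worked sketch of that padding, using the same ALIGN() semantics (the 44-byte command length is just an example value):

/* Illustrative only: a 44-byte command is padded to one 256-byte block. */
size_t cmd_size = 44;
size_t cmd_size_a = ALIGN(cmd_size, I2400MS_BLK_SIZE);	/* -> 256 */
/* The bytes between cmd_size and cmd_size_a are zeroed in the bounce
 * buffer before the transfer, as i2400ms_bus_bm_cmd_send() does above. */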

View File

@ -1,301 +0,0 @@
/*
* Intel Wireless WiMAX Connection 2400m
* SDIO RX handling
*
*
* Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* Intel Corporation <linux-wimax@intel.com>
* Dirk Brandewie <dirk.j.brandewie@intel.com>
* - Initial implementation
*
*
* This handles the RX path on SDIO.
*
* The SDIO bus driver calls the "irq" routine when data is available.
* This is not a traditional interrupt routine since the SDIO bus
* driver calls us from its irq thread context. Because of this
* sleeping in the SDIO RX IRQ routine is okay.
*
* From there on, we obtain the size of the data that is available,
* allocate an skb, copy it and then pass it to the generic driver's
* RX routine [i2400m_rx()].
*
* ROADMAP
*
* i2400ms_irq()
* i2400ms_rx()
* __i2400ms_rx_get_size()
* i2400m_is_boot_barker()
* i2400m_rx()
*
* i2400ms_rx_setup()
*
* i2400ms_rx_release()
*/
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/skbuff.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/slab.h>
#include "i2400m-sdio.h"
#define D_SUBMODULE rx
#include "sdio-debug-levels.h"
static const __le32 i2400m_ACK_BARKER[4] = {
__constant_cpu_to_le32(I2400M_ACK_BARKER),
__constant_cpu_to_le32(I2400M_ACK_BARKER),
__constant_cpu_to_le32(I2400M_ACK_BARKER),
__constant_cpu_to_le32(I2400M_ACK_BARKER)
};
/*
* Read and return the amount of bytes available for RX
*
* The RX size has to be read like this: byte reads of three
* sequential locations; then glue'em together.
*
* sdio_readl() doesn't work.
*/
static ssize_t __i2400ms_rx_get_size(struct i2400ms *i2400ms)
{
int ret, cnt, val;
ssize_t rx_size;
unsigned xfer_size_addr;
struct sdio_func *func = i2400ms->func;
struct device *dev = &i2400ms->func->dev;
d_fnstart(7, dev, "(i2400ms %p)\n", i2400ms);
xfer_size_addr = I2400MS_INTR_GET_SIZE_ADDR;
rx_size = 0;
for (cnt = 0; cnt < 3; cnt++) {
val = sdio_readb(func, xfer_size_addr + cnt, &ret);
if (ret < 0) {
dev_err(dev, "RX: Can't read byte %d of RX size from "
"0x%08x: %d\n", cnt, xfer_size_addr + cnt, ret);
rx_size = ret;
goto error_read;
}
rx_size = rx_size << 8 | (val & 0xff);
}
d_printf(6, dev, "RX: rx_size is %ld\n", (long) rx_size);
error_read:
d_fnend(7, dev, "(i2400ms %p) = %ld\n", i2400ms, (long) rx_size);
return rx_size;
}
/*
* Read data from the device (when in normal)
*
* Allocate an SKB of the right size, read the data in and then
* deliver it to the generic layer.
*
* We also check for a reboot barker. That means the device died and
* we have to reboot it.
*/
static
void i2400ms_rx(struct i2400ms *i2400ms)
{
int ret;
struct sdio_func *func = i2400ms->func;
struct device *dev = &func->dev;
struct i2400m *i2400m = &i2400ms->i2400m;
struct sk_buff *skb;
ssize_t rx_size;
d_fnstart(7, dev, "(i2400ms %p)\n", i2400ms);
rx_size = __i2400ms_rx_get_size(i2400ms);
if (rx_size < 0) {
ret = rx_size;
goto error_get_size;
}
/*
* Hardware quirk: make sure to clear the INTR status register
* AFTER getting the data transfer size.
*/
sdio_writeb(func, 1, I2400MS_INTR_CLEAR_ADDR, &ret);
ret = -ENOMEM;
skb = alloc_skb(rx_size, GFP_ATOMIC);
if (NULL == skb) {
dev_err(dev, "RX: unable to alloc skb\n");
goto error_alloc_skb;
}
ret = sdio_memcpy_fromio(func, skb->data,
I2400MS_DATA_ADDR, rx_size);
if (ret < 0) {
dev_err(dev, "RX: SDIO data read failed: %d\n", ret);
goto error_memcpy_fromio;
}
rmb(); /* make sure we get boot_mode from dev_reset_handle */
if (unlikely(i2400m->boot_mode == 1)) {
spin_lock(&i2400m->rx_lock);
i2400ms->bm_ack_size = rx_size;
spin_unlock(&i2400m->rx_lock);
memcpy(i2400m->bm_ack_buf, skb->data, rx_size);
wake_up(&i2400ms->bm_wfa_wq);
d_printf(5, dev, "RX: SDIO boot mode message\n");
kfree_skb(skb);
goto out;
}
ret = -EIO;
if (unlikely(rx_size < sizeof(__le32))) {
dev_err(dev, "HW BUG? only %zu bytes received\n", rx_size);
goto error_bad_size;
}
if (likely(i2400m_is_d2h_barker(skb->data))) {
skb_put(skb, rx_size);
i2400m_rx(i2400m, skb);
} else if (unlikely(i2400m_is_boot_barker(i2400m,
skb->data, rx_size))) {
ret = i2400m_dev_reset_handle(i2400m, "device rebooted");
dev_err(dev, "RX: SDIO reboot barker\n");
kfree_skb(skb);
} else {
i2400m_unknown_barker(i2400m, skb->data, rx_size);
kfree_skb(skb);
}
out:
d_fnend(7, dev, "(i2400ms %p) = void\n", i2400ms);
return;
error_memcpy_fromio:
kfree_skb(skb);
error_alloc_skb:
error_get_size:
error_bad_size:
d_fnend(7, dev, "(i2400ms %p) = %d\n", i2400ms, ret);
}
/*
* Process an interrupt from the SDIO card
*
* FIXME: need to process other events that are not just ready-to-read
*
* Checks there is data ready and then proceeds to read it.
*/
static
void i2400ms_irq(struct sdio_func *func)
{
int ret;
struct i2400ms *i2400ms = sdio_get_drvdata(func);
struct device *dev = &func->dev;
int val;
d_fnstart(6, dev, "(i2400ms %p)\n", i2400ms);
val = sdio_readb(func, I2400MS_INTR_STATUS_ADDR, &ret);
if (ret < 0) {
dev_err(dev, "RX: Can't read interrupt status: %d\n", ret);
goto error_no_irq;
}
if (!val) {
dev_err(dev, "RX: BUG? got IRQ but no interrupt ready?\n");
goto error_no_irq;
}
i2400ms_rx(i2400ms);
error_no_irq:
d_fnend(6, dev, "(i2400ms %p) = void\n", i2400ms);
}
/*
* Setup SDIO RX
*
* Hooks up the IRQ handler and then enables IRQs.
*/
int i2400ms_rx_setup(struct i2400ms *i2400ms)
{
int result;
struct sdio_func *func = i2400ms->func;
struct device *dev = &func->dev;
struct i2400m *i2400m = &i2400ms->i2400m;
d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
init_waitqueue_head(&i2400ms->bm_wfa_wq);
spin_lock(&i2400m->rx_lock);
i2400ms->bm_wait_result = -EINPROGRESS;
/*
* Before we are about to enable the RX interrupt, make sure
* bm_ack_size is cleared to -EINPROGRESS which indicates
* no RX interrupt happened yet or the previous interrupt
* has been handled, we are ready to take the new interrupt
*/
i2400ms->bm_ack_size = -EINPROGRESS;
spin_unlock(&i2400m->rx_lock);
sdio_claim_host(func);
result = sdio_claim_irq(func, i2400ms_irq);
if (result < 0) {
dev_err(dev, "Cannot claim IRQ: %d\n", result);
goto error_irq_claim;
}
result = 0;
sdio_writeb(func, 1, I2400MS_INTR_ENABLE_ADDR, &result);
if (result < 0) {
sdio_release_irq(func);
dev_err(dev, "Failed to enable interrupts %d\n", result);
}
error_irq_claim:
sdio_release_host(func);
d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
return result;
}
/*
* Tear down SDIO RX
*
* Disables IRQs in the device and removes the IRQ handler.
*/
void i2400ms_rx_release(struct i2400ms *i2400ms)
{
int result;
struct sdio_func *func = i2400ms->func;
struct device *dev = &func->dev;
struct i2400m *i2400m = &i2400ms->i2400m;
d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
spin_lock(&i2400m->rx_lock);
i2400ms->bm_ack_size = -EINTR;
spin_unlock(&i2400m->rx_lock);
wake_up_all(&i2400ms->bm_wfa_wq);
sdio_claim_host(func);
sdio_writeb(func, 0, I2400MS_INTR_ENABLE_ADDR, &result);
sdio_release_irq(func);
sdio_release_host(func);
d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
}
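The boot-mode RX path above and the ack wait in sdio-fw.c cooperate through bm_ack_size: the RX side publishes the size and wakes the waiter, and the waiter resets it to -EINPROGRESS once the ack has been consumed. A condensed sketch of that handshake, with both sides shown together for clarity (drawn from the two files, not new logic):

/* Illustrative only: producer side, run from i2400ms_rx() in the SDIO
 * IRQ thread when the device is in boot mode. */
spin_lock(&i2400m->rx_lock);
i2400ms->bm_ack_size = rx_size;			/* publish the ack size */
spin_unlock(&i2400m->rx_lock);
memcpy(i2400m->bm_ack_buf, skb->data, rx_size);
wake_up(&i2400ms->bm_wfa_wq);

/* Illustrative only: consumer side, run from i2400ms_bus_bm_wait_for_ack(). */
wait_event_timeout(i2400ms->bm_wfa_wq,
		   i2400ms->bm_ack_size != -EINPROGRESS, 2 * HZ);
spin_lock(&i2400m->rx_lock);
/* copy min(ack_size, bm_ack_size) bytes out of i2400m->bm_ack_buf here */
i2400ms->bm_ack_size = -EINPROGRESS;	/* ready for the next ack */
spin_unlock(&i2400m->rx_lock);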

View File

@ -1,177 +0,0 @@
/*
* Intel Wireless WiMAX Connection 2400m
* SDIO TX transaction backends
*
*
* Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* Intel Corporation <linux-wimax@intel.com>
* Dirk Brandewie <dirk.j.brandewie@intel.com>
* - Initial implementation
*
*
* Takes the TX messages in the i2400m's driver TX FIFO and sends them
* to the device until there are no more.
*
* If we fail sending the message, we just drop it. There isn't much
* we can do at this point. Most of the traffic is network, which has
* recovery methods for dropped packets.
*
* The SDIO functions are not atomic, so we can't run from the context
* where i2400m->bus_tx_kick() [i2400ms_bus_tx_kick()] is being called
* (some times atomic). Thus, the actual TX work is deferred to a
* workqueue.
*
* ROADMAP
*
* i2400ms_bus_tx_kick()
* i2400ms_tx_submit() [through workqueue]
*
* i2400m_tx_setup()
*
* i2400m_tx_release()
*/
#include <linux/mmc/sdio_func.h>
#include "i2400m-sdio.h"
#define D_SUBMODULE tx
#include "sdio-debug-levels.h"
/*
* Pull TX transactions from the TX FIFO and send them to the device
* until there are no more.
*/
static
void i2400ms_tx_submit(struct work_struct *ws)
{
int result;
struct i2400ms *i2400ms = container_of(ws, struct i2400ms, tx_worker);
struct i2400m *i2400m = &i2400ms->i2400m;
struct sdio_func *func = i2400ms->func;
struct device *dev = &func->dev;
struct i2400m_msg_hdr *tx_msg;
size_t tx_msg_size;
d_fnstart(4, dev, "(i2400ms %p, i2400m %p)\n", i2400ms, i2400ms);
while (NULL != (tx_msg = i2400m_tx_msg_get(i2400m, &tx_msg_size))) {
d_printf(2, dev, "TX: submitting %zu bytes\n", tx_msg_size);
d_dump(5, dev, tx_msg, tx_msg_size);
sdio_claim_host(func);
result = sdio_memcpy_toio(func, 0, tx_msg, tx_msg_size);
sdio_release_host(func);
i2400m_tx_msg_sent(i2400m);
if (result < 0) {
dev_err(dev, "TX: cannot submit TX; tx_msg @%zu %zu B:"
" %d\n", (void *) tx_msg - i2400m->tx_buf,
tx_msg_size, result);
}
if (result == -ETIMEDOUT) {
i2400m_error_recovery(i2400m);
break;
}
d_printf(2, dev, "TX: %zub submitted\n", tx_msg_size);
}
d_fnend(4, dev, "(i2400ms %p) = void\n", i2400ms);
}
/*
* The generic driver notifies us that there is data ready for TX
*
* Schedule a run of i2400ms_tx_submit() to handle it.
*/
void i2400ms_bus_tx_kick(struct i2400m *i2400m)
{
struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
struct device *dev = &i2400ms->func->dev;
unsigned long flags;
d_fnstart(3, dev, "(i2400m %p) = void\n", i2400m);
/* Schedule the TX work; TX may block, so it has to run in
* thread context.
*/
spin_lock_irqsave(&i2400m->tx_lock, flags);
if (i2400ms->tx_workqueue != NULL)
queue_work(i2400ms->tx_workqueue, &i2400ms->tx_worker);
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}
int i2400ms_tx_setup(struct i2400ms *i2400ms)
{
int result;
struct device *dev = &i2400ms->func->dev;
struct i2400m *i2400m = &i2400ms->i2400m;
struct workqueue_struct *tx_workqueue;
unsigned long flags;
d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
INIT_WORK(&i2400ms->tx_worker, i2400ms_tx_submit);
snprintf(i2400ms->tx_wq_name, sizeof(i2400ms->tx_wq_name),
"%s-tx", i2400m->wimax_dev.name);
tx_workqueue =
create_singlethread_workqueue(i2400ms->tx_wq_name);
if (tx_workqueue == NULL) {
dev_err(dev, "TX: failed to create workqueue\n");
result = -ENOMEM;
} else
result = 0;
spin_lock_irqsave(&i2400m->tx_lock, flags);
i2400ms->tx_workqueue = tx_workqueue;
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
return result;
}
void i2400ms_tx_release(struct i2400ms *i2400ms)
{
struct i2400m *i2400m = &i2400ms->i2400m;
struct workqueue_struct *tx_workqueue;
unsigned long flags;
tx_workqueue = i2400ms->tx_workqueue;
spin_lock_irqsave(&i2400m->tx_lock, flags);
i2400ms->tx_workqueue = NULL;
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
if (tx_workqueue)
destroy_workqueue(tx_workqueue);
}
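/*
 * Illustrative sketch only (not part of this driver or commit): the
 * kick-to-workqueue pattern used by i2400ms_bus_tx_kick() and
 * i2400ms_tx_submit() above, reduced to a minimal module. The "kick"
 * may run in atomic context and only queues work; the blocking I/O
 * happens later in thread context. All demo_* names are made up.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

struct demo_dev {
	spinlock_t lock;		/* protects wq against teardown */
	struct workqueue_struct *wq;	/* NULL once released */
	struct work_struct worker;
};

static struct demo_dev demo;

static void demo_tx_submit(struct work_struct *ws)
{
	struct demo_dev *dd = container_of(ws, struct demo_dev, worker);

	/* Thread context: safe to claim the bus and do blocking I/O. */
	pr_info("demo: submitting TX for %p\n", dd);
}

/* May be called from atomic context; must not block. */
static void demo_kick(struct demo_dev *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->lock, flags);
	if (dd->wq != NULL)
		queue_work(dd->wq, &dd->worker);
	spin_unlock_irqrestore(&dd->lock, flags);
}

static int __init demo_init(void)
{
	spin_lock_init(&demo.lock);
	INIT_WORK(&demo.worker, demo_tx_submit);
	demo.wq = create_singlethread_workqueue("demo-tx");
	if (demo.wq == NULL)
		return -ENOMEM;
	demo_kick(&demo);
	return 0;
}

static void __exit demo_exit(void)
{
	struct workqueue_struct *wq;
	unsigned long flags;

	spin_lock_irqsave(&demo.lock, flags);
	wq = demo.wq;
	demo.wq = NULL;
	spin_unlock_irqrestore(&demo.lock, flags);
	if (wq)
		destroy_workqueue(wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");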

View File

@ -1,602 +0,0 @@
/*
* Intel Wireless WiMAX Connection 2400m
* Linux driver model glue for the SDIO device, reset & fw upload
*
*
* Copyright (C) 2007-2008 Intel Corporation <linux-wimax@intel.com>
* Dirk Brandewie <dirk.j.brandewie@intel.com>
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
* Yanir Lubetkin <yanirx.lubetkin@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*
* See i2400m-sdio.h for a general description of this driver.
*
* This file implements driver model glue, and hook ups for the
* generic driver to implement the bus-specific functions (device
* communication setup/tear down, firmware upload and resetting).
*
* ROADMAP
*
* i2400m_probe()
* alloc_netdev()
* i2400ms_netdev_setup()
* i2400ms_init()
* i2400m_netdev_setup()
* i2400ms_enable_function()
* i2400m_setup()
*
* i2400m_remove()
* i2400m_release()
* free_netdev(net_dev)
*
* i2400ms_bus_reset() Called by i2400m_reset
* __i2400ms_reset()
* __i2400ms_send_barker()
*/
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include "i2400m-sdio.h"
#include <linux/wimax/i2400m.h>
#include <linux/module.h>
#define D_SUBMODULE main
#include "sdio-debug-levels.h"
/* IOE WiMAX function timeout in seconds */
static int ioe_timeout = 2;
module_param(ioe_timeout, int, 0);
static char i2400ms_debug_params[128];
module_param_string(debug, i2400ms_debug_params, sizeof(i2400ms_debug_params),
0644);
MODULE_PARM_DESC(debug,
"String of space-separated NAME:VALUE pairs, where NAMEs "
"are the different debug submodules and VALUE are the "
"initial debug value to set.");
/* Our firmware file name list */
static const char *i2400ms_bus_fw_names[] = {
#define I2400MS_FW_FILE_NAME "i2400m-fw-sdio-1.3.sbcf"
I2400MS_FW_FILE_NAME,
NULL
};
static const struct i2400m_poke_table i2400ms_pokes[] = {
I2400M_FW_POKE(0x6BE260, 0x00000088),
I2400M_FW_POKE(0x080550, 0x00000005),
I2400M_FW_POKE(0xAE0000, 0x00000000),
I2400M_FW_POKE(0x000000, 0x00000000), /* MUST be 0 terminated or bad
* things will happen */
};
/*
* Enable the SDIO function
*
* Tries to enable the SDIO function; this might fail if it is still not
* ready (on some hardware, the SDIO WiMAX function is only enabled when
* we explicitly ask it to be). Tries until a timeout is reached.
*
* The @maxtries argument indicates how many times (at most) enabling the
* function should be attempted; 0 means forever. This acts together with
* the timeout (i.e., it stops trying as soon as the maximum number of
* tries is reached _or_ as soon as the timeout expires).
*
* The reverse of this is sdio_disable_func().
*
* Returns: 0 if the SDIO function was enabled, < 0 errno code on
* error (-ENODEV when it was unable to enable the function).
*/
static
int i2400ms_enable_function(struct i2400ms *i2400ms, unsigned maxtries)
{
struct sdio_func *func = i2400ms->func;
u64 timeout;
int err;
struct device *dev = &func->dev;
unsigned tries = 0;
d_fnstart(3, dev, "(func %p)\n", func);
/* Setup timeout (FIXME: This needs to read the CIS table to
* get a real timeout) and then wait for the device to signal
* it is ready */
timeout = get_jiffies_64() + ioe_timeout * HZ;
err = -ENODEV;
while (err != 0 && time_before64(get_jiffies_64(), timeout)) {
sdio_claim_host(func);
/*
* There is a silicon bug on the IWMC3200, where the
* IOE timeout will cause problems on Moorestown
* platforms (system hang). We explicitly overwrite
* func->enable_timeout here to work around the issue.
*/
if (i2400ms->iwmc3200)
func->enable_timeout = IWMC3200_IOR_TIMEOUT;
err = sdio_enable_func(func);
if (0 == err) {
sdio_release_host(func);
d_printf(2, dev, "SDIO function enabled\n");
goto function_enabled;
}
d_printf(2, dev, "SDIO function failed to enable: %d\n", err);
sdio_release_host(func);
if (maxtries > 0 && ++tries >= maxtries) {
err = -ETIME;
break;
}
msleep(I2400MS_INIT_SLEEP_INTERVAL);
}
/* If timed out, device is not there yet -- get -ENODEV so
* the device driver core will retry later on. */
if (err == -ETIME) {
dev_err(dev, "Can't enable WiMAX function; "
" has the function been enabled?\n");
err = -ENODEV;
}
function_enabled:
d_fnend(3, dev, "(func %p) = %d\n", func, err);
return err;
}
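/*
 * Illustrative sketch only (not part of this driver or commit): the
 * retry-until-timeout shape used by i2400ms_enable_function() above,
 * i.e. retry an operation until it succeeds, a jiffies deadline passes,
 * or an optional attempt budget (0 == unlimited) runs out. The names
 * demo_try_enable() and demo_enable_with_timeout() are made up;
 * demo_try_enable() stands in for the real sdio_enable_func() call.
 */
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

#define DEMO_SLEEP_MS		100
#define DEMO_TIMEOUT_SECS	2

static int demo_try_enable(void)
{
	return -ENODEV;			/* pretend the function isn't ready */
}

static int demo_enable_with_timeout(unsigned maxtries)
{
	u64 deadline = get_jiffies_64() + DEMO_TIMEOUT_SECS * HZ;
	unsigned tries = 0;
	int err = -ENODEV;

	while (err != 0 && time_before64(get_jiffies_64(), deadline)) {
		err = demo_try_enable();
		if (err == 0)
			break;			/* enabled, we are done */
		if (maxtries > 0 && ++tries >= maxtries)
			return -ETIME;		/* attempt budget exhausted */
		msleep(DEMO_SLEEP_MS);
	}
	return err;				/* 0 or the last error seen */
}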
/*
* Setup minimal device communication infrastructure needed to at
* least be able to update the firmware.
*
* Note the ugly trick: if we are in the probe path
* (i2400ms->debugfs_dentry == NULL), we only try to enable the function
* once, to avoid racing with the iwmc3200 top controller.
*/
static
int i2400ms_bus_setup(struct i2400m *i2400m)
{
int result;
struct i2400ms *i2400ms =
container_of(i2400m, struct i2400ms, i2400m);
struct device *dev = i2400m_dev(i2400m);
struct sdio_func *func = i2400ms->func;
int retries;
sdio_claim_host(func);
result = sdio_set_block_size(func, I2400MS_BLK_SIZE);
sdio_release_host(func);
if (result < 0) {
dev_err(dev, "Failed to set block size: %d\n", result);
goto error_set_blk_size;
}
if (i2400ms->iwmc3200 && i2400ms->debugfs_dentry == NULL)
retries = 1;
else
retries = 0;
result = i2400ms_enable_function(i2400ms, retries);
if (result < 0) {
dev_err(dev, "Cannot enable SDIO function: %d\n", result);
goto error_func_enable;
}
result = i2400ms_tx_setup(i2400ms);
if (result < 0)
goto error_tx_setup;
result = i2400ms_rx_setup(i2400ms);
if (result < 0)
goto error_rx_setup;
return 0;
error_rx_setup:
i2400ms_tx_release(i2400ms);
error_tx_setup:
sdio_claim_host(func);
sdio_disable_func(func);
sdio_release_host(func);
error_func_enable:
error_set_blk_size:
return result;
}
/*
* Tear down minimal device communication infrastructure needed to at
* least be able to update the firmware.
*/
static
void i2400ms_bus_release(struct i2400m *i2400m)
{
struct i2400ms *i2400ms =
container_of(i2400m, struct i2400ms, i2400m);
struct sdio_func *func = i2400ms->func;
i2400ms_rx_release(i2400ms);
i2400ms_tx_release(i2400ms);
sdio_claim_host(func);
sdio_disable_func(func);
sdio_release_host(func);
}
/*
* Setup driver resources needed to communicate with the device
*
* The fw needs some time to settle, and it was just uploaded,
* so give it a break first. I'd prefer to just wait for the device to
* send something, but it seems the poking we do to enable SDIO stuff
* interferes with it, so just give it a break before starting...
*/
static
int i2400ms_bus_dev_start(struct i2400m *i2400m)
{
struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
struct sdio_func *func = i2400ms->func;
struct device *dev = &func->dev;
d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
msleep(200);
d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, 0);
return 0;
}
/*
* Sends a barker buffer to the device
*
* This helper will allocate a kmalloced buffer and use it to transmit
* (then free it). The reason is that the SDIO host controller expects a
* certain alignment (exactly which one is unknown) that the stack won't
* really provide, and certain arch/host-controller combinations cannot
* use stack/vmalloc/text areas for DMA transfers.
*/
static
int __i2400ms_send_barker(struct i2400ms *i2400ms,
const __le32 *barker, size_t barker_size)
{
int ret;
struct sdio_func *func = i2400ms->func;
struct device *dev = &func->dev;
void *buffer;
ret = -ENOMEM;
buffer = kmalloc(I2400MS_BLK_SIZE, GFP_KERNEL);
if (buffer == NULL)
goto error_kzalloc;
memcpy(buffer, barker, barker_size);
sdio_claim_host(func);
ret = sdio_memcpy_toio(func, 0, buffer, I2400MS_BLK_SIZE);
sdio_release_host(func);
if (ret < 0)
d_printf(0, dev, "E: barker error: %d\n", ret);
kfree(buffer);
error_kzalloc:
return ret;
}
/*
* Reset a device at different levels (warm, cold or bus)
*
* @i2400ms: device descriptor
* @reset_type: soft, warm or bus reset (I2400M_RT_WARM/SOFT/BUS)
*
* FIXME: not tested -- need to confirm expected effects
*
* Warm and cold resets get an SDIO reset if they fail (unimplemented)
*
* Warm reset:
*
* The device will be fully reset internally, but won't be
* disconnected from the bus (so no reenumeration will
* happen). Firmware upload will be necessary.
*
* The device will send a reboot barker that will trigger the driver
* to reinitialize the state via __i2400m_dev_reset_handle.
*
*
* Cold and bus reset:
*
* The device will be fully reset internally, disconnected from the
* bus and a reenumeration will happen. Firmware upload will be
* necessary. Thus, we don't do any locking or struct
* reinitialization, as we are going to be fully disconnected and
* reenumerated.
*
* Note we need to return -ENODEV if a warm reset was requested and we
* had to resort to a bus reset. See i2400m_op_reset(), wimax_reset()
* and wimax_dev->op_reset.
*
* WARNING: no driver state saved/fixed
*/
static
int i2400ms_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
{
int result = 0;
struct i2400ms *i2400ms =
container_of(i2400m, struct i2400ms, i2400m);
struct device *dev = i2400m_dev(i2400m);
static const __le32 i2400m_WARM_BOOT_BARKER[4] = {
cpu_to_le32(I2400M_WARM_RESET_BARKER),
cpu_to_le32(I2400M_WARM_RESET_BARKER),
cpu_to_le32(I2400M_WARM_RESET_BARKER),
cpu_to_le32(I2400M_WARM_RESET_BARKER),
};
static const __le32 i2400m_COLD_BOOT_BARKER[4] = {
cpu_to_le32(I2400M_COLD_RESET_BARKER),
cpu_to_le32(I2400M_COLD_RESET_BARKER),
cpu_to_le32(I2400M_COLD_RESET_BARKER),
cpu_to_le32(I2400M_COLD_RESET_BARKER),
};
if (rt == I2400M_RT_WARM)
result = __i2400ms_send_barker(i2400ms, i2400m_WARM_BOOT_BARKER,
sizeof(i2400m_WARM_BOOT_BARKER));
else if (rt == I2400M_RT_COLD)
result = __i2400ms_send_barker(i2400ms, i2400m_COLD_BOOT_BARKER,
sizeof(i2400m_COLD_BOOT_BARKER));
else if (rt == I2400M_RT_BUS) {
do_bus_reset:
i2400ms_bus_release(i2400m);
/* Wait for the device to settle */
msleep(40);
result = i2400ms_bus_setup(i2400m);
} else
BUG();
if (result < 0 && rt != I2400M_RT_BUS) {
dev_err(dev, "%s reset failed (%d); trying SDIO reset\n",
rt == I2400M_RT_WARM ? "warm" : "cold", result);
rt = I2400M_RT_BUS;
goto do_bus_reset;
}
return result;
}
static
void i2400ms_netdev_setup(struct net_device *net_dev)
{
struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
i2400ms_init(i2400ms);
i2400m_netdev_setup(net_dev);
}
/*
* Debug levels control; see debug.h
*/
struct d_level D_LEVEL[] = {
D_SUBMODULE_DEFINE(main),
D_SUBMODULE_DEFINE(tx),
D_SUBMODULE_DEFINE(rx),
D_SUBMODULE_DEFINE(fw),
};
size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
#define __debugfs_register(prefix, name, parent) \
do { \
result = d_level_register_debugfs(prefix, name, parent); \
if (result < 0) \
goto error; \
} while (0)
static
int i2400ms_debugfs_add(struct i2400ms *i2400ms)
{
int result;
struct dentry *dentry = i2400ms->i2400m.wimax_dev.debugfs_dentry;
dentry = debugfs_create_dir("i2400m-sdio", dentry);
result = PTR_ERR(dentry);
if (IS_ERR(dentry)) {
if (result == -ENODEV)
result = 0; /* No debugfs support */
goto error;
}
i2400ms->debugfs_dentry = dentry;
__debugfs_register("dl_", main, dentry);
__debugfs_register("dl_", tx, dentry);
__debugfs_register("dl_", rx, dentry);
__debugfs_register("dl_", fw, dentry);
return 0;
error:
debugfs_remove_recursive(i2400ms->debugfs_dentry);
i2400ms->debugfs_dentry = NULL;
return result;
}
static struct device_type i2400ms_type = {
.name = "wimax",
};
/*
* Probe an i2400m interface and register it
*
* @func: SDIO function
* @id: SDIO device ID
* @returns: 0 if ok, < 0 errno code on error.
*
* Allocates a net device, initializes the bus-specific details and then
* calls the bus-generic initialization routine. That will register the
* wimax and netdev devices, upload the firmware [using _bus_bm_*()],
* call _bus_dev_start() to finalize the setup of the communication with
* the device and then start talking to it to finish setting it up.
*
* Initialization is tricky; some instances of the hw are packed with
* others in a way that requires a third driver that enables the WiMAX
* function. In those cases, we can't enable the SDIO function and
* we'll return with -ENODEV. When the driver that enables the WiMAX
* function does its thing, it has to do a bus_rescan_devices() on the
* SDIO bus so this driver is called again to enumerate the WiMAX
* function.
*/
static
int i2400ms_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
int result;
struct net_device *net_dev;
struct device *dev = &func->dev;
struct i2400m *i2400m;
struct i2400ms *i2400ms;
/* Allocate instance [calls i2400m_netdev_setup() on it]. */
result = -ENOMEM;
net_dev = alloc_netdev(sizeof(*i2400ms), "wmx%d",
i2400ms_netdev_setup);
if (net_dev == NULL) {
dev_err(dev, "no memory for network device instance\n");
goto error_alloc_netdev;
}
SET_NETDEV_DEV(net_dev, dev);
SET_NETDEV_DEVTYPE(net_dev, &i2400ms_type);
i2400m = net_dev_to_i2400m(net_dev);
i2400ms = container_of(i2400m, struct i2400ms, i2400m);
i2400m->wimax_dev.net_dev = net_dev;
i2400ms->func = func;
sdio_set_drvdata(func, i2400ms);
i2400m->bus_tx_block_size = I2400MS_BLK_SIZE;
/*
* The room required in the TX queue for an SDIO message to accommodate
* the smallest payload plus header space is 224 bytes, which is the
* smallest message size (the block size, 256 bytes) minus the smallest
* message header size (32 bytes).
*/
i2400m->bus_tx_room_min = I2400MS_BLK_SIZE - I2400M_PL_ALIGN * 2;
i2400m->bus_pl_size_max = I2400MS_PL_SIZE_MAX;
i2400m->bus_setup = i2400ms_bus_setup;
i2400m->bus_dev_start = i2400ms_bus_dev_start;
i2400m->bus_dev_stop = NULL;
i2400m->bus_release = i2400ms_bus_release;
i2400m->bus_tx_kick = i2400ms_bus_tx_kick;
i2400m->bus_reset = i2400ms_bus_reset;
/* The iwmc3200-wimax sometimes requires the driver to try
* hard when we paint it into a corner. */
i2400m->bus_bm_retries = I2400M_SDIO_BOOT_RETRIES;
i2400m->bus_bm_cmd_send = i2400ms_bus_bm_cmd_send;
i2400m->bus_bm_wait_for_ack = i2400ms_bus_bm_wait_for_ack;
i2400m->bus_fw_names = i2400ms_bus_fw_names;
i2400m->bus_bm_mac_addr_impaired = 1;
i2400m->bus_bm_pokes_table = &i2400ms_pokes[0];
switch (func->device) {
case SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX:
case SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5:
i2400ms->iwmc3200 = 1;
break;
default:
i2400ms->iwmc3200 = 0;
}
result = i2400m_setup(i2400m, I2400M_BRI_NO_REBOOT);
if (result < 0) {
dev_err(dev, "cannot setup device: %d\n", result);
goto error_setup;
}
result = i2400ms_debugfs_add(i2400ms);
if (result < 0) {
dev_err(dev, "cannot create SDIO debugfs: %d\n",
result);
goto error_debugfs_add;
}
return 0;
error_debugfs_add:
i2400m_release(i2400m);
error_setup:
sdio_set_drvdata(func, NULL);
free_netdev(net_dev);
error_alloc_netdev:
return result;
}
static
void i2400ms_remove(struct sdio_func *func)
{
struct device *dev = &func->dev;
struct i2400ms *i2400ms = sdio_get_drvdata(func);
struct i2400m *i2400m = &i2400ms->i2400m;
struct net_device *net_dev = i2400m->wimax_dev.net_dev;
d_fnstart(3, dev, "SDIO func %p\n", func);
debugfs_remove_recursive(i2400ms->debugfs_dentry);
i2400ms->debugfs_dentry = NULL;
i2400m_release(i2400m);
sdio_set_drvdata(func, NULL);
free_netdev(net_dev);
d_fnend(3, dev, "SDIO func %p\n", func);
}
static
const struct sdio_device_id i2400ms_sdio_ids[] = {
/* Intel: i2400m WiMAX (iwmc3200) over SDIO */
{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL,
SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX) },
{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL,
SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5) },
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, i2400ms_sdio_ids);
static
struct sdio_driver i2400m_sdio_driver = {
.name = KBUILD_MODNAME,
.probe = i2400ms_probe,
.remove = i2400ms_remove,
.id_table = i2400ms_sdio_ids,
};
static
int __init i2400ms_driver_init(void)
{
d_parse_params(D_LEVEL, D_LEVEL_SIZE, i2400ms_debug_params,
"i2400m_sdio.debug");
return sdio_register_driver(&i2400m_sdio_driver);
}
module_init(i2400ms_driver_init);
static
void __exit i2400ms_driver_exit(void)
{
sdio_unregister_driver(&i2400m_sdio_driver);
}
module_exit(i2400ms_driver_exit);
MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
MODULE_DESCRIPTION("Intel 2400M WiMAX networking for SDIO");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(I2400MS_FW_FILE_NAME);

View File

@ -276,7 +276,6 @@ source "drivers/net/wireless/hostap/Kconfig"
source "drivers/net/wireless/ipw2x00/Kconfig"
source "drivers/net/wireless/iwlwifi/Kconfig"
source "drivers/net/wireless/iwlegacy/Kconfig"
source "drivers/net/wireless/iwmc3200wifi/Kconfig"
source "drivers/net/wireless/libertas/Kconfig"
source "drivers/net/wireless/orinoco/Kconfig"
source "drivers/net/wireless/p54/Kconfig"

View File

@ -53,8 +53,6 @@ obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
obj-$(CONFIG_WL_TI) += ti/
obj-$(CONFIG_IWM) += iwmc3200wifi/
obj-$(CONFIG_MWIFIEX) += mwifiex/
obj-$(CONFIG_BRCMFMAC) += brcm80211/

View File

@ -64,3 +64,11 @@ config ATH5K_PCI
---help---
This adds support for PCI type chipsets of the 5xxx Atheros
family.
config ATH5K_TEST_CHANNELS
bool "Enables testing channels on ath5k"
depends on ATH5K && CFG80211_CERTIFICATION_ONUS
---help---
This enables non-standard IEEE 802.11 channels on ath5k, which
can be used for research purposes. This option should be disabled
unless doing research.

View File

@ -74,10 +74,6 @@ bool ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
static bool modparam_all_channels;
module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");
static bool modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
@ -258,8 +254,15 @@ static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *re
\********************/
/*
* Returns true for the channel numbers used without all_channels modparam.
* Returns true for the channel numbers used.
*/
#ifdef CONFIG_ATH5K_TEST_CHANNELS
static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
{
return true;
}
#else
static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
{
if (band == IEEE80211_BAND_2GHZ && chan <= 14)
@ -276,6 +279,7 @@ static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
/* 802.11j 4.9GHz (20MHz) */
(chan == 184 || chan == 188 || chan == 192 || chan == 196));
}
#endif
static unsigned int
ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
@ -316,8 +320,7 @@ ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
if (!ath5k_channel_ok(ah, &channels[count]))
continue;
if (!modparam_all_channels &&
!ath5k_is_standard_channel(ch, band))
if (!ath5k_is_standard_channel(ch, band))
continue;
count++;

View File

@ -3627,6 +3627,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
wiphy->cipher_suites = cipher_suites;
wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
#ifdef CONFIG_PM
wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT |
WIPHY_WOWLAN_GTK_REKEY_FAILURE |
@ -3636,6 +3637,7 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST;
wiphy->wowlan.pattern_min_len = 1;
wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE;
#endif
wiphy->max_sched_scan_ssids = MAX_PROBED_SSIDS;

View File

@ -35,6 +35,10 @@ static const struct platform_device_id ath9k_platform_id_table[] = {
.name = "ar934x_wmac",
.driver_data = AR9300_DEVID_AR9340,
},
{
.name = "qca955x_wmac",
.driver_data = AR9300_DEVID_QCA955X,
},
{},
};

View File

@ -159,14 +159,11 @@ static bool ar9003_hw_calibrate(struct ath_hw *ah,
}
}
/* Do NF cal only at longer intervals */
if (longcal) {
/*
* Get the value from the previous NF cal and update
* history buffer.
*/
ath9k_hw_getnf(ah, chan);
/*
* Do NF cal only at longer intervals. Get the value from
* the previous NF cal and update history buffer.
*/
if (longcal && ath9k_hw_getnf(ah, chan)) {
/*
* Load the NF from history buffer of the current channel.
* NF is slow time-variant, so it is OK to use a historical

View File

@ -3509,7 +3509,7 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias);
else if (AR_SREV_9462(ah))
else if (AR_SREV_9462(ah) || AR_SREV_9550(ah))
REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
else {
REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
@ -3591,6 +3591,9 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
if (AR_SREV_9462(ah)) {
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
AR_SWITCH_TABLE_COM_AR9462_ALL, value);
} else if (AR_SREV_9550(ah)) {
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
AR_SWITCH_TABLE_COM_AR9550_ALL, value);
} else
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM,
AR_SWITCH_TABLE_COM_ALL, value);
@ -3957,7 +3960,7 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
ar9003_hw_drive_strength_apply(ah);
ar9003_hw_atten_apply(ah, chan);
ar9003_hw_quick_drop_apply(ah, chan->channel);
if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah))
if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah) && !AR_SREV_9550(ah))
ar9003_hw_internal_regulator_apply(ah);
if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
ar9003_hw_apply_tuning_caps(ah);

View File

@ -21,6 +21,7 @@
#include "ar9340_initvals.h"
#include "ar9330_1p1_initvals.h"
#include "ar9330_1p2_initvals.h"
#include "ar955x_1p0_initvals.h"
#include "ar9580_1p0_initvals.h"
#include "ar9462_2p0_initvals.h"
@ -327,7 +328,61 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->ini_japan2484, AR9462_BBC_TXIFR_COEFFJ,
ARRAY_SIZE(AR9462_BBC_TXIFR_COEFFJ), 2);
} else if (AR_SREV_9550(ah)) {
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
ar955x_1p0_mac_core,
ARRAY_SIZE(ar955x_1p0_mac_core), 2);
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
ar955x_1p0_mac_postamble,
ARRAY_SIZE(ar955x_1p0_mac_postamble), 5);
/* bb */
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
ar955x_1p0_baseband_core,
ARRAY_SIZE(ar955x_1p0_baseband_core), 2);
INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
ar955x_1p0_baseband_postamble,
ARRAY_SIZE(ar955x_1p0_baseband_postamble), 5);
/* radio */
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
ar955x_1p0_radio_core,
ARRAY_SIZE(ar955x_1p0_radio_core), 2);
INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
ar955x_1p0_radio_postamble,
ARRAY_SIZE(ar955x_1p0_radio_postamble), 5);
/* soc */
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
ar955x_1p0_soc_preamble,
ARRAY_SIZE(ar955x_1p0_soc_preamble), 2);
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
ar955x_1p0_soc_postamble,
ARRAY_SIZE(ar955x_1p0_soc_postamble), 5);
/* rx/tx gain */
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar955x_1p0_common_wo_xlna_rx_gain_table,
ARRAY_SIZE(ar955x_1p0_common_wo_xlna_rx_gain_table),
2);
INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
ar955x_1p0_common_wo_xlna_rx_gain_bounds,
ARRAY_SIZE(ar955x_1p0_common_wo_xlna_rx_gain_bounds),
5);
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar955x_1p0_modes_xpa_tx_gain_table,
ARRAY_SIZE(ar955x_1p0_modes_xpa_tx_gain_table),
9);
/* Fast clock modal settings */
INIT_INI_ARRAY(&ah->iniModesFastClock,
ar955x_1p0_modes_fast_clock,
ARRAY_SIZE(ar955x_1p0_modes_fast_clock), 3);
} else if (AR_SREV_9580(ah)) {
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
@ -470,6 +525,11 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
ar9485_modes_lowest_ob_db_tx_gain_1_1,
ARRAY_SIZE(ar9485_modes_lowest_ob_db_tx_gain_1_1),
5);
else if (AR_SREV_9550(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar955x_1p0_modes_xpa_tx_gain_table,
ARRAY_SIZE(ar955x_1p0_modes_xpa_tx_gain_table),
9);
else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9580_1p0_lowest_ob_db_tx_gain_table,
@ -514,6 +574,11 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
ar9580_1p0_high_ob_db_tx_gain_table,
ARRAY_SIZE(ar9580_1p0_high_ob_db_tx_gain_table),
5);
else if (AR_SREV_9550(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar955x_1p0_modes_no_xpa_tx_gain_table,
ARRAY_SIZE(ar955x_1p0_modes_no_xpa_tx_gain_table),
9);
else if (AR_SREV_9462_20(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9462_modes_high_ob_db_tx_gain_table_2p0,
@ -635,7 +700,16 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
ar9485Common_wo_xlna_rx_gain_1_1,
ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_1),
2);
else if (AR_SREV_9580(ah))
else if (AR_SREV_9550(ah)) {
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar955x_1p0_common_rx_gain_table,
ARRAY_SIZE(ar955x_1p0_common_rx_gain_table),
2);
INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
ar955x_1p0_common_rx_gain_bounds,
ARRAY_SIZE(ar955x_1p0_common_rx_gain_bounds),
5);
} else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9580_1p0_rx_gain_table,
ARRAY_SIZE(ar9580_1p0_rx_gain_table),
@ -679,7 +753,16 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
ar9462_common_wo_xlna_rx_gain_table_2p0,
ARRAY_SIZE(ar9462_common_wo_xlna_rx_gain_table_2p0),
2);
else if (AR_SREV_9580(ah))
else if (AR_SREV_9550(ah)) {
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar955x_1p0_common_wo_xlna_rx_gain_table,
ARRAY_SIZE(ar955x_1p0_common_wo_xlna_rx_gain_table),
2);
INIT_INI_ARRAY(&ah->ini_modes_rx_gain_bounds,
ar955x_1p0_common_wo_xlna_rx_gain_bounds,
ARRAY_SIZE(ar955x_1p0_common_wo_xlna_rx_gain_bounds),
5);
} else if (AR_SREV_9580(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9580_1p0_wo_xlna_rx_gain_table,
ARRAY_SIZE(ar9580_1p0_wo_xlna_rx_gain_table),

View File

@ -1015,12 +1015,9 @@ void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force)
return;
if (mci->is_2g) {
if (!force) {
ar9003_mci_send_2g5g_status(ah, true);
ar9003_mci_send_lna_transfer(ah, true);
udelay(5);
}
ar9003_mci_send_2g5g_status(ah, true);
ar9003_mci_send_lna_transfer(ah, true);
udelay(5);
REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
@ -1030,10 +1027,8 @@ void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force)
if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA))
ar9003_mci_osla_setup(ah, true);
} else {
if (!force) {
ar9003_mci_send_lna_take(ah, true);
udelay(5);
}
ar9003_mci_send_lna_take(ah, true);
udelay(5);
REG_SET_BIT(ah, AR_MCI_TX_CTRL,
AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
@ -1041,8 +1036,7 @@ void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force)
AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
ar9003_mci_osla_setup(ah, false);
if (!force)
ar9003_mci_send_2g5g_status(ah, true);
ar9003_mci_send_2g5g_status(ah, true);
}
}

View File

@ -211,7 +211,7 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES, 7);
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL, 1);
if (AR_SREV_9485(ah) || AR_SREV_9462(ah))
if (AR_SREV_9485(ah) || AR_SREV_9462(ah) || AR_SREV_9550(ah))
REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP,
-3);

View File

@ -99,7 +99,7 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
channelSel = (freq * 4) / 120;
chan_frac = (((freq * 4) % 120) * 0x20000) / 120;
channelSel = (channelSel << 17) | chan_frac;
} else if (AR_SREV_9340(ah)) {
} else if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
if (ah->is_clk_25mhz) {
u32 chan_frac;
@ -113,7 +113,8 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
/* Set to 2G mode */
bMode = 1;
} else {
if (AR_SREV_9340(ah) && ah->is_clk_25mhz) {
if ((AR_SREV_9340(ah) || AR_SREV_9550(ah)) &&
ah->is_clk_25mhz) {
u32 chan_frac;
channelSel = (freq * 2) / 75;
@ -180,7 +181,8 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
* is out-of-band and can be ignored.
*/
if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah)) {
if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
AR_SREV_9550(ah)) {
if (spur_fbin_ptr[0] == 0) /* No spur */
return;
max_spur_cnts = 5;
@ -205,7 +207,8 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
if (AR_SREV_9462(ah) && (i == 0 || i == 3))
continue;
negative = 0;
if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah))
if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
AR_SREV_9550(ah))
cur_bb_spur = ath9k_hw_fbin2freq(spur_fbin_ptr[i],
IS_CHAN_2GHZ(chan));
else
@ -618,6 +621,50 @@ static void ar9003_hw_prog_ini(struct ath_hw *ah,
}
}
static int ar9550_hw_get_modes_txgain_index(struct ath_hw *ah,
struct ath9k_channel *chan)
{
int ret;
switch (chan->chanmode) {
case CHANNEL_A:
case CHANNEL_A_HT20:
if (chan->channel <= 5350)
ret = 1;
else if ((chan->channel > 5350) && (chan->channel <= 5600))
ret = 3;
else
ret = 5;
break;
case CHANNEL_A_HT40PLUS:
case CHANNEL_A_HT40MINUS:
if (chan->channel <= 5350)
ret = 2;
else if ((chan->channel > 5350) && (chan->channel <= 5600))
ret = 4;
else
ret = 6;
break;
case CHANNEL_G:
case CHANNEL_G_HT20:
case CHANNEL_B:
ret = 8;
break;
case CHANNEL_G_HT40PLUS:
case CHANNEL_G_HT40MINUS:
ret = 7;
break;
default:
ret = -EINVAL;
}
return ret;
}
static int ar9003_hw_process_ini(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@ -659,7 +706,22 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
}
REG_WRITE_ARRAY(&ah->iniModesRxGain, 1, regWrites);
REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
if (AR_SREV_9550(ah))
REG_WRITE_ARRAY(&ah->ini_modes_rx_gain_bounds, modesIndex,
regWrites);
if (AR_SREV_9550(ah)) {
int modes_txgain_index;
modes_txgain_index = ar9550_hw_get_modes_txgain_index(ah, chan);
if (modes_txgain_index < 0)
return -EINVAL;
REG_WRITE_ARRAY(&ah->iniModesTxGain, modes_txgain_index,
regWrites);
} else {
REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
}
/*
* For 5GHz channels requiring Fast Clock, apply

View File

@ -636,8 +636,8 @@
#define AR_CH0_TOP (AR_SREV_9300(ah) ? 0x16288 : \
((AR_SREV_9462(ah) ? 0x1628c : 0x16280)))
#define AR_CH0_TOP_XPABIASLVL (0x300)
#define AR_CH0_TOP_XPABIASLVL_S (8)
#define AR_CH0_TOP_XPABIASLVL (AR_SREV_9550(ah) ? 0x3c0 : 0x300)
#define AR_CH0_TOP_XPABIASLVL_S (AR_SREV_9550(ah) ? 6 : 8)
#define AR_CH0_THERM (AR_SREV_9300(ah) ? 0x16290 : \
((AR_SREV_9485(ah) ? 0x1628c : 0x16294)))
@ -650,6 +650,8 @@
#define AR_SWITCH_TABLE_COM_ALL_S (0)
#define AR_SWITCH_TABLE_COM_AR9462_ALL (0xffffff)
#define AR_SWITCH_TABLE_COM_AR9462_ALL_S (0)
#define AR_SWITCH_TABLE_COM_AR9550_ALL (0xffffff)
#define AR_SWITCH_TABLE_COM_AR9550_ALL_S (0)
#define AR_SWITCH_TABLE_COM_SPDT (0x00f00000)
#define AR_SWITCH_TABLE_COM_SPDT_ALL (0x0000fff0)
#define AR_SWITCH_TABLE_COM_SPDT_ALL_S (4)

File diff suppressed because it is too large

View File

@ -481,6 +481,7 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc);
void ath9k_btcoex_timer_pause(struct ath_softc *sc);
void ath9k_btcoex_handle_interrupt(struct ath_softc *sc, u32 status);
u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen);
void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc);
#else
static inline int ath9k_init_btcoex(struct ath_softc *sc)
{
@ -504,6 +505,9 @@ static inline u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc,
{
return 0;
}
static inline void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
{
}
#endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */
/********************/

View File

@ -194,6 +194,14 @@ static void ath_btcoex_period_timer(unsigned long data)
struct ath_mci_profile *mci = &btcoex->mci;
u32 timer_period;
bool is_btscan;
unsigned long flags;
spin_lock_irqsave(&sc->sc_pm_lock, flags);
if (sc->sc_ah->power_mode == ATH9K_PM_NETWORK_SLEEP) {
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
goto skip_hw_wakeup;
}
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
ath9k_ps_wakeup(sc);
if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
@ -232,6 +240,7 @@ static void ath_btcoex_period_timer(unsigned long data)
}
ath9k_ps_restore(sc);
skip_hw_wakeup:
timer_period = btcoex->btcoex_period;
mod_timer(&btcoex->period_timer, jiffies + msecs_to_jiffies(timer_period));
}
@ -305,7 +314,8 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
btcoex->bt_priority_cnt = 0;
btcoex->bt_priority_time = jiffies;
btcoex->op_flags &= ~(BT_OP_PRIORITY_DETECTED | BT_OP_SCAN);
clear_bit(BT_OP_PRIORITY_DETECTED, &btcoex->op_flags);
clear_bit(BT_OP_SCAN, &btcoex->op_flags);
mod_timer(&btcoex->period_timer, jiffies);
}
@ -327,6 +337,13 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc)
btcoex->hw_timer_enabled = false;
}
void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc)
{
struct ath_btcoex *btcoex = &sc->btcoex;
ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
}
u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen)
{
struct ath_btcoex *btcoex = &sc->btcoex;
@ -376,9 +393,9 @@ void ath9k_stop_btcoex(struct ath_softc *sc)
if (ah->btcoex_hw.enabled &&
ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
ath9k_hw_btcoex_disable(ah);
if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
ath9k_btcoex_timer_pause(sc);
ath9k_hw_btcoex_disable(ah);
if (AR_SREV_9462(ah))
ath_mci_flush_profile(&sc->btcoex.mci);
}
@ -386,11 +403,13 @@ void ath9k_stop_btcoex(struct ath_softc *sc)
void ath9k_deinit_btcoex(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
if ((sc->btcoex.no_stomp_timer) &&
ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE)
ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
if (AR_SREV_9462(sc->sc_ah))
if (ath9k_hw_mci_is_enabled(ah))
ath_mci_cleanup(sc);
}

View File

@ -342,6 +342,9 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
val = REG_READ(ah, AR_SREV);
ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
return;
case AR9300_DEVID_QCA955X:
ah->hw_version.macVersion = AR_SREV_VERSION_9550;
return;
}
val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
@ -646,6 +649,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
case AR_SREV_VERSION_9485:
case AR_SREV_VERSION_9340:
case AR_SREV_VERSION_9462:
case AR_SREV_VERSION_9550:
break;
default:
ath_err(common,
@ -655,7 +659,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
}
if (AR_SREV_9271(ah) || AR_SREV_9100(ah) || AR_SREV_9340(ah) ||
AR_SREV_9330(ah))
AR_SREV_9330(ah) || AR_SREV_9550(ah))
ah->is_pciexpress = false;
ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
@ -727,6 +731,7 @@ int ath9k_hw_init(struct ath_hw *ah)
case AR9300_DEVID_AR9485_PCIE:
case AR9300_DEVID_AR9330:
case AR9300_DEVID_AR9340:
case AR9300_DEVID_QCA955X:
case AR9300_DEVID_AR9580:
case AR9300_DEVID_AR9462:
break;
@ -865,7 +870,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
/* program BB PLL phase_shift */
REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x1);
} else if (AR_SREV_9340(ah)) {
} else if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
u32 regval, pll2_divint, pll2_divfrac, refdiv;
REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c);
@ -879,9 +884,15 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
pll2_divfrac = 0x1eb85;
refdiv = 3;
} else {
pll2_divint = 88;
pll2_divfrac = 0;
refdiv = 5;
if (AR_SREV_9340(ah)) {
pll2_divint = 88;
pll2_divfrac = 0;
refdiv = 5;
} else {
pll2_divint = 0x11;
pll2_divfrac = 0x26666;
refdiv = 1;
}
}
regval = REG_READ(ah, AR_PHY_PLL_MODE);
@ -894,8 +905,12 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
udelay(100);
regval = REG_READ(ah, AR_PHY_PLL_MODE);
regval = (regval & 0x80071fff) | (0x1 << 30) | (0x1 << 13) |
(0x4 << 26) | (0x18 << 19);
if (AR_SREV_9340(ah))
regval = (regval & 0x80071fff) | (0x1 << 30) |
(0x1 << 13) | (0x4 << 26) | (0x18 << 19);
else
regval = (regval & 0x80071fff) | (0x3 << 30) |
(0x1 << 13) | (0x4 << 26) | (0x60 << 19);
REG_WRITE(ah, AR_PHY_PLL_MODE, regval);
REG_WRITE(ah, AR_PHY_PLL_MODE,
REG_READ(ah, AR_PHY_PLL_MODE) & 0xfffeffff);
@ -906,7 +921,8 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah))
if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah) ||
AR_SREV_9550(ah))
udelay(1000);
/* Switch the core clock for ar9271 to 117Mhz */
@ -919,7 +935,7 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
if (AR_SREV_9340(ah)) {
if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
if (ah->is_clk_25mhz) {
REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
@ -943,7 +959,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
AR_IMR_RXORN |
AR_IMR_BCNMISC;
if (AR_SREV_9340(ah))
if (AR_SREV_9340(ah) || AR_SREV_9550(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
if (AR_SREV_9300_20_OR_LATER(ah)) {
@ -1949,9 +1965,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (!ath9k_hw_init_cal(ah, chan))
return -EIO;
ath9k_hw_loadnf(ah, chan);
ath9k_hw_start_nfcal(ah, true);
if (ath9k_hw_mci_is_enabled(ah) && ar9003_mci_end_reset(ah, chan, caldata))
return -EIO;
@ -1987,7 +2000,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
}
#ifdef __BIG_ENDIAN
else if (AR_SREV_9330(ah) || AR_SREV_9340(ah))
else if (AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
AR_SREV_9550(ah))
REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
else
REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
@ -2000,6 +2014,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (ath9k_hw_mci_is_enabled(ah))
ar9003_mci_check_bt(ah);
ath9k_hw_loadnf(ah, chan);
ath9k_hw_start_nfcal(ah, true);
if (AR_SREV_9300_20_OR_LATER(ah)) {
ar9003_hw_bb_watchdog_config(ah);
@ -3137,6 +3154,7 @@ static struct {
{ AR_SREV_VERSION_9340, "9340" },
{ AR_SREV_VERSION_9485, "9485" },
{ AR_SREV_VERSION_9462, "9462" },
{ AR_SREV_VERSION_9550, "9550" },
};
/* For devices with external radios */

View File

@ -48,6 +48,7 @@
#define AR9300_DEVID_AR9580 0x0033
#define AR9300_DEVID_AR9462 0x0034
#define AR9300_DEVID_AR9330 0x0035
#define AR9300_DEVID_QCA955X 0x0038
#define AR5416_AR9100_DEVID 0x000b
@ -818,6 +819,7 @@ struct ath_hw {
struct ar5416IniArray iniModesFastClock;
struct ar5416IniArray iniAdditional;
struct ar5416IniArray iniModesRxGain;
struct ar5416IniArray ini_modes_rx_gain_bounds;
struct ar5416IniArray iniModesTxGain;
struct ar5416IniArray iniCckfirNormal;
struct ar5416IniArray iniCckfirJapan2484;

View File

@ -810,7 +810,7 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
return;
}
if (AR_SREV_9340(ah))
if (AR_SREV_9340(ah) || AR_SREV_9550(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
async_mask = AR_INTR_MAC_IRQ;

View File

@ -646,6 +646,7 @@ enum ath9k_rx_filter {
ATH9K_RX_FILTER_PHYRADAR = 0x00002000,
ATH9K_RX_FILTER_MCAST_BCAST_ALL = 0x00008000,
ATH9K_RX_FILTER_CONTROL_WRAPPER = 0x00080000,
ATH9K_RX_FILTER_4ADDRESS = 0x00100000,
};
#define ATH9K_RATESERIES_RTS_CTS 0x0001

View File

@ -130,6 +130,8 @@ void ath9k_ps_restore(struct ath_softc *sc)
PS_WAIT_FOR_PSPOLL_DATA |
PS_WAIT_FOR_TX_ACK))) {
mode = ATH9K_PM_NETWORK_SLEEP;
if (ath9k_hw_btcoex_is_enabled(sc->sc_ah))
ath9k_btcoex_stop_gen_timer(sc);
} else {
goto unlock;
}
@ -169,7 +171,8 @@ static void ath_restart_work(struct ath_softc *sc)
ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
if (AR_SREV_9485(sc->sc_ah) || AR_SREV_9340(sc->sc_ah))
if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9485(sc->sc_ah) ||
AR_SREV_9550(sc->sc_ah))
ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));
@ -666,8 +669,6 @@ static int ath9k_start(struct ieee80211_hw *hw)
spin_unlock_bh(&sc->sc_pcu_lock);
ath9k_start_btcoex(sc);
if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en)
common->bus_ops->extn_synch_en(common);
@ -774,8 +775,6 @@ static void ath9k_stop(struct ieee80211_hw *hw)
/* Ensure HW is awake when we try to shut it down. */
ath9k_ps_wakeup(sc);
ath9k_stop_btcoex(sc);
spin_lock_bh(&sc->sc_pcu_lock);
/* prevent tasklets to enable interrupts once we disable them */
@ -1139,14 +1138,17 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
if (sc->ps_idle)
if (sc->ps_idle) {
ath_cancel_work(sc);
else
ath9k_stop_btcoex(sc);
} else {
ath9k_start_btcoex(sc);
/*
* The chip needs a reset to properly wake up from
* full sleep
*/
reset_channel = ah->chip_fullsleep;
}
}
/*

View File

@ -174,8 +174,8 @@ static void ath_mci_update_scheme(struct ath_softc *sc)
btcoex->btcoex_period >>= 1;
}
ath9k_hw_btcoex_disable(sc->sc_ah);
ath9k_btcoex_timer_pause(sc);
ath9k_hw_btcoex_disable(sc->sc_ah);
if (IS_CHAN_5GHZ(sc->sc_ah->curchan))
return;

View File

@ -430,6 +430,9 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
rfilt |= ATH9K_RX_FILTER_MCAST_BCAST_ALL;
}
if (AR_SREV_9550(sc->sc_ah))
rfilt |= ATH9K_RX_FILTER_4ADDRESS;
return rfilt;
}

View File

@ -798,6 +798,7 @@
#define AR_SREV_REVISION_9580_10 4 /* AR9580 1.0 */
#define AR_SREV_VERSION_9462 0x280
#define AR_SREV_REVISION_9462_20 2
#define AR_SREV_VERSION_9550 0x400
#define AR_SREV_5416(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
@ -905,6 +906,9 @@
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9462) && \
((_ah)->hw_version.macRev >= AR_SREV_REVISION_9462_20))
#define AR_SREV_9550(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9550))
#define AR_SREV_9580(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9580) && \
((_ah)->hw_version.macRev >= AR_SREV_REVISION_9580_10))
@ -1643,11 +1647,11 @@ enum {
#define AR_TPC 0x80e8
#define AR_TPC_ACK 0x0000003f
#define AR_TPC_ACK_S 0x00
#define AR_TPC_ACK_S 0
#define AR_TPC_CTS 0x00003f00
#define AR_TPC_CTS_S 0x08
#define AR_TPC_CTS_S 8
#define AR_TPC_CHIRP 0x003f0000
#define AR_TPC_CHIRP_S 0x16
#define AR_TPC_CHIRP_S 16
#define AR_QUIET1 0x80fc
#define AR_QUIET1_NEXT_QUIET_S 0
@ -2077,12 +2081,6 @@ enum {
AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET| \
AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING | \
AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING| \
AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL | \
AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \
AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \
AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \
AR_MCI_INTERRUPT_RX_MSG_CONT_RST | \
AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
#define AR_MCI_CPU_INT 0x1840

View File

@ -289,6 +289,7 @@ struct ar9170 {
unsigned int mem_block_size;
unsigned int rx_size;
unsigned int tx_seq_table;
bool ba_filter;
} fw;
/* interface configuration combinations */
@ -425,6 +426,10 @@ struct ar9170 {
struct sk_buff *rx_failover;
int rx_failover_missing;
/* FIFO for collecting outstanding BlockAckRequest */
struct list_head bar_list[__AR9170_NUM_TXQ];
spinlock_t bar_list_lock[__AR9170_NUM_TXQ];
#ifdef CONFIG_CARL9170_WPC
struct {
bool pbc_state;
@ -468,6 +473,12 @@ enum carl9170_ps_off_override_reasons {
PS_OFF_BCN = BIT(1),
};
struct carl9170_bar_list_entry {
struct list_head list;
struct rcu_head head;
struct sk_buff *skb;
};
struct carl9170_ba_stats {
u8 ampdu_len;
u8 ampdu_ack_len;

View File

@ -307,6 +307,9 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
if (SUPP(CARL9170FW_WOL))
device_set_wakeup_enable(&ar->udev->dev, true);
if (SUPP(CARL9170FW_RX_BA_FILTER))
ar->fw.ba_filter = true;
if_comb_types = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_P2P_CLIENT);

View File

@ -78,6 +78,9 @@ enum carl9170fw_feature_list {
/* HW (ANI, CCA, MIB) tally counters */
CARL9170FW_HW_COUNTERS,
/* Firmware will pass BA when BARs are queued */
CARL9170FW_RX_BA_FILTER,
/* KEEP LAST */
__CARL9170FW_FEATURE_NUM
};

View File

@ -949,6 +949,9 @@ static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
u32 rx_filter = 0;
if (!ar->fw.ba_filter)
rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
rx_filter |= CARL9170_RX_FILTER_BAD;
@ -1753,6 +1756,9 @@ void *carl9170_alloc(size_t priv_size)
for (i = 0; i < ar->hw->queues; i++) {
skb_queue_head_init(&ar->tx_status[i]);
skb_queue_head_init(&ar->tx_pending[i]);
INIT_LIST_HEAD(&ar->bar_list[i]);
spin_lock_init(&ar->bar_list_lock[i]);
}
INIT_WORK(&ar->ps_work, carl9170_ps_work);
INIT_WORK(&ar->ping_work, carl9170_ping_work);

View File

@ -576,6 +576,53 @@ static void carl9170_ps_beacon(struct ar9170 *ar, void *data, unsigned int len)
}
}
static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
{
struct ieee80211_bar *bar = (void *) data;
struct carl9170_bar_list_entry *entry;
unsigned int queue;
if (likely(!ieee80211_is_back(bar->frame_control)))
return;
if (len <= sizeof(*bar) + FCS_LEN)
return;
queue = TID_TO_WME_AC(((le16_to_cpu(bar->control) &
IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
IEEE80211_BAR_CTRL_TID_INFO_SHIFT) & 7);
rcu_read_lock();
list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) {
struct sk_buff *entry_skb = entry->skb;
struct _carl9170_tx_superframe *super = (void *)entry_skb->data;
struct ieee80211_bar *entry_bar = (void *)super->frame_data;
#define TID_CHECK(a, b) ( \
((a) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK)) == \
((b) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK))) \
if (bar->start_seq_num == entry_bar->start_seq_num &&
TID_CHECK(bar->control, entry_bar->control) &&
compare_ether_addr(bar->ra, entry_bar->ta) == 0 &&
compare_ether_addr(bar->ta, entry_bar->ra) == 0) {
struct ieee80211_tx_info *tx_info;
tx_info = IEEE80211_SKB_CB(entry_skb);
tx_info->flags |= IEEE80211_TX_STAT_ACK;
spin_lock_bh(&ar->bar_list_lock[queue]);
list_del_rcu(&entry->list);
spin_unlock_bh(&ar->bar_list_lock[queue]);
kfree_rcu(entry, head);
break;
}
}
rcu_read_unlock();
#undef TID_CHECK
}
static bool carl9170_ampdu_check(struct ar9170 *ar, u8 *buf, u8 ms)
{
__le16 fc;
@ -738,6 +785,8 @@ static void carl9170_handle_mpdu(struct ar9170 *ar, u8 *buf, int len)
carl9170_ps_beacon(ar, buf, mpdu_len);
carl9170_ba_check(ar, buf, mpdu_len);
skb = carl9170_rx_copy_data(buf, mpdu_len);
if (!skb)
goto drop;

View File

@ -277,11 +277,11 @@ static void carl9170_tx_release(struct kref *ref)
return;
BUILD_BUG_ON(
offsetof(struct ieee80211_tx_info, status.ampdu_ack_len) != 23);
offsetof(struct ieee80211_tx_info, status.ack_signal) != 20);
memset(&txinfo->status.ampdu_ack_len, 0,
memset(&txinfo->status.ack_signal, 0,
sizeof(struct ieee80211_tx_info) -
offsetof(struct ieee80211_tx_info, status.ampdu_ack_len));
offsetof(struct ieee80211_tx_info, status.ack_signal));
if (atomic_read(&ar->tx_total_queued))
ar->tx_schedule = true;
@ -436,6 +436,45 @@ static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
rcu_read_unlock();
}
static void carl9170_tx_bar_status(struct ar9170 *ar, struct sk_buff *skb,
struct ieee80211_tx_info *tx_info)
{
struct _carl9170_tx_superframe *super = (void *) skb->data;
struct ieee80211_bar *bar = (void *) super->frame_data;
/*
* Unlike all other frames, the status report for BARs does
* not directly come from the hardware as it is incapable of
* matching a BA to a previously sent BAR.
* Instead the RX-path will scan for incoming BAs and set the
* IEEE80211_TX_STAT_ACK if it sees one that was likely
* caused by a BAR from us.
*/
if (unlikely(ieee80211_is_back_req(bar->frame_control)) &&
!(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
struct carl9170_bar_list_entry *entry;
int queue = skb_get_queue_mapping(skb);
rcu_read_lock();
list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) {
if (entry->skb == skb) {
spin_lock_bh(&ar->bar_list_lock[queue]);
list_del_rcu(&entry->list);
spin_unlock_bh(&ar->bar_list_lock[queue]);
kfree_rcu(entry, head);
goto out;
}
}
WARN(1, "bar not found in %d - ra:%pM ta:%pM c:%x ssn:%x\n",
queue, bar->ra, bar->ta, bar->control,
bar->start_seq_num);
out:
rcu_read_unlock();
}
}
void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
const bool success)
{
@ -445,6 +484,8 @@ void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
txinfo = IEEE80211_SKB_CB(skb);
carl9170_tx_bar_status(ar, skb, txinfo);
if (success)
txinfo->flags |= IEEE80211_TX_STAT_ACK;
else
@ -1265,6 +1306,26 @@ static bool carl9170_tx_ps_drop(struct ar9170 *ar, struct sk_buff *skb)
return false;
}
static void carl9170_bar_check(struct ar9170 *ar, struct sk_buff *skb)
{
struct _carl9170_tx_superframe *super = (void *) skb->data;
struct ieee80211_bar *bar = (void *) super->frame_data;
if (unlikely(ieee80211_is_back_req(bar->frame_control)) &&
skb->len >= sizeof(struct ieee80211_bar)) {
struct carl9170_bar_list_entry *entry;
unsigned int queue = skb_get_queue_mapping(skb);
entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
if (!WARN_ON_ONCE(!entry)) {
entry->skb = skb;
spin_lock_bh(&ar->bar_list_lock[queue]);
list_add_tail_rcu(&entry->list, &ar->bar_list[queue]);
spin_unlock_bh(&ar->bar_list_lock[queue]);
}
}
}
static void carl9170_tx(struct ar9170 *ar)
{
struct sk_buff *skb;
@ -1287,6 +1348,8 @@ static void carl9170_tx(struct ar9170 *ar)
if (unlikely(carl9170_tx_ps_drop(ar, skb)))
continue;
carl9170_bar_check(ar, skb);
atomic_inc(&ar->tx_total_pending);
q = __carl9170_get_queue(ar, i);
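/*
 * Illustrative sketch only (not part of carl9170 or this commit): the
 * RCU-protected list pattern used for the BAR bookkeeping above.
 * Writers add and unlink entries under a spinlock, readers walk the
 * list under rcu_read_lock(), and unlinked entries are reclaimed with
 * kfree_rcu() after a grace period. All demo_* names are made up.
 */
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

struct demo_entry {
	struct list_head list;
	struct rcu_head head;
	int key;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);

static int demo_add(int key, gfp_t gfp)
{
	struct demo_entry *e = kmalloc(sizeof(*e), gfp);

	if (!e)
		return -ENOMEM;
	e->key = key;

	spin_lock_bh(&demo_lock);
	list_add_tail_rcu(&e->list, &demo_list);
	spin_unlock_bh(&demo_lock);
	return 0;
}

/* Find a matching entry, unlink it and defer the free past a grace period. */
static bool demo_del(int key)
{
	struct demo_entry *e;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &demo_list, list) {
		if (e->key != key)
			continue;

		spin_lock_bh(&demo_lock);
		list_del_rcu(&e->list);
		spin_unlock_bh(&demo_lock);
		kfree_rcu(e, head);
		found = true;
		break;
	}
	rcu_read_unlock();
	return found;
}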

View File

@ -1,7 +1,7 @@
#ifndef __CARL9170_SHARED_VERSION_H
#define __CARL9170_SHARED_VERSION_H
#define CARL9170FW_VERSION_YEAR 11
#define CARL9170FW_VERSION_MONTH 8
#define CARL9170FW_VERSION_DAY 15
#define CARL9170FW_VERSION_GIT "1.9.4"
#define CARL9170FW_VERSION_YEAR 12
#define CARL9170FW_VERSION_MONTH 7
#define CARL9170FW_VERSION_DAY 7
#define CARL9170FW_VERSION_GIT "1.9.6"
#endif /* __CARL9170_SHARED_VERSION_H */

View File

@ -1369,7 +1369,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
i << 2);
b43_nphy_poll_rssi(dev, 2, results[i], 8);
}
for (i = 0; i < 4; i++) {
for (i = 0; i < 4; i += 2) {
s32 curr;
s32 mind = 40;
s32 minpoll = 249;
@ -1415,14 +1415,15 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
b43_nphy_scale_offset_rssi(dev, 0, 0, core + 1, 1, i);
b43_nphy_poll_rssi(dev, i, poll_results, 8);
for (j = 0; j < 4; j++) {
if (j / 2 == core)
if (j / 2 == core) {
offset[j] = 232 - poll_results[j];
if (offset[j] < 0)
offset[j] = -(abs(offset[j] + 4) / 8);
else
offset[j] = (offset[j] + 4) / 8;
b43_nphy_scale_offset_rssi(dev, 0,
offset[2 * core], core + 1, j % 2, i);
if (offset[j] < 0)
offset[j] = -(abs(offset[j] + 4) / 8);
else
offset[j] = (offset[j] + 4) / 8;
b43_nphy_scale_offset_rssi(dev, 0,
offset[2 * core], core + 1, j % 2, i);
}
}
}
}

View File

@ -318,10 +318,6 @@
#define IS_SIM(chippkg) \
((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
#define PCIE(sih) (ai_get_buscoretype(sih) == PCIE_CORE_ID)
#define PCI_FORCEHT(sih) (PCIE(sih) && (ai_get_chip_id(sih) == BCM4716_CHIP_ID))
#ifdef DEBUG
#define SI_MSG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
#else
@ -473,9 +469,6 @@ ai_buscore_setup(struct si_info *sii, struct bcma_device *cc)
sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
}
/* figure out buscore */
sii->buscore = ai_findcore(&sii->pub, PCIE_CORE_ID, 0);
return true;
}
@ -483,11 +476,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
struct bcma_bus *pbus)
{
struct si_pub *sih = &sii->pub;
u32 w, savewin;
struct bcma_device *cc;
struct ssb_sprom *sprom = &pbus->sprom;
savewin = 0;
sii->icbus = pbus;
sii->pcibus = pbus->host_pci;
@ -510,47 +499,7 @@ static struct si_info *ai_doattach(struct si_info *sii,
/* PMU specific initializations */
if (ai_get_cccaps(sih) & CC_CAP_PMU) {
si_pmu_init(sih);
(void)si_pmu_measure_alpclk(sih);
si_pmu_res_init(sih);
}
/* setup the GPIO based LED powersave register */
w = (sprom->leddc_on_time << BCMA_CC_GPIOTIMER_ONTIME_SHIFT) |
(sprom->leddc_off_time << BCMA_CC_GPIOTIMER_OFFTIME_SHIFT);
if (w == 0)
w = DEFAULT_GPIOTIMERVAL;
ai_cc_reg(sih, offsetof(struct chipcregs, gpiotimerval),
~0, w);
if (ai_get_chip_id(sih) == BCM43224_CHIP_ID) {
/*
* enable 12 mA drive strength for 43224 and
* set chipControl register bit 15
*/
if (ai_get_chiprev(sih) == 0) {
SI_MSG("Applying 43224A0 WARs\n");
ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol),
CCTRL43224_GPIO_TOGGLE,
CCTRL43224_GPIO_TOGGLE);
si_pmu_chipcontrol(sih, 0, CCTRL_43224A0_12MA_LED_DRIVE,
CCTRL_43224A0_12MA_LED_DRIVE);
}
if (ai_get_chiprev(sih) >= 1) {
SI_MSG("Applying 43224B0+ WARs\n");
si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
CCTRL_43224B0_12MA_LED_DRIVE);
}
}
if (ai_get_chip_id(sih) == BCM4313_CHIP_ID) {
/*
* enable 12 mA drive strength for 4313 and
* set chipControl register bit 1
*/
SI_MSG("Applying 4313 WARs\n");
si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE,
CCTRL_4313_12MA_LED_DRIVE);
}
return sii;
@ -589,7 +538,7 @@ void ai_detach(struct si_pub *sih)
struct si_pub *si_local = NULL;
memcpy(&si_local, &sih, sizeof(struct si_pub **));
sii = (struct si_info *)sih;
sii = container_of(sih, struct si_info, pub);
if (sii == NULL)
return;
@ -597,27 +546,6 @@ void ai_detach(struct si_pub *sih)
kfree(sii);
}
/* return index of coreid or BADIDX if not found */
struct bcma_device *ai_findcore(struct si_pub *sih, u16 coreid, u16 coreunit)
{
struct bcma_device *core;
struct si_info *sii;
uint found;
sii = (struct si_info *)sih;
found = 0;
list_for_each_entry(core, &sii->icbus->cores, list)
if (core->id.id == coreid) {
if (found == coreunit)
return core;
found++;
}
return NULL;
}
/*
* read/modify chipcommon core register.
*/
@ -627,7 +555,7 @@ uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val)
u32 w;
struct si_info *sii;
sii = (struct si_info *)sih;
sii = container_of(sih, struct si_info, pub);
cc = sii->icbus->drv_cc.core;
/* mask and set */
@ -693,12 +621,13 @@ ai_clkctl_setdelay(struct si_pub *sih, struct bcma_device *cc)
/* initialize power control delay registers */
void ai_clkctl_init(struct si_pub *sih)
{
struct si_info *sii = container_of(sih, struct si_info, pub);
struct bcma_device *cc;
if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL))
return;
cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
cc = sii->icbus->drv_cc.core;
if (cc == NULL)
return;
@ -720,7 +649,7 @@ u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
uint slowminfreq;
u16 fpdelay;
sii = (struct si_info *)sih;
sii = container_of(sih, struct si_info, pub);
if (ai_get_cccaps(sih) & CC_CAP_PMU) {
fpdelay = si_pmu_fast_pwrup_delay(sih);
return fpdelay;
@ -730,7 +659,7 @@ u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
return 0;
fpdelay = 0;
cc = ai_findcore(sih, CC_CORE_ID, 0);
cc = sii->icbus->drv_cc.core;
if (cc) {
slowminfreq = ai_slowclk_freq(sih, false, cc);
fpdelay = (((bcma_read32(cc, CHIPCREGOFFS(pll_on_delay)) + 2)
@ -752,12 +681,9 @@ bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode)
struct si_info *sii;
struct bcma_device *cc;
sii = (struct si_info *)sih;
sii = container_of(sih, struct si_info, pub);
if (PCI_FORCEHT(sih))
return mode == BCMA_CLKMODE_FAST;
cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
cc = sii->icbus->drv_cc.core;
bcma_core_set_clockmode(cc, mode);
return mode == BCMA_CLKMODE_FAST;
}
@ -765,16 +691,10 @@ bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode)
void ai_pci_up(struct si_pub *sih)
{
struct si_info *sii;
struct bcma_device *cc;
sii = (struct si_info *)sih;
sii = container_of(sih, struct si_info, pub);
if (PCI_FORCEHT(sih)) {
cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
bcma_core_set_clockmode(cc, BCMA_CLKMODE_FAST);
}
if (PCIE(sih))
if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci, true);
}
@ -782,26 +702,20 @@ void ai_pci_up(struct si_pub *sih)
void ai_pci_down(struct si_pub *sih)
{
struct si_info *sii;
struct bcma_device *cc;
sii = (struct si_info *)sih;
sii = container_of(sih, struct si_info, pub);
/* release FORCEHT since chip is going to "down" state */
if (PCI_FORCEHT(sih)) {
cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
bcma_core_set_clockmode(cc, BCMA_CLKMODE_DYNAMIC);
}
if (PCIE(sih))
if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci, false);
}
/* Enable BT-COEX & Ex-PA for 4313 */
void ai_epa_4313war(struct si_pub *sih)
{
struct si_info *sii = container_of(sih, struct si_info, pub);
struct bcma_device *cc;
cc = ai_findcore(sih, CC_CORE_ID, 0);
cc = sii->icbus->drv_cc.core;
/* EPA Fix */
bcma_set32(cc, CHIPCREGOFFS(gpiocontrol), GPIO_CTRL_EPA_EN_MASK);
@ -813,7 +727,7 @@ bool ai_deviceremoved(struct si_pub *sih)
u32 w;
struct si_info *sii;
sii = (struct si_info *)sih;
sii = container_of(sih, struct si_info, pub);
if (sii->icbus->hosttype != BCMA_HOSTTYPE_PCI)
return false;
@ -824,15 +738,3 @@ bool ai_deviceremoved(struct si_pub *sih)
return false;
}
uint ai_get_buscoretype(struct si_pub *sih)
{
struct si_info *sii = (struct si_info *)sih;
return sii->buscore->id.id;
}
uint ai_get_buscorerev(struct si_pub *sih)
{
struct si_info *sii = (struct si_info *)sih;
return sii->buscore->id.rev;
}

View File

@ -88,16 +88,6 @@
#define CLKD_OTP 0x000f0000
#define CLKD_OTP_SHIFT 16
/* Package IDs */
#define BCM4717_PKG_ID 9 /* 4717 package id */
#define BCM4718_PKG_ID 10 /* 4718 package id */
#define BCM43224_FAB_SMIC 0xa /* the chip is manufactured by SMIC */
/* these are router chips */
#define BCM4716_CHIP_ID 0x4716 /* 4716 chipcommon chipid */
#define BCM47162_CHIP_ID 47162 /* 47162 chipcommon chipid */
#define BCM4748_CHIP_ID 0x4748 /* 4716 chipcommon chipid (OTP, RBBU) */
/* dynamic clock control defines */
#define LPOMINFREQ 25000 /* low power oscillator min */
#define LPOMAXFREQ 43000 /* low power oscillator max */
@ -168,7 +158,6 @@ struct si_info {
struct si_pub pub; /* back plane public state (must be first) */
struct bcma_bus *icbus; /* handle to soc interconnect bus */
struct pci_dev *pcibus; /* handle to pci bus */
struct bcma_device *buscore;
u32 chipst; /* chip status */
};
@ -183,8 +172,6 @@ struct si_info {
/* AMBA Interconnect exported externs */
extern struct bcma_device *ai_findcore(struct si_pub *sih,
u16 coreid, u16 coreunit);
extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
/* === exported functions === */
@ -202,9 +189,6 @@ extern void ai_pci_up(struct si_pub *sih);
/* Enable Ex-PA for 4313 */
extern void ai_epa_4313war(struct si_pub *sih);
extern uint ai_get_buscoretype(struct si_pub *sih);
extern uint ai_get_buscorerev(struct si_pub *sih);
static inline u32 ai_get_cccaps(struct si_pub *sih)
{
return sih->cccaps;

View File

@ -763,20 +763,17 @@ void brcms_c_regd_init(struct brcms_c_info *wlc)
int band_idx, i;
/* Disable any channels not supported by the phy */
for (band_idx = 0; band_idx < IEEE80211_NUM_BANDS; band_idx++) {
if (band_idx == IEEE80211_BAND_2GHZ)
band = wlc->bandstate[BAND_2G_INDEX];
else
band = wlc->bandstate[BAND_5G_INDEX];
/* skip if band not initialized */
if (band->pi == NULL)
continue;
for (band_idx = 0; band_idx < wlc->pub->_nbands; band_idx++) {
band = wlc->bandstate[band_idx];
wlc_phy_chanspec_band_validch(band->pi, band->bandtype,
&sup_chan);
sband = wiphy->bands[band_idx];
if (band_idx == BAND_2G_INDEX)
sband = wiphy->bands[IEEE80211_BAND_2GHZ];
else
sband = wiphy->bands[IEEE80211_BAND_5GHZ];
for (i = 0; i < sband->n_channels; i++) {
ch = &sband->channels[i];
if (!isset(sup_chan.vec, ch->hw_value))

View File

@ -573,6 +573,7 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
struct dma_info *di;
u8 rev = core->id.rev;
uint size;
struct si_info *sii = container_of(sih, struct si_info, pub);
/* allocate private info structure */
di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC);
@ -633,16 +634,20 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
*/
di->ddoffsetlow = 0;
di->dataoffsetlow = 0;
/* add offset for pcie with DMA64 bus */
di->ddoffsetlow = 0;
di->ddoffsethigh = SI_PCIE_DMA_H32;
/* for pci bus, add offset */
if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI) {
/* add offset for pcie with DMA64 bus */
di->ddoffsetlow = 0;
di->ddoffsethigh = SI_PCIE_DMA_H32;
}
di->dataoffsetlow = di->ddoffsetlow;
di->dataoffsethigh = di->ddoffsethigh;
/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
if ((core->id.id == SDIOD_CORE_ID)
if ((core->id.id == BCMA_CORE_SDIO_DEV)
&& ((rev > 0) && (rev <= 2)))
di->addrext = false;
else if ((core->id.id == I2S_CORE_ID) &&
else if ((core->id.id == BCMA_CORE_I2S) &&
((rev == 0) || (rev == 1)))
di->addrext = false;
else

View File

@ -319,8 +319,7 @@ static void brcms_ops_stop(struct ieee80211_hw *hw)
return;
spin_lock_bh(&wl->lock);
status = brcms_c_chipmatch(wl->wlc->hw->vendorid,
wl->wlc->hw->deviceid);
status = brcms_c_chipmatch(wl->wlc->hw->d11core);
spin_unlock_bh(&wl->lock);
if (!status) {
wiphy_err(wl->wiphy,

View File

@ -269,7 +269,7 @@ struct brcms_c_bit_desc {
*/
/* Starting corerev for the fifo size table */
#define XMTFIFOTBL_STARTREV 20
#define XMTFIFOTBL_STARTREV 17
struct d11init {
__le16 addr;
@ -333,6 +333,12 @@ const u8 wlc_prio2prec_map[] = {
};
static const u16 xmtfifo_sz[][NFIFO] = {
/* corerev 17: 5120, 49152, 49152, 5376, 4352, 1280 */
{20, 192, 192, 21, 17, 5},
/* corerev 18: */
{0, 0, 0, 0, 0, 0},
/* corerev 19: */
{0, 0, 0, 0, 0, 0},
/* corerev 20: 5120, 49152, 49152, 5376, 4352, 1280 */
{20, 192, 192, 21, 17, 5},
/* corerev 21: 2304, 14848, 5632, 3584, 3584, 1280 */
@ -343,6 +349,14 @@ static const u16 xmtfifo_sz[][NFIFO] = {
{20, 192, 192, 21, 17, 5},
/* corerev 24: 2304, 14848, 5632, 3584, 3584, 1280 */
{9, 58, 22, 14, 14, 5},
/* corerev 25: */
{0, 0, 0, 0, 0, 0},
/* corerev 26: */
{0, 0, 0, 0, 0, 0},
/* corerev 27: */
{0, 0, 0, 0, 0, 0},
/* corerev 28: 2304, 14848, 5632, 3584, 3584, 1280 */
{9, 58, 22, 14, 14, 5},
};
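With XMTFIFOTBL_STARTREV lowered from 20 to 17, the table above is indexed directly as xmtfifo_sz[corerev - XMTFIFOTBL_STARTREV], and the all-zero rows stand in for core revisions with no known FIFO layout; the WARN_ONs added in brcms_b_attach() further down catch both an out-of-range index and an all-zero row. A stand-alone sketch of that lookup, reproducing only the first two rows (the helper name is made up for illustration):
#include <stdio.h>
#define NFIFO			6
#define XMTFIFOTBL_STARTREV	17
static const unsigned short xmtfifo_sz[][NFIFO] = {
	{20, 192, 192, 21, 17, 5},	/* corerev 17 */
	{0, 0, 0, 0, 0, 0},		/* corerev 18: no data */
	/* ... remaining rows as in the table above ... */
};
static const unsigned short *fifo_sizes(unsigned int corerev)
{
	unsigned int idx = corerev - XMTFIFOTBL_STARTREV;
	if (corerev < XMTFIFOTBL_STARTREV ||
	    idx >= sizeof(xmtfifo_sz) / sizeof(xmtfifo_sz[0]))
		return NULL;		/* outside the table */
	if (!xmtfifo_sz[idx][0])
		return NULL;		/* all-zero row: unsupported rev */
	return xmtfifo_sz[idx];
}
int main(void)
{
	const unsigned short *sz = fifo_sizes(17);
	if (sz)
		printf("fifo 0 size in blocks: %u\n", (unsigned int)sz[0]);
	return 0;
}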
#ifdef DEBUG
@ -1942,7 +1956,8 @@ static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw)
* accesses phy regs through the mac. This can be skipped since
* only mac reg is accessed below
*/
flags |= SICF_PCLKE;
if (D11REV_GE(wlc_hw->corerev, 18))
flags |= SICF_PCLKE;
/*
* TODO: test suspend/resume
@ -2023,7 +2038,8 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
* phy regs through the mac, AND phy_reset is skipped at early stage when
* band->pi is invalid. need to enable PHY CLK
*/
flags |= SICF_PCLKE;
if (D11REV_GE(wlc_hw->corerev, 18))
flags |= SICF_PCLKE;
/*
* reset the core
@ -2126,8 +2142,8 @@ void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode)
{
struct bcma_device *core = wlc_hw->d11core;
if ((ai_get_chip_id(wlc_hw->sih) == BCM43224_CHIP_ID) ||
(ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID)) {
if ((ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM43224) ||
(ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM43225)) {
if (spurmode == WL_SPURAVOID_ON2) { /* 126Mhz */
bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x2082);
bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
@ -2791,7 +2807,7 @@ void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
tmp = 0;
if (on) {
if ((ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) {
if ((ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM4313)) {
bcma_set32(core, D11REGOFFS(clk_ctl_st),
CCS_ERSRC_REQ_HT |
CCS_ERSRC_REQ_D11PLL |
@ -4218,9 +4234,8 @@ static void brcms_c_radio_timer(void *arg)
}
/* common low-level watchdog code */
static void brcms_b_watchdog(void *arg)
static void brcms_b_watchdog(struct brcms_c_info *wlc)
{
struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
struct brcms_hardware *wlc_hw = wlc->hw;
BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
@ -4241,10 +4256,8 @@ static void brcms_b_watchdog(void *arg)
}
/* common watchdog code */
static void brcms_c_watchdog(void *arg)
static void brcms_c_watchdog(struct brcms_c_info *wlc)
{
struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
if (!wlc->pub->up)
@ -4284,7 +4297,9 @@ static void brcms_c_watchdog(void *arg)
static void brcms_c_watchdog_by_timer(void *arg)
{
brcms_c_watchdog(arg);
struct brcms_c_info *wlc = (struct brcms_c_info *) arg;
brcms_c_watchdog(wlc);
}
static bool brcms_c_timers_init(struct brcms_c_info *wlc, int unit)
@ -4454,11 +4469,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
}
/* verify again the device is supported */
if (core->bus->hosttype == BCMA_HOSTTYPE_PCI &&
!brcms_c_chipmatch(pcidev->vendor, pcidev->device)) {
wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported "
"vendor/device (0x%x/0x%x)\n",
unit, pcidev->vendor, pcidev->device);
if (!brcms_c_chipmatch(core)) {
wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported device\n",
unit);
err = 12;
goto fail;
}
@ -4528,7 +4541,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
else
wlc_hw->_nbands = 1;
if ((ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID))
if ((ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM43225))
wlc_hw->_nbands = 1;
/* BMAC_NOTE: remove init of pub values when brcms_c_attach()
@ -4595,8 +4608,12 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
wlc_hw->machwcap_backup = wlc_hw->machwcap;
/* init tx fifo size */
WARN_ON((wlc_hw->corerev - XMTFIFOTBL_STARTREV) < 0 ||
(wlc_hw->corerev - XMTFIFOTBL_STARTREV) >
ARRAY_SIZE(xmtfifo_sz));
wlc_hw->xmtfifo_sz =
xmtfifo_sz[(wlc_hw->corerev - XMTFIFOTBL_STARTREV)];
WARN_ON(!wlc_hw->xmtfifo_sz[0]);
/* Get a phy for this band */
wlc_hw->band->pi =
@ -5036,7 +5053,7 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
wlc_hw->wlc->pub->hw_up = true;
if ((wlc_hw->boardflags & BFL_FEM)
&& (ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) {
&& (ai_get_chip_id(wlc_hw->sih) == BCMA_CHIP_ID_BCM4313)) {
if (!
(wlc_hw->boardrev >= 0x1250
&& (wlc_hw->boardflags & BFL_FEM_BT)))
@ -5130,7 +5147,7 @@ int brcms_c_up(struct brcms_c_info *wlc)
}
if ((wlc->pub->boardflags & BFL_FEM)
&& (ai_get_chip_id(wlc->hw->sih) == BCM4313_CHIP_ID)) {
&& (ai_get_chip_id(wlc->hw->sih) == BCMA_CHIP_ID_BCM4313)) {
if (wlc->pub->boardrev >= 0x1250
&& (wlc->pub->boardflags & BFL_FEM_BT))
brcms_b_mhf(wlc->hw, MHF5, MHF5_4313_GPIOCTRL,
@ -5767,8 +5784,12 @@ void brcms_c_print_txstatus(struct tx_status *txs)
(txs->ackphyrxsh & PRXS1_SQ_MASK) >> PRXS1_SQ_SHIFT);
}
bool brcms_c_chipmatch(u16 vendor, u16 device)
static bool brcms_c_chipmatch_pci(struct bcma_device *core)
{
struct pci_dev *pcidev = core->bus->host_pci;
u16 vendor = pcidev->vendor;
u16 device = pcidev->device;
if (vendor != PCI_VENDOR_ID_BROADCOM) {
pr_err("unknown vendor id %04x\n", vendor);
return false;
@ -5787,6 +5808,30 @@ bool brcms_c_chipmatch(u16 vendor, u16 device)
return false;
}
static bool brcms_c_chipmatch_soc(struct bcma_device *core)
{
struct bcma_chipinfo *chipinfo = &core->bus->chipinfo;
if (chipinfo->id == BCMA_CHIP_ID_BCM4716)
return true;
pr_err("unknown chip id %04x\n", chipinfo->id);
return false;
}
bool brcms_c_chipmatch(struct bcma_device *core)
{
switch (core->bus->hosttype) {
case BCMA_HOSTTYPE_PCI:
return brcms_c_chipmatch_pci(core);
case BCMA_HOSTTYPE_SOC:
return brcms_c_chipmatch_soc(core);
default:
pr_err("unknown host type: %i\n", core->bus->hosttype);
return false;
}
}
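brcms_c_chipmatch() now takes the d11 bcma_device and dispatches on the bus host type, so the PCI vendor/device check only applies to PCI hosts, while SoC-hosted chips (currently only BCM4716) are matched by chip ID. A stand-alone model of that dispatch shape (the enum, struct and helper names below are simplified stand-ins for the bcma types):
#include <stdbool.h>
#include <stdio.h>
enum host_type { HOST_PCI, HOST_SOC };
struct dev_model {
	enum host_type hosttype;
	unsigned short pci_vendor;	/* valid for HOST_PCI */
	unsigned short chip_id;		/* valid for HOST_SOC */
};
static bool match_pci(const struct dev_model *d)
{
	return d->pci_vendor == 0x14e4;	/* PCI_VENDOR_ID_BROADCOM */
}
static bool match_soc(const struct dev_model *d)
{
	return d->chip_id == 0x4716;	/* BCMA_CHIP_ID_BCM4716 */
}
static bool chipmatch(const struct dev_model *d)
{
	switch (d->hosttype) {
	case HOST_PCI:
		return match_pci(d);
	case HOST_SOC:
		return match_soc(d);
	}
	return false;			/* unknown host type */
}
int main(void)
{
	struct dev_model soc = { .hosttype = HOST_SOC, .chip_id = 0x4716 };
	printf("%s\n", chipmatch(&soc) ? "supported" : "unsupported");
	return 0;
}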
#if defined(DEBUG)
void brcms_c_print_txdesc(struct d11txh *txh)
{

View File

@ -198,6 +198,8 @@ u16 read_radio_reg(struct brcms_phy *pi, u16 addr)
void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
struct si_info *sii = container_of(pi->sh->sih, struct si_info, pub);
if ((D11REV_GE(pi->sh->corerev, 24)) ||
(D11REV_IS(pi->sh->corerev, 22)
&& (pi->pubpi.phy_type != PHY_TYPE_SSN))) {
@ -209,7 +211,8 @@ void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val)
bcma_write16(pi->d11core, D11REGOFFS(phy4wdatalo), val);
}
if (++pi->phy_wreg >= pi->phy_wreg_limit) {
if ((sii->icbus->hosttype == BCMA_HOSTTYPE_PCI) &&
(++pi->phy_wreg >= pi->phy_wreg_limit)) {
(void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
pi->phy_wreg = 0;
}
@ -292,10 +295,13 @@ void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
bcma_write16(pi->d11core, D11REGOFFS(phyregdata), val);
if (addr == 0x72)
(void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
(void)bcma_read16(pi->d11core, D11REGOFFS(phyregdata));
#else
struct si_info *sii = container_of(pi->sh->sih, struct si_info, pub);
bcma_write32(pi->d11core, D11REGOFFS(phyregaddr), addr | (val << 16));
if (++pi->phy_wreg >= pi->phy_wreg_limit) {
if ((sii->icbus->hosttype == BCMA_HOSTTYPE_PCI) &&
(++pi->phy_wreg >= pi->phy_wreg_limit)) {
pi->phy_wreg = 0;
(void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
}
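In both write_radio_reg() and write_phy_reg() above, the dummy register read that flushes a batch of writes is now issued only when the interconnect is hosted over PCI, where it is needed to push posted writes across the bridge; SoC hosts skip it. A stand-alone model of that throttling (struct and field names are stand-ins for the driver state):
#include <stdbool.h>
#include <stdio.h>
struct wr_state {
	bool on_pci_host;	/* sii->icbus->hosttype == BCMA_HOSTTYPE_PCI */
	unsigned int pending;	/* pi->phy_wreg */
	unsigned int limit;	/* pi->phy_wreg_limit */
};
static void reg_write(struct wr_state *s)
{
	/* ... the actual register write would go here ... */
	/* flush with a dummy read once 'limit' writes have accumulated,
	 * but only on PCI hosts; SoC hosts never need the read-back */
	if (s->on_pci_host && ++s->pending >= s->limit) {
		/* (void)bcma_read32(...) in the driver */
		s->pending = 0;
	}
}
int main(void)
{
	struct wr_state s = { .on_pci_host = true, .limit = 4 };
	int i;
	for (i = 0; i < 10; i++)
		reg_write(&s);
	printf("writes still unflushed: %u\n", s.pending);
	return 0;
}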
@ -837,7 +843,7 @@ wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id, uint tbl_offset,
pi->tbl_data_hi = tblDataHi;
pi->tbl_data_lo = tblDataLo;
if (pi->sh->chip == BCM43224_CHIP_ID &&
if (pi->sh->chip == BCMA_CHIP_ID_BCM43224 &&
pi->sh->chiprev == 1) {
pi->tbl_addr = tblAddr;
pi->tbl_save_id = tbl_id;
@ -847,7 +853,7 @@ wlc_phy_table_addr(struct brcms_phy *pi, uint tbl_id, uint tbl_offset,
void wlc_phy_table_data_write(struct brcms_phy *pi, uint width, u32 val)
{
if ((pi->sh->chip == BCM43224_CHIP_ID) &&
if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) &&
(pi->sh->chiprev == 1) &&
(pi->tbl_save_id == NPHY_TBL_ID_ANTSWCTRLLUT)) {
read_phy_reg(pi, pi->tbl_data_lo);
@ -881,7 +887,7 @@ wlc_phy_write_table(struct brcms_phy *pi, const struct phytbl_info *ptbl_info,
for (idx = 0; idx < ptbl_info->tbl_len; idx++) {
if ((pi->sh->chip == BCM43224_CHIP_ID) &&
if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) &&
(pi->sh->chiprev == 1) &&
(tbl_id == NPHY_TBL_ID_ANTSWCTRLLUT)) {
read_phy_reg(pi, tblDataLo);
@ -918,7 +924,7 @@ wlc_phy_read_table(struct brcms_phy *pi, const struct phytbl_info *ptbl_info,
for (idx = 0; idx < ptbl_info->tbl_len; idx++) {
if ((pi->sh->chip == BCM43224_CHIP_ID) &&
if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) &&
(pi->sh->chiprev == 1)) {
(void)read_phy_reg(pi, tblDataLo);
@ -2894,7 +2900,7 @@ const u8 *wlc_phy_get_ofdm_rate_lookup(void)
void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode)
{
if ((pi->sh->chip == BCM4313_CHIP_ID) &&
if ((pi->sh->chip == BCMA_CHIP_ID_BCM4313) &&
(pi->sh->boardflags & BFL_FEM)) {
if (mode) {
u16 txant = 0;

View File

@ -17893,6 +17893,8 @@ static u32 *wlc_phy_get_ipa_gaintbl_nphy(struct brcms_phy *pi)
nphy_tpc_txgain_ipa_2g_2057rev7;
} else if (NREV_IS(pi->pubpi.phy_rev, 6)) {
tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev6;
if (pi->sh->chip == BCMA_CHIP_ID_BCM47162)
tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev5;
} else if (NREV_IS(pi->pubpi.phy_rev, 5)) {
tx_pwrctrl_tbl = nphy_tpc_txgain_ipa_rev5;
} else {
@ -19254,8 +19256,14 @@ static void wlc_phy_spurwar_nphy(struct brcms_phy *pi)
case 38:
case 102:
case 118:
nphy_adj_tone_id_buf[0] = 0;
nphy_adj_noise_var_buf[0] = 0x0;
if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716) &&
(pi->sh->chippkg == BCMA_PKG_ID_BCM4717)) {
nphy_adj_tone_id_buf[0] = 32;
nphy_adj_noise_var_buf[0] = 0x21f;
} else {
nphy_adj_tone_id_buf[0] = 0;
nphy_adj_noise_var_buf[0] = 0x0;
}
break;
case 134:
nphy_adj_tone_id_buf[0] = 32;
@ -19309,8 +19317,8 @@ void wlc_phy_init_nphy(struct brcms_phy *pi)
pi->measure_hold |= PHY_HOLD_FOR_NOT_ASSOC;
if ((ISNPHY(pi)) && (NREV_GE(pi->pubpi.phy_rev, 5)) &&
((pi->sh->chippkg == BCM4717_PKG_ID) ||
(pi->sh->chippkg == BCM4718_PKG_ID))) {
((pi->sh->chippkg == BCMA_PKG_ID_BCM4717) ||
(pi->sh->chippkg == BCMA_PKG_ID_BCM4718))) {
if ((pi->sh->boardflags & BFL_EXTLNA) &&
(CHSPEC_IS2G(pi->radio_chanspec)))
ai_cc_reg(pi->sh->sih,
@ -19318,6 +19326,10 @@ void wlc_phy_init_nphy(struct brcms_phy *pi)
0x40, 0x40);
}
if ((!PHY_IPA(pi)) && (pi->sh->chip == BCMA_CHIP_ID_BCM5357))
si_pmu_chipcontrol(pi->sh->sih, 1, CCTRL5357_EXTPA,
CCTRL5357_EXTPA);
if ((pi->nphy_gband_spurwar2_en) && CHSPEC_IS2G(pi->radio_chanspec) &&
CHSPEC_IS40(pi->radio_chanspec)) {
@ -20695,12 +20707,22 @@ wlc_phy_chanspec_radio2056_setup(struct brcms_phy *pi,
write_radio_reg(pi, RADIO_2056_SYN_PLL_LOOPFILTER2 |
RADIO_2056_SYN, 0x1f);
write_radio_reg(pi,
RADIO_2056_SYN_PLL_LOOPFILTER4 |
RADIO_2056_SYN, 0xb);
write_radio_reg(pi,
RADIO_2056_SYN_PLL_CP2 |
RADIO_2056_SYN, 0x14);
if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716) ||
(pi->sh->chip == BCMA_CHIP_ID_BCM47162)) {
write_radio_reg(pi,
RADIO_2056_SYN_PLL_LOOPFILTER4 |
RADIO_2056_SYN, 0x14);
write_radio_reg(pi,
RADIO_2056_SYN_PLL_CP2 |
RADIO_2056_SYN, 0x00);
} else {
write_radio_reg(pi,
RADIO_2056_SYN_PLL_LOOPFILTER4 |
RADIO_2056_SYN, 0xb);
write_radio_reg(pi,
RADIO_2056_SYN_PLL_CP2 |
RADIO_2056_SYN, 0x14);
}
}
}
@ -20747,24 +20769,30 @@ wlc_phy_chanspec_radio2056_setup(struct brcms_phy *pi,
WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
PADG_IDAC, 0xcc);
bias = 0x25;
cascbias = 0x20;
if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716) ||
(pi->sh->chip == BCMA_CHIP_ID_BCM47162)) {
bias = 0x40;
cascbias = 0x45;
pag_boost_tune = 0x5;
pgag_boost_tune = 0x33;
padg_boost_tune = 0x77;
mixg_boost_tune = 0x55;
} else {
bias = 0x25;
cascbias = 0x20;
if ((pi->sh->chip ==
BCM43224_CHIP_ID)
|| (pi->sh->chip ==
BCM43225_CHIP_ID)) {
if (pi->sh->chippkg ==
BCM43224_FAB_SMIC) {
if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224 ||
pi->sh->chip == BCMA_CHIP_ID_BCM43225) &&
pi->sh->chippkg == BCMA_PKG_ID_BCM43224_FAB_SMIC) {
bias = 0x2a;
cascbias = 0x38;
}
}
pag_boost_tune = 0x4;
pgag_boost_tune = 0x03;
padg_boost_tune = 0x77;
mixg_boost_tune = 0x65;
pag_boost_tune = 0x4;
pgag_boost_tune = 0x03;
padg_boost_tune = 0x77;
mixg_boost_tune = 0x65;
}
WRITE_RADIO_REG2(pi, RADIO_2056, TX, core,
INTPAG_IMAIN_STAT, bias);
@ -20863,11 +20891,10 @@ wlc_phy_chanspec_radio2056_setup(struct brcms_phy *pi,
cascbias = 0x30;
if ((pi->sh->chip == BCM43224_CHIP_ID) ||
(pi->sh->chip == BCM43225_CHIP_ID)) {
if (pi->sh->chippkg == BCM43224_FAB_SMIC)
cascbias = 0x35;
}
if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224 ||
pi->sh->chip == BCMA_CHIP_ID_BCM43225) &&
pi->sh->chippkg == BCMA_PKG_ID_BCM43224_FAB_SMIC)
cascbias = 0x35;
pabias = (pi->phy_pabias == 0) ? 0x30 : pi->phy_pabias;
@ -21106,6 +21133,7 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
const struct nphy_sfo_cfg *ci)
{
u16 val;
struct si_info *sii = container_of(pi->sh->sih, struct si_info, pub);
val = read_phy_reg(pi, 0x09) & NPHY_BandControl_currentBand;
if (CHSPEC_IS5G(chanspec) && !val) {
@ -21178,22 +21206,32 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
} else if (NREV_GE(pi->pubpi.phy_rev, 7)) {
if (val == 54)
spuravoid = 1;
} else {
if (pi->nphy_aband_spurwar_en &&
((val == 38) || (val == 102)
|| (val == 118)))
} else if (pi->nphy_aband_spurwar_en &&
((val == 38) || (val == 102) || (val == 118))) {
if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716)
&& (pi->sh->chippkg == BCMA_PKG_ID_BCM4717)) {
spuravoid = 0;
} else {
spuravoid = 1;
}
}
if (pi->phy_spuravoid == SPURAVOID_FORCEON)
spuravoid = 1;
wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false);
si_pmu_spuravoid_pllupdate(pi->sh->sih, spuravoid);
wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true);
if ((pi->sh->chip == BCMA_CHIP_ID_BCM4716) ||
(pi->sh->chip == BCMA_CHIP_ID_BCM43225)) {
bcma_pmu_spuravoid_pllupdate(&sii->icbus->drv_cc,
spuravoid);
} else {
wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false);
bcma_pmu_spuravoid_pllupdate(&sii->icbus->drv_cc,
spuravoid);
wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true);
}
if ((pi->sh->chip == BCM43224_CHIP_ID) ||
(pi->sh->chip == BCM43225_CHIP_ID)) {
if ((pi->sh->chip == BCMA_CHIP_ID_BCM43224) ||
(pi->sh->chip == BCMA_CHIP_ID_BCM43225)) {
if (spuravoid == 1) {
bcma_write16(pi->d11core,
D11REGOFFS(tsf_clk_frac_l),
@ -21209,7 +21247,9 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
}
}
wlapi_bmac_core_phypll_reset(pi->sh->physhim);
if (!((pi->sh->chip == BCMA_CHIP_ID_BCM4716) ||
(pi->sh->chip == BCMA_CHIP_ID_BCM47162)))
wlapi_bmac_core_phypll_reset(pi->sh->physhim);
mod_phy_reg(pi, 0x01, (0x1 << 15),
((spuravoid > 0) ? (0x1 << 15) : 0));
@ -22171,9 +22211,15 @@ s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi)
wlc_phy_table_write_nphy(pi, NPHY_TBL_ID_AFECTRL, 1, 0x03, 16,
&auxADC_rssi_ctrlH_save);
radio_temp[0] = (179 * (radio_temp[1] + radio_temp2[1])
+ 82 * (auxADC_Vl) - 28861 +
128) / 256;
if (pi->sh->chip == BCMA_CHIP_ID_BCM5357) {
radio_temp[0] = (193 * (radio_temp[1] + radio_temp2[1])
+ 88 * (auxADC_Vl) - 27111 +
128) / 256;
} else {
radio_temp[0] = (179 * (radio_temp[1] + radio_temp2[1])
+ 82 * (auxADC_Vl) - 28861 +
128) / 256;
}
offset = (s16) pi->phy_tempsense_offset;
@ -24923,14 +24969,16 @@ wlc_phy_a2_nphy(struct brcms_phy *pi, struct nphy_ipa_txcalgains *txgains,
if (txgains->useindex) {
phy_a4 = 15 - ((txgains->index) >> 3);
if (CHSPEC_IS2G(pi->radio_chanspec)) {
if (NREV_GE(pi->pubpi.phy_rev, 6))
phy_a5 = 0x00f7 | (phy_a4 << 8);
else
if (NREV_IS(pi->pubpi.phy_rev, 5))
if (NREV_GE(pi->pubpi.phy_rev, 6) &&
pi->sh->chip == BCMA_CHIP_ID_BCM47162) {
phy_a5 = 0x10f7 | (phy_a4 << 8);
else
} else if (NREV_GE(pi->pubpi.phy_rev, 6)) {
phy_a5 = 0x00f7 | (phy_a4 << 8);
} else if (NREV_IS(pi->pubpi.phy_rev, 5)) {
phy_a5 = 0x10f7 | (phy_a4 << 8);
} else {
phy_a5 = 0x50f7 | (phy_a4 << 8);
}
} else {
phy_a5 = 0x70f7 | (phy_a4 << 8);
}

View File

@ -74,16 +74,6 @@
* PMU<rev>_PLL<num>_XX where <rev> is PMU corerev and <num> is an arbitrary
* number to differentiate different PLLs controlled by the same PMU rev.
*/
/* pllcontrol registers:
* ndiv_pwrdn, pwrdn_ch<x>, refcomp_pwrdn, dly_ch<x>,
* p1div, p2div, _bypass_sdmod
*/
#define PMU1_PLL0_PLLCTL0 0
#define PMU1_PLL0_PLLCTL1 1
#define PMU1_PLL0_PLLCTL2 2
#define PMU1_PLL0_PLLCTL3 3
#define PMU1_PLL0_PLLCTL4 4
#define PMU1_PLL0_PLLCTL5 5
/* pmu XtalFreqRatio */
#define PMU_XTALFREQ_REG_ILPCTR_MASK 0x00001FFF
@ -108,118 +98,14 @@
#define RES4313_HT_AVAIL_RSRC 14
#define RES4313_MACPHY_CLK_AVAIL_RSRC 15
/* Determine min/max rsrc masks. Value 0 leaves hardware at default. */
static void si_pmu_res_masks(struct si_pub *sih, u32 * pmin, u32 * pmax)
{
u32 min_mask = 0, max_mask = 0;
uint rsrcs;
/* # resources */
rsrcs = (ai_get_pmucaps(sih) & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
/* determine min/max rsrc masks */
switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
/* ??? */
break;
case BCM4313_CHIP_ID:
min_mask = PMURES_BIT(RES4313_BB_PU_RSRC) |
PMURES_BIT(RES4313_XTAL_PU_RSRC) |
PMURES_BIT(RES4313_ALP_AVAIL_RSRC) |
PMURES_BIT(RES4313_BB_PLL_PWRSW_RSRC);
max_mask = 0xffff;
break;
default:
break;
}
*pmin = min_mask;
*pmax = max_mask;
}
void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid)
{
u32 tmp = 0;
struct bcma_device *core;
/* switch to chipc */
core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
if (spuravoid == 1) {
bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
PMU1_PLL0_PLLCTL0);
bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
0x11500010);
bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
PMU1_PLL0_PLLCTL1);
bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
0x000C0C06);
bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
PMU1_PLL0_PLLCTL2);
bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
0x0F600a08);
bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
PMU1_PLL0_PLLCTL3);
bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
0x00000000);
bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
PMU1_PLL0_PLLCTL4);
bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
0x2001E920);
bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
PMU1_PLL0_PLLCTL5);
bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
0x88888815);
} else {
bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
PMU1_PLL0_PLLCTL0);
bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
0x11100010);
bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
PMU1_PLL0_PLLCTL1);
bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
0x000c0c06);
bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
PMU1_PLL0_PLLCTL2);
bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
0x03000a08);
bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
PMU1_PLL0_PLLCTL3);
bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
0x00000000);
bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
PMU1_PLL0_PLLCTL4);
bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
0x200005c0);
bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
PMU1_PLL0_PLLCTL5);
bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
0x88888815);
}
tmp = 1 << 10;
break;
default:
/* bail out */
return;
}
bcma_set32(core, CHIPCREGOFFS(pmucontrol), tmp);
}
u16 si_pmu_fast_pwrup_delay(struct si_pub *sih)
{
uint delay = PMU_MAX_TRANSITION_DLY;
switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
case BCM4313_CHIP_ID:
case BCMA_CHIP_ID_BCM43224:
case BCMA_CHIP_ID_BCM43225:
case BCMA_CHIP_ID_BCM4313:
delay = 3700;
break;
default:
@ -270,9 +156,9 @@ u32 si_pmu_alp_clock(struct si_pub *sih)
return clock;
switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
case BCM4313_CHIP_ID:
case BCMA_CHIP_ID_BCM43224:
case BCMA_CHIP_ID_BCM43225:
case BCMA_CHIP_ID_BCM4313:
/* always 20Mhz */
clock = 20000 * 1000;
break;
@ -283,51 +169,9 @@ u32 si_pmu_alp_clock(struct si_pub *sih)
return clock;
}
/* initialize PMU */
void si_pmu_init(struct si_pub *sih)
{
struct bcma_device *core;
/* select chipc */
core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
if (ai_get_pmurev(sih) == 1)
bcma_mask32(core, CHIPCREGOFFS(pmucontrol),
~PCTL_NOILP_ON_WAIT);
else if (ai_get_pmurev(sih) >= 2)
bcma_set32(core, CHIPCREGOFFS(pmucontrol), PCTL_NOILP_ON_WAIT);
}
/* initialize PMU resources */
void si_pmu_res_init(struct si_pub *sih)
{
struct bcma_device *core;
u32 min_mask = 0, max_mask = 0;
/* select to chipc */
core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
/* Determine min/max rsrc masks */
si_pmu_res_masks(sih, &min_mask, &max_mask);
/* It is required to program max_mask first and then min_mask */
/* Program max resource mask */
if (max_mask)
bcma_write32(core, CHIPCREGOFFS(max_res_mask), max_mask);
/* Program min resource mask */
if (min_mask)
bcma_write32(core, CHIPCREGOFFS(min_res_mask), min_mask);
/* Add some delay; allow resources to come up and settle. */
mdelay(2);
}
u32 si_pmu_measure_alpclk(struct si_pub *sih)
{
struct si_info *sii = container_of(sih, struct si_info, pub);
struct bcma_device *core;
u32 alp_khz;
@ -335,7 +179,7 @@ u32 si_pmu_measure_alpclk(struct si_pub *sih)
return 0;
/* Remember original core before switch to chipc */
core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
core = sii->icbus->drv_cc.core;
if (bcma_read32(core, CHIPCREGOFFS(pmustatus)) & PST_EXTLPOAVAIL) {
u32 ilp_ctr, alp_hz;

View File

@ -26,10 +26,7 @@ extern u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
extern u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
extern u32 si_pmu_alp_clock(struct si_pub *sih);
extern void si_pmu_pllupd(struct si_pub *sih);
extern void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid);
extern u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
extern void si_pmu_init(struct si_pub *sih);
extern void si_pmu_res_init(struct si_pub *sih);
extern u32 si_pmu_measure_alpclk(struct si_pub *sih);
#endif /* _BRCM_PMU_H_ */

View File

@ -311,7 +311,7 @@ extern uint brcms_c_detach(struct brcms_c_info *wlc);
extern int brcms_c_up(struct brcms_c_info *wlc);
extern uint brcms_c_down(struct brcms_c_info *wlc);
extern bool brcms_c_chipmatch(u16 vendor, u16 device);
extern bool brcms_c_chipmatch(struct bcma_device *core);
extern void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
extern void brcms_c_reset(struct brcms_c_info *wlc);

View File

@ -19,68 +19,6 @@
#define SI_ENUM_BASE 0x18000000 /* Enumeration space base */
/* core codes */
#define NODEV_CORE_ID 0x700 /* Invalid coreid */
#define CC_CORE_ID 0x800 /* chipcommon core */
#define ILINE20_CORE_ID 0x801 /* iline20 core */
#define SRAM_CORE_ID 0x802 /* sram core */
#define SDRAM_CORE_ID 0x803 /* sdram core */
#define PCI_CORE_ID 0x804 /* pci core */
#define MIPS_CORE_ID 0x805 /* mips core */
#define ENET_CORE_ID 0x806 /* enet mac core */
#define CODEC_CORE_ID 0x807 /* v90 codec core */
#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */
#define ADSL_CORE_ID 0x809 /* ADSL core */
#define ILINE100_CORE_ID 0x80a /* iline100 core */
#define IPSEC_CORE_ID 0x80b /* ipsec core */
#define UTOPIA_CORE_ID 0x80c /* utopia core */
#define PCMCIA_CORE_ID 0x80d /* pcmcia core */
#define SOCRAM_CORE_ID 0x80e /* internal memory core */
#define MEMC_CORE_ID 0x80f /* memc sdram core */
#define OFDM_CORE_ID 0x810 /* OFDM phy core */
#define EXTIF_CORE_ID 0x811 /* external interface core */
#define D11_CORE_ID 0x812 /* 802.11 MAC core */
#define APHY_CORE_ID 0x813 /* 802.11a phy core */
#define BPHY_CORE_ID 0x814 /* 802.11b phy core */
#define GPHY_CORE_ID 0x815 /* 802.11g phy core */
#define MIPS33_CORE_ID 0x816 /* mips3302 core */
#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */
#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */
#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */
#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */
#define SDIOH_CORE_ID 0x81b /* sdio host core */
#define ROBO_CORE_ID 0x81c /* roboswitch core */
#define ATA100_CORE_ID 0x81d /* parallel ATA core */
#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */
#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */
#define PCIE_CORE_ID 0x820 /* pci express core */
#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */
#define SRAMC_CORE_ID 0x822 /* SRAM controller core */
#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */
#define ARM11_CORE_ID 0x824 /* ARM 1176 core */
#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */
#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */
#define PMU_CORE_ID 0x827 /* PMU core */
#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */
#define SDIOD_CORE_ID 0x829 /* SDIO device core */
#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */
#define HTPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */
#define MIPS74K_CORE_ID 0x82c /* mips 74k core */
#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */
#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */
#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */
#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */
#define SC_CORE_ID 0x831 /* shared common core */
#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */
#define SPIH_CORE_ID 0x833 /* SPI host core */
#define I2S_CORE_ID 0x834 /* I2S core */
#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */
#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */
#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */
#define DEF_AI_COMP 0xfff /* Default component, in ai chips it
* maps all unused address ranges
*/
/* Common core control flags */
#define SICF_BIST_EN 0x8000
#define SICF_PME_EN 0x4000

View File

@ -946,7 +946,7 @@ il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
case IEEE80211_BAND_5GHZ:
rs_sta->expected_tpt = il3945_expected_tpt_a;
break;
case IEEE80211_NUM_BANDS:
default:
BUG();
break;
}

View File

@ -190,6 +190,44 @@ enum {
REPLY_MAX = 0xff
};
/*
* Minimum number of queues. MAX_NUM is defined in hw specific files.
* Set the minimum to accommodate
* - 4 standard TX queues
* - the command queue
* - 4 PAN TX queues
* - the PAN multicast queue, and
* - the AUX (TX during scan dwell) queue.
*/
#define IWL_MIN_NUM_QUEUES 11
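For reference, the items listed above add up to 4 + 1 + 4 + 1 + 1 = 11, which is the value given to IWL_MIN_NUM_QUEUES.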
/*
* Command queue depends on iPAN support.
*/
#define IWL_DEFAULT_CMD_QUEUE_NUM 4
#define IWL_IPAN_CMD_QUEUE_NUM 9
#define IWL_TX_FIFO_BK 0 /* shared */
#define IWL_TX_FIFO_BE 1
#define IWL_TX_FIFO_VI 2 /* shared */
#define IWL_TX_FIFO_VO 3
#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK
#define IWL_TX_FIFO_BE_IPAN 4
#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI
#define IWL_TX_FIFO_VO_IPAN 5
/* re-uses the VO FIFO, uCode will properly flush/schedule */
#define IWL_TX_FIFO_AUX 5
#define IWL_TX_FIFO_UNUSED 255
#define IWLAGN_CMD_FIFO_NUM 7
/*
* This queue number is required for proper operation
* because the ucode will stop/start the scheduler as
* required.
*/
#define IWL_IPAN_MCAST_QUEUE 8
/******************************************************************************
* (0)
* Commonly used structures and definitions:
@ -755,8 +793,6 @@ struct iwl_qosparam_cmd {
#define IWLAGN_BROADCAST_ID 15
#define IWLAGN_STATION_COUNT 16
#define IWL_INVALID_STATION 255
#define IWL_MAX_TID_COUNT 8
#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT
#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)

View File

@ -83,7 +83,7 @@ static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
#define DEBUGFS_READ_FILE_OPS(name) \
DEBUGFS_READ_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.read = iwl_dbgfs_##name##_read, \
.read = iwl_dbgfs_##name##_read, \
.open = simple_open, \
.llseek = generic_file_llseek, \
};
@ -2255,6 +2255,10 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
char buf[8];
int buf_size;
/* check that the interface is up */
if (!iwl_is_ready(priv))
return -EAGAIN;
memset(buf, 0, sizeof(buf));
buf_size = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, buf_size))

View File

@ -90,22 +90,6 @@
#define IWL_NUM_SCAN_RATES (2)
/*
* Minimum number of queues. MAX_NUM is defined in hw specific files.
* Set the minimum to accommodate
* - 4 standard TX queues
* - the command queue
* - 4 PAN TX queues
* - the PAN multicast queue, and
* - the AUX (TX during scan dwell) queue.
*/
#define IWL_MIN_NUM_QUEUES 11
/*
* Command queue depends on iPAN support.
*/
#define IWL_DEFAULT_CMD_QUEUE_NUM 4
#define IWL_IPAN_CMD_QUEUE_NUM 9
#define IEEE80211_DATA_LEN 2304
#define IEEE80211_4ADDR_LEN 30

View File

@ -518,49 +518,6 @@ static void iwl_bg_tx_flush(struct work_struct *work)
* queue/FIFO/AC mapping definitions
*/
#define IWL_TX_FIFO_BK 0 /* shared */
#define IWL_TX_FIFO_BE 1
#define IWL_TX_FIFO_VI 2 /* shared */
#define IWL_TX_FIFO_VO 3
#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK
#define IWL_TX_FIFO_BE_IPAN 4
#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI
#define IWL_TX_FIFO_VO_IPAN 5
/* re-uses the VO FIFO, uCode will properly flush/schedule */
#define IWL_TX_FIFO_AUX 5
#define IWL_TX_FIFO_UNUSED -1
#define IWLAGN_CMD_FIFO_NUM 7
/*
* This queue number is required for proper operation
* because the ucode will stop/start the scheduler as
* required.
*/
#define IWL_IPAN_MCAST_QUEUE 8
static const u8 iwlagn_default_queue_to_tx_fifo[] = {
IWL_TX_FIFO_VO,
IWL_TX_FIFO_VI,
IWL_TX_FIFO_BE,
IWL_TX_FIFO_BK,
IWLAGN_CMD_FIFO_NUM,
};
static const u8 iwlagn_ipan_queue_to_tx_fifo[] = {
IWL_TX_FIFO_VO,
IWL_TX_FIFO_VI,
IWL_TX_FIFO_BE,
IWL_TX_FIFO_BK,
IWL_TX_FIFO_BK_IPAN,
IWL_TX_FIFO_BE_IPAN,
IWL_TX_FIFO_VI_IPAN,
IWL_TX_FIFO_VO_IPAN,
IWL_TX_FIFO_BE_IPAN,
IWLAGN_CMD_FIFO_NUM,
IWL_TX_FIFO_AUX,
};
static const u8 iwlagn_bss_ac_to_fifo[] = {
IWL_TX_FIFO_VO,
IWL_TX_FIFO_VI,
@ -1350,6 +1307,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
else
trans_cfg.queue_watchdog_timeout = IWL_WATCHDOG_DISABLED;
trans_cfg.command_names = iwl_dvm_cmd_strings;
trans_cfg.cmd_fifo = IWLAGN_CMD_FIFO_NUM;
WARN_ON(sizeof(priv->transport_queue_stop) * BITS_PER_BYTE <
priv->cfg->base_params->num_of_queues);
@ -1363,15 +1321,9 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
trans_cfg.queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
trans_cfg.n_queue_to_fifo =
ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo);
} else {
priv->sta_key_max_num = STA_KEY_MAX_NUM;
trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
trans_cfg.n_queue_to_fifo =
ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
}
/* Configure transport layer */
@ -1460,9 +1412,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
priv->sta_key_max_num = STA_KEY_MAX_NUM;
trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
trans_cfg.n_queue_to_fifo =
ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
/* Configure transport layer again*/
iwl_trans_configure(priv->trans, &trans_cfg);
@ -1480,9 +1429,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
atomic_set(&priv->queue_stop_count[i], 0);
}
WARN_ON(trans_cfg.queue_to_fifo[trans_cfg.cmd_queue] !=
IWLAGN_CMD_FIFO_NUM);
if (iwl_init_drv(priv))
goto out_free_eeprom;

View File

@ -396,15 +396,21 @@ static u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
{
struct iwl_rxon_context *ctx;
int limits[NUM_IWL_RXON_CTX] = {};
int n_active = 0;
u16 limit;
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
/*
* If we're associated, we clamp the dwell time to 98%
* of the smallest beacon interval (minus 2 * channel
* tune time)
* of the beacon interval (minus 2 * channel tune time)
* If both contexts are active, we have to restrict to
* 1/2 of the minimum of them, because they might be in
* lock-step, leaving only half of the time in between
* that we'd otherwise have in each of them.
*/
for_each_context(priv, ctx) {
u16 value;
switch (ctx->staging.dev_type) {
case RXON_DEV_TYPE_P2P:
/* no timing constraints */
@ -424,14 +430,25 @@ static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
break;
}
value = ctx->beacon_int;
if (!value)
value = IWL_PASSIVE_DWELL_BASE;
value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
dwell_time = min(value, dwell_time);
limits[n_active++] = ctx->beacon_int ?: IWL_PASSIVE_DWELL_BASE;
}
return dwell_time;
switch (n_active) {
case 0:
return dwell_time;
case 2:
limit = (limits[1] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
limit /= 2;
dwell_time = min(limit, dwell_time);
/* fall through to limit further */
case 1:
limit = (limits[0] * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
limit /= n_active;
return min(limit, dwell_time);
default:
WARN_ON_ONCE(1);
return dwell_time;
}
}
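The reworked iwl_limit_dwell() above first records the beacon interval of each active context (falling back to IWL_PASSIVE_DWELL_BASE when it is zero), then clamps the dwell time to 98% of each interval minus twice the channel tune time, halving the per-context budget when both contexts are active. A stand-alone sketch of the arithmetic, assuming IWL_CHANNEL_TUNE_TIME = 5 and IWL_PASSIVE_DWELL_BASE = 100 (values chosen here for illustration):
#include <stdio.h>
#define CHANNEL_TUNE_TIME	5	/* assumed IWL_CHANNEL_TUNE_TIME */
#define PASSIVE_DWELL_BASE	100	/* assumed IWL_PASSIVE_DWELL_BASE */
static unsigned short limit_dwell(unsigned short dwell,
				  const unsigned short *beacon_int,
				  int n_active)
{
	unsigned short limits[2] = { PASSIVE_DWELL_BASE, PASSIVE_DWELL_BASE };
	unsigned short limit;
	int i;
	for (i = 0; i < n_active && i < 2; i++)
		if (beacon_int[i])
			limits[i] = beacon_int[i];
	switch (n_active) {
	case 0:
		return dwell;
	case 2:
		limit = (limits[1] * 98) / 100 - CHANNEL_TUNE_TIME * 2;
		limit /= 2;
		if (limit < dwell)
			dwell = limit;
		/* fall through and clamp against the first context too */
	case 1:
		limit = (limits[0] * 98) / 100 - CHANNEL_TUNE_TIME * 2;
		limit /= n_active;
		return limit < dwell ? limit : dwell;
	}
	return dwell;
}
int main(void)
{
	unsigned short bi[2] = { 100, 200 };	/* two active contexts */
	printf("dwell: %u\n", (unsigned int)limit_dwell(120, bi, 2));
	return 0;
}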
static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,

View File

@ -226,13 +226,50 @@ int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
return ret;
}
static const u8 iwlagn_default_queue_to_tx_fifo[] = {
IWL_TX_FIFO_VO,
IWL_TX_FIFO_VI,
IWL_TX_FIFO_BE,
IWL_TX_FIFO_BK,
};
static const u8 iwlagn_ipan_queue_to_tx_fifo[] = {
IWL_TX_FIFO_VO,
IWL_TX_FIFO_VI,
IWL_TX_FIFO_BE,
IWL_TX_FIFO_BK,
IWL_TX_FIFO_BK_IPAN,
IWL_TX_FIFO_BE_IPAN,
IWL_TX_FIFO_VI_IPAN,
IWL_TX_FIFO_VO_IPAN,
IWL_TX_FIFO_BE_IPAN,
IWL_TX_FIFO_UNUSED,
IWL_TX_FIFO_AUX,
};
static int iwl_alive_notify(struct iwl_priv *priv)
{
const u8 *queue_to_txf;
u8 n_queues;
int ret;
int i;
iwl_trans_fw_alive(priv->trans);
if (priv->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PAN &&
priv->eeprom_data->sku & EEPROM_SKU_CAP_IPAN_ENABLE) {
n_queues = ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo);
queue_to_txf = iwlagn_ipan_queue_to_tx_fifo;
} else {
n_queues = ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
queue_to_txf = iwlagn_default_queue_to_tx_fifo;
}
for (i = 0; i < n_queues; i++)
if (queue_to_txf[i] != IWL_TX_FIFO_UNUSED)
iwl_trans_ac_txq_enable(priv->trans, i,
queue_to_txf[i]);
priv->passive_no_rx = false;
priv->transport_queue_stop = 0;
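The queue to FIFO tables now live here in the op-mode and are applied only once the firmware is alive, through the new iwl_trans_ac_txq_enable() helper. Slot 9 of the iPAN table is deliberately IWL_TX_FIFO_UNUSED: with iPAN enabled the command queue is queue 9 (IWL_IPAN_CMD_QUEUE_NUM), and the transport now sets that queue up itself from the new cmd_fifo field, so it has to be skipped here. A stand-alone model of the skip (FIFO numbers copied from the defines earlier in this commit; the helper is a stand-in for the trans call):
#include <stdio.h>
#define TX_FIFO_UNUSED	255	/* mirrors IWL_TX_FIFO_UNUSED */
/* iPAN mapping, as in iwlagn_ipan_queue_to_tx_fifo above:
 * VO=3, VI=2, BE=1, BK=0, BK_IPAN=0, BE_IPAN=4, VI_IPAN=2, VO_IPAN=5,
 * BE_IPAN=4, <queue 9 left to the transport>, AUX=5 */
static const unsigned char ipan_queue_to_txf[] = {
	3, 2, 1, 0, 0, 4, 2, 5, 4, TX_FIFO_UNUSED, 5,
};
static void ac_txq_enable(int queue, int fifo)
{
	/* stands in for iwl_trans_ac_txq_enable(trans, queue, fifo) */
	printf("queue %2d -> fifo %d\n", queue, fifo);
}
int main(void)
{
	int i;
	for (i = 0; i < (int)sizeof(ipan_queue_to_txf); i++)
		if (ipan_queue_to_txf[i] != TX_FIFO_UNUSED)
			ac_txq_enable(i, ipan_queue_to_txf[i]);
	/* queue 9 was skipped; the transport enables it using cmd_fifo */
	return 0;
}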

View File

@ -61,6 +61,9 @@
*
*****************************************************************************/
#define DEBUG
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include "iwl-debug.h"
@ -124,7 +127,7 @@ void __iwl_dbg(struct device *dev,
#ifdef CONFIG_IWLWIFI_DEBUG
if (iwl_have_debug_level(level) &&
(!limit || net_ratelimit()))
dev_err(dev, "%c %s %pV", in_interrupt() ? 'I' : 'U',
dev_dbg(dev, "%c %s %pV", in_interrupt() ? 'I' : 'U',
function, &vaf);
#endif
trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf);

View File

@ -176,7 +176,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi_msg
#define MAX_MSG_LEN 100
#define MAX_MSG_LEN 110
DECLARE_EVENT_CLASS(iwlwifi_msg_event,
TP_PROTO(struct va_format *vaf),
@ -189,7 +189,7 @@ DECLARE_EVENT_CLASS(iwlwifi_msg_event,
MAX_MSG_LEN, vaf->fmt,
*vaf->va) >= MAX_MSG_LEN);
),
TP_printk("%s", (char *)__get_dynamic_array(msg))
TP_printk("%s", __get_str(msg))
);
DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_err,

View File

@ -1013,6 +1013,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
.power_level = IWL_POWER_INDEX_1,
.bt_ch_announce = true,
.auto_agg = true,
.wd_disable = true,
/* the rest are 0 by default */
};
EXPORT_SYMBOL_GPL(iwlwifi_mod_params);

View File

@ -290,16 +290,17 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
* currently supports
*/
#define IWL_MAX_HW_QUEUES 32
#define IWL_INVALID_STATION 255
#define IWL_MAX_TID_COUNT 8
#define IWL_FRAME_LIMIT 64
/**
* struct iwl_trans_config - transport configuration
*
* @op_mode: pointer to the upper layer.
* @queue_to_fifo: queue to FIFO mapping to set up by
* default
* @n_queue_to_fifo: number of queues to set up
* @cmd_queue: the index of the command queue.
* Must be set before start_fw.
* @cmd_fifo: the fifo for host commands
* @no_reclaim_cmds: Some devices erroneously don't set the
* SEQ_RX_FRAME bit on some notifications, this is the
* list of such notifications to filter. Max length is
@ -314,10 +315,9 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
*/
struct iwl_trans_config {
struct iwl_op_mode *op_mode;
const u8 *queue_to_fifo;
u8 n_queue_to_fifo;
u8 cmd_queue;
u8 cmd_fifo;
const u8 *no_reclaim_cmds;
int n_no_reclaim_cmds;
@ -355,9 +355,9 @@ struct iwl_trans;
* Must be atomic
* @reclaim: free packet until ssn. Returns a list of freed packets.
* Must be atomic
* @txq_enable: setup a tx queue for AMPDU - will be called once the HW is
* ready and a successful ADDBA response has been received.
* May sleep
* @txq_enable: setup a queue. To setup an AC queue, use the
* iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
* this one. The op_mode must not configure the HCMD queue. May sleep.
* @txq_disable: de-configure a Tx queue to send AMPDUs
* Must be atomic
* @wait_tx_queue_empty: wait until all tx queues are empty
@ -497,9 +497,9 @@ static inline void iwl_trans_fw_alive(struct iwl_trans *trans)
{
might_sleep();
trans->ops->fw_alive(trans);
trans->state = IWL_TRANS_FW_ALIVE;
trans->ops->fw_alive(trans);
}
static inline int iwl_trans_start_fw(struct iwl_trans *trans,
@ -593,6 +593,13 @@ static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
frame_limit, ssn);
}
static inline void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue,
int fifo)
{
iwl_trans_txq_enable(trans, queue, fifo, IWL_INVALID_STATION,
IWL_MAX_TID_COUNT, IWL_FRAME_LIMIT, 0);
}
static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
{
WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,

View File

@ -269,10 +269,9 @@ struct iwl_trans_pcie {
wait_queue_head_t ucode_write_waitq;
unsigned long status;
u8 cmd_queue;
u8 cmd_fifo;
u8 n_no_reclaim_cmds;
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
u8 setup_q_to_fifo[IWL_MAX_HW_QUEUES];
u8 n_q_to_fifo;
bool rx_buf_size_8k;
u32 rx_page_order;

View File

@ -879,9 +879,6 @@ static irqreturn_t iwl_isr(int irq, void *data)
lockdep_assert_held(&trans_pcie->irq_lock);
if (!trans)
return IRQ_NONE;
trace_iwlwifi_dev_irq(trans->dev);
/* Disable (but don't clear!) interrupts here to avoid

View File

@ -1059,7 +1059,7 @@ static void iwl_tx_start(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 a;
int i, chan;
int chan;
u32 reg_val;
/* make sure all queue are not stopped/used */
@ -1091,12 +1091,8 @@ static void iwl_tx_start(struct iwl_trans *trans)
*/
iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
int fifo = trans_pcie->setup_q_to_fifo[i];
iwl_trans_pcie_txq_enable(trans, i, fifo, IWL_INVALID_STATION,
IWL_TID_NON_QOS, SCD_FRAME_LIMIT, 0);
}
iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
trans_pcie->cmd_fifo);
/* Activate all Tx DMA/FIFO channels */
iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
@ -1145,7 +1141,7 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
if (ret < 0)
IWL_ERR(trans,
"Failing on timeout while stopping DMA channel %d [0x%08x]",
"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
ch,
iwl_read_direct32(trans,
FH_TSSR_TX_STATUS_REG));
@ -1153,7 +1149,8 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
if (!trans_pcie->txq) {
IWL_WARN(trans, "Stopping tx queues that aren't allocated...");
IWL_WARN(trans,
"Stopping tx queues that aren't allocated...\n");
return 0;
}
@ -1430,7 +1427,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
err = iwl_prepare_card_hw(trans);
if (err) {
IWL_ERR(trans, "Error while preparing HW: %d", err);
IWL_ERR(trans, "Error while preparing HW: %d\n", err);
goto err_free_irq;
}
@ -1528,6 +1525,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
trans_pcie->cmd_queue = trans_cfg->cmd_queue;
trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
trans_pcie->n_no_reclaim_cmds = 0;
else
@ -1536,17 +1534,6 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
trans_pcie->n_no_reclaim_cmds * sizeof(u8));
trans_pcie->n_q_to_fifo = trans_cfg->n_queue_to_fifo;
if (WARN_ON(trans_pcie->n_q_to_fifo > IWL_MAX_HW_QUEUES))
trans_pcie->n_q_to_fifo = IWL_MAX_HW_QUEUES;
/* at least the command queue must be mapped */
WARN_ON(!trans_pcie->n_q_to_fifo);
memcpy(trans_pcie->setup_q_to_fifo, trans_cfg->queue_to_fifo,
trans_pcie->n_q_to_fifo * sizeof(u8));
trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
if (trans_pcie->rx_buf_size_8k)
trans_pcie->rx_page_order = get_order(8 * 1024);
@ -2141,13 +2128,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
dev_printk(KERN_ERR, &pdev->dev, "pci_request_regions failed");
dev_printk(KERN_ERR, &pdev->dev,
"pci_request_regions failed\n");
goto out_pci_disable_device;
}
trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
if (!trans_pcie->hw_base) {
dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed");
dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed\n");
err = -ENODEV;
goto out_pci_release_regions;
}
@ -2168,7 +2156,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
err = pci_enable_msi(pdev);
if (err)
dev_printk(KERN_ERR, &pdev->dev,
"pci_enable_msi failed(0X%x)", err);
"pci_enable_msi failed(0X%x)\n", err);
trans->dev = &pdev->dev;
trans_pcie->irq = pdev->irq;

View File

@ -1,39 +0,0 @@
config IWM
tristate "Intel Wireless Multicomm 3200 WiFi driver (EXPERIMENTAL)"
depends on MMC && EXPERIMENTAL
depends on CFG80211
select FW_LOADER
select IWMC3200TOP
help
The Intel Wireless Multicomm 3200 hardware is a combo
card with GPS, Bluetooth, WiMax and 802.11 radios. It
runs over SDIO and is typically found on Moorestown
based platform. This driver takes care of the 802.11
part, which is a fullmac one.
If you choose to build it as a module, it'll be called
iwmc3200wifi.ko.
config IWM_DEBUG
bool "Enable full debugging output in iwmc3200wifi"
depends on IWM && DEBUG_FS
help
This option will enable debug tracing and setting for iwm
You can set the debug level and module through debugfs. By
default all modules are set to the IWL_DL_ERR level.
To see the list of debug modules and levels, see iwm/debug.h
For example, if you want the full MLME debug output:
echo 0xff > /sys/kernel/debug/iwm/phyN/debug/mlme
Or, if you want the full debug, for all modules:
echo 0xff > /sys/kernel/debug/iwm/phyN/debug/level
echo 0xff > /sys/kernel/debug/iwm/phyN/debug/modules
config IWM_TRACING
bool "Enable event tracing for iwmc3200wifi"
depends on IWM && EVENT_TRACING
help
Say Y here to trace all the commands and responses between
the driver and firmware (including TX/RX frames) with ftrace.

View File

@ -1,10 +0,0 @@
obj-$(CONFIG_IWM) := iwmc3200wifi.o
iwmc3200wifi-objs += main.o netdev.o rx.o tx.o sdio.o hal.o fw.o
iwmc3200wifi-objs += commands.o cfg80211.o eeprom.o
iwmc3200wifi-$(CONFIG_IWM_DEBUG) += debugfs.o
iwmc3200wifi-$(CONFIG_IWM_TRACING) += trace.o
CFLAGS_trace.o := -I$(src)
ccflags-y += -D__CHECK_ENDIAN__

View File

@ -1,57 +0,0 @@
/*
* Intel Wireless Multicomm 3200 WiFi driver
*
* Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
* Samuel Ortiz <samuel.ortiz@intel.com>
* Zhu Yi <yi.zhu@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*/
#ifndef __IWM_BUS_H__
#define __IWM_BUS_H__
#include "iwm.h"
struct iwm_if_ops {
int (*enable)(struct iwm_priv *iwm);
int (*disable)(struct iwm_priv *iwm);
int (*send_chunk)(struct iwm_priv *iwm, u8* buf, int count);
void (*debugfs_init)(struct iwm_priv *iwm, struct dentry *parent_dir);
void (*debugfs_exit)(struct iwm_priv *iwm);
const char *umac_name;
const char *calib_lmac_name;
const char *lmac_name;
};
static inline int iwm_bus_send_chunk(struct iwm_priv *iwm, u8 *buf, int count)
{
return iwm->bus_ops->send_chunk(iwm, buf, count);
}
static inline int iwm_bus_enable(struct iwm_priv *iwm)
{
return iwm->bus_ops->enable(iwm);
}
static inline int iwm_bus_disable(struct iwm_priv *iwm)
{
return iwm->bus_ops->disable(iwm);
}
#endif

View File

@ -1,882 +0,0 @@
/*
* Intel Wireless Multicomm 3200 WiFi driver
*
* Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
* Samuel Ortiz <samuel.ortiz@intel.com>
* Zhu Yi <yi.zhu@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*/
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
#include <linux/etherdevice.h>
#include <linux/wireless.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <net/cfg80211.h>
#include "iwm.h"
#include "commands.h"
#include "cfg80211.h"
#include "debug.h"
#define RATETAB_ENT(_rate, _rateid, _flags) \
{ \
.bitrate = (_rate), \
.hw_value = (_rateid), \
.flags = (_flags), \
}
#define CHAN2G(_channel, _freq, _flags) { \
.band = IEEE80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_channel), \
.flags = (_flags), \
.max_antenna_gain = 0, \
.max_power = 30, \
}
#define CHAN5G(_channel, _flags) { \
.band = IEEE80211_BAND_5GHZ, \
.center_freq = 5000 + (5 * (_channel)), \
.hw_value = (_channel), \
.flags = (_flags), \
.max_antenna_gain = 0, \
.max_power = 30, \
}
static struct ieee80211_rate iwm_rates[] = {
RATETAB_ENT(10, 0x1, 0),
RATETAB_ENT(20, 0x2, 0),
RATETAB_ENT(55, 0x4, 0),
RATETAB_ENT(110, 0x8, 0),
RATETAB_ENT(60, 0x10, 0),
RATETAB_ENT(90, 0x20, 0),
RATETAB_ENT(120, 0x40, 0),
RATETAB_ENT(180, 0x80, 0),
RATETAB_ENT(240, 0x100, 0),
RATETAB_ENT(360, 0x200, 0),
RATETAB_ENT(480, 0x400, 0),
RATETAB_ENT(540, 0x800, 0),
};
#define iwm_a_rates (iwm_rates + 4)
#define iwm_a_rates_size 8
#define iwm_g_rates (iwm_rates + 0)
#define iwm_g_rates_size 12
static struct ieee80211_channel iwm_2ghz_channels[] = {
CHAN2G(1, 2412, 0),
CHAN2G(2, 2417, 0),
CHAN2G(3, 2422, 0),
CHAN2G(4, 2427, 0),
CHAN2G(5, 2432, 0),
CHAN2G(6, 2437, 0),
CHAN2G(7, 2442, 0),
CHAN2G(8, 2447, 0),
CHAN2G(9, 2452, 0),
CHAN2G(10, 2457, 0),
CHAN2G(11, 2462, 0),
CHAN2G(12, 2467, 0),
CHAN2G(13, 2472, 0),
CHAN2G(14, 2484, 0),
};
static struct ieee80211_channel iwm_5ghz_a_channels[] = {
CHAN5G(34, 0), CHAN5G(36, 0),
CHAN5G(38, 0), CHAN5G(40, 0),
CHAN5G(42, 0), CHAN5G(44, 0),
CHAN5G(46, 0), CHAN5G(48, 0),
CHAN5G(52, 0), CHAN5G(56, 0),
CHAN5G(60, 0), CHAN5G(64, 0),
CHAN5G(100, 0), CHAN5G(104, 0),
CHAN5G(108, 0), CHAN5G(112, 0),
CHAN5G(116, 0), CHAN5G(120, 0),
CHAN5G(124, 0), CHAN5G(128, 0),
CHAN5G(132, 0), CHAN5G(136, 0),
CHAN5G(140, 0), CHAN5G(149, 0),
CHAN5G(153, 0), CHAN5G(157, 0),
CHAN5G(161, 0), CHAN5G(165, 0),
CHAN5G(184, 0), CHAN5G(188, 0),
CHAN5G(192, 0), CHAN5G(196, 0),
CHAN5G(200, 0), CHAN5G(204, 0),
CHAN5G(208, 0), CHAN5G(212, 0),
CHAN5G(216, 0),
};
static struct ieee80211_supported_band iwm_band_2ghz = {
.channels = iwm_2ghz_channels,
.n_channels = ARRAY_SIZE(iwm_2ghz_channels),
.bitrates = iwm_g_rates,
.n_bitrates = iwm_g_rates_size,
};
static struct ieee80211_supported_band iwm_band_5ghz = {
.channels = iwm_5ghz_a_channels,
.n_channels = ARRAY_SIZE(iwm_5ghz_a_channels),
.bitrates = iwm_a_rates,
.n_bitrates = iwm_a_rates_size,
};
static int iwm_key_init(struct iwm_key *key, u8 key_index,
const u8 *mac_addr, struct key_params *params)
{
key->hdr.key_idx = key_index;
if (!mac_addr || is_broadcast_ether_addr(mac_addr)) {
key->hdr.multicast = 1;
memset(key->hdr.mac, 0xff, ETH_ALEN);
} else {
key->hdr.multicast = 0;
memcpy(key->hdr.mac, mac_addr, ETH_ALEN);
}
if (params) {
if (params->key_len > WLAN_MAX_KEY_LEN ||
params->seq_len > IW_ENCODE_SEQ_MAX_SIZE)
return -EINVAL;
key->cipher = params->cipher;
key->key_len = params->key_len;
key->seq_len = params->seq_len;
memcpy(key->key, params->key, key->key_len);
memcpy(key->seq, params->seq, key->seq_len);
}
return 0;
}
static int iwm_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_index, bool pairwise, const u8 *mac_addr,
struct key_params *params)
{
struct iwm_priv *iwm = ndev_to_iwm(ndev);
struct iwm_key *key;
int ret;
IWM_DBG_WEXT(iwm, DBG, "Adding key for %pM\n", mac_addr);
if (key_index >= IWM_NUM_KEYS)
return -ENOENT;
key = &iwm->keys[key_index];
memset(key, 0, sizeof(struct iwm_key));
ret = iwm_key_init(key, key_index, mac_addr, params);
if (ret < 0) {
IWM_ERR(iwm, "Invalid key_params\n");
return ret;
}
return iwm_set_key(iwm, 0, key);
}
static int iwm_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_index, bool pairwise, const u8 *mac_addr,
void *cookie,
void (*callback)(void *cookie,
struct key_params*))
{
struct iwm_priv *iwm = ndev_to_iwm(ndev);
struct iwm_key *key;
struct key_params params;
IWM_DBG_WEXT(iwm, DBG, "Getting key %d\n", key_index);
if (key_index >= IWM_NUM_KEYS)
return -ENOENT;
memset(&params, 0, sizeof(params));
key = &iwm->keys[key_index];
params.cipher = key->cipher;
params.key_len = key->key_len;
params.seq_len = key->seq_len;
params.seq = key->seq;
params.key = key->key;
callback(cookie, &params);
return key->key_len ? 0 : -ENOENT;
}
static int iwm_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_index, bool pairwise, const u8 *mac_addr)
{
struct iwm_priv *iwm = ndev_to_iwm(ndev);
struct iwm_key *key;
if (key_index >= IWM_NUM_KEYS)
return -ENOENT;
key = &iwm->keys[key_index];
if (!iwm->keys[key_index].key_len) {
IWM_DBG_WEXT(iwm, DBG, "Key %d not used\n", key_index);
return 0;
}
if (key_index == iwm->default_key)
iwm->default_key = -1;
return iwm_set_key(iwm, 1, key);
}
static int iwm_cfg80211_set_default_key(struct wiphy *wiphy,
struct net_device *ndev,
u8 key_index, bool unicast,
bool multicast)
{
struct iwm_priv *iwm = ndev_to_iwm(ndev);
IWM_DBG_WEXT(iwm, DBG, "Default key index is: %d\n", key_index);
if (key_index >= IWM_NUM_KEYS)
return -ENOENT;
if (!iwm->keys[key_index].key_len) {
IWM_ERR(iwm, "Key %d not used\n", key_index);
return -EINVAL;
}
iwm->default_key = key_index;
return iwm_set_tx_key(iwm, key_index);
}
static int iwm_cfg80211_get_station(struct wiphy *wiphy,
struct net_device *ndev,
u8 *mac, struct station_info *sinfo)
{
struct iwm_priv *iwm = ndev_to_iwm(ndev);
if (memcmp(mac, iwm->bssid, ETH_ALEN))
return -ENOENT;
sinfo->filled |= STATION_INFO_TX_BITRATE;
sinfo->txrate.legacy = iwm->rate * 10;
if (test_bit(IWM_STATUS_ASSOCIATED, &iwm->status)) {
sinfo->filled |= STATION_INFO_SIGNAL;
sinfo->signal = iwm->wstats.qual.level;
}
return 0;
}
int iwm_cfg80211_inform_bss(struct iwm_priv *iwm)
{
struct wiphy *wiphy = iwm_to_wiphy(iwm);
struct iwm_bss_info *bss;
struct iwm_umac_notif_bss_info *umac_bss;
struct ieee80211_mgmt *mgmt;
struct ieee80211_channel *channel;
struct ieee80211_supported_band *band;
s32 signal;
int freq;
list_for_each_entry(bss, &iwm->bss_list, node) {
umac_bss = bss->bss;
mgmt = (struct ieee80211_mgmt *)(umac_bss->frame_buf);
if (umac_bss->band == UMAC_BAND_2GHZ)
band = wiphy->bands[IEEE80211_BAND_2GHZ];
else if (umac_bss->band == UMAC_BAND_5GHZ)
band = wiphy->bands[IEEE80211_BAND_5GHZ];
else {
IWM_ERR(iwm, "Invalid band: %d\n", umac_bss->band);
return -EINVAL;
}
freq = ieee80211_channel_to_frequency(umac_bss->channel,
band->band);
channel = ieee80211_get_channel(wiphy, freq);
signal = umac_bss->rssi * 100;
if (!cfg80211_inform_bss_frame(wiphy, channel, mgmt,
le16_to_cpu(umac_bss->frame_len),
signal, GFP_KERNEL))
return -EINVAL;
}
return 0;
}
static int iwm_cfg80211_change_iface(struct wiphy *wiphy,
struct net_device *ndev,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
struct wireless_dev *wdev;
struct iwm_priv *iwm;
u32 old_mode;
wdev = ndev->ieee80211_ptr;
iwm = ndev_to_iwm(ndev);
old_mode = iwm->conf.mode;
switch (type) {
case NL80211_IFTYPE_STATION:
iwm->conf.mode = UMAC_MODE_BSS;
break;
case NL80211_IFTYPE_ADHOC:
iwm->conf.mode = UMAC_MODE_IBSS;
break;
default:
return -EOPNOTSUPP;
}
wdev->iftype = type;
if ((old_mode == iwm->conf.mode) || !iwm->umac_profile)
return 0;
iwm->umac_profile->mode = cpu_to_le32(iwm->conf.mode);
if (iwm->umac_profile_active)
iwm_invalidate_mlme_profile(iwm);
return 0;
}
static int iwm_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_scan_request *request)
{
struct iwm_priv *iwm = ndev_to_iwm(ndev);
int ret;
if (!test_bit(IWM_STATUS_READY, &iwm->status)) {
IWM_ERR(iwm, "Scan while device is not ready\n");
return -EIO;
}
if (test_bit(IWM_STATUS_SCANNING, &iwm->status)) {
IWM_ERR(iwm, "Scanning already\n");
return -EAGAIN;
}
if (test_bit(IWM_STATUS_SCAN_ABORTING, &iwm->status)) {
IWM_ERR(iwm, "Scanning being aborted\n");
return -EAGAIN;
}
set_bit(IWM_STATUS_SCANNING, &iwm->status);
ret = iwm_scan_ssids(iwm, request->ssids, request->n_ssids);
if (ret) {
clear_bit(IWM_STATUS_SCANNING, &iwm->status);
return ret;
}
iwm->scan_request = request;
return 0;
}
static int iwm_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
if (changed & WIPHY_PARAM_RTS_THRESHOLD &&
(iwm->conf.rts_threshold != wiphy->rts_threshold)) {
int ret;
iwm->conf.rts_threshold = wiphy->rts_threshold;
ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
CFG_RTS_THRESHOLD,
iwm->conf.rts_threshold);
if (ret < 0)
return ret;
}
if (changed & WIPHY_PARAM_FRAG_THRESHOLD &&
(iwm->conf.frag_threshold != wiphy->frag_threshold)) {
int ret;
iwm->conf.frag_threshold = wiphy->frag_threshold;
ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_FA_CFG_FIX,
CFG_FRAG_THRESHOLD,
iwm->conf.frag_threshold);
if (ret < 0)
return ret;
}
return 0;
}
static int iwm_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ibss_params *params)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
struct ieee80211_channel *chan = params->channel;
if (!test_bit(IWM_STATUS_READY, &iwm->status))
return -EIO;
	/* UMAC doesn't support creating or joining an IBSS network
	 * with a specified BSSID. */
if (params->bssid)
return -EOPNOTSUPP;
iwm->channel = ieee80211_frequency_to_channel(chan->center_freq);
iwm->umac_profile->ibss.band = chan->band;
iwm->umac_profile->ibss.channel = iwm->channel;
iwm->umac_profile->ssid.ssid_len = params->ssid_len;
memcpy(iwm->umac_profile->ssid.ssid, params->ssid, params->ssid_len);
return iwm_send_mlme_profile(iwm);
}
static int iwm_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
if (iwm->umac_profile_active)
return iwm_invalidate_mlme_profile(iwm);
return 0;
}
static int iwm_set_auth_type(struct iwm_priv *iwm,
enum nl80211_auth_type sme_auth_type)
{
u8 *auth_type = &iwm->umac_profile->sec.auth_type;
switch (sme_auth_type) {
case NL80211_AUTHTYPE_AUTOMATIC:
case NL80211_AUTHTYPE_OPEN_SYSTEM:
IWM_DBG_WEXT(iwm, DBG, "OPEN auth\n");
*auth_type = UMAC_AUTH_TYPE_OPEN;
break;
case NL80211_AUTHTYPE_SHARED_KEY:
if (iwm->umac_profile->sec.flags &
(UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK)) {
IWM_DBG_WEXT(iwm, DBG, "WPA auth alg\n");
*auth_type = UMAC_AUTH_TYPE_RSNA_PSK;
} else {
IWM_DBG_WEXT(iwm, DBG, "WEP shared key auth alg\n");
*auth_type = UMAC_AUTH_TYPE_LEGACY_PSK;
}
break;
default:
IWM_ERR(iwm, "Unsupported auth alg: 0x%x\n", sme_auth_type);
return -ENOTSUPP;
}
return 0;
}
static int iwm_set_wpa_version(struct iwm_priv *iwm, u32 wpa_version)
{
IWM_DBG_WEXT(iwm, DBG, "wpa_version: %d\n", wpa_version);
if (!wpa_version) {
iwm->umac_profile->sec.flags = UMAC_SEC_FLG_LEGACY_PROFILE;
return 0;
}
if (wpa_version & NL80211_WPA_VERSION_1)
iwm->umac_profile->sec.flags = UMAC_SEC_FLG_WPA_ON_MSK;
if (wpa_version & NL80211_WPA_VERSION_2)
iwm->umac_profile->sec.flags = UMAC_SEC_FLG_RSNA_ON_MSK;
return 0;
}
static int iwm_set_cipher(struct iwm_priv *iwm, u32 cipher, bool ucast)
{
u8 *profile_cipher = ucast ? &iwm->umac_profile->sec.ucast_cipher :
&iwm->umac_profile->sec.mcast_cipher;
if (!cipher) {
*profile_cipher = UMAC_CIPHER_TYPE_NONE;
return 0;
}
IWM_DBG_WEXT(iwm, DBG, "%ccast cipher is 0x%x\n", ucast ? 'u' : 'm',
cipher);
switch (cipher) {
case IW_AUTH_CIPHER_NONE:
*profile_cipher = UMAC_CIPHER_TYPE_NONE;
break;
case WLAN_CIPHER_SUITE_WEP40:
*profile_cipher = UMAC_CIPHER_TYPE_WEP_40;
break;
case WLAN_CIPHER_SUITE_WEP104:
*profile_cipher = UMAC_CIPHER_TYPE_WEP_104;
break;
case WLAN_CIPHER_SUITE_TKIP:
*profile_cipher = UMAC_CIPHER_TYPE_TKIP;
break;
case WLAN_CIPHER_SUITE_CCMP:
*profile_cipher = UMAC_CIPHER_TYPE_CCMP;
break;
default:
IWM_ERR(iwm, "Unsupported cipher: 0x%x\n", cipher);
return -ENOTSUPP;
}
return 0;
}
static int iwm_set_key_mgt(struct iwm_priv *iwm, u32 key_mgt)
{
u8 *auth_type = &iwm->umac_profile->sec.auth_type;
IWM_DBG_WEXT(iwm, DBG, "key_mgt: 0x%x\n", key_mgt);
if (key_mgt == WLAN_AKM_SUITE_8021X)
*auth_type = UMAC_AUTH_TYPE_8021X;
else if (key_mgt == WLAN_AKM_SUITE_PSK) {
if (iwm->umac_profile->sec.flags &
(UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK))
*auth_type = UMAC_AUTH_TYPE_RSNA_PSK;
else
*auth_type = UMAC_AUTH_TYPE_LEGACY_PSK;
} else {
IWM_ERR(iwm, "Invalid key mgt: 0x%x\n", key_mgt);
return -EINVAL;
}
return 0;
}
static int iwm_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
struct ieee80211_channel *chan = sme->channel;
struct key_params key_param;
int ret;
if (!test_bit(IWM_STATUS_READY, &iwm->status))
return -EIO;
if (!sme->ssid)
return -EINVAL;
if (iwm->umac_profile_active) {
ret = iwm_invalidate_mlme_profile(iwm);
if (ret) {
IWM_ERR(iwm, "Couldn't invalidate profile\n");
return ret;
}
}
if (chan)
iwm->channel =
ieee80211_frequency_to_channel(chan->center_freq);
iwm->umac_profile->ssid.ssid_len = sme->ssid_len;
memcpy(iwm->umac_profile->ssid.ssid, sme->ssid, sme->ssid_len);
if (sme->bssid) {
IWM_DBG_WEXT(iwm, DBG, "BSSID: %pM\n", sme->bssid);
memcpy(&iwm->umac_profile->bssid[0], sme->bssid, ETH_ALEN);
iwm->umac_profile->bss_num = 1;
} else {
memset(&iwm->umac_profile->bssid[0], 0, ETH_ALEN);
iwm->umac_profile->bss_num = 0;
}
ret = iwm_set_wpa_version(iwm, sme->crypto.wpa_versions);
if (ret < 0)
return ret;
ret = iwm_set_auth_type(iwm, sme->auth_type);
if (ret < 0)
return ret;
if (sme->crypto.n_ciphers_pairwise) {
ret = iwm_set_cipher(iwm, sme->crypto.ciphers_pairwise[0],
true);
if (ret < 0)
return ret;
}
ret = iwm_set_cipher(iwm, sme->crypto.cipher_group, false);
if (ret < 0)
return ret;
if (sme->crypto.n_akm_suites) {
ret = iwm_set_key_mgt(iwm, sme->crypto.akm_suites[0]);
if (ret < 0)
return ret;
}
/*
* We save the WEP key in case we want to do shared authentication.
	 * We have to do it this way because the UMAC will assert whenever
	 * it gets a key before a profile.
*/
if (sme->key) {
key_param.key = kmemdup(sme->key, sme->key_len, GFP_KERNEL);
if (key_param.key == NULL)
return -ENOMEM;
key_param.key_len = sme->key_len;
key_param.seq_len = 0;
key_param.cipher = sme->crypto.ciphers_pairwise[0];
ret = iwm_key_init(&iwm->keys[sme->key_idx], sme->key_idx,
NULL, &key_param);
kfree(key_param.key);
if (ret < 0) {
IWM_ERR(iwm, "Invalid key_params\n");
return ret;
}
iwm->default_key = sme->key_idx;
}
/* WPA and open AUTH type from wpa_s means WPS (a.k.a. WSC) */
if ((iwm->umac_profile->sec.flags &
(UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK)) &&
iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_OPEN) {
iwm->umac_profile->sec.flags = UMAC_SEC_FLG_WSC_ON_MSK;
}
ret = iwm_send_mlme_profile(iwm);
if (iwm->umac_profile->sec.auth_type != UMAC_AUTH_TYPE_LEGACY_PSK ||
sme->key == NULL)
return ret;
/*
* We want to do shared auth.
* We need to actually set the key we previously cached,
* and then tell the UMAC it's the default one.
* That will trigger the auth+assoc UMAC machinery, and again,
* this must be done after setting the profile.
*/
ret = iwm_set_key(iwm, 0, &iwm->keys[sme->key_idx]);
if (ret < 0)
return ret;
return iwm_set_tx_key(iwm, iwm->default_key);
}
static int iwm_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
u16 reason_code)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
IWM_DBG_WEXT(iwm, DBG, "Active: %d\n", iwm->umac_profile_active);
if (iwm->umac_profile_active)
iwm_invalidate_mlme_profile(iwm);
return 0;
}
static int iwm_cfg80211_set_txpower(struct wiphy *wiphy,
enum nl80211_tx_power_setting type, int mbm)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
int ret;
switch (type) {
case NL80211_TX_POWER_AUTOMATIC:
return 0;
case NL80211_TX_POWER_FIXED:
if (mbm < 0 || (mbm % 100))
return -EOPNOTSUPP;
if (!test_bit(IWM_STATUS_READY, &iwm->status))
return 0;
ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
CFG_TX_PWR_LIMIT_USR,
MBM_TO_DBM(mbm) * 2);
if (ret < 0)
return ret;
return iwm_tx_power_trigger(iwm);
default:
IWM_ERR(iwm, "Unsupported power type: %d\n", type);
return -EOPNOTSUPP;
}
return 0;
}
static int iwm_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
*dbm = iwm->txpower >> 1;
return 0;
}
static int iwm_cfg80211_set_power_mgmt(struct wiphy *wiphy,
struct net_device *dev,
bool enabled, int timeout)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
u32 power_index;
if (enabled)
power_index = IWM_POWER_INDEX_DEFAULT;
else
power_index = IWM_POWER_INDEX_MIN;
if (power_index == iwm->conf.power_index)
return 0;
iwm->conf.power_index = power_index;
return iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
CFG_POWER_INDEX, iwm->conf.power_index);
}
static int iwm_cfg80211_set_pmksa(struct wiphy *wiphy,
struct net_device *netdev,
struct cfg80211_pmksa *pmksa)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_ADD);
}
static int iwm_cfg80211_del_pmksa(struct wiphy *wiphy,
struct net_device *netdev,
struct cfg80211_pmksa *pmksa)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_DEL);
}
static int iwm_cfg80211_flush_pmksa(struct wiphy *wiphy,
struct net_device *netdev)
{
struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
struct cfg80211_pmksa pmksa;
memset(&pmksa, 0, sizeof(struct cfg80211_pmksa));
return iwm_send_pmkid_update(iwm, &pmksa, IWM_CMD_PMKID_FLUSH);
}
static struct cfg80211_ops iwm_cfg80211_ops = {
.change_virtual_intf = iwm_cfg80211_change_iface,
.add_key = iwm_cfg80211_add_key,
.get_key = iwm_cfg80211_get_key,
.del_key = iwm_cfg80211_del_key,
.set_default_key = iwm_cfg80211_set_default_key,
.get_station = iwm_cfg80211_get_station,
.scan = iwm_cfg80211_scan,
.set_wiphy_params = iwm_cfg80211_set_wiphy_params,
.connect = iwm_cfg80211_connect,
.disconnect = iwm_cfg80211_disconnect,
.join_ibss = iwm_cfg80211_join_ibss,
.leave_ibss = iwm_cfg80211_leave_ibss,
.set_tx_power = iwm_cfg80211_set_txpower,
.get_tx_power = iwm_cfg80211_get_txpower,
.set_power_mgmt = iwm_cfg80211_set_power_mgmt,
.set_pmksa = iwm_cfg80211_set_pmksa,
.del_pmksa = iwm_cfg80211_del_pmksa,
.flush_pmksa = iwm_cfg80211_flush_pmksa,
};
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
};
struct wireless_dev *iwm_wdev_alloc(int sizeof_bus, struct device *dev)
{
int ret = 0;
struct wireless_dev *wdev;
/*
* We're trying to have the following memory
* layout:
*
* +-------------------------+
* | struct wiphy |
* +-------------------------+
* | struct iwm_priv |
* +-------------------------+
* | bus private data |
* | (e.g. iwm_priv_sdio) |
* +-------------------------+
*
*/
wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
if (!wdev) {
dev_err(dev, "Couldn't allocate wireless device\n");
return ERR_PTR(-ENOMEM);
}
wdev->wiphy = wiphy_new(&iwm_cfg80211_ops,
sizeof(struct iwm_priv) + sizeof_bus);
if (!wdev->wiphy) {
dev_err(dev, "Couldn't allocate wiphy device\n");
ret = -ENOMEM;
goto out_err_new;
}
set_wiphy_dev(wdev->wiphy, dev);
wdev->wiphy->max_scan_ssids = UMAC_WIFI_IF_PROBE_OPTION_MAX;
wdev->wiphy->max_num_pmkids = UMAC_MAX_NUM_PMKIDS;
wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &iwm_band_2ghz;
wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &iwm_band_5ghz;
wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
wdev->wiphy->cipher_suites = cipher_suites;
wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
ret = wiphy_register(wdev->wiphy);
if (ret < 0) {
dev_err(dev, "Couldn't register wiphy device\n");
goto out_err_register;
}
return wdev;
out_err_register:
wiphy_free(wdev->wiphy);
out_err_new:
kfree(wdev);
return ERR_PTR(ret);
}
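/*
 * Illustrative sketch only (these helpers are not part of this file): given
 * the memory layout that iwm_wdev_alloc() sets up above, the private areas
 * would typically be recovered through cfg80211's wiphy_priv().  The real
 * iwm_to_wiphy()/wiphy_to_iwm() accessors live elsewhere in the driver; the
 * names below are hypothetical.
 */
static inline struct iwm_priv *example_wiphy_to_iwm(struct wiphy *wiphy)
{
	/* struct iwm_priv starts right after struct wiphy */
	return wiphy_priv(wiphy);
}

static inline void *example_iwm_to_bus_priv(struct iwm_priv *iwm)
{
	/* bus private data (e.g. iwm_priv_sdio) follows struct iwm_priv */
	return (void *)(iwm + 1);
}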
void iwm_wdev_free(struct iwm_priv *iwm)
{
struct wireless_dev *wdev = iwm_to_wdev(iwm);
if (!wdev)
return;
wiphy_unregister(wdev->wiphy);
wiphy_free(wdev->wiphy);
kfree(wdev);
}

View File

@ -1,31 +0,0 @@
/*
* Intel Wireless Multicomm 3200 WiFi driver
*
* Copyright (C) 2009 Intel Corporation <ilw@linux.intel.com>
* Samuel Ortiz <samuel.ortiz@intel.com>
* Zhu Yi <yi.zhu@intel.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA.
*
*/
#ifndef __IWM_CFG80211_H__
#define __IWM_CFG80211_H__
int iwm_cfg80211_inform_bss(struct iwm_priv *iwm);
struct wireless_dev *iwm_wdev_alloc(int sizeof_bus, struct device *dev);
void iwm_wdev_free(struct iwm_priv *iwm);
#endif

File diff suppressed because it is too large.

View File

@ -1,509 +0,0 @@
/*
* Intel Wireless Multicomm 3200 WiFi driver
*
* Copyright (C) 2009 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*
* Intel Corporation <ilw@linux.intel.com>
* Samuel Ortiz <samuel.ortiz@intel.com>
* Zhu Yi <yi.zhu@intel.com>
*
*/
#ifndef __IWM_COMMANDS_H__
#define __IWM_COMMANDS_H__
#include <linux/ieee80211.h>
#define IWM_BARKER_REBOOT_NOTIFICATION 0xF
#define IWM_ACK_BARKER_NOTIFICATION 0x10
/* UMAC commands */
#define UMAC_RST_CTRL_FLG_LARC_CLK_EN 0x0001
#define UMAC_RST_CTRL_FLG_LARC_RESET 0x0002
#define UMAC_RST_CTRL_FLG_FUNC_RESET 0x0004
#define UMAC_RST_CTRL_FLG_DEV_RESET 0x0008
#define UMAC_RST_CTRL_FLG_WIFI_CORE_EN 0x0010
#define UMAC_RST_CTRL_FLG_WIFI_LINK_EN 0x0040
#define UMAC_RST_CTRL_FLG_WIFI_MLME_EN 0x0080
#define UMAC_RST_CTRL_FLG_NVM_RELOAD 0x0100
struct iwm_umac_cmd_reset {
__le32 flags;
} __packed;
#define UMAC_PARAM_TBL_ORD_FIX 0x0
#define UMAC_PARAM_TBL_ORD_VAR 0x1
#define UMAC_PARAM_TBL_CFG_FIX 0x2
#define UMAC_PARAM_TBL_CFG_VAR 0x3
#define UMAC_PARAM_TBL_BSS_TRK 0x4
#define UMAC_PARAM_TBL_FA_CFG_FIX 0x5
#define UMAC_PARAM_TBL_STA 0x6
#define UMAC_PARAM_TBL_CHN 0x7
#define UMAC_PARAM_TBL_STATISTICS 0x8
/* fast access table */
enum {
CFG_FRAG_THRESHOLD = 0,
CFG_FRAME_RETRY_LIMIT,
CFG_OS_QUEUE_UTIL_TH,
CFG_RX_FILTER,
/* <-- LAST --> */
FAST_ACCESS_CFG_TBL_FIX_LAST
};
/* fixed size table */
enum {
CFG_POWER_INDEX = 0,
CFG_PM_LEGACY_RX_TIMEOUT,
CFG_PM_LEGACY_TX_TIMEOUT,
CFG_PM_CTRL_FLAGS,
CFG_PM_KEEP_ALIVE_IN_BEACONS,
CFG_BT_ON_THRESHOLD,
CFG_RTS_THRESHOLD,
CFG_CTS_TO_SELF,
CFG_COEX_MODE,
CFG_WIRELESS_MODE,
CFG_ASSOCIATION_TIMEOUT,
CFG_ROAM_TIMEOUT,
CFG_CAPABILITY_SUPPORTED_RATES,
CFG_SCAN_ALLOWED_UNASSOC_FLAGS,
CFG_SCAN_ALLOWED_MAIN_ASSOC_FLAGS,
CFG_SCAN_ALLOWED_PAN_ASSOC_FLAGS,
CFG_SCAN_INTERNAL_PERIODIC_ENABLED,
CFG_SCAN_IMM_INTERNAL_PERIODIC_SCAN_ON_INIT,
CFG_SCAN_DEFAULT_PERIODIC_FREQ_SEC,
CFG_SCAN_NUM_PASSIVE_CHAN_PER_PARTIAL_SCAN,
CFG_TLC_SUPPORTED_TX_HT_RATES,
CFG_TLC_SUPPORTED_TX_RATES,
CFG_TLC_SPATIAL_STREAM_SUPPORTED,
CFG_TLC_RETRY_PER_RATE,
CFG_TLC_RETRY_PER_HT_RATE,
CFG_TLC_FIXED_MCS,
CFG_TLC_CONTROL_FLAGS,
CFG_TLC_SR_MIN_FAIL,
CFG_TLC_SR_MIN_PASS,
CFG_TLC_HT_STAY_IN_COL_PASS_THRESH,
CFG_TLC_HT_STAY_IN_COL_FAIL_THRESH,
CFG_TLC_LEGACY_STAY_IN_COL_PASS_THRESH,
CFG_TLC_LEGACY_STAY_IN_COL_FAIL_THRESH,
CFG_TLC_HT_FLUSH_STATS_PACKETS,
CFG_TLC_LEGACY_FLUSH_STATS_PACKETS,
CFG_TLC_LEGACY_FLUSH_STATS_MS,
CFG_TLC_HT_FLUSH_STATS_MS,
CFG_TLC_STAY_IN_COL_TIME_OUT,
CFG_TLC_AGG_SHORT_LIM,
CFG_TLC_AGG_LONG_LIM,
CFG_TLC_HT_SR_NO_DECREASE,
CFG_TLC_LEGACY_SR_NO_DECREASE,
CFG_TLC_SR_FORCE_DECREASE,
CFG_TLC_SR_ALLOW_INCREASE,
CFG_TLC_AGG_SET_LONG,
CFG_TLC_AUTO_AGGREGATION,
CFG_TLC_AGG_THRESHOLD,
CFG_TLC_TID_LOAD_THRESHOLD,
CFG_TLC_BLOCK_ACK_TIMEOUT,
CFG_TLC_NO_BA_COUNTED_AS_ONE,
CFG_TLC_NUM_BA_STREAMS_ALLOWED,
CFG_TLC_NUM_BA_STREAMS_PRESENT,
CFG_TLC_RENEW_ADDBA_DELAY,
CFG_TLC_NUM_OF_MULTISEC_TO_COUN_LOAD,
CFG_TLC_IS_STABLE_IN_HT,
CFG_TLC_SR_SIC_1ST_FAIL,
CFG_TLC_SR_SIC_1ST_PASS,
CFG_TLC_SR_SIC_TOTAL_FAIL,
CFG_TLC_SR_SIC_TOTAL_PASS,
CFG_RLC_CHAIN_CTRL,
CFG_TRK_TABLE_OP_MODE,
CFG_TRK_TABLE_RSSI_THRESHOLD,
CFG_TX_PWR_TARGET, /* Used By xVT */
CFG_TX_PWR_LIMIT_USR,
CFG_TX_PWR_LIMIT_BSS, /* 11d limit */
CFG_TX_PWR_LIMIT_BSS_CONSTRAINT, /* 11h constraint */
CFG_TX_PWR_MODE,
CFG_MLME_DBG_NOTIF_BLOCK,
CFG_BT_OFF_BECONS_INTERVALS,
CFG_BT_FRAG_DURATION,
CFG_ACTIVE_CHAINS,
CFG_CALIB_CTRL,
CFG_CAPABILITY_SUPPORTED_HT_RATES,
CFG_HT_MAC_PARAM_INFO,
CFG_MIMO_PS_MODE,
CFG_HT_DEFAULT_CAPABILIES_INFO,
CFG_LED_SC_RESOLUTION_FACTOR,
CFG_PTAM_ENERGY_CCK_DET_DEFAULT,
CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_DEFAULT,
CFG_PTAM_CORR40_4_TH_ADD_MIN_DEFAULT,
CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_DEFAULT,
CFG_PTAM_CORR32_4_TH_ADD_MIN_DEFAULT,
CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_DEFAULT,
CFG_PTAM_CORR32_1_TH_ADD_MIN_DEFAULT,
CFG_PTAM_ENERGY_CCK_DET_MIN_VAL,
CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_MIN_VAL,
CFG_PTAM_CORR40_4_TH_ADD_MIN_MIN_VAL,
CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_MIN_VAL,
CFG_PTAM_CORR32_4_TH_ADD_MIN_MIN_VAL,
CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_MIN_VAL,
CFG_PTAM_CORR32_1_TH_ADD_MIN_MIN_VAL,
CFG_PTAM_ENERGY_CCK_DET_MAX_VAL,
CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_MAX_VAL,
CFG_PTAM_CORR40_4_TH_ADD_MIN_MAX_VAL,
CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_MAX_VAL,
CFG_PTAM_CORR32_4_TH_ADD_MIN_MAX_VAL,
CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_MAX_VAL,
CFG_PTAM_CORR32_1_TH_ADD_MIN_MAX_VAL,
CFG_PTAM_ENERGY_CCK_DET_STEP_VAL,
CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_STEP_VAL,
CFG_PTAM_CORR40_4_TH_ADD_MIN_STEP_VAL,
CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_STEP_VAL,
CFG_PTAM_CORR32_4_TH_ADD_MIN_STEP_VAL,
CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_STEP_VAL,
CFG_PTAM_CORR32_1_TH_ADD_MIN_STEP_VAL,
CFG_PTAM_LINK_SENS_FA_OFDM_MAX,
CFG_PTAM_LINK_SENS_FA_OFDM_MIN,
CFG_PTAM_LINK_SENS_FA_CCK_MAX,
CFG_PTAM_LINK_SENS_FA_CCK_MIN,
CFG_PTAM_LINK_SENS_NRG_DIFF,
CFG_PTAM_LINK_SENS_NRG_MARGIN,
CFG_PTAM_LINK_SENS_MAX_NUMBER_OF_TIMES_IN_CCK_NO_FA,
CFG_PTAM_LINK_SENS_AUTO_CORR_MAX_TH_CCK,
CFG_AGG_MGG_TID_LOAD_ADDBA_THRESHOLD,
CFG_AGG_MGG_TID_LOAD_DELBA_THRESHOLD,
CFG_AGG_MGG_ADDBA_BUF_SIZE,
CFG_AGG_MGG_ADDBA_INACTIVE_TIMEOUT,
CFG_AGG_MGG_ADDBA_DEBUG_FLAGS,
CFG_SCAN_PERIODIC_RSSI_HIGH_THRESHOLD,
CFG_SCAN_PERIODIC_COEF_RSSI_HIGH,
CFG_11D_ENABLED,
CFG_11H_FEATURE_FLAGS,
/* <-- LAST --> */
CFG_TBL_FIX_LAST
};
/* variable size table */
enum {
CFG_NET_ADDR = 0,
CFG_LED_PATTERN_TABLE,
/* <-- LAST --> */
CFG_TBL_VAR_LAST
};
struct iwm_umac_cmd_set_param_fix {
__le16 tbl;
__le16 key;
__le32 value;
} __packed;
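/*
 * Hedged illustration, not part of the original header: roughly how a fixed
 * table write such as iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
 * CFG_RTS_THRESHOLD, val) (see the removed cfg80211.c above) would marshal
 * its payload before handing it to the UMAC.  The helper name is
 * hypothetical.
 */
static void example_fill_set_param_fix(struct iwm_umac_cmd_set_param_fix *prm,
				       u16 tbl, u16 key, u32 value)
{
	prm->tbl = cpu_to_le16(tbl);
	prm->key = cpu_to_le16(key);
	prm->value = cpu_to_le32(value);
}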
struct iwm_umac_cmd_set_param_var {
__le16 tbl;
__le16 key;
__le16 len;
__le16 reserved;
} __packed;
struct iwm_umac_cmd_get_param {
__le16 tbl;
__le16 key;
} __packed;
struct iwm_umac_cmd_get_param_resp {
__le16 tbl;
__le16 key;
__le16 len;
__le16 reserved;
} __packed;
struct iwm_umac_cmd_eeprom_proxy_hdr {
__le32 type;
__le32 offset;
__le32 len;
} __packed;
struct iwm_umac_cmd_eeprom_proxy {
struct iwm_umac_cmd_eeprom_proxy_hdr hdr;
u8 buf[0];
} __packed;
#define IWM_UMAC_CMD_EEPROM_TYPE_READ 0x1
#define IWM_UMAC_CMD_EEPROM_TYPE_WRITE 0x2
#define UMAC_CHANNEL_FLAG_VALID BIT(0)
#define UMAC_CHANNEL_FLAG_IBSS BIT(1)
#define UMAC_CHANNEL_FLAG_ACTIVE BIT(3)
#define UMAC_CHANNEL_FLAG_RADAR BIT(4)
#define UMAC_CHANNEL_FLAG_DFS BIT(7)
struct iwm_umac_channel_info {
u8 band;
u8 type;
u8 reserved;
u8 flags;
__le32 channels_mask;
} __packed;
struct iwm_umac_cmd_get_channel_list {
__le16 count;
__le16 reserved;
struct iwm_umac_channel_info ch[0];
} __packed;
/* UMAC WiFi interface commands */
/* Coexistence mode */
#define COEX_MODE_SA 0x1
#define COEX_MODE_XOR 0x2
#define COEX_MODE_CM 0x3
#define COEX_MODE_MAX 0x4
/* Wireless mode */
#define WIRELESS_MODE_11A 0x1
#define WIRELESS_MODE_11G 0x2
#define WIRELESS_MODE_11N 0x4
#define UMAC_PROFILE_EX_IE_REQUIRED 0x1
#define UMAC_PROFILE_QOS_ALLOWED 0x2
/* Scanning */
#define UMAC_WIFI_IF_PROBE_OPTION_MAX 10
#define UMAC_WIFI_IF_SCAN_TYPE_USER 0x0
#define UMAC_WIFI_IF_SCAN_TYPE_UMAC_RESERVED 0x1
#define UMAC_WIFI_IF_SCAN_TYPE_HOST_PERIODIC 0x2
#define UMAC_WIFI_IF_SCAN_TYPE_MAX 0x3
struct iwm_umac_ssid {
u8 ssid_len;
u8 ssid[IEEE80211_MAX_SSID_LEN];
u8 reserved[3];
} __packed;
struct iwm_umac_cmd_scan_request {
struct iwm_umac_wifi_if hdr;
__le32 type; /* UMAC_WIFI_IF_SCAN_TYPE_* */
u8 ssid_num;
u8 seq_num;
u8 timeout; /* In seconds */
u8 reserved;
struct iwm_umac_ssid ssids[UMAC_WIFI_IF_PROBE_OPTION_MAX];
} __packed;
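/*
 * Hedged illustration (assumed, not the driver's actual implementation):
 * copying cfg80211 scan SSIDs into the UMAC scan request, bounded by
 * UMAC_WIFI_IF_PROBE_OPTION_MAX, which is essentially what iwm_scan_ssids()
 * is expected to do with the request it builds.  The helper name is
 * hypothetical.
 */
static void example_fill_scan_ssids(struct iwm_umac_cmd_scan_request *req,
				    struct cfg80211_ssid *ssids, int n)
{
	int i;

	if (n > UMAC_WIFI_IF_PROBE_OPTION_MAX)
		n = UMAC_WIFI_IF_PROBE_OPTION_MAX;

	for (i = 0; i < n; i++) {
		req->ssids[i].ssid_len = ssids[i].ssid_len;
		memcpy(req->ssids[i].ssid, ssids[i].ssid,
		       ssids[i].ssid_len);
	}
	req->ssid_num = n;
}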
#define UMAC_CIPHER_TYPE_NONE 0xFF
#define UMAC_CIPHER_TYPE_USE_GROUPCAST 0x00
#define UMAC_CIPHER_TYPE_WEP_40 0x01
#define UMAC_CIPHER_TYPE_WEP_104 0x02
#define UMAC_CIPHER_TYPE_TKIP 0x04
#define UMAC_CIPHER_TYPE_CCMP 0x08
/* Supported authentication types - bitmap */
#define UMAC_AUTH_TYPE_OPEN 0x00
#define UMAC_AUTH_TYPE_LEGACY_PSK 0x01
#define UMAC_AUTH_TYPE_8021X 0x02
#define UMAC_AUTH_TYPE_RSNA_PSK 0x04
/* iwm_umac_security.flag is WPA supported -- bits[0:0] */
#define UMAC_SEC_FLG_WPA_ON_POS 0
#define UMAC_SEC_FLG_WPA_ON_SEED 1
#define UMAC_SEC_FLG_WPA_ON_MSK (UMAC_SEC_FLG_WPA_ON_SEED << \
UMAC_SEC_FLG_WPA_ON_POS)
/* iwm_umac_security.flag is WPA2 supported -- bits [1:1] */
#define UMAC_SEC_FLG_RSNA_ON_POS 1
#define UMAC_SEC_FLG_RSNA_ON_SEED 1
#define UMAC_SEC_FLG_RSNA_ON_MSK (UMAC_SEC_FLG_RSNA_ON_SEED << \
UMAC_SEC_FLG_RSNA_ON_POS)
/* iwm_umac_security.flag is WSC mode on -- bits [2:2] */
#define UMAC_SEC_FLG_WSC_ON_POS 2
#define UMAC_SEC_FLG_WSC_ON_SEED 1
#define UMAC_SEC_FLG_WSC_ON_MSK (UMAC_SEC_FLG_WSC_ON_SEED << \
UMAC_SEC_FLG_WSC_ON_POS)
/* Legacy profile can use only WEP40 and WEP104 for encryption and
* OPEN or PSK for authentication */
#define UMAC_SEC_FLG_LEGACY_PROFILE 0
struct iwm_umac_security {
u8 auth_type;
u8 ucast_cipher;
u8 mcast_cipher;
u8 flags;
} __packed;
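/*
 * Illustration only (not part of the original header): with the SEED/POS
 * encoding above, UMAC_SEC_FLG_WPA_ON_MSK is 0x1, UMAC_SEC_FLG_RSNA_ON_MSK
 * is 0x2 and UMAC_SEC_FLG_WSC_ON_MSK is 0x4.  A WPA2-PSK/CCMP profile, as
 * built by the removed iwm_set_wpa_version()/iwm_set_cipher()/
 * iwm_set_key_mgt() helpers in cfg80211.c, would end up roughly like this
 * (hypothetical helper name):
 */
static void example_fill_wpa2_psk_security(struct iwm_umac_security *sec)
{
	sec->flags = UMAC_SEC_FLG_RSNA_ON_MSK;	/* WPA2 (RSNA) enabled */
	sec->auth_type = UMAC_AUTH_TYPE_RSNA_PSK;
	sec->ucast_cipher = UMAC_CIPHER_TYPE_CCMP;
	sec->mcast_cipher = UMAC_CIPHER_TYPE_CCMP;
}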
struct iwm_umac_ibss {
u8 beacon_interval; /* in milliseconds */
u8 atim; /* in milliseconds */
s8 join_only;
u8 band;
u8 channel;
u8 reserved[3];
} __packed;
#define UMAC_MODE_BSS 0
#define UMAC_MODE_IBSS 1
#define UMAC_BSSID_MAX 4
struct iwm_umac_profile {
struct iwm_umac_wifi_if hdr;
__le32 mode;
struct iwm_umac_ssid ssid;
u8 bssid[UMAC_BSSID_MAX][ETH_ALEN];
struct iwm_umac_security sec;
struct iwm_umac_ibss ibss;
__le32 channel_2ghz;
__le32 channel_5ghz;
__le16 flags;
u8 wireless_mode;
u8 bss_num;
} __packed;
struct iwm_umac_invalidate_profile {
struct iwm_umac_wifi_if hdr;
u8 reason;
u8 reserved[3];
} __packed;
/* Encryption key commands */
struct iwm_umac_key_wep40 {
struct iwm_umac_wifi_if hdr;
struct iwm_umac_key_hdr key_hdr;
u8 key[WLAN_KEY_LEN_WEP40];
u8 static_key;
u8 reserved[2];
} __packed;
struct iwm_umac_key_wep104 {
struct iwm_umac_wifi_if hdr;
struct iwm_umac_key_hdr key_hdr;
u8 key[WLAN_KEY_LEN_WEP104];
u8 static_key;
u8 reserved[2];
} __packed;
#define IWM_TKIP_KEY_SIZE 16
#define IWM_TKIP_MIC_SIZE 8
struct iwm_umac_key_tkip {
struct iwm_umac_wifi_if hdr;
struct iwm_umac_key_hdr key_hdr;
u8 iv_count[6];
u8 reserved[2];
u8 tkip_key[IWM_TKIP_KEY_SIZE];
u8 mic_rx_key[IWM_TKIP_MIC_SIZE];
u8 mic_tx_key[IWM_TKIP_MIC_SIZE];
} __packed;
struct iwm_umac_key_ccmp {
struct iwm_umac_wifi_if hdr;
struct iwm_umac_key_hdr key_hdr;
u8 iv_count[6];
u8 reserved[2];
u8 key[WLAN_KEY_LEN_CCMP];
} __packed;
struct iwm_umac_key_remove {
struct iwm_umac_wifi_if hdr;
struct iwm_umac_key_hdr key_hdr;
} __packed;
struct iwm_umac_tx_key_id {
struct iwm_umac_wifi_if hdr;
u8 key_idx;
u8 reserved[3];
} __packed;
struct iwm_umac_pwr_trigger {
struct iwm_umac_wifi_if hdr;
__le32 reseved;
} __packed;
struct iwm_umac_cmd_stats_req {
__le32 flags;
} __packed;
struct iwm_umac_cmd_stop_resume_tx {
u8 flags;
u8 sta_id;
__le16 stop_resume_tid_msk;
__le16 last_seq_num[IWM_UMAC_TID_NR];
u16 reserved;
} __packed;
#define IWM_CMD_PMKID_ADD 1
#define IWM_CMD_PMKID_DEL 2
#define IWM_CMD_PMKID_FLUSH 3
struct iwm_umac_pmkid_update {
struct iwm_umac_wifi_if hdr;
__le32 command;
u8 bssid[ETH_ALEN];
__le16 reserved;
u8 pmkid[WLAN_PMKID_LEN];
} __packed;
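/*
 * Hedged sketch (hypothetical helper, not from the original file): how
 * iwm_send_pmkid_update() would plausibly map a struct cfg80211_pmksa and
 * one of the IWM_CMD_PMKID_* commands onto this wire format.
 */
static void example_fill_pmkid_update(struct iwm_umac_pmkid_update *update,
				      struct cfg80211_pmksa *pmksa,
				      u32 command)
{
	update->command = cpu_to_le32(command);
	if (pmksa->bssid)
		memcpy(update->bssid, pmksa->bssid, ETH_ALEN);
	if (pmksa->pmkid)
		memcpy(update->pmkid, pmksa->pmkid, WLAN_PMKID_LEN);
}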
/* LMAC commands */
int iwm_read_mac(struct iwm_priv *iwm, u8 *mac);
int iwm_send_prio_table(struct iwm_priv *iwm);
int iwm_send_init_calib_cfg(struct iwm_priv *iwm, u8 calib_requested);
int iwm_send_periodic_calib_cfg(struct iwm_priv *iwm, u8 calib_requested);
int iwm_send_calib_results(struct iwm_priv *iwm);
int iwm_store_rxiq_calib_result(struct iwm_priv *iwm);
int iwm_send_ct_kill_cfg(struct iwm_priv *iwm, u8 entry, u8 exit);
/* UMAC commands */
int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size,
bool resp);
int iwm_send_umac_reset(struct iwm_priv *iwm, __le32 reset_flags, bool resp);
int iwm_umac_set_config_fix(struct iwm_priv *iwm, u16 tbl, u16 key, u32 value);
int iwm_umac_set_config_var(struct iwm_priv *iwm, u16 key,
void *payload, u16 payload_size);
int iwm_send_umac_config(struct iwm_priv *iwm, __le32 reset_flags);
int iwm_send_mlme_profile(struct iwm_priv *iwm);
int __iwm_invalidate_mlme_profile(struct iwm_priv *iwm);
int iwm_invalidate_mlme_profile(struct iwm_priv *iwm);
int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id);
int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx);
int iwm_set_key(struct iwm_priv *iwm, bool remove, struct iwm_key *key);
int iwm_tx_power_trigger(struct iwm_priv *iwm);
int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags);
int iwm_send_umac_channel_list(struct iwm_priv *iwm);
int iwm_scan_ssids(struct iwm_priv *iwm, struct cfg80211_ssid *ssids,
int ssid_num);
int iwm_scan_one_ssid(struct iwm_priv *iwm, u8 *ssid, int ssid_len);
int iwm_send_umac_stop_resume_tx(struct iwm_priv *iwm,
struct iwm_umac_notif_stop_resume_tx *ntf);
int iwm_send_pmkid_update(struct iwm_priv *iwm,
struct cfg80211_pmksa *pmksa, u32 command);
/* UDMA commands */
int iwm_target_reset(struct iwm_priv *iwm);
#endif

Some files were not shown because too many files have changed in this diff.