Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (47 commits)
  ehea: Fix napi list corruption on ifconfig down
  igbvf: Allow VF driver to correctly recognize failure to set mac
  3c59x: Fix build failure with gcc 3.2
  sky2: Avoid transmits during sky2_down()
  iwlagn: do not send key clear commands when rfkill enabled
  libertas: Read buffer overflow
  drivers/net/wireless: introduce missing kfree
  drivers/net/wireless/iwlwifi: introduce missing kfree
  zd1211rw: fix unaligned access in zd_mac_rx
  cfg80211: fix regression on beacon world roaming feature
  cfg80211: add two missing NULL pointer checks
  ixgbe: Patch to modify 82598 PCIe completion timeout values
  bluetooth: rfcomm_init bug fix
  mlx4_en: Fix double pci unmapping.
  mISDN: Fix handling of receive buffer size in L1oIP
  pcnet32: VLB support fixes
  pcnet32: remove superfluous NULL pointer check in pcnet32_probe1()
  net: restore the original spinlock to protect unicast list
  netxen: fix coherent dma mask setting
  mISDN: Read buffer overflow
  ...
Linus Torvalds 2009-08-04 15:38:34 -07:00
commit ae83060026
55 changed files with 364 additions and 165 deletions


@ -449,8 +449,8 @@ printk(KERN_INFO "i = %u\n", i);
</para>
<programlisting>
__u32 ipaddress;
printk(KERN_INFO "my ip: %d.%d.%d.%d\n", NIPQUAD(ipaddress));
__be32 ipaddress;
printk(KERN_INFO "my ip: %pI4\n", &amp;ipaddress);
</programlisting>
<para>
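The documentation hunk above replaces the old NIPQUAD() example with the %pI4 printk extension, which takes a pointer to an address stored in network byte order. A minimal kernel-style sketch of the new form follows; the helper name is invented for illustration and is not part of the patch.

/* Sketch only: %pI4 dereferences a pointer to a __be32 (network byte
 * order), so no manual byte-order conversion is needed before printing. */
#include <linux/types.h>
#include <linux/kernel.h>

static void example_print_my_ip(__be32 ipaddress)
{
	printk(KERN_INFO "my ip: %pI4\n", &ipaddress);
}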


@ -1480,7 +1480,7 @@ l1oip_init(void)
return -ENOMEM;
l1oip_cnt = 0;
while (type[l1oip_cnt] && l1oip_cnt < MAX_CARDS) {
while (l1oip_cnt < MAX_CARDS && type[l1oip_cnt]) {
switch (type[l1oip_cnt] & 0xff) {
case 1:
pri = 0;


@ -832,7 +832,9 @@ static int corkscrew_open(struct net_device *dev)
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
vp->rx_ring[i].addr = isa_virt_to_bus(skb->data);
}
vp->rx_ring[i - 1].next = isa_virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */
if (i != 0)
vp->rx_ring[i - 1].next =
isa_virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */
outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr);
}
if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */


@ -2721,13 +2721,15 @@ dump_tx_ring(struct net_device *dev)
&vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
issue_and_wait(dev, DownStall);
for (i = 0; i < TX_RING_SIZE; i++) {
pr_err(" %d: @%p length %8.8x status %8.8x\n", i,
&vp->tx_ring[i],
unsigned int length;
#if DO_ZEROCOPY
le32_to_cpu(vp->tx_ring[i].frag[0].length),
length = le32_to_cpu(vp->tx_ring[i].frag[0].length);
#else
le32_to_cpu(vp->tx_ring[i].length),
length = le32_to_cpu(vp->tx_ring[i].length);
#endif
pr_err(" %d: @%p length %8.8x status %8.8x\n",
i, &vp->tx_ring[i], length,
le32_to_cpu(vp->tx_ring[i].status));
}
if (!stalled)


@ -1474,13 +1474,13 @@ static void eexp_hw_init586(struct net_device *dev)
outw(0x0000, ioaddr + 0x800c);
outw(0x0000, ioaddr + 0x800e);
for (i = 0; i < (sizeof(start_code)); i+=32) {
for (i = 0; i < ARRAY_SIZE(start_code) * 2; i+=32) {
int j;
outw(i, ioaddr + SM_PTR);
for (j = 0; j < 16; j+=2)
for (j = 0; j < 16 && (i+j)/2 < ARRAY_SIZE(start_code); j+=2)
outw(start_code[(i+j)/2],
ioaddr+0x4000+j);
for (j = 0; j < 16; j+=2)
for (j = 0; j < 16 && (i+j+16)/2 < ARRAY_SIZE(start_code); j+=2)
outw(start_code[(i+j+16)/2],
ioaddr+0x8000+j);
}


@ -40,7 +40,7 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
#define DRV_VERSION "EHEA_0101"
#define DRV_VERSION "EHEA_0102"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1


@ -1545,6 +1545,9 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
int ret, i;
if (pr->qp)
netif_napi_del(&pr->napi);
ret = ehea_destroy_qp(pr->qp);
if (!ret) {


@ -366,9 +366,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
return -EINVAL;
}
priv->rxic = mk_ic_value(
gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs),
cvals->rx_max_coalesced_frames);
priv->rxic = mk_ic_value(cvals->rx_max_coalesced_frames,
gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
/* Set up tx coalescing */
if ((cvals->tx_coalesce_usecs == 0) ||
@ -390,9 +389,8 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
return -EINVAL;
}
priv->txic = mk_ic_value(
gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs),
cvals->tx_max_coalesced_frames);
priv->txic = mk_ic_value(cvals->tx_max_coalesced_frames,
gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
gfar_write(&priv->regs->rxic, 0);
if (priv->rxcoalescing)


@ -274,6 +274,8 @@ static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set)
err = mbx->ops.read_posted(hw, msgbuf, 2);
msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
/* if nacked the vlan was rejected */
if (!err && (msgbuf[0] == (E1000_VF_SET_VLAN | E1000_VT_MSGTYPE_NACK)))
err = -E1000_ERR_MAC_INIT;
@ -317,6 +319,8 @@ static void e1000_rar_set_vf(struct e1000_hw *hw, u8 * addr, u32 index)
if (!ret_val)
ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
msgbuf[0] &= ~E1000_VT_MSGTYPE_CTS;
/* if nacked the address was rejected, use "perm_addr" */
if (!ret_val &&
(msgbuf[0] == (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_NACK)))


@ -96,6 +96,8 @@
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
#define IXGBE_MAX_RSC_INT_RATE 162760
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {


@ -49,6 +49,51 @@ static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw,
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
u8 *eeprom_data);
/**
* ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
* @hw: pointer to the HW structure
*
* The defaults for 82598 should be in the range of 50us to 50ms,
* however the hardware default for these parts is 500us to 1ms which is less
* than the 10ms recommended by the pci-e spec. To address this we need to
* increase the value to either 10ms to 250ms for capability version 1 config,
* or 16ms to 55ms for version 2.
**/
void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
{
struct ixgbe_adapter *adapter = hw->back;
u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
u16 pcie_devctl2;
/* only take action if timeout value is defaulted to 0 */
if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
goto out;
/*
* if capabilities version is type 1 we can write the
* timeout of 10ms to 250ms through the GCR register
*/
if (!(gcr & IXGBE_GCR_CAP_VER2)) {
gcr |= IXGBE_GCR_CMPL_TMOUT_10ms;
goto out;
}
/*
* for version 2 capabilities we need to write the config space
* directly in order to set the completion timeout value for
* 16ms to 55ms
*/
pci_read_config_word(adapter->pdev,
IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2);
pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
pci_write_config_word(adapter->pdev,
IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
out:
/* disable completion timeout resend */
gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND;
IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
}
/**
* ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
* @hw: pointer to hardware structure
@ -152,6 +197,26 @@ s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
return ret_val;
}
/**
* ixgbe_start_hw_82598 - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
*
* Starts the hardware using the generic start_hw function.
* Then set pcie completion timeout
**/
s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{
s32 ret_val = 0;
ret_val = ixgbe_start_hw_generic(hw);
/* set the completion timeout for interface */
if (ret_val == 0)
ixgbe_set_pcie_completion_timeout(hw);
return ret_val;
}
/**
* ixgbe_get_link_capabilities_82598 - Determines link capabilities
* @hw: pointer to hardware structure
@ -1085,7 +1150,7 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
static struct ixgbe_mac_operations mac_ops_82598 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82598,
.start_hw = &ixgbe_start_hw_generic,
.start_hw = &ixgbe_start_hw_82598,
.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
.get_media_type = &ixgbe_get_media_type_82598,
.get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598,


@ -1975,7 +1975,10 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
* any other value means disable eitr, which is best
* served by setting the interrupt rate very high
*/
adapter->eitr_param = IXGBE_MAX_INT_RATE;
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
adapter->eitr_param = IXGBE_MAX_RSC_INT_RATE;
else
adapter->eitr_param = IXGBE_MAX_INT_RATE;
adapter->itr_setting = 0;
}
@ -1999,13 +2002,13 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
ethtool_op_set_flags(netdev, data);
if (!(adapter->flags & IXGBE_FLAG2_RSC_CAPABLE))
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
return 0;
/* if state changes we need to update adapter->flags and reset */
if ((!!(data & ETH_FLAG_LRO)) !=
(!!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED))) {
adapter->flags ^= IXGBE_FLAG2_RSC_ENABLED;
(!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
else


@ -780,7 +780,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
prefetch(next_rxd);
cleaned_count++;
if (adapter->flags & IXGBE_FLAG2_RSC_CAPABLE)
if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
rsc_count = ixgbe_get_rsc_count(rx_desc);
if (rsc_count) {
@ -2036,7 +2036,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
}
} else {
if (!(adapter->flags & IXGBE_FLAG2_RSC_ENABLED) &&
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
(netdev->mtu <= ETH_DATA_LEN))
rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
else
@ -2165,7 +2165,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}
if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED) {
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
/* Enable 82599 HW-RSC */
for (i = 0; i < adapter->num_rx_queues; i++) {
j = adapter->rx_ring[i].reg_idx;
@ -3812,8 +3812,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
} else if (hw->mac.type == ixgbe_mac_82599EB) {
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
adapter->flags |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags |= IXGBE_FLAG2_RSC_ENABLED;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->ring_feature[RING_F_FDIR].indices =
IXGBE_MAX_FDIR_INDICES;
@ -5360,12 +5360,19 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev)
static void ixgbe_netpoll(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int i;
disable_irq(adapter->pdev->irq);
adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
ixgbe_intr(adapter->pdev->irq, netdev);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
for (i = 0; i < num_q_vectors; i++) {
struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
ixgbe_msix_clean_many(0, q_vector);
}
} else {
ixgbe_intr(adapter->pdev->irq, netdev);
}
adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
enable_irq(adapter->pdev->irq);
}
#endif
@ -5611,7 +5618,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
if (adapter->flags & IXGBE_FLAG2_RSC_ENABLED)
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
netdev->features |= NETIF_F_LRO;
/* make sure the EEPROM is good */


@ -718,6 +718,12 @@
#define IXGBE_ECC_STATUS_82599 0x110E0
#define IXGBE_BAR_CTRL_82599 0x110F4
/* PCI Express Control */
#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000
#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000
#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
#define IXGBE_GCR_CAP_VER2 0x00040000
/* Time Sync Registers */
#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
@ -1521,6 +1527,7 @@
/* PCI Bus Info */
#define IXGBE_PCI_LINK_STATUS 0xB2
#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
#define IXGBE_PCI_LINK_WIDTH 0x3F0
#define IXGBE_PCI_LINK_WIDTH_1 0x10
#define IXGBE_PCI_LINK_WIDTH_2 0x20
@ -1531,6 +1538,7 @@
#define IXGBE_PCI_LINK_SPEED_5000 0x2
#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
/* Number of 100 microseconds we wait for PCI Express master disable */
#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800


@ -249,6 +249,7 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
pci_unmap_page(mdev->pdev,
(dma_addr_t) be64_to_cpu(data->addr),
frag->size, PCI_DMA_TODEVICE);
++data;
}
}
/* Stamp the freed descriptor */


@ -221,7 +221,7 @@ netxen_napi_disable(struct netxen_adapter *adapter)
}
}
static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id)
static int nx_set_dma_mask(struct netxen_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
uint64_t mask, cmask;
@ -229,19 +229,17 @@ static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id)
adapter->pci_using_dac = 0;
mask = DMA_BIT_MASK(32);
/*
* Consistent DMA mask is set to 32 bit because it cannot be set to
* 35 bits. For P3 also leave it at 32 bits for now. Only the rings
* come off this pool.
*/
cmask = DMA_BIT_MASK(32);
if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
#ifndef CONFIG_IA64
if (revision_id >= NX_P3_B0)
mask = DMA_BIT_MASK(39);
else if (revision_id == NX_P2_C1)
mask = DMA_BIT_MASK(35);
#endif
} else {
mask = DMA_BIT_MASK(39);
cmask = mask;
}
if (pci_set_dma_mask(pdev, mask) == 0 &&
pci_set_consistent_dma_mask(pdev, cmask) == 0) {
adapter->pci_using_dac = 1;
@ -256,7 +254,7 @@ static int
nx_update_dma_mask(struct netxen_adapter *adapter)
{
int change, shift, err;
uint64_t mask, old_mask;
uint64_t mask, old_mask, old_cmask;
struct pci_dev *pdev = adapter->pdev;
change = 0;
@ -272,14 +270,29 @@ nx_update_dma_mask(struct netxen_adapter *adapter)
if (change) {
old_mask = pdev->dma_mask;
old_cmask = pdev->dev.coherent_dma_mask;
mask = (1ULL<<(32+shift)) - 1;
err = pci_set_dma_mask(pdev, mask);
if (err)
return pci_set_dma_mask(pdev, old_mask);
goto err_out;
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
err = pci_set_consistent_dma_mask(pdev, mask);
if (err)
goto err_out;
}
dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
}
return 0;
err_out:
pci_set_dma_mask(pdev, old_mask);
pci_set_consistent_dma_mask(pdev, old_cmask);
return err;
}
static void netxen_check_options(struct netxen_adapter *adapter)
@ -1006,7 +1019,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
revision_id = pdev->revision;
adapter->ahw.revision_id = revision_id;
err = nx_set_dma_mask(adapter, revision_id);
err = nx_set_dma_mask(adapter);
if (err)
goto err_out_free_netdev;


@ -1611,8 +1611,11 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
if (pcnet32_dwio_read_csr(ioaddr, 0) == 4
&& pcnet32_dwio_check(ioaddr)) {
a = &pcnet32_dwio;
} else
} else {
if (pcnet32_debug & NETIF_MSG_PROBE)
printk(KERN_ERR PFX "No access methods\n");
goto err_release_region;
}
}
chip_version =
@ -1719,7 +1722,9 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
ret = -ENOMEM;
goto err_release_region;
}
SET_NETDEV_DEV(dev, &pdev->dev);
if (pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
if (pcnet32_debug & NETIF_MSG_PROBE)
printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr);
@ -1818,7 +1823,6 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
spin_lock_init(&lp->lock);
SET_NETDEV_DEV(dev, &pdev->dev);
lp->name = chipname;
lp->shared_irq = shared;
lp->tx_ring_size = TX_RING_SIZE; /* default tx ring size */
@ -1852,12 +1856,6 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
lp->options |= PCNET32_PORT_FD;
if (!a) {
if (pcnet32_debug & NETIF_MSG_PROBE)
printk(KERN_ERR PFX "No access methods\n");
ret = -ENODEV;
goto err_free_consistent;
}
lp->a = *a;
/* prior to register_netdev, dev->name is not yet correct */
@ -1973,14 +1971,13 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
return 0;
err_free_ring:
err_free_ring:
pcnet32_free_ring(dev);
err_free_consistent:
pci_free_consistent(lp->pci_dev, sizeof(*lp->init_block),
lp->init_block, lp->init_dma_addr);
err_free_netdev:
err_free_netdev:
free_netdev(dev);
err_release_region:
err_release_region:
release_region(ioaddr, PCNET32_TOTAL_SIZE);
return ret;
}
@ -2089,6 +2086,7 @@ static void pcnet32_free_ring(struct net_device *dev)
static int pcnet32_open(struct net_device *dev)
{
struct pcnet32_private *lp = netdev_priv(dev);
struct pci_dev *pdev = lp->pci_dev;
unsigned long ioaddr = dev->base_addr;
u16 val;
int i;
@ -2149,9 +2147,9 @@ static int pcnet32_open(struct net_device *dev)
lp->a.write_csr(ioaddr, 124, val);
/* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
if (lp->pci_dev->subsystem_vendor == PCI_VENDOR_ID_AT &&
(lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
lp->pci_dev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
(pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2700FX ||
pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
if (lp->options & PCNET32_PORT_ASEL) {
lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
if (netif_msg_link(lp))


@ -1384,7 +1384,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
/* create a fragment for each channel */
bits = B;
while (nfree > 0 && len > 0) {
while (len > 0) {
list = list->next;
if (list == &ppp->channels) {
i = 0;
@ -1431,29 +1431,31 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
*otherwise divide it according to the speed
*of the channel we are going to transmit on
*/
if (pch->speed == 0) {
flen = totlen/nfree ;
if (nbigger > 0) {
flen++;
nbigger--;
}
} else {
flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) /
((totspeed*totfree)/pch->speed)) - hdrlen;
if (nbigger > 0) {
flen += ((totfree - nzero)*pch->speed)/totspeed;
nbigger -= ((totfree - nzero)*pch->speed)/
if (nfree > 0) {
if (pch->speed == 0) {
flen = totlen/nfree ;
if (nbigger > 0) {
flen++;
nbigger--;
}
} else {
flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) /
((totspeed*totfree)/pch->speed)) - hdrlen;
if (nbigger > 0) {
flen += ((totfree - nzero)*pch->speed)/totspeed;
nbigger -= ((totfree - nzero)*pch->speed)/
totspeed;
}
}
nfree--;
}
nfree--;
/*
*check if we are on the last channel or
*we exceeded the length of the data to
*fragment
*/
if ((nfree == 0) || (flen > len))
if ((nfree <= 0) || (flen > len))
flen = len;
/*
*it is not worth transmitting on slow channels:
@ -1467,7 +1469,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
continue;
}
mtu = pch->chan->mtu + 2 - hdrlen;
mtu = pch->chan->mtu - hdrlen;
if (mtu < 4)
mtu = 4;
if (flen > mtu)


@ -1063,6 +1063,7 @@ static void *pppoe_seq_next(struct seq_file *seq, void *v, loff_t *pos)
else {
int hash = hash_item(po->pppoe_pa.sid, po->pppoe_pa.remote);
po = NULL;
while (++hash < PPPOE_HASH_SIZE) {
po = pn->hash_table[hash];
if (po)


@ -2680,6 +2680,7 @@ static int __init pppol2tp_init(void)
static void __exit pppol2tp_exit(void)
{
unregister_pppox_proto(PX_PROTO_OL2TP);
unregister_pernet_gen_device(pppol2tp_net_id, &pppol2tp_net_ops);
proto_unregister(&pppol2tp_sk_proto);
}


@ -793,7 +793,7 @@ static inline int s6gmac_phy_start(struct net_device *dev)
struct s6gmac *pd = netdev_priv(dev);
int i = 0;
struct phy_device *p = NULL;
while ((!(p = pd->mii.bus->phy_map[i])) && (i < PHY_MAX_ADDR))
while ((i < PHY_MAX_ADDR) && (!(p = pd->mii.bus->phy_map[i])))
i++;
p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link, 0,
PHY_INTERFACE_MODE_RGMII);


@ -1488,6 +1488,8 @@ static int sky2_up(struct net_device *dev)
sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
#endif
sky2->restarting = 0;
err = sky2_rx_start(sky2);
if (err)
goto err_out;
@ -1500,6 +1502,9 @@ static int sky2_up(struct net_device *dev)
sky2_set_multicast(dev);
/* wake queue in case we are restarting */
netif_wake_queue(dev);
if (netif_msg_ifup(sky2))
printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
return 0;
@ -1533,6 +1538,8 @@ static inline int tx_dist(unsigned tail, unsigned head)
/* Number of list elements available for next tx */
static inline int tx_avail(const struct sky2_port *sky2)
{
if (unlikely(sky2->restarting))
return 0;
return sky2->tx_pending - tx_dist(sky2->tx_cons, sky2->tx_prod);
}
@ -1818,6 +1825,10 @@ static int sky2_down(struct net_device *dev)
if (netif_msg_ifdown(sky2))
printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
/* explicitly shut off tx in case we're restarting */
sky2->restarting = 1;
netif_tx_disable(dev);
/* Force flow control off */
sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
@ -2359,7 +2370,7 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
{
struct sky2_port *sky2 = netdev_priv(dev);
if (netif_running(dev)) {
if (likely(netif_running(dev) && !sky2->restarting)) {
netif_tx_lock(dev);
sky2_tx_complete(sky2, last);
netif_tx_unlock(dev);
@ -4283,6 +4294,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
spin_lock_init(&sky2->phy_lock);
sky2->tx_pending = TX_DEF_PENDING;
sky2->rx_pending = RX_DEF_PENDING;
sky2->restarting = 0;
hw->dev[port] = dev;


@ -2051,6 +2051,7 @@ struct sky2_port {
u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */
u8 rx_csum;
u8 wol;
u8 restarting;
enum flow_control flow_mode;
enum flow_control flow_status;


@ -5059,7 +5059,7 @@ mii_get_phy(struct net_device *dev)
if ((id == 0) || (id == 65535)) continue; /* Valid ID? */
for (j=0; j<limit; j++) { /* Search PHY table */
if (id != phy_info[j].id) continue; /* ID match? */
for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++);
for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
if (k < DE4X5_MAX_PHY) {
memcpy((char *)&lp->phy[k],
(char *)&phy_info[j], sizeof(struct phy_table));
@ -5072,7 +5072,7 @@ mii_get_phy(struct net_device *dev)
break;
}
if ((j == limit) && (i < DE4X5_MAX_MII)) {
for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++);
for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
lp->phy[k].addr = i;
lp->phy[k].id = id;
lp->phy[k].spd.reg = GENERIC_REG; /* ANLPA register */
@ -5091,7 +5091,7 @@ mii_get_phy(struct net_device *dev)
purgatory:
lp->active = 0;
if (lp->phy[0].id) { /* Reset the PHY devices */
for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++) { /*For each PHY*/
for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) { /*For each PHY*/
mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
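The de4x5 loops above, like the l1oip, s6gmac, airo and libertas hunks elsewhere in this merge, are reordered so the index bound is tested before the array element is read. A small self-contained C sketch of the pattern (all names invented) shows why the order matters: && short-circuits, so the slot one past the end is never dereferenced.

#include <stdio.h>

#define MAX_ENTRIES 4

int main(void)
{
	int table[MAX_ENTRIES] = { 3, 7, 0, 0 };	/* zero-terminated */
	int k;

	/*
	 * Wrong order: if the table has no terminating zero, table[k] is
	 * read at k == MAX_ENTRIES before the bound is ever checked:
	 *     for (k = 0; table[k] && k < MAX_ENTRIES; k++)
	 */
	for (k = 0; k < MAX_ENTRIES && table[k]; k++)
		printf("entry %d = %d\n", k, table[k]);

	return 0;
}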


@ -5918,20 +5918,19 @@ static int airo_set_essid(struct net_device *dev,
readSsidRid(local, &SSID_rid);
/* Check if we asked for `any' */
if(dwrq->flags == 0) {
if (dwrq->flags == 0) {
/* Just send an empty SSID list */
memset(&SSID_rid, 0, sizeof(SSID_rid));
} else {
int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
unsigned index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
/* Check the size of the string */
if(dwrq->length > IW_ESSID_MAX_SIZE) {
if (dwrq->length > IW_ESSID_MAX_SIZE)
return -E2BIG ;
}
/* Check if index is valid */
if((index < 0) || (index >= 4)) {
if (index >= ARRAY_SIZE(SSID_rid.ssids))
return -EINVAL;
}
/* Set the SSID */
memset(SSID_rid.ssids[index].ssid, 0,
@ -6819,7 +6818,7 @@ static int airo_set_txpow(struct net_device *dev,
return -EINVAL;
}
clear_bit (FLAG_RADIO_OFF, &local->flags);
for (i = 0; cap_rid.txPowerLevels[i] && (i < 8); i++)
for (i = 0; i < 8 && cap_rid.txPowerLevels[i]; i++)
if (v == cap_rid.txPowerLevels[i]) {
readConfigRid(local, 1);
local->config.txPower = v;


@ -460,7 +460,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
integer = swab32(eep->modalHeader.antCtrlCommon);
eep->modalHeader.antCtrlCommon = integer;
for (i = 0; i < AR5416_MAX_CHAINS; i++) {
for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) {
integer = swab32(eep->modalHeader.antCtrlChain[i]);
eep->modalHeader.antCtrlChain[i] = integer;
}
@ -914,7 +914,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
ctlMode, numCtlModes, isHt40CtlMode,
(pCtlMode[ctlMode] & EXT_ADDITIVE));
for (i = 0; (i < AR5416_NUM_CTLS) &&
for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) &&
pEepData->ctlIndex[i]; i++) {
DPRINTF(ah->ah_sc, ATH_DBG_EEPROM,
" LOOP-Ctlidx %d: cfgCtl 0x%2.2x "


@ -112,7 +112,7 @@ enum iwl3945_antenna {
#define IWL_TX_FIFO_NONE 7
/* Minimum number of queues. MAX_NUM is defined in hw specific files */
#define IWL_MIN_NUM_QUEUES 4
#define IWL39_MIN_NUM_QUEUES 4
#define IEEE80211_DATA_LEN 2304
#define IEEE80211_4ADDR_LEN 30


@ -1332,6 +1332,9 @@ int iwl_setup_mac(struct iwl_priv *priv)
hw->wiphy->custom_regulatory = true;
/* Firmware does not support this */
hw->wiphy->disable_beacon_hints = true;
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
/* we create the 802.11 header and a zero-length SSID element */
hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;


@ -308,18 +308,18 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
return -ENODATA;
}
ptr = priv->eeprom;
if (!ptr) {
IWL_ERR(priv, "Invalid EEPROM/OTP memory\n");
return -ENOMEM;
}
/* 4 characters for byte 0xYY */
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf) {
IWL_ERR(priv, "Can not allocate Buffer\n");
return -ENOMEM;
}
ptr = priv->eeprom;
if (!ptr) {
IWL_ERR(priv, "Invalid EEPROM/OTP memory\n");
return -ENOMEM;
}
pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s\n",
(priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
? "OTP" : "EEPROM");


@ -258,8 +258,10 @@ struct iwl_channel_info {
#define IWL_TX_FIFO_HCCA_2 6
#define IWL_TX_FIFO_NONE 7
/* Minimum number of queues. MAX_NUM is defined in hw specific files */
#define IWL_MIN_NUM_QUEUES 4
/* Minimum number of queues. MAX_NUM is defined in hw specific files.
* Set the minimum to accommodate the 4 standard TX queues, 1 command
* queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
#define IWL_MIN_NUM_QUEUES 10
/* Power management (not Tx power) structures */


@ -566,6 +566,8 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
unsigned long flags;
spin_lock_irqsave(&priv->sta_lock, flags);
IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
keyconf->keyidx);
if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table))
IWL_ERR(priv, "index %d not used in uCode key table.\n",
@ -573,6 +575,11 @@ int iwl_remove_default_wep_key(struct iwl_priv *priv,
priv->default_wep_key--;
memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0]));
if (iwl_is_rfkill(priv)) {
IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n");
spin_unlock_irqrestore(&priv->sta_lock, flags);
return 0;
}
ret = iwl_send_static_wepkey_cmd(priv, 1);
IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
keyconf->keyidx, ret);
@ -853,6 +860,11 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
if (iwl_is_rfkill(priv)) {
IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled. \n");
spin_unlock_irqrestore(&priv->sta_lock, flags);
return 0;
}
ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
spin_unlock_irqrestore(&priv->sta_lock, flags);
return ret;


@ -720,8 +720,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
goto drop_unlock;
}
spin_unlock_irqrestore(&priv->lock, flags);
hdr_len = ieee80211_hdrlen(fc);
/* Find (or create) index into station table for destination station */
@ -729,7 +727,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
if (sta_id == IWL_INVALID_STATION) {
IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
hdr->addr1);
goto drop;
goto drop_unlock;
}
IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
@ -750,14 +748,17 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
swq_id = iwl_virtual_agg_queue_num(swq_id, txq_id);
}
priv->stations[sta_id].tid[tid].tfds_in_queue++;
}
txq = &priv->txq[txq_id];
q = &txq->q;
txq->swq_id = swq_id;
spin_lock_irqsave(&priv->lock, flags);
if (unlikely(iwl_queue_space(q) < q->high_mark))
goto drop_unlock;
if (ieee80211_is_data_qos(fc))
priv->stations[sta_id].tid[tid].tfds_in_queue++;
/* Set up driver data for this TFD */
memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
@ -902,7 +903,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
drop_unlock:
spin_unlock_irqrestore(&priv->lock, flags);
drop:
return -1;
}
EXPORT_SYMBOL(iwl_tx_skb);
@ -1171,6 +1171,8 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
IWL_ERR(priv, "Start AGG on invalid station\n");
return -ENXIO;
}
if (unlikely(tid >= MAX_TID_COUNT))
return -EINVAL;
if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");


@ -3968,6 +3968,9 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
hw->wiphy->custom_regulatory = true;
/* Firmware does not support this */
hw->wiphy->disable_beacon_hints = true;
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
/* we create the 802.11 header and a zero-length SSID element */
hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
@ -4018,10 +4021,10 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
SET_IEEE80211_DEV(hw, &pdev->dev);
if ((iwl3945_mod_params.num_of_queues > IWL39_MAX_NUM_QUEUES) ||
(iwl3945_mod_params.num_of_queues < IWL_MIN_NUM_QUEUES)) {
(iwl3945_mod_params.num_of_queues < IWL39_MIN_NUM_QUEUES)) {
IWL_ERR(priv,
"invalid queues_num, should be between %d and %d\n",
IWL_MIN_NUM_QUEUES, IWL39_MAX_NUM_QUEUES);
IWL39_MIN_NUM_QUEUES, IWL39_MAX_NUM_QUEUES);
err = -EINVAL;
goto out_ieee80211_free_hw;
}


@ -220,6 +220,7 @@ int iwm_store_rxiq_calib_result(struct iwm_priv *iwm)
eeprom_rxiq = iwm_eeprom_access(iwm, IWM_EEPROM_CALIB_RXIQ);
if (IS_ERR(eeprom_rxiq)) {
IWM_ERR(iwm, "Couldn't access EEPROM RX IQ entry\n");
kfree(rxiq);
return PTR_ERR(eeprom_rxiq);
}


@ -106,10 +106,8 @@ void *iwm_if_alloc(int sizeof_bus, struct device *dev,
int ret = 0;
wdev = iwm_wdev_alloc(sizeof_bus, dev);
if (!wdev) {
dev_err(dev, "no memory for wireless device instance\n");
return ERR_PTR(-ENOMEM);
}
if (IS_ERR(wdev))
return wdev;
iwm = wdev_to_iwm(wdev);
iwm->bus_ops = if_ops;


@ -47,7 +47,7 @@ static u8 lbs_region_2_code(u8 *region)
{
u8 i;
for (i = 0; region[i] && i < COUNTRY_CODE_LEN; i++)
for (i = 0; i < COUNTRY_CODE_LEN && region[i]; i++)
region[i] = toupper(region[i]);
for (i = 0; i < ARRAY_SIZE(region_code_mapping); i++) {


@ -1,6 +1,7 @@
/* Copyright (C) 2006, Red Hat, Inc. */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/if_arp.h>
@ -43,21 +44,21 @@ static int get_common_rates(struct lbs_private *priv,
u16 *rates_size)
{
u8 *card_rates = lbs_bg_rates;
size_t num_card_rates = sizeof(lbs_bg_rates);
int ret = 0, i, j;
u8 tmp[30];
u8 tmp[(ARRAY_SIZE(lbs_bg_rates) - 1) * (*rates_size - 1)];
size_t tmp_size = 0;
/* For each rate in card_rates that exists in rate1, copy to tmp */
for (i = 0; card_rates[i] && (i < num_card_rates); i++) {
for (j = 0; rates[j] && (j < *rates_size); j++) {
for (i = 0; i < ARRAY_SIZE(lbs_bg_rates) && card_rates[i]; i++) {
for (j = 0; j < *rates_size && rates[j]; j++) {
if (rates[j] == card_rates[i])
tmp[tmp_size++] = card_rates[i];
}
}
lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size);
lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates, num_card_rates);
lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates,
ARRAY_SIZE(lbs_bg_rates));
lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size);
lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate);
@ -69,10 +70,7 @@ static int get_common_rates(struct lbs_private *priv,
lbs_pr_alert("Previously set fixed data rate %#x isn't "
"compatible with the network.\n", priv->cur_rate);
ret = -1;
goto done;
}
ret = 0;
done:
memset(rates, 0, *rates_size);
*rates_size = min_t(int, tmp_size, *rates_size);
@ -322,7 +320,7 @@ static int lbs_associate(struct lbs_private *priv,
rates = (struct mrvl_ie_rates_param_set *) pos;
rates->header.type = cpu_to_le16(TLV_TYPE_RATES);
memcpy(&rates->rates, &bss->rates, MAX_RATES);
tmplen = MAX_RATES;
tmplen = min_t(u16, ARRAY_SIZE(rates->rates), MAX_RATES);
if (get_common_rates(priv, rates->rates, &tmplen)) {
ret = -1;
goto done;
@ -598,7 +596,7 @@ static int lbs_adhoc_join(struct lbs_private *priv,
/* Copy Data rates from the rates recorded in scan response */
memset(cmd.bss.rates, 0, sizeof(cmd.bss.rates));
ratesize = min_t(u16, sizeof(cmd.bss.rates), MAX_RATES);
ratesize = min_t(u16, ARRAY_SIZE(cmd.bss.rates), MAX_RATES);
memcpy(cmd.bss.rates, bss->rates, ratesize);
if (get_common_rates(priv, cmd.bss.rates, &ratesize)) {
lbs_deb_join("ADHOC_JOIN: get_common_rates returned error.\n");


@ -5,6 +5,7 @@
* for sending scan commands to the firmware.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <asm/unaligned.h>
@ -876,7 +877,7 @@ static inline char *lbs_translate_scan(struct lbs_private *priv,
iwe.u.bitrate.disabled = 0;
iwe.u.bitrate.value = 0;
for (j = 0; bss->rates[j] && (j < sizeof(bss->rates)); j++) {
for (j = 0; j < ARRAY_SIZE(bss->rates) && bss->rates[j]; j++) {
/* Bit rate given in 500 kb/s units */
iwe.u.bitrate.value = bss->rates[j] * 500000;
current_val = iwe_stream_add_value(info, start, current_val,


@ -698,7 +698,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
&& !mac->pass_ctrl)
return 0;
fc = *(__le16 *)buffer;
fc = get_unaligned((__le16*)buffer);
need_padding = ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc);
skb = dev_alloc_skb(length + (need_padding ? 2 : 0));
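The zd1211rw fix above reads the 16-bit frame-control field through get_unaligned() because the receive buffer is not guaranteed to be 2-byte aligned. A self-contained userspace sketch of the same idea (names invented): copy the bytes out rather than dereferencing a possibly misaligned pointer, which is roughly what get_unaligned() does on strict-alignment CPUs.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t read_u16_unaligned(const uint8_t *p)
{
	uint16_t v;

	memcpy(&v, p, sizeof(v));	/* safe for any alignment */
	return v;
}

int main(void)
{
	uint8_t frame[] = { 0x00, 0x08, 0x02, 0x01, 0x03 };

	/* frame + 1 is not 2-byte aligned; *(uint16_t *)(frame + 1)
	 * could fault on architectures that trap unaligned loads. */
	printf("fc = 0x%04x\n", (unsigned int)read_u16_unaligned(frame + 1));
	return 0;
}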


@ -990,7 +990,7 @@ int iscsi_offload_mesg(struct Scsi_Host *shost,
struct iscsi_uevent *ev;
int len = NLMSG_SPACE(sizeof(*ev) + data_size);
skb = alloc_skb(len, GFP_NOIO);
skb = alloc_skb(len, GFP_ATOMIC);
if (!skb) {
printk(KERN_ERR "can not deliver iscsi offload message:OOM\n");
return -ENOMEM;
@ -1012,7 +1012,7 @@ int iscsi_offload_mesg(struct Scsi_Host *shost,
memcpy((char *)ev + sizeof(*ev), data, data_size);
return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_NOIO);
return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
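The change above switches the allocations in iscsi_offload_mesg() from GFP_NOIO to GFP_ATOMIC, presumably because the function can be reached from a context that is not allowed to sleep. A kernel-style sketch of that general rule (the function name is invented, not the driver's actual code):

/* GFP_NOIO and GFP_KERNEL may sleep; code running under a spinlock or
 * in softirq/interrupt context must allocate with GFP_ATOMIC instead. */
#include <linux/skbuff.h>

static struct sk_buff *example_alloc_event(unsigned int len)
{
	/* may be called from a context that cannot sleep */
	return alloc_skb(len, GFP_ATOMIC);
}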


@ -82,7 +82,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
#define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
#define IN_DEV_RPFILTER(in_dev) IN_DEV_ANDCONF((in_dev), RP_FILTER)
#define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER)
#define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \
ACCEPT_SOURCE_ROUTE)
#define IN_DEV_BOOTP_RELAY(in_dev) IN_DEV_ANDCONF((in_dev), BOOTP_RELAY)


@ -355,7 +355,17 @@ struct rfcomm_dev_list_req {
};
int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
#ifdef CONFIG_BT_RFCOMM_TTY
int rfcomm_init_ttys(void);
void rfcomm_cleanup_ttys(void);
#else
static inline int rfcomm_init_ttys(void)
{
return 0;
}
static inline void rfcomm_cleanup_ttys(void)
{
}
#endif
#endif /* __RFCOMM_H */


@ -979,6 +979,10 @@ struct cfg80211_ops {
* channels at a later time. This can be used for devices which do not
* have calibration information guaranteed for frequencies or settings
* outside of their regulatory domain.
* @disable_beacon_hints: enable this if your driver needs to ensure that
* passive scan flags and beaconing flags may not be lifted by cfg80211
* due to regulatory beacon hints. For more information on beacon
* hints read the documentation for regulatory_hint_found_beacon()
* @reg_notifier: the driver's regulatory notification callback
* @regd: the driver's regulatory domain, if one was requested via
* the regulatory_hint() API. This can be used by the driver
@ -1004,6 +1008,7 @@ struct wiphy {
bool custom_regulatory;
bool strict_regulatory;
bool disable_beacon_hints;
enum cfg80211_signal_type signal_type;
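The new wiphy->disable_beacon_hints flag documented above is set by drivers whose firmware manages the passive-scan and no-IBSS flags itself, as the iwlagn and iwl3945 hunks earlier in this merge do. A kernel-style sketch of the opt-in (the function name is invented):

#include <net/cfg80211.h>

static void example_setup_wiphy(struct wiphy *wiphy)
{
	/* firmware does not support lifting passive-scan/no-IBSS flags */
	wiphy->disable_beacon_hints = true;
}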


@ -2080,28 +2080,41 @@ static CLASS_ATTR(rfcomm_dlc, S_IRUGO, rfcomm_dlc_sysfs_show, NULL);
/* ---- Initialization ---- */
static int __init rfcomm_init(void)
{
int ret;
l2cap_load();
hci_register_cb(&rfcomm_cb);
rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd");
if (IS_ERR(rfcomm_thread)) {
hci_unregister_cb(&rfcomm_cb);
return PTR_ERR(rfcomm_thread);
ret = PTR_ERR(rfcomm_thread);
goto out_thread;
}
if (class_create_file(bt_class, &class_attr_rfcomm_dlc) < 0)
BT_ERR("Failed to create RFCOMM info file");
rfcomm_init_sockets();
ret = rfcomm_init_ttys();
if (ret)
goto out_tty;
#ifdef CONFIG_BT_RFCOMM_TTY
rfcomm_init_ttys();
#endif
ret = rfcomm_init_sockets();
if (ret)
goto out_sock;
BT_INFO("RFCOMM ver %s", VERSION);
return 0;
out_sock:
rfcomm_cleanup_ttys();
out_tty:
kthread_stop(rfcomm_thread);
out_thread:
hci_unregister_cb(&rfcomm_cb);
return ret;
}
static void __exit rfcomm_exit(void)
@ -2112,9 +2125,7 @@ static void __exit rfcomm_exit(void)
kthread_stop(rfcomm_thread);
#ifdef CONFIG_BT_RFCOMM_TTY
rfcomm_cleanup_ttys();
#endif
rfcomm_cleanup_sockets();
}


@ -1132,7 +1132,7 @@ int __init rfcomm_init_sockets(void)
return err;
}
void __exit rfcomm_cleanup_sockets(void)
void rfcomm_cleanup_sockets(void)
{
class_remove_file(bt_class, &class_attr_rfcomm);


@ -3865,10 +3865,12 @@ int dev_unicast_delete(struct net_device *dev, void *addr)
ASSERT_RTNL();
netif_addr_lock_bh(dev);
err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
NETDEV_HW_ADDR_T_UNICAST);
if (!err)
__dev_set_rx_mode(dev);
netif_addr_unlock_bh(dev);
return err;
}
EXPORT_SYMBOL(dev_unicast_delete);
@ -3889,10 +3891,12 @@ int dev_unicast_add(struct net_device *dev, void *addr)
ASSERT_RTNL();
netif_addr_lock_bh(dev);
err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
NETDEV_HW_ADDR_T_UNICAST);
if (!err)
__dev_set_rx_mode(dev);
netif_addr_unlock_bh(dev);
return err;
}
EXPORT_SYMBOL(dev_unicast_add);
@ -3949,7 +3953,8 @@ void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
* @from: source device
*
* Add newly added addresses to the destination device and release
* addresses that have no users left.
* addresses that have no users left. The source device must be
* locked by netif_tx_lock_bh.
*
* This function is intended to be called from the dev->set_rx_mode
* function of layered software devices.
@ -3958,14 +3963,14 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from)
{
int err = 0;
ASSERT_RTNL();
if (to->addr_len != from->addr_len)
return -EINVAL;
netif_addr_lock_bh(to);
err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
if (!err)
__dev_set_rx_mode(to);
netif_addr_unlock_bh(to);
return err;
}
EXPORT_SYMBOL(dev_unicast_sync);
@ -3981,28 +3986,30 @@ EXPORT_SYMBOL(dev_unicast_sync);
*/
void dev_unicast_unsync(struct net_device *to, struct net_device *from)
{
ASSERT_RTNL();
if (to->addr_len != from->addr_len)
return;
netif_addr_lock_bh(from);
netif_addr_lock(to);
__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
__dev_set_rx_mode(to);
netif_addr_unlock(to);
netif_addr_unlock_bh(from);
}
EXPORT_SYMBOL(dev_unicast_unsync);
static void dev_unicast_flush(struct net_device *dev)
{
/* rtnl_mutex must be held here */
netif_addr_lock_bh(dev);
__hw_addr_flush(&dev->uc);
netif_addr_unlock_bh(dev);
}
static void dev_unicast_init(struct net_device *dev)
{
/* rtnl_mutex must be held here */
netif_addr_lock_bh(dev);
__hw_addr_init(&dev->uc);
netif_addr_unlock_bh(dev);
}


@ -488,7 +488,7 @@ int net_assign_generic(struct net *net, int id, void *data)
*/
ng->len = id;
memcpy(&ng->ptr, &old_ng->ptr, old_ng->len);
memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
rcu_assign_pointer(net->gen, ng);
call_rcu(&old_ng->rcu, net_generic_release);
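The one-line fix above corrects a byte count: old_ng->len counts pointer slots, while memcpy() takes a length in bytes. A tiny self-contained C sketch of the same pitfall (names invented):

#include <stdio.h>
#include <string.h>

int main(void)
{
	void *old_slots[3] = { (void *)0x1, (void *)0x2, (void *)0x3 };
	void *new_slots[3] = { 0 };

	/* Buggy form copied only 3 bytes: memcpy(new_slots, old_slots, 3); */
	memcpy(new_slots, old_slots, 3 * sizeof(void *));

	printf("slot 2 = %p\n", new_slots[2]);
	return 0;
}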


@ -1304,7 +1304,9 @@ static void arp_format_neigh_entry(struct seq_file *seq,
hbuffer[k++] = hex_asc_lo(n->ha[j]);
hbuffer[k++] = ':';
}
hbuffer[--k] = 0;
if (k != 0)
--k;
hbuffer[k] = 0;
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
}
#endif


@ -721,7 +721,7 @@ void ieee80211_dynamic_ps_timer(unsigned long data)
{
struct ieee80211_local *local = (void *) data;
if (local->quiescing)
if (local->quiescing || local->suspended)
return;
queue_work(local->hw.workqueue, &local->dynamic_ps_enable_work);


@ -55,15 +55,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
rcu_read_unlock();
/* flush again, in case driver queued work */
flush_workqueue(local->hw.workqueue);
/* stop hardware - this must stop RX */
if (local->open_count) {
ieee80211_led_radio(local, false);
drv_stop(local);
}
/* remove STAs */
spin_lock_irqsave(&local->sta_lock, flags);
list_for_each_entry(sta, &local->sta_list, list) {
@ -111,7 +102,22 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
drv_remove_interface(local, &conf);
}
/* stop hardware - this must stop RX */
if (local->open_count) {
ieee80211_led_radio(local, false);
drv_stop(local);
}
/*
* flush again, in case the driver queued work -- it shouldn't be
* doing that (or should cancel everything in the stop callback),
* but better safe than sorry.
*/
flush_workqueue(local->hw.workqueue);
local->suspended = true;
/* need suspended to be visible before quiescing is false */
barrier();
local->quiescing = false;
return 0;


@ -2453,6 +2453,18 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
return;
}
/*
* If we're suspending, it is possible although not too likely
* that we'd be receiving frames after having already partially
* quiesced the stack. We can't process such frames then since
* that might, for example, cause stations to be added or other
* driver callbacks to be invoked.
*/
if (unlikely(local->quiescing || local->suspended)) {
kfree_skb(skb);
return;
}
if (status->flag & RX_FLAG_HT) {
/* rate_idx is MCS index */
if (WARN_ON(status->rate_idx < 0 ||


@ -151,7 +151,7 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
addr6 = addr;
mask6 = mask;
map6 = kzalloc(sizeof(*map6), GFP_ATOMIC);
if (map4 == NULL)
if (map6 == NULL)
goto cfg_unlbl_map_add_failure;
map6->type = NETLBL_NLTYPE_UNLABELED;
ipv6_addr_copy(&map6->list.addr, addr6);


@ -1089,17 +1089,18 @@ static void handle_reg_beacon(struct wiphy *wiphy,
chan->beacon_found = true;
if (wiphy->disable_beacon_hints)
return;
chan_before.center_freq = chan->center_freq;
chan_before.flags = chan->flags;
if ((chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
!(chan->orig_flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) {
chan->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
channel_changed = true;
}
if ((chan->flags & IEEE80211_CHAN_NO_IBSS) &&
!(chan->orig_flags & IEEE80211_CHAN_NO_IBSS)) {
if (chan->flags & IEEE80211_CHAN_NO_IBSS) {
chan->flags &= ~IEEE80211_CHAN_NO_IBSS;
channel_changed = true;
}


@ -30,7 +30,8 @@ int set_regdom(const struct ieee80211_regdomain *rd);
* non-radar 5 GHz channels.
*
* Drivers do not need to call this, cfg80211 will do it for you after a scan
* on a newly found BSS.
* on a newly found BSS. If you cannot make use of this feature you can
* set the wiphy->disable_beacon_hints to true.
*/
int regulatory_hint_found_beacon(struct wiphy *wiphy,
struct ieee80211_channel *beacon_chan,


@ -118,7 +118,7 @@ static int cmp_ies(u8 num, u8 *ies1, size_t len1, u8 *ies2, size_t len2)
if (!ie1 && !ie2)
return 0;
if (!ie1)
if (!ie1 || !ie2)
return -1;
r = memcmp(ie1 + 2, ie2 + 2, min(ie1[1], ie2[1]));
@ -171,6 +171,8 @@ static bool is_mesh(struct cfg80211_bss *a,
ie = find_ie(WLAN_EID_MESH_CONFIG,
a->information_elements,
a->len_information_elements);
if (!ie)
return false;
if (ie[1] != IEEE80211_MESH_CONFIG_LEN)
return false;