/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <asm/uaccess.h>

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"

enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MIN_TXQ_ENTRIES      = 32,
	MIN_CTRL_TXQ_ENTRIES = 32,
	MIN_RSPQ_ENTRIES     = 128,
	MIN_FL_ENTRIES       = 16
};

/* Host shadow copy of ingress filter entry.  This is in host native format
 * and doesn't match the ordering or bit order, etc. of the hardware or the
 * firmware command.  The use of bit-field structure elements is purely to
 * remind ourselves of the field size limitations and save memory in the case
 * where the filter table is large.
 */
struct filter_entry {
	/* Administrative fields for filter.
	 */
	u32 valid:1;            /* filter allocated and valid */
	u32 locked:1;           /* filter is administratively locked */

	u32 pending:1;          /* filter action is pending firmware reply */
	u32 smtidx:8;           /* Source MAC Table index for smac */
	struct l2t_entry *l2t;  /* Layer Two Table entry for dmac */

	/* The filter itself.  Most of this is a straight copy of information
	 * provided by the extended ioctl().  Some fields are translated to
	 * internal forms -- for instance the Ingress Queue ID passed in from
	 * the ioctl() is translated into the Absolute Ingress Queue ID.
	 */
	struct ch_filter_specification fs;
};

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static struct pci_device_id cxgb4_pci_tbl[] = {
#define CH_PCI_DEVICE_ID_FUNCTION 0x4

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
		{PCI_VDEVICE(CHELSIO, (devid)), 4}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);

/*
 * Normally we're willing to become the firmware's Master PF but will be happy
 * if another PF has already become the Master and initialized the adapter.
 * Setting "force_init" will cause this driver to forcibly establish itself as
 * the Master PF and initialize the adapter.
 */
static uint force_init;

module_param(force_init, uint, 0644);
MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");

/*
 * Normally if the firmware we connect to has Configuration File support, we
 * use that and only fall back to the old Driver-based initialization if the
 * Configuration File fails for some reason.  If force_old_init is set, then
 * we'll always use the old Driver-based initialization sequence.
 */
static uint force_old_init;

module_param(force_old_init, uint, 0644);
MODULE_PARM_DESC(force_old_init, "Force old initialization sequence, deprecated"
		 " parameter");

static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
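
/* Example (hypothetical invocation): loading the module with
 * "modprobe cxgb4 msi=1" restricts the driver to MSI/INTx and skips MSI-X,
 * while msi=0 forces legacy INTx.
 */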

/*
 * Queue interrupt hold-off timer values.  Queues default to the first of these
 * upon creation.
 */
static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };

module_param_array(intr_holdoff, uint, NULL, 0644);
MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
		 "0..4 in microseconds, deprecated parameter");

static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };

module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
		 "thresholds 1..3 for queue interrupt packet counters, "
		 "deprecated parameter");

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
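
/* Worked example of the alignment argument above: with rx_dma_offset = 2 the
 * 14-byte Ethernet header occupies buffer bytes 2..15, so the IP header
 * starts at byte 16 and its 4-byte fields land on 4-byte boundaries.
 */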

static bool vf_acls;

#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement, "
		 "deprecated parameter");

/* Configure the number of PCI-E Virtual Functions which are to be instantiated
 * on SR-IOV Capable Physical Functions.
 */
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];

module_param_array(num_vf, uint, NULL, 0644);
MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
#endif

/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue.  Select between the kernel provided function (select_queue=0) or user
 * cxgb_select_queue function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

static unsigned int tp_vlan_pri_map = HW_TPL_FR_MT_PR_IV_P_FC;

module_param(tp_vlan_pri_map, uint, 0644);
MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration, "
		 "deprecated parameter");

static struct dentry *cxgb4_debugfs_root;

static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
/* Adapter list to be accessed from atomic context */
static LIST_HEAD(adap_rcu_list);
static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };

static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s = "10Mbps";
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 10000:
			s = "10Gbps";
			break;
		case 1000:
			s = "1000Mbps";
			break;
		case 100:
			s = "100Mbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
					    &name, &value);

		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = value;
	}
}
#endif /* CONFIG_CHELSIO_T4_DCB */

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			cxgb4_dcb_state_init(dev);
			dcb_tx_queue_prio_enable(dev, false);
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}

void t4_os_portmod_changed(const struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	const struct net_device *dev = adap->port[port_id];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
}

/*
 * Configure the exact and hash address filters to handle a port's multicast
 * and secondary unicast MAC addresses.
 */
static int set_addr_filters(const struct net_device *dev, bool sleep)
{
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = true;
	u16 filt_idx[7];
	const u8 *addr[7];
	int ret, naddr = 0;
	const struct netdev_hw_addr *ha;
	int uc_cnt = netdev_uc_count(dev);
	int mc_cnt = netdev_mc_count(dev);
	const struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/* first do the secondary unicast addresses */
	netdev_for_each_uc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &uhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	/* next set up the multicast addresses */
	netdev_for_each_mc_addr(ha, dev) {
		addr[naddr++] = ha->addr;
		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
					naddr, addr, filt_idx, &mhash, sleep);
			if (ret < 0)
				return ret;

			free = false;
			naddr = 0;
		}
	}

	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
				uhash | mhash, sleep);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
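
/* A note on the units implied by the comment above: the threshold appears to
 * be expressed in units of 64 doorbell FIFO entries, since the default of 10
 * is described as a 640-entry threshold (10 * 64 = 640).
 */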

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = set_addr_filters(dev, sleep_ok);
	if (ret == 0)
		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
				    (dev->flags & IFF_PROMISC) ? 1 : 0,
				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
				    sleep_ok);
	return ret;
}

/**
 *	link_start - enable a port
 *	@dev: the port to enable
 *
 *	Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->fn;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		local_bh_disable();
		ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
					  true, CXGB4_DCB_ENABLED);
		local_bh_enable();
	}

	return ret;
}

int cxgb4_dcb_enabled(const struct net_device *dev)
{
#ifdef CONFIG_CHELSIO_T4_DCB
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
#else
	return 0;
#endif
}
EXPORT_SYMBOL(cxgb4_dcb_enabled);

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[port];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */

/* Clear a filter and release any of its resources that we own.  This also
 * clears the filter's "pending" status.
 */
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	/* If the new or old filter has loopback rewriting rules then we'll
	 * need to free any existing Layer Two Table (L2T) entries of the old
	 * filter rule.  The firmware will handle freeing up any Source MAC
	 * Table (SMT) entries used for rewriting Source MAC Addresses in
	 * loopback rules.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}

/* Handle a filter write/deletion reply.
 */
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int idx = GET_TID(rpl);
	unsigned int nidx = idx - adap->tids.ftid_base;
	unsigned int ret;
	struct filter_entry *f;

	if (idx >= adap->tids.ftid_base && nidx <
	   (adap->tids.nftids + adap->tids.nsftids)) {
		idx = nidx;
		ret = TCB_COOKIE_G(rpl->cookie);
		f = &adap->tids.ftid_tab[idx];

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
			dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
				idx);
			clear_filter(adap, f);
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
			f->pending = 0;  /* asynchronous setup completed */
			f->valid = 1;
		} else {
			/* Something went wrong.  Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
		}
	}
}

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
	   ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_ofld_txq *oq;

			oq = container_of(txq, struct sge_ofld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_GET_PORT_INFO) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev = q->adap->port[port];
			int state_input = ((pcmd->u.info.dcbxdis_pkd &
					    FW_PORT_CMD_DCBXDIS_F)
					   ? CXGB4_DCB_INPUT_FW_DISABLED
					   : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			if (p->type == 0)
				t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}

/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done by
 *	the ULD, we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG.
	 */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
		rxq->stats.nomem++;
		return -1;
	}
	if (gl == NULL)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}

static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}

	/* offload queues */
	for_each_ofldrxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
			 adap->port[0]->name, i);

	for_each_rdmarxq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
			 adap->port[0]->name, i);

	for_each_rdmaciq(&adap->sge, i)
		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
			 adap->port[0]->name, i);
}

static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
	int msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_ofldrxq(s, ofldqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ofldrxq[ofldqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmarxq(s, rdmaqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmarxq[rdmaqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	for_each_rdmaciq(s, rdmaciqqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->rdmaciq[rdmaciqqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--rdmaciqqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmaciq[rdmaciqqidx].rspq);
	while (--rdmaqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->rdmarxq[rdmaqidx].rspq);
	while (--ofldqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ofldrxq[ofldqidx].rspq);
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
	for_each_ofldrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
	for_each_rdmarxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
	for_each_rdmaciq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}

/**
 *	write_rss - write the RSS table for a given port
 *	@pi: the port
 *	@queues: array of queue indices for RSS
 *
 *	Sets up the portion of the HW RSS table for the port's VI to distribute
 *	packets to the Rx queues in @queues.
 */
static int write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];

	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = q[*queues].rspq.abs_id;

	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	kfree(rss);
	return err;
}

/**
 *	setup_rss - configure RSS
 *	@adap: the adapter
 *
 *	Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		err = write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler) {
			napi_disable(&q->napi);
			local_bh_disable();
			while (!cxgb_poll_lock_napi(q))
				mdelay(1);
			local_bh_enable();
		}
	}
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler) {
			cxgb_busy_poll_init_lock(q);
			napi_enable(&q->napi);
		}
		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     SEINTARM_V(q->intr_params) |
			     INGRESSQID_V(q->cntxt_id));
	}
}

static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
			   unsigned int nq, unsigned int per_chan, int msi_idx,
			   u16 *ids)
{
	int i, err;

	for (i = 0; i < nq; i++, q++) {
		if (msi_idx > 0)
			msi_idx++;
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[i / per_chan],
				       msi_idx, q->fl.size ? &q->fl : NULL,
				       uldrx_handler);
		if (err)
			return err;
		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
}

/**
 *	setup_sge_queues - configure SGE Tx/Rx/response queues
 *	@adap: the adapter
 *
 *	Determines how many sets of SGE queues to use and initializes them.
 *	We support multiple queue sets per port if we have MSI-X, otherwise
 *	just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, msi_idx, i, j;
	struct sge *s = &adap->sge;

	bitmap_zero(s->starving_fl, MAX_EGRQ);
	bitmap_zero(s->txq_maperr, MAX_EGRQ);

	if (adap->flags & USING_MSIX)
		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL);
		if (err)
			return err;
		msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       msi_idx, NULL, fwevtq_handler);
	if (err) {
freeout:	t4_free_sge_resources(adap);
		return err;
	}

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (msi_idx > 0)
				msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       msi_idx, &q->fl,
					       t4_ethrx_handler);
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
	for_each_ofldrxq(s, i) {
		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
					    adap->port[i / j],
					    s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

#define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids) do { \
	err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids); \
	if (err) \
		goto freeout; \
	if (msi_idx > 0) \
		msi_idx += nq; \
} while (0)

	ALLOC_OFLD_RXQS(s->ofldrxq, s->ofldqsets, j, s->ofld_rxq);
	ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq);
	j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */
	ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq);

#undef ALLOC_OFLD_RXQS

	for_each_port(adap, i) {
		/*
		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id,
					    s->rdmarxq[i].rspq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
}

/*
 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
 * The allocated memory is cleared.
 */
void *t4_alloc_mem(size_t size)
{
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);
	return p;
}

/*
 * Free memory allocated through alloc_mem().
 */
void t4_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}

/* Send a Work Request to write the filter at a specified index.  We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
static int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int ftid;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter->l2t);
		if (f->l2t == NULL)
			return -EAGAIN;
		if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
					f->fs.eport, f->fs.dmac)) {
			cxgb4_l2t_release(f->l2t);
			f->l2t = NULL;
			return -ENOMEM;
		}
	}

	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
	memset(fwr, 0, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code.  We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
	fwr->tid_to_iq =
		htonl(FW_FILTER_WR_TID_V(ftid) |
		      FW_FILTER_WR_RQTYPE_V(f->fs.type) |
		      FW_FILTER_WR_NOREPLY_V(0) |
		      FW_FILTER_WR_IQ_V(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
		      FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
		      FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
		      FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
		      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
		      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
		      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
		      FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
		      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
		      FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
		      FW_FILTER_WR_PRIO_V(f->fs.prio) |
		      FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
		 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
		 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
		 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
	fwr->smac_sel = 0;
	fwr->rx_chan_rx_rpl_iq =
		htons(FW_FILTER_WR_RX_CHAN_V(0) |
		      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
		      FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
		      FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
		      FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
		      FW_FILTER_WR_PORT_V(f->fs.val.iport) |
		      FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
		      FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
		      FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);
	if (f->fs.newsmac)
		memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}

/* Delete the filter at a specified index.
 */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
	struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
	struct sk_buff *skb;
	struct fw_filter_wr *fwr;
	unsigned int len, ftid;

	len = sizeof(*fwr);
	ftid = adapter->tids.ftid_base + fidx;

	skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL);
	fwr = (struct fw_filter_wr *)__skb_put(skb, len);
	t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(adapter, skb);
	return 0;
}

static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
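	/* Worked example (hypothetical frame): a VLAN TCI of 0xa005 carries
	 * Priority Code Point 5 (0xa005 & VLAN_PRIO_MASK is 0xa000; shifting
	 * right by VLAN_PRIO_SHIFT gives 5), so such a packet would be
	 * steered to TX queue 5 on a DCB-enabled link.
	 */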
	if (cxgb4_dcb_enabled(dev)) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return fallback(dev, skb) % dev->real_num_tx_queues;
}

static inline int is_offload(const struct adapter *adap)
{
	return adap->params.offload;
}

/*
 * Implementation of ethtool operations.
 */

static u32 get_msglevel(struct net_device *dev)
{
	return netdev2adap(dev)->msg_enable;
}

static void set_msglevel(struct net_device *dev, u32 val)
{
	netdev2adap(dev)->msg_enable = val;
}

static char stats_strings[][ETH_GSTRING_LEN] = {
	"TxOctetsOK         ",
	"TxFramesOK         ",
	"TxBroadcastFrames  ",
	"TxMulticastFrames  ",
	"TxUnicastFrames    ",
	"TxErrorFrames      ",

	"TxFrames64         ",
	"TxFrames65To127    ",
	"TxFrames128To255   ",
	"TxFrames256To511   ",
	"TxFrames512To1023  ",
	"TxFrames1024To1518 ",
	"TxFrames1519ToMax  ",

	"TxFramesDropped    ",
	"TxPauseFrames      ",
	"TxPPP0Frames       ",
	"TxPPP1Frames       ",
	"TxPPP2Frames       ",
	"TxPPP3Frames       ",
	"TxPPP4Frames       ",
	"TxPPP5Frames       ",
	"TxPPP6Frames       ",
	"TxPPP7Frames       ",

	"RxOctetsOK         ",
	"RxFramesOK         ",
	"RxBroadcastFrames  ",
	"RxMulticastFrames  ",
	"RxUnicastFrames    ",

	"RxFramesTooLong    ",
	"RxJabberErrors     ",
	"RxFCSErrors        ",
	"RxLengthErrors     ",
	"RxSymbolErrors     ",
	"RxRuntFrames       ",

	"RxFrames64         ",
	"RxFrames65To127    ",
	"RxFrames128To255   ",
	"RxFrames256To511   ",
	"RxFrames512To1023  ",
	"RxFrames1024To1518 ",
	"RxFrames1519ToMax  ",

	"RxPauseFrames      ",
	"RxPPP0Frames       ",
	"RxPPP1Frames       ",
	"RxPPP2Frames       ",
	"RxPPP3Frames       ",
	"RxPPP4Frames       ",
	"RxPPP5Frames       ",
	"RxPPP6Frames       ",
	"RxPPP7Frames       ",

	"RxBG0FramesDropped ",
	"RxBG1FramesDropped ",
	"RxBG2FramesDropped ",
	"RxBG3FramesDropped ",
	"RxBG0FramesTrunc   ",
	"RxBG1FramesTrunc   ",
	"RxBG2FramesTrunc   ",
	"RxBG3FramesTrunc   ",

	"TSO                ",
	"TxCsumOffload      ",
	"RxCsumGood         ",
	"VLANextractions    ",
	"VLANinsertions     ",
	"GROpackets         ",
	"GROmerged          ",
	"WriteCoalSuccess   ",
	"WriteCoalFail      ",
};

static int get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(stats_strings);
	default:
		return -EOPNOTSUPP;
	}
}

#define T4_REGMAP_SIZE (160 * 1024)
#define T5_REGMAP_SIZE (332 * 1024)

static int get_regs_len(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	if (is_t4(adap->params.chip))
		return T4_REGMAP_SIZE;
	else
		return T5_REGMAP_SIZE;
}

static int get_eeprom_len(struct net_device *dev)
{
	return EEPROMSIZE;
}
|
|
|
|
|
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
|
|
|
|
{
|
|
|
|
struct adapter *adapter = netdev2adap(dev);
|
2015-02-09 13:37:30 +07:00
|
|
|
u32 exprom_vers;
|
2010-04-01 22:28:26 +07:00
|
|
|
|
2011-11-09 16:58:07 +07:00
|
|
|
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
|
|
|
|
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
|
|
|
|
strlcpy(info->bus_info, pci_name(adapter->pdev),
|
|
|
|
sizeof(info->bus_info));
|
2010-04-01 22:28:26 +07:00
|
|
|
|
2011-11-21 17:54:05 +07:00
|
|
|
if (adapter->params.fw_vers)
|
2010-04-01 22:28:26 +07:00
|
|
|
snprintf(info->fw_version, sizeof(info->fw_version),
|
|
|
|
"%u.%u.%u.%u, TP %u.%u.%u.%u",
|
2014-11-21 14:22:05 +07:00
|
|
|
FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
|
|
|
|
FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
|
|
|
|
FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
|
|
|
|
FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers),
|
|
|
|
FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
|
|
|
|
FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
|
|
|
|
FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
|
|
|
|
FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
|
2015-02-09 13:37:30 +07:00
|
|
|
|
|
|
|
if (!t4_get_exprom_version(adapter, &exprom_vers))
|
|
|
|
snprintf(info->erom_version, sizeof(info->erom_version),
|
|
|
|
"%u.%u.%u.%u",
|
|
|
|
FW_HDR_FW_VER_MAJOR_G(exprom_vers),
|
|
|
|
FW_HDR_FW_VER_MINOR_G(exprom_vers),
|
|
|
|
FW_HDR_FW_VER_MICRO_G(exprom_vers),
|
|
|
|
FW_HDR_FW_VER_BUILD_G(exprom_vers));
|
2010-04-01 22:28:26 +07:00
|
|
|
}
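
/* Usage note (added): this backs "ethtool -i ethX", which reports the driver
 * name, driver version, firmware/TP microcode versions, expansion-ROM
 * version and PCI bus address filled in above.
 */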

static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}

/*
 * port stats maintained per queue of the port. They should be in the same
 * order as in stats_strings above.
 */
struct queue_port_stats {
	u64 tso;
	u64 tx_csum;
	u64 rx_csum;
	u64 vlan_ex;
	u64 vlan_ins;
	u64 gro_pkts;
	u64 gro_merged;
};

static void collect_sge_port_stats(const struct adapter *adap,
		const struct port_info *p, struct queue_port_stats *s)
{
	int i;
	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];

	memset(s, 0, sizeof(*s));
	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
		s->tso += tx->tso;
		s->tx_csum += tx->tx_cso;
		s->rx_csum += rx->stats.rx_cso;
		s->vlan_ex += rx->stats.vlan_ex;
		s->vlan_ins += tx->vlan_ins;
		s->gro_pkts += rx->stats.lro_pkts;
		s->gro_merged += rx->stats.lro_merged;
	}
}

static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	u32 val1, val2;

	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);

	data += sizeof(struct port_stats) / sizeof(u64);
	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
	data += sizeof(struct queue_port_stats) / sizeof(u64);
	if (!is_t4(adapter->params.chip)) {
		t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
		val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
		val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
		*data = val1 - val2;
		data++;
		*data = val2;
		data++;
	} else {
		memset(data, 0, 2 * sizeof(u64));
		*data += 2;
	}
}
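
/* Usage note (added): "ethtool -S ethX" dumps these counters.  The output
 * layout is the hardware struct port_stats, then the per-queue
 * struct queue_port_stats collected above, then the two write-coalescing
 * counters (zero on T4), which matches the order of stats_strings.
 */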

/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 * - bits 16..23: register dump version
 */
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
	return CHELSIO_CHIP_VERSION(ap->params.chip) |
		(CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
}
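
/* Worked example (added, with made-up field values for illustration): if
 * CHELSIO_CHIP_VERSION() returned 5 and CHELSIO_CHIP_RELEASE() returned 1,
 * the reported regs version would be 5 | (1 << 10) | (1 << 16) = 0x10405:
 * chip version in bits 0..9, revision in bits 10..15 and register dump
 * format version 1 in bits 16..23.
 */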

static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
			   unsigned int end)
{
	u32 *p = buf + start;

	for ( ; start <= end; start += sizeof(u32))
		*p++ = t4_read_reg(ap, start);
}
|
|
|
|
|
|
|
|
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
|
|
|
|
void *buf)
|
|
|
|
{
|
2013-03-14 12:08:50 +07:00
|
|
|
static const unsigned int t4_reg_ranges[] = {
|
2010-04-01 22:28:26 +07:00
|
|
|
0x1008, 0x1108,
|
|
|
|
0x1180, 0x11b4,
|
|
|
|
0x11fc, 0x123c,
|
|
|
|
0x1300, 0x173c,
|
|
|
|
0x1800, 0x18fc,
|
|
|
|
0x3000, 0x30d8,
|
|
|
|
0x30e0, 0x5924,
|
|
|
|
0x5960, 0x59d4,
|
|
|
|
0x5a00, 0x5af8,
|
|
|
|
0x6000, 0x6098,
|
|
|
|
0x6100, 0x6150,
|
|
|
|
0x6200, 0x6208,
|
|
|
|
0x6240, 0x6248,
|
|
|
|
0x6280, 0x6338,
|
|
|
|
0x6370, 0x638c,
|
|
|
|
0x6400, 0x643c,
|
|
|
|
0x6500, 0x6524,
|
|
|
|
0x6a00, 0x6a38,
|
|
|
|
0x6a60, 0x6a78,
|
|
|
|
0x6b00, 0x6b84,
|
|
|
|
0x6bf0, 0x6c84,
|
|
|
|
0x6cf0, 0x6d84,
|
|
|
|
0x6df0, 0x6e84,
|
|
|
|
0x6ef0, 0x6f84,
|
|
|
|
0x6ff0, 0x7084,
|
|
|
|
0x70f0, 0x7184,
|
|
|
|
0x71f0, 0x7284,
|
|
|
|
0x72f0, 0x7384,
|
|
|
|
0x73f0, 0x7450,
|
|
|
|
0x7500, 0x7530,
|
|
|
|
0x7600, 0x761c,
|
|
|
|
0x7680, 0x76cc,
|
|
|
|
0x7700, 0x7798,
|
|
|
|
0x77c0, 0x77fc,
|
|
|
|
0x7900, 0x79fc,
|
|
|
|
0x7b00, 0x7c38,
|
|
|
|
0x7d00, 0x7efc,
|
|
|
|
0x8dc0, 0x8e1c,
|
|
|
|
0x8e30, 0x8e78,
|
|
|
|
0x8ea0, 0x8f6c,
|
|
|
|
0x8fc0, 0x9074,
|
|
|
|
0x90fc, 0x90fc,
|
|
|
|
0x9400, 0x9458,
|
|
|
|
0x9600, 0x96bc,
|
|
|
|
0x9800, 0x9808,
|
|
|
|
0x9820, 0x983c,
|
|
|
|
0x9850, 0x9864,
|
|
|
|
0x9c00, 0x9c6c,
|
|
|
|
0x9c80, 0x9cec,
|
|
|
|
0x9d00, 0x9d6c,
|
|
|
|
0x9d80, 0x9dec,
|
|
|
|
0x9e00, 0x9e6c,
|
|
|
|
0x9e80, 0x9eec,
|
|
|
|
0x9f00, 0x9f6c,
|
|
|
|
0x9f80, 0x9fec,
|
|
|
|
0xd004, 0xd03c,
|
|
|
|
0xdfc0, 0xdfe0,
|
|
|
|
0xe000, 0xea7c,
|
2014-09-01 21:24:59 +07:00
|
|
|
0xf000, 0x11110,
|
|
|
|
0x11118, 0x11190,
|
2010-07-12 07:33:48 +07:00
|
|
|
0x19040, 0x1906c,
|
|
|
|
0x19078, 0x19080,
|
|
|
|
0x1908c, 0x19124,
|
2010-04-01 22:28:26 +07:00
|
|
|
0x19150, 0x191b0,
|
|
|
|
0x191d0, 0x191e8,
|
|
|
|
0x19238, 0x1924c,
|
|
|
|
0x193f8, 0x19474,
|
|
|
|
0x19490, 0x194f8,
|
|
|
|
0x19800, 0x19f30,
|
|
|
|
0x1a000, 0x1a06c,
|
|
|
|
0x1a0b0, 0x1a120,
|
|
|
|
0x1a128, 0x1a138,
|
|
|
|
0x1a190, 0x1a1c4,
|
|
|
|
0x1a1fc, 0x1a1fc,
|
|
|
|
0x1e040, 0x1e04c,
|
2010-07-12 07:33:48 +07:00
|
|
|
0x1e284, 0x1e28c,
|
2010-04-01 22:28:26 +07:00
|
|
|
0x1e2c0, 0x1e2c0,
|
|
|
|
0x1e2e0, 0x1e2e0,
|
|
|
|
0x1e300, 0x1e384,
|
|
|
|
0x1e3c0, 0x1e3c8,
|
|
|
|
0x1e440, 0x1e44c,
|
2010-07-12 07:33:48 +07:00
|
|
|
0x1e684, 0x1e68c,
|
2010-04-01 22:28:26 +07:00
|
|
|
0x1e6c0, 0x1e6c0,
|
|
|
|
0x1e6e0, 0x1e6e0,
|
|
|
|
0x1e700, 0x1e784,
|
|
|
|
0x1e7c0, 0x1e7c8,
|
|
|
|
0x1e840, 0x1e84c,
|
2010-07-12 07:33:48 +07:00
|
|
|
0x1ea84, 0x1ea8c,
|
2010-04-01 22:28:26 +07:00
|
|
|
0x1eac0, 0x1eac0,
|
|
|
|
0x1eae0, 0x1eae0,
|
|
|
|
0x1eb00, 0x1eb84,
|
|
|
|
0x1ebc0, 0x1ebc8,
|
|
|
|
0x1ec40, 0x1ec4c,
|
2010-07-12 07:33:48 +07:00
|
|
|
0x1ee84, 0x1ee8c,
|
2010-04-01 22:28:26 +07:00
|
|
|
0x1eec0, 0x1eec0,
|
|
|
|
0x1eee0, 0x1eee0,
|
|
|
|
0x1ef00, 0x1ef84,
|
|
|
|
0x1efc0, 0x1efc8,
|
|
|
|
0x1f040, 0x1f04c,
|
2010-07-12 07:33:48 +07:00
|
|
|
0x1f284, 0x1f28c,
|
2010-04-01 22:28:26 +07:00
|
|
|
0x1f2c0, 0x1f2c0,
|
|
|
|
0x1f2e0, 0x1f2e0,
|
|
|
|
0x1f300, 0x1f384,
|
|
|
|
0x1f3c0, 0x1f3c8,
|
|
|
|
0x1f440, 0x1f44c,
|
2010-07-12 07:33:48 +07:00
|
|
|
0x1f684, 0x1f68c,
|
2010-04-01 22:28:26 +07:00
|
|
|
0x1f6c0, 0x1f6c0,
|
|
|
|
0x1f6e0, 0x1f6e0,
|
|
|
|
0x1f700, 0x1f784,
|
|
|
|
0x1f7c0, 0x1f7c8,
|
|
|
|
0x1f840, 0x1f84c,
|
2010-07-12 07:33:48 +07:00
|
|
|
0x1fa84, 0x1fa8c,
|
2010-04-01 22:28:26 +07:00
|
|
|
0x1fac0, 0x1fac0,
|
|
|
|
0x1fae0, 0x1fae0,
|
|
|
|
0x1fb00, 0x1fb84,
|
|
|
|
0x1fbc0, 0x1fbc8,
|
|
|
|
0x1fc40, 0x1fc4c,
|
2010-07-12 07:33:48 +07:00
|
|
|
0x1fe84, 0x1fe8c,
|
2010-04-01 22:28:26 +07:00
|
|
|
0x1fec0, 0x1fec0,
|
|
|
|
0x1fee0, 0x1fee0,
|
|
|
|
0x1ff00, 0x1ff84,
|
|
|
|
0x1ffc0, 0x1ffc8,
|
|
|
|
0x20000, 0x2002c,
|
|
|
|
0x20100, 0x2013c,
|
|
|
|
0x20190, 0x201c8,
|
|
|
|
0x20200, 0x20318,
|
|
|
|
0x20400, 0x20528,
|
|
|
|
0x20540, 0x20614,
|
|
|
|
0x21000, 0x21040,
|
|
|
|
0x2104c, 0x21060,
|
|
|
|
0x210c0, 0x210ec,
|
|
|
|
0x21200, 0x21268,
|
|
|
|
0x21270, 0x21284,
|
|
|
|
0x212fc, 0x21388,
|
|
|
|
0x21400, 0x21404,
|
|
|
|
0x21500, 0x21518,
|
|
|
|
0x2152c, 0x2153c,
|
|
|
|
0x21550, 0x21554,
|
|
|
|
0x21600, 0x21600,
|
|
|
|
0x21608, 0x21628,
|
|
|
|
0x21630, 0x2163c,
|
|
|
|
0x21700, 0x2171c,
|
|
|
|
0x21780, 0x2178c,
|
|
|
|
0x21800, 0x21c38,
|
|
|
|
0x21c80, 0x21d7c,
|
|
|
|
0x21e00, 0x21e04,
|
|
|
|
0x22000, 0x2202c,
|
|
|
|
0x22100, 0x2213c,
|
|
|
|
0x22190, 0x221c8,
|
|
|
|
0x22200, 0x22318,
|
|
|
|
0x22400, 0x22528,
|
|
|
|
0x22540, 0x22614,
|
|
|
|
0x23000, 0x23040,
|
|
|
|
0x2304c, 0x23060,
|
|
|
|
0x230c0, 0x230ec,
|
|
|
|
0x23200, 0x23268,
|
|
|
|
0x23270, 0x23284,
|
|
|
|
0x232fc, 0x23388,
|
|
|
|
0x23400, 0x23404,
|
|
|
|
0x23500, 0x23518,
|
|
|
|
0x2352c, 0x2353c,
|
|
|
|
0x23550, 0x23554,
|
|
|
|
0x23600, 0x23600,
|
|
|
|
0x23608, 0x23628,
|
|
|
|
0x23630, 0x2363c,
|
|
|
|
0x23700, 0x2371c,
|
|
|
|
0x23780, 0x2378c,
|
|
|
|
0x23800, 0x23c38,
|
|
|
|
0x23c80, 0x23d7c,
|
|
|
|
0x23e00, 0x23e04,
|
|
|
|
0x24000, 0x2402c,
|
|
|
|
0x24100, 0x2413c,
|
|
|
|
0x24190, 0x241c8,
|
|
|
|
0x24200, 0x24318,
|
|
|
|
0x24400, 0x24528,
|
|
|
|
0x24540, 0x24614,
|
|
|
|
0x25000, 0x25040,
|
|
|
|
0x2504c, 0x25060,
|
|
|
|
0x250c0, 0x250ec,
|
|
|
|
0x25200, 0x25268,
|
|
|
|
0x25270, 0x25284,
|
|
|
|
0x252fc, 0x25388,
|
|
|
|
0x25400, 0x25404,
|
|
|
|
0x25500, 0x25518,
|
|
|
|
0x2552c, 0x2553c,
|
|
|
|
0x25550, 0x25554,
|
|
|
|
0x25600, 0x25600,
|
|
|
|
0x25608, 0x25628,
|
|
|
|
0x25630, 0x2563c,
|
|
|
|
0x25700, 0x2571c,
|
|
|
|
0x25780, 0x2578c,
|
|
|
|
0x25800, 0x25c38,
|
|
|
|
0x25c80, 0x25d7c,
|
|
|
|
0x25e00, 0x25e04,
|
|
|
|
0x26000, 0x2602c,
|
|
|
|
0x26100, 0x2613c,
|
|
|
|
0x26190, 0x261c8,
|
|
|
|
0x26200, 0x26318,
|
|
|
|
0x26400, 0x26528,
|
|
|
|
0x26540, 0x26614,
|
|
|
|
0x27000, 0x27040,
|
|
|
|
0x2704c, 0x27060,
|
|
|
|
0x270c0, 0x270ec,
|
|
|
|
0x27200, 0x27268,
|
|
|
|
0x27270, 0x27284,
|
|
|
|
0x272fc, 0x27388,
|
|
|
|
0x27400, 0x27404,
|
|
|
|
0x27500, 0x27518,
|
|
|
|
0x2752c, 0x2753c,
|
|
|
|
0x27550, 0x27554,
|
|
|
|
0x27600, 0x27600,
|
|
|
|
0x27608, 0x27628,
|
|
|
|
0x27630, 0x2763c,
|
|
|
|
0x27700, 0x2771c,
|
|
|
|
0x27780, 0x2778c,
|
|
|
|
0x27800, 0x27c38,
|
|
|
|
0x27c80, 0x27d7c,
|
|
|
|
0x27e00, 0x27e04
|
|
|
|
};
|
|
|
|
|
2013-03-14 12:08:50 +07:00
|
|
|
static const unsigned int t5_reg_ranges[] = {
|
|
|
|
0x1008, 0x1148,
|
|
|
|
0x1180, 0x11b4,
|
|
|
|
0x11fc, 0x123c,
|
|
|
|
0x1280, 0x173c,
|
|
|
|
0x1800, 0x18fc,
|
|
|
|
0x3000, 0x3028,
|
|
|
|
0x3060, 0x30d8,
|
|
|
|
0x30e0, 0x30fc,
|
|
|
|
0x3140, 0x357c,
|
|
|
|
0x35a8, 0x35cc,
|
|
|
|
0x35ec, 0x35ec,
|
|
|
|
0x3600, 0x5624,
|
|
|
|
0x56cc, 0x575c,
|
|
|
|
0x580c, 0x5814,
|
|
|
|
0x5890, 0x58bc,
|
|
|
|
0x5940, 0x59dc,
|
|
|
|
0x59fc, 0x5a18,
|
|
|
|
0x5a60, 0x5a9c,
|
|
|
|
0x5b9c, 0x5bfc,
|
|
|
|
0x6000, 0x6040,
|
|
|
|
0x6058, 0x614c,
|
|
|
|
0x7700, 0x7798,
|
|
|
|
0x77c0, 0x78fc,
|
|
|
|
0x7b00, 0x7c54,
|
|
|
|
0x7d00, 0x7efc,
|
|
|
|
0x8dc0, 0x8de0,
|
|
|
|
0x8df8, 0x8e84,
|
|
|
|
0x8ea0, 0x8f84,
|
|
|
|
0x8fc0, 0x90f8,
|
|
|
|
0x9400, 0x9470,
|
|
|
|
0x9600, 0x96f4,
|
|
|
|
0x9800, 0x9808,
|
|
|
|
0x9820, 0x983c,
|
|
|
|
0x9850, 0x9864,
|
|
|
|
0x9c00, 0x9c6c,
|
|
|
|
0x9c80, 0x9cec,
|
|
|
|
0x9d00, 0x9d6c,
|
|
|
|
0x9d80, 0x9dec,
|
|
|
|
0x9e00, 0x9e6c,
|
|
|
|
0x9e80, 0x9eec,
|
|
|
|
0x9f00, 0x9f6c,
|
|
|
|
0x9f80, 0xa020,
|
|
|
|
0xd004, 0xd03c,
|
|
|
|
0xdfc0, 0xdfe0,
|
|
|
|
0xe000, 0x11088,
|
2014-09-01 21:24:59 +07:00
|
|
|
0x1109c, 0x11110,
|
|
|
|
0x11118, 0x1117c,
|
2013-03-14 12:08:50 +07:00
|
|
|
0x11190, 0x11204,
|
|
|
|
0x19040, 0x1906c,
|
|
|
|
0x19078, 0x19080,
|
|
|
|
0x1908c, 0x19124,
|
|
|
|
0x19150, 0x191b0,
|
|
|
|
0x191d0, 0x191e8,
|
|
|
|
0x19238, 0x19290,
|
|
|
|
0x193f8, 0x19474,
|
|
|
|
0x19490, 0x194cc,
|
|
|
|
0x194f0, 0x194f8,
|
|
|
|
0x19c00, 0x19c60,
|
|
|
|
0x19c94, 0x19e10,
|
|
|
|
0x19e50, 0x19f34,
|
|
|
|
0x19f40, 0x19f50,
|
|
|
|
0x19f90, 0x19fe4,
|
|
|
|
0x1a000, 0x1a06c,
|
|
|
|
0x1a0b0, 0x1a120,
|
|
|
|
0x1a128, 0x1a138,
|
|
|
|
0x1a190, 0x1a1c4,
|
|
|
|
0x1a1fc, 0x1a1fc,
|
|
|
|
0x1e008, 0x1e00c,
|
|
|
|
0x1e040, 0x1e04c,
|
|
|
|
0x1e284, 0x1e290,
|
|
|
|
0x1e2c0, 0x1e2c0,
|
|
|
|
0x1e2e0, 0x1e2e0,
|
|
|
|
0x1e300, 0x1e384,
|
|
|
|
0x1e3c0, 0x1e3c8,
|
|
|
|
0x1e408, 0x1e40c,
|
|
|
|
0x1e440, 0x1e44c,
|
|
|
|
0x1e684, 0x1e690,
|
|
|
|
0x1e6c0, 0x1e6c0,
|
|
|
|
0x1e6e0, 0x1e6e0,
|
|
|
|
0x1e700, 0x1e784,
|
|
|
|
0x1e7c0, 0x1e7c8,
|
|
|
|
0x1e808, 0x1e80c,
|
|
|
|
0x1e840, 0x1e84c,
|
|
|
|
0x1ea84, 0x1ea90,
|
|
|
|
0x1eac0, 0x1eac0,
|
|
|
|
0x1eae0, 0x1eae0,
|
|
|
|
0x1eb00, 0x1eb84,
|
|
|
|
0x1ebc0, 0x1ebc8,
|
|
|
|
0x1ec08, 0x1ec0c,
|
|
|
|
0x1ec40, 0x1ec4c,
|
|
|
|
0x1ee84, 0x1ee90,
|
|
|
|
0x1eec0, 0x1eec0,
|
|
|
|
0x1eee0, 0x1eee0,
|
|
|
|
0x1ef00, 0x1ef84,
|
|
|
|
0x1efc0, 0x1efc8,
|
|
|
|
0x1f008, 0x1f00c,
|
|
|
|
0x1f040, 0x1f04c,
|
|
|
|
0x1f284, 0x1f290,
|
|
|
|
0x1f2c0, 0x1f2c0,
|
|
|
|
0x1f2e0, 0x1f2e0,
|
|
|
|
0x1f300, 0x1f384,
|
|
|
|
0x1f3c0, 0x1f3c8,
|
|
|
|
0x1f408, 0x1f40c,
|
|
|
|
0x1f440, 0x1f44c,
|
|
|
|
0x1f684, 0x1f690,
|
|
|
|
0x1f6c0, 0x1f6c0,
|
|
|
|
0x1f6e0, 0x1f6e0,
|
|
|
|
0x1f700, 0x1f784,
|
|
|
|
0x1f7c0, 0x1f7c8,
|
|
|
|
0x1f808, 0x1f80c,
|
|
|
|
0x1f840, 0x1f84c,
|
|
|
|
0x1fa84, 0x1fa90,
|
|
|
|
0x1fac0, 0x1fac0,
|
|
|
|
0x1fae0, 0x1fae0,
|
|
|
|
0x1fb00, 0x1fb84,
|
|
|
|
0x1fbc0, 0x1fbc8,
|
|
|
|
0x1fc08, 0x1fc0c,
|
|
|
|
0x1fc40, 0x1fc4c,
|
|
|
|
0x1fe84, 0x1fe90,
|
|
|
|
0x1fec0, 0x1fec0,
|
|
|
|
0x1fee0, 0x1fee0,
|
|
|
|
0x1ff00, 0x1ff84,
|
|
|
|
0x1ffc0, 0x1ffc8,
|
|
|
|
0x30000, 0x30030,
|
|
|
|
0x30100, 0x30144,
|
|
|
|
0x30190, 0x301d0,
|
|
|
|
0x30200, 0x30318,
|
|
|
|
0x30400, 0x3052c,
|
|
|
|
0x30540, 0x3061c,
|
|
|
|
0x30800, 0x30834,
|
|
|
|
0x308c0, 0x30908,
|
|
|
|
0x30910, 0x309ac,
|
|
|
|
0x30a00, 0x30a04,
|
|
|
|
0x30a0c, 0x30a2c,
|
|
|
|
0x30a44, 0x30a50,
|
|
|
|
0x30a74, 0x30c24,
|
|
|
|
0x30d08, 0x30d14,
|
|
|
|
0x30d1c, 0x30d20,
|
|
|
|
0x30d3c, 0x30d50,
|
|
|
|
0x31200, 0x3120c,
|
|
|
|
0x31220, 0x31220,
|
|
|
|
0x31240, 0x31240,
|
|
|
|
0x31600, 0x31600,
|
|
|
|
0x31608, 0x3160c,
|
|
|
|
0x31a00, 0x31a1c,
|
|
|
|
0x31e04, 0x31e20,
|
|
|
|
0x31e38, 0x31e3c,
|
|
|
|
0x31e80, 0x31e80,
|
|
|
|
0x31e88, 0x31ea8,
|
|
|
|
0x31eb0, 0x31eb4,
|
|
|
|
0x31ec8, 0x31ed4,
|
|
|
|
0x31fb8, 0x32004,
|
|
|
|
0x32208, 0x3223c,
|
|
|
|
0x32600, 0x32630,
|
|
|
|
0x32a00, 0x32abc,
|
|
|
|
0x32b00, 0x32b70,
|
|
|
|
0x33000, 0x33048,
|
|
|
|
0x33060, 0x3309c,
|
|
|
|
0x330f0, 0x33148,
|
|
|
|
0x33160, 0x3319c,
|
|
|
|
0x331f0, 0x332e4,
|
|
|
|
0x332f8, 0x333e4,
|
|
|
|
0x333f8, 0x33448,
|
|
|
|
0x33460, 0x3349c,
|
|
|
|
0x334f0, 0x33548,
|
|
|
|
0x33560, 0x3359c,
|
|
|
|
0x335f0, 0x336e4,
|
|
|
|
0x336f8, 0x337e4,
|
|
|
|
0x337f8, 0x337fc,
|
|
|
|
0x33814, 0x33814,
|
|
|
|
0x3382c, 0x3382c,
|
|
|
|
0x33880, 0x3388c,
|
|
|
|
0x338e8, 0x338ec,
|
|
|
|
0x33900, 0x33948,
|
|
|
|
0x33960, 0x3399c,
|
|
|
|
0x339f0, 0x33ae4,
|
|
|
|
0x33af8, 0x33b10,
|
|
|
|
0x33b28, 0x33b28,
|
|
|
|
0x33b3c, 0x33b50,
|
|
|
|
0x33bf0, 0x33c10,
|
|
|
|
0x33c28, 0x33c28,
|
|
|
|
0x33c3c, 0x33c50,
|
|
|
|
0x33cf0, 0x33cfc,
|
|
|
|
0x34000, 0x34030,
|
|
|
|
0x34100, 0x34144,
|
|
|
|
0x34190, 0x341d0,
|
|
|
|
0x34200, 0x34318,
|
|
|
|
0x34400, 0x3452c,
|
|
|
|
0x34540, 0x3461c,
|
|
|
|
0x34800, 0x34834,
|
|
|
|
0x348c0, 0x34908,
|
|
|
|
0x34910, 0x349ac,
|
|
|
|
0x34a00, 0x34a04,
|
|
|
|
0x34a0c, 0x34a2c,
|
|
|
|
0x34a44, 0x34a50,
|
|
|
|
0x34a74, 0x34c24,
|
|
|
|
0x34d08, 0x34d14,
|
|
|
|
0x34d1c, 0x34d20,
|
|
|
|
0x34d3c, 0x34d50,
|
|
|
|
0x35200, 0x3520c,
|
|
|
|
0x35220, 0x35220,
|
|
|
|
0x35240, 0x35240,
|
|
|
|
0x35600, 0x35600,
|
|
|
|
0x35608, 0x3560c,
|
|
|
|
0x35a00, 0x35a1c,
|
|
|
|
0x35e04, 0x35e20,
|
|
|
|
0x35e38, 0x35e3c,
|
|
|
|
0x35e80, 0x35e80,
|
|
|
|
0x35e88, 0x35ea8,
|
|
|
|
0x35eb0, 0x35eb4,
|
|
|
|
0x35ec8, 0x35ed4,
|
|
|
|
0x35fb8, 0x36004,
|
|
|
|
0x36208, 0x3623c,
|
|
|
|
0x36600, 0x36630,
|
|
|
|
0x36a00, 0x36abc,
|
|
|
|
0x36b00, 0x36b70,
|
|
|
|
0x37000, 0x37048,
|
|
|
|
0x37060, 0x3709c,
|
|
|
|
0x370f0, 0x37148,
|
|
|
|
0x37160, 0x3719c,
|
|
|
|
0x371f0, 0x372e4,
|
|
|
|
0x372f8, 0x373e4,
|
|
|
|
0x373f8, 0x37448,
|
|
|
|
0x37460, 0x3749c,
|
|
|
|
0x374f0, 0x37548,
|
|
|
|
0x37560, 0x3759c,
|
|
|
|
0x375f0, 0x376e4,
|
|
|
|
0x376f8, 0x377e4,
|
|
|
|
0x377f8, 0x377fc,
|
|
|
|
0x37814, 0x37814,
|
|
|
|
0x3782c, 0x3782c,
|
|
|
|
0x37880, 0x3788c,
|
|
|
|
0x378e8, 0x378ec,
|
|
|
|
0x37900, 0x37948,
|
|
|
|
0x37960, 0x3799c,
|
|
|
|
0x379f0, 0x37ae4,
|
|
|
|
0x37af8, 0x37b10,
|
|
|
|
0x37b28, 0x37b28,
|
|
|
|
0x37b3c, 0x37b50,
|
|
|
|
0x37bf0, 0x37c10,
|
|
|
|
0x37c28, 0x37c28,
|
|
|
|
0x37c3c, 0x37c50,
|
|
|
|
0x37cf0, 0x37cfc,
|
|
|
|
0x38000, 0x38030,
|
|
|
|
0x38100, 0x38144,
|
|
|
|
0x38190, 0x381d0,
|
|
|
|
0x38200, 0x38318,
|
|
|
|
0x38400, 0x3852c,
|
|
|
|
0x38540, 0x3861c,
|
|
|
|
0x38800, 0x38834,
|
|
|
|
0x388c0, 0x38908,
|
|
|
|
0x38910, 0x389ac,
|
|
|
|
0x38a00, 0x38a04,
|
|
|
|
0x38a0c, 0x38a2c,
|
|
|
|
0x38a44, 0x38a50,
|
|
|
|
0x38a74, 0x38c24,
|
|
|
|
0x38d08, 0x38d14,
|
|
|
|
0x38d1c, 0x38d20,
|
|
|
|
0x38d3c, 0x38d50,
|
|
|
|
0x39200, 0x3920c,
|
|
|
|
0x39220, 0x39220,
|
|
|
|
0x39240, 0x39240,
|
|
|
|
0x39600, 0x39600,
|
|
|
|
0x39608, 0x3960c,
|
|
|
|
0x39a00, 0x39a1c,
|
|
|
|
0x39e04, 0x39e20,
|
|
|
|
0x39e38, 0x39e3c,
|
|
|
|
0x39e80, 0x39e80,
|
|
|
|
0x39e88, 0x39ea8,
|
|
|
|
0x39eb0, 0x39eb4,
|
|
|
|
0x39ec8, 0x39ed4,
|
|
|
|
0x39fb8, 0x3a004,
|
|
|
|
0x3a208, 0x3a23c,
|
|
|
|
0x3a600, 0x3a630,
|
|
|
|
0x3aa00, 0x3aabc,
|
|
|
|
0x3ab00, 0x3ab70,
|
|
|
|
0x3b000, 0x3b048,
|
|
|
|
0x3b060, 0x3b09c,
|
|
|
|
0x3b0f0, 0x3b148,
|
|
|
|
0x3b160, 0x3b19c,
|
|
|
|
0x3b1f0, 0x3b2e4,
|
|
|
|
0x3b2f8, 0x3b3e4,
|
|
|
|
0x3b3f8, 0x3b448,
|
|
|
|
0x3b460, 0x3b49c,
|
|
|
|
0x3b4f0, 0x3b548,
|
|
|
|
0x3b560, 0x3b59c,
|
|
|
|
0x3b5f0, 0x3b6e4,
|
|
|
|
0x3b6f8, 0x3b7e4,
|
|
|
|
0x3b7f8, 0x3b7fc,
|
|
|
|
0x3b814, 0x3b814,
|
|
|
|
0x3b82c, 0x3b82c,
|
|
|
|
0x3b880, 0x3b88c,
|
|
|
|
0x3b8e8, 0x3b8ec,
|
|
|
|
0x3b900, 0x3b948,
|
|
|
|
0x3b960, 0x3b99c,
|
|
|
|
0x3b9f0, 0x3bae4,
|
|
|
|
0x3baf8, 0x3bb10,
|
|
|
|
0x3bb28, 0x3bb28,
|
|
|
|
0x3bb3c, 0x3bb50,
|
|
|
|
0x3bbf0, 0x3bc10,
|
|
|
|
0x3bc28, 0x3bc28,
|
|
|
|
0x3bc3c, 0x3bc50,
|
|
|
|
0x3bcf0, 0x3bcfc,
|
|
|
|
0x3c000, 0x3c030,
|
|
|
|
0x3c100, 0x3c144,
|
|
|
|
0x3c190, 0x3c1d0,
|
|
|
|
0x3c200, 0x3c318,
|
|
|
|
0x3c400, 0x3c52c,
|
|
|
|
0x3c540, 0x3c61c,
|
|
|
|
0x3c800, 0x3c834,
|
|
|
|
0x3c8c0, 0x3c908,
|
|
|
|
0x3c910, 0x3c9ac,
|
|
|
|
0x3ca00, 0x3ca04,
|
|
|
|
0x3ca0c, 0x3ca2c,
|
|
|
|
0x3ca44, 0x3ca50,
|
|
|
|
0x3ca74, 0x3cc24,
|
|
|
|
0x3cd08, 0x3cd14,
|
|
|
|
0x3cd1c, 0x3cd20,
|
|
|
|
0x3cd3c, 0x3cd50,
|
|
|
|
0x3d200, 0x3d20c,
|
|
|
|
0x3d220, 0x3d220,
|
|
|
|
0x3d240, 0x3d240,
|
|
|
|
0x3d600, 0x3d600,
|
|
|
|
0x3d608, 0x3d60c,
|
|
|
|
0x3da00, 0x3da1c,
|
|
|
|
0x3de04, 0x3de20,
|
|
|
|
0x3de38, 0x3de3c,
|
|
|
|
0x3de80, 0x3de80,
|
|
|
|
0x3de88, 0x3dea8,
|
|
|
|
0x3deb0, 0x3deb4,
|
|
|
|
0x3dec8, 0x3ded4,
|
|
|
|
0x3dfb8, 0x3e004,
|
|
|
|
0x3e208, 0x3e23c,
|
|
|
|
0x3e600, 0x3e630,
|
|
|
|
0x3ea00, 0x3eabc,
|
|
|
|
0x3eb00, 0x3eb70,
|
|
|
|
0x3f000, 0x3f048,
|
|
|
|
0x3f060, 0x3f09c,
|
|
|
|
0x3f0f0, 0x3f148,
|
|
|
|
0x3f160, 0x3f19c,
|
|
|
|
0x3f1f0, 0x3f2e4,
|
|
|
|
0x3f2f8, 0x3f3e4,
|
|
|
|
0x3f3f8, 0x3f448,
|
|
|
|
0x3f460, 0x3f49c,
|
|
|
|
0x3f4f0, 0x3f548,
|
|
|
|
0x3f560, 0x3f59c,
|
|
|
|
0x3f5f0, 0x3f6e4,
|
|
|
|
0x3f6f8, 0x3f7e4,
|
|
|
|
0x3f7f8, 0x3f7fc,
|
|
|
|
0x3f814, 0x3f814,
|
|
|
|
0x3f82c, 0x3f82c,
|
|
|
|
0x3f880, 0x3f88c,
|
|
|
|
0x3f8e8, 0x3f8ec,
|
|
|
|
0x3f900, 0x3f948,
|
|
|
|
0x3f960, 0x3f99c,
|
|
|
|
0x3f9f0, 0x3fae4,
|
|
|
|
0x3faf8, 0x3fb10,
|
|
|
|
0x3fb28, 0x3fb28,
|
|
|
|
0x3fb3c, 0x3fb50,
|
|
|
|
0x3fbf0, 0x3fc10,
|
|
|
|
0x3fc28, 0x3fc28,
|
|
|
|
0x3fc3c, 0x3fc50,
|
|
|
|
0x3fcf0, 0x3fcfc,
|
|
|
|
0x40000, 0x4000c,
|
|
|
|
0x40040, 0x40068,
|
|
|
|
0x40080, 0x40144,
|
|
|
|
0x40180, 0x4018c,
|
|
|
|
0x40200, 0x40298,
|
|
|
|
0x402ac, 0x4033c,
|
|
|
|
0x403f8, 0x403fc,
|
2014-02-18 19:26:13 +07:00
|
|
|
0x41304, 0x413c4,
|
2013-03-14 12:08:50 +07:00
|
|
|
0x41400, 0x4141c,
|
|
|
|
0x41480, 0x414d0,
|
|
|
|
0x44000, 0x44078,
|
|
|
|
0x440c0, 0x44278,
|
|
|
|
0x442c0, 0x44478,
|
|
|
|
0x444c0, 0x44678,
|
|
|
|
0x446c0, 0x44878,
|
|
|
|
0x448c0, 0x449fc,
|
|
|
|
0x45000, 0x45068,
|
|
|
|
0x45080, 0x45084,
|
|
|
|
0x450a0, 0x450b0,
|
|
|
|
0x45200, 0x45268,
|
|
|
|
0x45280, 0x45284,
|
|
|
|
0x452a0, 0x452b0,
|
|
|
|
0x460c0, 0x460e4,
|
|
|
|
0x47000, 0x4708c,
|
|
|
|
0x47200, 0x47250,
|
|
|
|
0x47400, 0x47420,
|
|
|
|
0x47600, 0x47618,
|
|
|
|
0x47800, 0x47814,
|
|
|
|
0x48000, 0x4800c,
|
|
|
|
0x48040, 0x48068,
|
|
|
|
0x48080, 0x48144,
|
|
|
|
0x48180, 0x4818c,
|
|
|
|
0x48200, 0x48298,
|
|
|
|
0x482ac, 0x4833c,
|
|
|
|
0x483f8, 0x483fc,
|
2014-02-18 19:26:13 +07:00
|
|
|
0x49304, 0x493c4,
|
2013-03-14 12:08:50 +07:00
|
|
|
0x49400, 0x4941c,
|
|
|
|
0x49480, 0x494d0,
|
|
|
|
0x4c000, 0x4c078,
|
|
|
|
0x4c0c0, 0x4c278,
|
|
|
|
0x4c2c0, 0x4c478,
|
|
|
|
0x4c4c0, 0x4c678,
|
|
|
|
0x4c6c0, 0x4c878,
|
|
|
|
0x4c8c0, 0x4c9fc,
|
|
|
|
0x4d000, 0x4d068,
|
|
|
|
0x4d080, 0x4d084,
|
|
|
|
0x4d0a0, 0x4d0b0,
|
|
|
|
0x4d200, 0x4d268,
|
|
|
|
0x4d280, 0x4d284,
|
|
|
|
0x4d2a0, 0x4d2b0,
|
|
|
|
0x4e0c0, 0x4e0e4,
|
|
|
|
0x4f000, 0x4f08c,
|
|
|
|
0x4f200, 0x4f250,
|
|
|
|
0x4f400, 0x4f420,
|
|
|
|
0x4f600, 0x4f618,
|
|
|
|
0x4f800, 0x4f814,
|
|
|
|
0x50000, 0x500cc,
|
|
|
|
0x50400, 0x50400,
|
|
|
|
0x50800, 0x508cc,
|
|
|
|
0x50c00, 0x50c00,
|
|
|
|
0x51000, 0x5101c,
|
|
|
|
0x51300, 0x51308,
|
|
|
|
};

	int i;
	struct adapter *ap = netdev2adap(dev);
	static const unsigned int *reg_ranges;
	int arr_size = 0, buf_size = 0;

	if (is_t4(ap->params.chip)) {
		reg_ranges = &t4_reg_ranges[0];
		arr_size = ARRAY_SIZE(t4_reg_ranges);
		buf_size = T4_REGMAP_SIZE;
	} else {
		reg_ranges = &t5_reg_ranges[0];
		arr_size = ARRAY_SIZE(t5_reg_ranges);
		buf_size = T5_REGMAP_SIZE;
	}

	regs->version = mk_adap_vers(ap);

	memset(buf, 0, buf_size);
	for (i = 0; i < arr_size; i += 2)
		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
}
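
/* Usage note (added): "ethtool -d ethX" retrieves this dump; get_regs_len()
 * sizes the buffer and regs->version (from mk_adap_vers()) lets a decoder
 * tell the T4 and T5 register layouts apart.  Each pair in the range
 * tables above is an inclusive [start, end] span of 32-bit registers.
 */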

static int restart_autoneg(struct net_device *dev)
{
	struct port_info *p = netdev_priv(dev);

	if (!netif_running(dev))
		return -EAGAIN;
	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
	return 0;
}

static int identify_port(struct net_device *dev,
			 enum ethtool_phys_id_state state)
{
	unsigned int val;
	struct adapter *adap = netdev2adap(dev);

	if (state == ETHTOOL_ID_ACTIVE)
		val = 0xffff;
	else if (state == ETHTOOL_ID_INACTIVE)
		val = 0;
	else
		return -EINVAL;

	return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
}
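
/* Usage note (added): this implements "ethtool -p ethX [seconds]", which
 * blinks the port LED so the physical port can be located;
 * ETHTOOL_ID_ACTIVE starts the blinking and ETHTOOL_ID_INACTIVE restores
 * normal LED behaviour.
 */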
|
|
|
|
|
2014-12-12 13:37:57 +07:00
|
|
|
static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
|
2010-04-01 22:28:26 +07:00
|
|
|
{
|
|
|
|
unsigned int v = 0;
|
|
|
|
|
2010-06-18 17:05:34 +07:00
|
|
|
if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
|
|
|
|
type == FW_PORT_TYPE_BT_XAUI) {
|
2010-04-01 22:28:26 +07:00
|
|
|
v |= SUPPORTED_TP;
|
|
|
|
if (caps & FW_PORT_CAP_SPEED_100M)
|
|
|
|
v |= SUPPORTED_100baseT_Full;
|
|
|
|
if (caps & FW_PORT_CAP_SPEED_1G)
|
|
|
|
v |= SUPPORTED_1000baseT_Full;
|
|
|
|
if (caps & FW_PORT_CAP_SPEED_10G)
|
|
|
|
v |= SUPPORTED_10000baseT_Full;
|
|
|
|
} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
|
|
|
|
v |= SUPPORTED_Backplane;
|
|
|
|
if (caps & FW_PORT_CAP_SPEED_1G)
|
|
|
|
v |= SUPPORTED_1000baseKX_Full;
|
|
|
|
if (caps & FW_PORT_CAP_SPEED_10G)
|
|
|
|
v |= SUPPORTED_10000baseKX4_Full;
|
|
|
|
} else if (type == FW_PORT_TYPE_KR)
|
|
|
|
v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
|
2010-06-18 17:05:34 +07:00
|
|
|
else if (type == FW_PORT_TYPE_BP_AP)
|
2010-12-15 04:36:47 +07:00
|
|
|
v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
|
|
|
|
SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
|
|
|
|
else if (type == FW_PORT_TYPE_BP4_AP)
|
|
|
|
v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
|
|
|
|
SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
|
|
|
|
SUPPORTED_10000baseKX4_Full;
|
2010-06-18 17:05:34 +07:00
|
|
|
else if (type == FW_PORT_TYPE_FIBER_XFI ||
|
2014-12-12 13:37:57 +07:00
|
|
|
type == FW_PORT_TYPE_FIBER_XAUI ||
|
|
|
|
type == FW_PORT_TYPE_SFP ||
|
|
|
|
type == FW_PORT_TYPE_QSFP_10G ||
|
|
|
|
type == FW_PORT_TYPE_QSA) {
|
2010-04-01 22:28:26 +07:00
|
|
|
v |= SUPPORTED_FIBRE;
|
2014-11-28 20:05:14 +07:00
|
|
|
if (caps & FW_PORT_CAP_SPEED_1G)
|
|
|
|
v |= SUPPORTED_1000baseT_Full;
|
|
|
|
if (caps & FW_PORT_CAP_SPEED_10G)
|
|
|
|
v |= SUPPORTED_10000baseT_Full;
|
2014-12-12 13:37:57 +07:00
|
|
|
} else if (type == FW_PORT_TYPE_BP40_BA ||
|
|
|
|
type == FW_PORT_TYPE_QSFP) {
|
2014-02-18 19:26:08 +07:00
|
|
|
v |= SUPPORTED_40000baseSR4_Full;
|
2014-12-12 13:37:57 +07:00
|
|
|
v |= SUPPORTED_FIBRE;
|
|
|
|
}
|
2010-04-01 22:28:26 +07:00
|
|
|
|
|
|
|
if (caps & FW_PORT_CAP_ANEG)
|
|
|
|
v |= SUPPORTED_Autoneg;
|
|
|
|
return v;
|
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned int to_fw_linkcaps(unsigned int caps)
|
|
|
|
{
|
|
|
|
unsigned int v = 0;
|
|
|
|
|
|
|
|
if (caps & ADVERTISED_100baseT_Full)
|
|
|
|
v |= FW_PORT_CAP_SPEED_100M;
|
|
|
|
if (caps & ADVERTISED_1000baseT_Full)
|
|
|
|
v |= FW_PORT_CAP_SPEED_1G;
|
|
|
|
if (caps & ADVERTISED_10000baseT_Full)
|
|
|
|
v |= FW_PORT_CAP_SPEED_10G;
|
2014-02-18 19:26:08 +07:00
|
|
|
if (caps & ADVERTISED_40000baseSR4_Full)
|
|
|
|
v |= FW_PORT_CAP_SPEED_40G;
|
2010-04-01 22:28:26 +07:00
|
|
|
return v;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
|
|
|
|
{
|
|
|
|
const struct port_info *p = netdev_priv(dev);
|
|
|
|
|
|
|
|
if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
|
2010-06-18 17:05:34 +07:00
|
|
|
p->port_type == FW_PORT_TYPE_BT_XFI ||
|
2010-04-01 22:28:26 +07:00
|
|
|
p->port_type == FW_PORT_TYPE_BT_XAUI)
|
|
|
|
cmd->port = PORT_TP;
|
2010-06-18 17:05:34 +07:00
|
|
|
else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
|
|
|
|
p->port_type == FW_PORT_TYPE_FIBER_XAUI)
|
2010-04-01 22:28:26 +07:00
|
|
|
cmd->port = PORT_FIBRE;
|
2014-05-07 19:31:02 +07:00
|
|
|
else if (p->port_type == FW_PORT_TYPE_SFP ||
|
|
|
|
p->port_type == FW_PORT_TYPE_QSFP_10G ||
|
2014-12-12 13:37:57 +07:00
|
|
|
p->port_type == FW_PORT_TYPE_QSA ||
|
2014-05-07 19:31:02 +07:00
|
|
|
p->port_type == FW_PORT_TYPE_QSFP) {
|
|
|
|
if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
|
|
|
|
p->mod_type == FW_PORT_MOD_TYPE_SR ||
|
|
|
|
p->mod_type == FW_PORT_MOD_TYPE_ER ||
|
|
|
|
p->mod_type == FW_PORT_MOD_TYPE_LRM)
|
|
|
|
cmd->port = PORT_FIBRE;
|
|
|
|
else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
|
|
|
|
p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
|
2010-06-18 17:05:34 +07:00
|
|
|
cmd->port = PORT_DA;
|
|
|
|
else
|
2014-05-07 19:31:02 +07:00
|
|
|
cmd->port = PORT_OTHER;
|
2010-06-18 17:05:34 +07:00
|
|
|
} else
|
2010-04-01 22:28:26 +07:00
|
|
|
cmd->port = PORT_OTHER;
|
|
|
|
|
|
|
|
if (p->mdio_addr >= 0) {
|
|
|
|
cmd->phy_address = p->mdio_addr;
|
|
|
|
cmd->transceiver = XCVR_EXTERNAL;
|
|
|
|
cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
|
|
|
|
MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
|
|
|
|
} else {
|
|
|
|
cmd->phy_address = 0; /* not really, but no better option */
|
|
|
|
cmd->transceiver = XCVR_INTERNAL;
|
|
|
|
cmd->mdio_support = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
|
|
|
|
cmd->advertising = from_fw_linkcaps(p->port_type,
|
|
|
|
p->link_cfg.advertising);
|
2011-04-28 01:32:40 +07:00
|
|
|
ethtool_cmd_speed_set(cmd,
|
|
|
|
netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
|
2010-04-01 22:28:26 +07:00
|
|
|
cmd->duplex = DUPLEX_FULL;
|
|
|
|
cmd->autoneg = p->link_cfg.autoneg;
|
|
|
|
cmd->maxtxpkt = 0;
|
|
|
|
cmd->maxrxpkt = 0;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static unsigned int speed_to_caps(int speed)
|
|
|
|
{
|
2014-02-23 07:03:24 +07:00
|
|
|
if (speed == 100)
|
2010-04-01 22:28:26 +07:00
|
|
|
return FW_PORT_CAP_SPEED_100M;
|
2014-02-23 07:03:24 +07:00
|
|
|
if (speed == 1000)
|
2010-04-01 22:28:26 +07:00
|
|
|
return FW_PORT_CAP_SPEED_1G;
|
2014-02-23 07:03:24 +07:00
|
|
|
if (speed == 10000)
|
2010-04-01 22:28:26 +07:00
|
|
|
return FW_PORT_CAP_SPEED_10G;
|
2014-02-23 07:03:24 +07:00
|
|
|
if (speed == 40000)
|
2014-02-18 19:26:08 +07:00
|
|
|
return FW_PORT_CAP_SPEED_40G;
|
2010-04-01 22:28:26 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
|
|
|
|
{
|
|
|
|
unsigned int cap;
|
|
|
|
struct port_info *p = netdev_priv(dev);
|
|
|
|
struct link_config *lc = &p->link_cfg;
|
2011-04-28 01:32:39 +07:00
|
|
|
u32 speed = ethtool_cmd_speed(cmd);
|
2010-04-01 22:28:26 +07:00
|
|
|
|
|
|
|
if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (!(lc->supported & FW_PORT_CAP_ANEG)) {
|
|
|
|
/*
|
|
|
|
* PHY offers a single speed. See if that's what's
|
|
|
|
* being requested.
|
|
|
|
*/
|
|
|
|
if (cmd->autoneg == AUTONEG_DISABLE &&
|
2011-04-28 01:32:39 +07:00
|
|
|
(lc->supported & speed_to_caps(speed)))
|
|
|
|
return 0;
|
2010-04-01 22:28:26 +07:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cmd->autoneg == AUTONEG_DISABLE) {
|
2011-04-28 01:32:39 +07:00
|
|
|
cap = speed_to_caps(speed);
|
2010-04-01 22:28:26 +07:00
|
|
|
|
2014-02-18 19:26:08 +07:00
|
|
|
if (!(lc->supported & cap) ||
|
2014-02-23 07:03:24 +07:00
|
|
|
(speed == 1000) ||
|
|
|
|
(speed == 10000) ||
|
2014-02-18 19:26:08 +07:00
|
|
|
(speed == 40000))
|
2010-04-01 22:28:26 +07:00
|
|
|
return -EINVAL;
|
|
|
|
lc->requested_speed = cap;
|
|
|
|
lc->advertising = 0;
|
|
|
|
} else {
|
|
|
|
cap = to_fw_linkcaps(cmd->advertising);
|
|
|
|
if (!(lc->supported & cap))
|
|
|
|
return -EINVAL;
|
|
|
|
lc->requested_speed = 0;
|
|
|
|
lc->advertising = cap | FW_PORT_CAP_ANEG;
|
|
|
|
}
|
|
|
|
lc->autoneg = cmd->autoneg;
|
|
|
|
|
|
|
|
if (netif_running(dev))
|
2010-08-02 20:19:21 +07:00
|
|
|
return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
|
|
|
|
lc);
|
2010-04-01 22:28:26 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void get_pauseparam(struct net_device *dev,
|
|
|
|
struct ethtool_pauseparam *epause)
|
|
|
|
{
|
|
|
|
struct port_info *p = netdev_priv(dev);
|
|
|
|
|
|
|
|
epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
|
|
|
|
epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
|
|
|
|
epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int set_pauseparam(struct net_device *dev,
|
|
|
|
struct ethtool_pauseparam *epause)
|
|
|
|
{
|
|
|
|
struct port_info *p = netdev_priv(dev);
|
|
|
|
struct link_config *lc = &p->link_cfg;
|
|
|
|
|
|
|
|
if (epause->autoneg == AUTONEG_DISABLE)
|
|
|
|
lc->requested_fc = 0;
|
|
|
|
else if (lc->supported & FW_PORT_CAP_ANEG)
|
|
|
|
lc->requested_fc = PAUSE_AUTONEG;
|
|
|
|
else
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (epause->rx_pause)
|
|
|
|
lc->requested_fc |= PAUSE_RX;
|
|
|
|
if (epause->tx_pause)
|
|
|
|
lc->requested_fc |= PAUSE_TX;
|
|
|
|
if (netif_running(dev))
|
2010-08-02 20:19:21 +07:00
|
|
|
return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
|
|
|
|
lc);
|
2010-04-01 22:28:26 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
|
|
|
|
{
|
|
|
|
const struct port_info *pi = netdev_priv(dev);
|
|
|
|
const struct sge *s = &pi->adapter->sge;
|
|
|
|
|
|
|
|
e->rx_max_pending = MAX_RX_BUFFERS;
|
|
|
|
e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
|
|
|
|
e->rx_jumbo_max_pending = 0;
|
|
|
|
e->tx_max_pending = MAX_TXQ_ENTRIES;
|
|
|
|
|
|
|
|
e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
|
|
|
|
e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
|
|
|
|
e->rx_jumbo_pending = 0;
|
|
|
|
e->tx_pending = s->ethtxq[pi->first_qset].q.size;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
const struct port_info *pi = netdev_priv(dev);
|
|
|
|
struct adapter *adapter = pi->adapter;
|
|
|
|
struct sge *s = &adapter->sge;
|
|
|
|
|
|
|
|
if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
|
|
|
|
e->tx_pending > MAX_TXQ_ENTRIES ||
|
|
|
|
e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
|
|
|
|
e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
|
|
|
|
e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
if (adapter->flags & FULL_INIT_DONE)
|
|
|
|
return -EBUSY;
|
|
|
|
|
|
|
|
for (i = 0; i < pi->nqsets; ++i) {
|
|
|
|
s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
|
|
|
|
s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
|
|
|
|
s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
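
/* Usage note (added): get_sge_param()/set_sge_param() above implement
 * "ethtool -g ethX" and "ethtool -G ethX rx N rx-mini M tx K".  Ring sizes
 * can only be changed before the queues are brought up (the FULL_INIT_DONE
 * check returns -EBUSY afterwards), and the free-list size is reported and
 * programmed with the 8-entry adjustment visible in the two functions.
 */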
|
|
|
|
|
|
|
|
static int closest_timer(const struct sge *s, int time)
|
|
|
|
{
|
|
|
|
int i, delta, match = 0, min_delta = INT_MAX;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
|
|
|
|
delta = time - s->timer_val[i];
|
|
|
|
if (delta < 0)
|
|
|
|
delta = -delta;
|
|
|
|
if (delta < min_delta) {
|
|
|
|
min_delta = delta;
|
|
|
|
match = i;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return match;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int closest_thres(const struct sge *s, int thres)
|
|
|
|
{
|
|
|
|
int i, delta, match = 0, min_delta = INT_MAX;
|
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
|
|
|
|
delta = thres - s->counter_val[i];
|
|
|
|
if (delta < 0)
|
|
|
|
delta = -delta;
|
|
|
|
if (delta < min_delta) {
|
|
|
|
min_delta = delta;
|
|
|
|
match = i;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return match;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return a queue's interrupt hold-off time in us. 0 means no timer.
|
|
|
|
*/
|
2015-01-27 15:17:45 +07:00
|
|
|
unsigned int qtimer_val(const struct adapter *adap,
|
|
|
|
const struct sge_rspq *q)
|
2010-04-01 22:28:26 +07:00
|
|
|
{
|
|
|
|
unsigned int idx = q->intr_params >> 1;
|
|
|
|
|
|
|
|
return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2014-06-06 23:10:45 +07:00
|
|
|
* set_rspq_intr_params - set a queue's interrupt holdoff parameters
|
2010-04-01 22:28:26 +07:00
|
|
|
* @q: the Rx queue
|
|
|
|
* @us: the hold-off time in us, or 0 to disable timer
|
|
|
|
* @cnt: the hold-off packet count, or 0 to disable counter
|
|
|
|
*
|
|
|
|
* Sets an Rx queue's interrupt hold-off time and packet count. At least
|
|
|
|
* one of the two needs to be enabled for the queue to generate interrupts.
|
|
|
|
*/
|
2014-06-06 23:10:45 +07:00
|
|
|
static int set_rspq_intr_params(struct sge_rspq *q,
|
|
|
|
unsigned int us, unsigned int cnt)
|
2010-04-01 22:28:26 +07:00
|
|
|
{
|
2014-06-06 23:10:45 +07:00
|
|
|
struct adapter *adap = q->adap;
|
|
|
|
|
2010-04-01 22:28:26 +07:00
|
|
|
if ((us | cnt) == 0)
|
|
|
|
cnt = 1;
|
|
|
|
|
|
|
|
if (cnt) {
|
|
|
|
int err;
|
|
|
|
u32 v, new_idx;
|
|
|
|
|
|
|
|
new_idx = closest_thres(&adap->sge, cnt);
|
|
|
|
if (q->desc && q->pktcnt_idx != new_idx) {
|
|
|
|
/* the queue has already been created, update it */
|
2014-11-21 14:22:02 +07:00
|
|
|
v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
|
|
|
|
FW_PARAMS_PARAM_X_V(
|
|
|
|
FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
|
|
|
|
FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
|
2010-08-02 20:19:21 +07:00
|
|
|
err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
|
|
|
|
&new_idx);
|
2010-04-01 22:28:26 +07:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
q->pktcnt_idx = new_idx;
|
|
|
|
}
|
|
|
|
|
|
|
|
us = us == 0 ? 6 : closest_timer(&adap->sge, us);
|
|
|
|
q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-06-06 23:10:45 +07:00
|
|
|
/**
|
|
|
|
* set_rx_intr_params - set a net devices's RX interrupt holdoff paramete!
|
|
|
|
* @dev: the network device
|
|
|
|
* @us: the hold-off time in us, or 0 to disable timer
|
|
|
|
* @cnt: the hold-off packet count, or 0 to disable counter
|
|
|
|
*
|
|
|
|
* Set the RX interrupt hold-off parameters for a network device.
|
|
|
|
*/
|
|
|
|
static int set_rx_intr_params(struct net_device *dev,
|
|
|
|
unsigned int us, unsigned int cnt)
|
2010-04-01 22:28:26 +07:00
|
|
|
{
|
2014-06-06 23:10:45 +07:00
|
|
|
int i, err;
|
|
|
|
struct port_info *pi = netdev_priv(dev);
|
2010-04-01 22:28:26 +07:00
|
|
|
struct adapter *adap = pi->adapter;
|
2014-06-06 23:10:45 +07:00
|
|
|
struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
|
|
|
|
|
|
|
|
for (i = 0; i < pi->nqsets; i++, q++) {
|
|
|
|
err = set_rspq_intr_params(&q->rspq, us, cnt);
|
|
|
|
if (err)
|
|
|
|
return err;
|
2013-01-15 12:15:10 +07:00
|
|
|
}
|
2014-06-06 23:10:45 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-09-26 01:53:55 +07:00
|
|
|
static int set_adaptive_rx_setting(struct net_device *dev, int adaptive_rx)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
struct port_info *pi = netdev_priv(dev);
|
|
|
|
struct adapter *adap = pi->adapter;
|
|
|
|
struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
|
|
|
|
|
|
|
|
for (i = 0; i < pi->nqsets; i++, q++)
|
|
|
|
q->rspq.adaptive_rx = adaptive_rx;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int get_adaptive_rx_setting(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct port_info *pi = netdev_priv(dev);
|
|
|
|
struct adapter *adap = pi->adapter;
|
|
|
|
struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
|
|
|
|
|
|
|
|
return q->rspq.adaptive_rx;
|
|
|
|
}
|
|
|
|
|
2014-06-06 23:10:45 +07:00
|
|
|
static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
|
|
|
|
{
|
2014-09-26 01:53:55 +07:00
|
|
|
set_adaptive_rx_setting(dev, c->use_adaptive_rx_coalesce);
|
2014-06-06 23:10:45 +07:00
|
|
|
return set_rx_intr_params(dev, c->rx_coalesce_usecs,
|
|
|
|
c->rx_max_coalesced_frames);
|
2010-04-01 22:28:26 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
|
|
|
|
{
|
|
|
|
const struct port_info *pi = netdev_priv(dev);
|
|
|
|
const struct adapter *adap = pi->adapter;
|
|
|
|
const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
|
|
|
|
|
|
|
|
c->rx_coalesce_usecs = qtimer_val(adap, rq);
|
|
|
|
c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
|
|
|
|
adap->sge.counter_val[rq->pktcnt_idx] : 0;
|
2014-09-26 01:53:55 +07:00
|
|
|
c->use_adaptive_rx_coalesce = get_adaptive_rx_setting(dev);
|
2010-04-01 22:28:26 +07:00
|
|
|
return 0;
|
|
|
|
}
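
/* Usage note (added): the coalescing hooks above map to "ethtool -c ethX"
 * and "ethtool -C ethX rx-usecs N rx-frames M adaptive-rx on|off".  At
 * least one of the hold-off timer and the packet-count threshold must be
 * enabled, which set_rspq_intr_params() guarantees by forcing cnt to 1
 * when both are zero.
 */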

/**
 * eeprom_ptov - translate a physical EEPROM address to virtual
 * @phys_addr: the physical EEPROM address
 * @fn: the PCI function number
 * @sz: size of function-specific area
 *
 * Translate a physical EEPROM address to virtual.  The first 1K is
 * accessed through virtual addresses starting at 31K, the rest is
 * accessed through virtual addresses starting at 0.
 *
 * The mapping is as follows:
 * [0..1K) -> [31K..32K)
 * [1K..1K+A) -> [31K-A..31K)
 * [1K+A..ES) -> [0..ES-A-1K)
 *
 * where A = @fn * @sz, and ES = EEPROM size.
 */
static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
{
	fn *= sz;
	if (phys_addr < 1024)
		return phys_addr + (31 << 10);
	if (phys_addr < 1024 + fn)
		return 31744 - fn + phys_addr - 1024;
	if (phys_addr < EEPROMSIZE)
		return phys_addr - 1024 - fn;
	return -EINVAL;
}
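
/* Worked example (added, assuming a function-area size of 1 KB purely for
 * illustration): with fn = 2, A = 2 KB, so physical address 0x100 maps to
 * 31K + 0x100, physical 1K + 0x80 (inside the function-specific area) maps
 * to 31K - 2K + 0x80, and physical 3K + 0x40 maps to 0x40.  Addresses at
 * or beyond EEPROMSIZE return -EINVAL.
 */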
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The next two routines implement eeprom read/write from physical addresses.
|
|
|
|
*/
|
|
|
|
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
|
|
|
|
{
|
2010-08-24 00:20:59 +07:00
|
|
|
int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
|
2010-04-01 22:28:26 +07:00
|
|
|
|
|
|
|
if (vaddr >= 0)
|
|
|
|
vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
|
|
|
|
return vaddr < 0 ? vaddr : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
|
|
|
|
{
|
2010-08-24 00:20:59 +07:00
|
|
|
int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
|
2010-04-01 22:28:26 +07:00
|
|
|
|
|
|
|
if (vaddr >= 0)
|
|
|
|
vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
|
|
|
|
return vaddr < 0 ? vaddr : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#define EEPROM_MAGIC 0x38E2F10C
|
|
|
|
|
|
|
|
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
|
|
|
|
u8 *data)
|
|
|
|
{
|
|
|
|
int i, err = 0;
|
|
|
|
struct adapter *adapter = netdev2adap(dev);
|
|
|
|
|
|
|
|
u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
|
|
|
|
if (!buf)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
e->magic = EEPROM_MAGIC;
|
|
|
|
for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
|
|
|
|
err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
|
|
|
|
|
|
|
|
if (!err)
|
|
|
|
memcpy(data, buf + e->offset, e->len);
|
|
|
|
kfree(buf);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
|
|
|
|
u8 *data)
|
|
|
|
{
|
|
|
|
u8 *buf;
|
|
|
|
int err = 0;
|
|
|
|
u32 aligned_offset, aligned_len, *p;
|
|
|
|
struct adapter *adapter = netdev2adap(dev);
|
|
|
|
|
|
|
|
if (eeprom->magic != EEPROM_MAGIC)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
aligned_offset = eeprom->offset & ~3;
|
|
|
|
aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
|
|
|
|
|
2010-08-24 00:20:59 +07:00
|
|
|
if (adapter->fn > 0) {
|
|
|
|
u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
|
|
|
|
|
|
|
|
if (aligned_offset < start ||
|
|
|
|
aligned_offset + aligned_len > start + EEPROMPFSIZE)
|
|
|
|
return -EPERM;
|
|
|
|
}
|
|
|
|
|
2010-04-01 22:28:26 +07:00
|
|
|
if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
|
|
|
|
/*
|
|
|
|
* RMW possibly needed for first or last words.
|
|
|
|
*/
|
|
|
|
buf = kmalloc(aligned_len, GFP_KERNEL);
|
|
|
|
if (!buf)
|
|
|
|
return -ENOMEM;
|
|
|
|
err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
|
|
|
|
if (!err && aligned_len > 4)
|
|
|
|
err = eeprom_rd_phys(adapter,
|
|
|
|
aligned_offset + aligned_len - 4,
|
|
|
|
(u32 *)&buf[aligned_len - 4]);
|
|
|
|
if (err)
|
|
|
|
goto out;
|
|
|
|
memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
|
|
|
|
} else
|
|
|
|
buf = data;
|
|
|
|
|
|
|
|
err = t4_seeprom_wp(adapter, false);
|
|
|
|
if (err)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
|
|
|
|
err = eeprom_wr_phys(adapter, aligned_offset, *p);
|
|
|
|
aligned_offset += 4;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!err)
|
|
|
|
err = t4_seeprom_wp(adapter, true);
|
|
|
|
out:
|
|
|
|
if (buf != data)
|
|
|
|
kfree(buf);
|
|
|
|
return err;
|
|
|
|
}
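
/* Usage note (added): get_eeprom()/set_eeprom() back "ethtool -e ethX" and
 * "ethtool -E ethX magic 0x38e2f10c ...".  Writes must carry the
 * EEPROM_MAGIC value, are widened to 4-byte alignment with a
 * read-modify-write of the first/last words, and on PCI functions other
 * than 0 are confined to that function's own EEPROMPFSIZE-sized slice.
 */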
|
|
|
|
|
|
|
|
static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
const struct firmware *fw;
|
|
|
|
struct adapter *adap = netdev2adap(netdev);
|
2014-11-21 14:22:05 +07:00
|
|
|
unsigned int mbox = PCIE_FW_MASTER_M + 1;
|
2010-04-01 22:28:26 +07:00
|
|
|
|
|
|
|
ef->data[sizeof(ef->data) - 1] = '\0';
|
|
|
|
ret = request_firmware(&fw, ef->data, adap->pdev_dev);
|
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
2014-10-15 03:24:14 +07:00
|
|
|
/* If the adapter has been fully initialized then we'll go ahead and
|
|
|
|
* try to get the firmware's cooperation in upgrading to the new
|
|
|
|
* firmware image otherwise we'll try to do the entire job from the
|
|
|
|
* host ... and we always "force" the operation in this path.
|
|
|
|
*/
|
|
|
|
if (adap->flags & FULL_INIT_DONE)
|
|
|
|
mbox = adap->mbox;
|
|
|
|
|
|
|
|
ret = t4_fw_upgrade(adap, mbox, fw->data, fw->size, 1);
|
2010-04-01 22:28:26 +07:00
|
|
|
release_firmware(fw);
|
|
|
|
if (!ret)
|
2014-10-15 03:24:14 +07:00
|
|
|
dev_info(adap->pdev_dev, "loaded firmware %s,"
|
|
|
|
" reload cxgb4 driver\n", ef->data);
|
2010-04-01 22:28:26 +07:00
|
|
|
return ret;
|
|
|
|
}
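
/* Usage note (added): set_flash() implements "ethtool -f ethX <fw-image>",
 * loading a new firmware image via request_firmware().  If the adapter is
 * fully initialized the upgrade is negotiated with the running firmware
 * through the driver's mailbox, otherwise it is forced entirely from the
 * host; in either case the driver must be reloaded afterwards, as the
 * dev_info() message says.
 */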
|
|
|
|
|
|
|
|
#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
|
|
|
|
#define BCAST_CRC 0xa0ccc1a6
|
|
|
|
|
|
|
|
static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
|
|
|
|
{
|
|
|
|
wol->supported = WAKE_BCAST | WAKE_MAGIC;
|
|
|
|
wol->wolopts = netdev2adap(dev)->wol;
|
|
|
|
memset(&wol->sopass, 0, sizeof(wol->sopass));
|
|
|
|
}
|
|
|
|
|
|
|
|
static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
|
|
|
|
{
|
|
|
|
int err = 0;
|
|
|
|
struct port_info *pi = netdev_priv(dev);
|
|
|
|
|
|
|
|
if (wol->wolopts & ~WOL_SUPPORTED)
|
|
|
|
return -EINVAL;
|
|
|
|
t4_wol_magic_enable(pi->adapter, pi->tx_chan,
|
|
|
|
(wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
|
|
|
|
if (wol->wolopts & WAKE_BCAST) {
|
|
|
|
err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
|
|
|
|
~0ULL, 0, false);
|
|
|
|
if (!err)
|
|
|
|
err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
|
|
|
|
~6ULL, ~0ULL, BCAST_CRC, true);
|
|
|
|
} else
|
|
|
|
t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
|
|
|
|
return err;
|
|
|
|
}
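
/* Usage note (added): Wake-on-LAN is configured with "ethtool -s ethX wol g"
 * (magic packet), "wol b" (broadcast) or "wol d" (disable); only WAKE_MAGIC
 * and WAKE_BCAST are accepted, per WOL_SUPPORTED above.
 */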
|
|
|
|
|
2011-11-15 22:29:55 +07:00
|
|
|
static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
|
2010-04-28 06:22:42 +07:00
|
|
|
{
|
2011-04-16 20:05:08 +07:00
|
|
|
const struct port_info *pi = netdev_priv(dev);
|
2011-11-15 22:29:55 +07:00
|
|
|
netdev_features_t changed = dev->features ^ features;
|
2010-10-21 18:29:56 +07:00
|
|
|
int err;
|
|
|
|
|
2013-04-19 09:04:27 +07:00
|
|
|
if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
|
2011-04-16 20:05:08 +07:00
|
|
|
return 0;
|
2010-10-21 18:29:56 +07:00
|
|
|
|
2011-04-16 20:05:08 +07:00
|
|
|
err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
|
|
|
|
-1, -1, -1,
|
2013-04-19 09:04:27 +07:00
|
|
|
!!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
|
2011-04-16 20:05:08 +07:00
|
|
|
if (unlikely(err))
|
2013-04-19 09:04:27 +07:00
|
|
|
dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
|
2010-10-21 18:29:56 +07:00
|
|
|
return err;
|
2010-04-28 06:22:42 +07:00
|
|
|
}
|
|
|
|
|
2011-12-15 20:55:01 +07:00
|
|
|
static u32 get_rss_table_size(struct net_device *dev)
|
2010-07-11 19:01:17 +07:00
|
|
|
{
|
|
|
|
const struct port_info *pi = netdev_priv(dev);
|
|
|
|
|
2011-12-15 20:55:01 +07:00
|
|
|
return pi->rss_size;
|
|
|
|
}
|
|
|
|
|
2014-12-02 23:12:10 +07:00
|
|
|
static int get_rss_table(struct net_device *dev, u32 *p, u8 *key, u8 *hfunc)
|
2011-12-15 20:55:01 +07:00
|
|
|
{
|
|
|
|
const struct port_info *pi = netdev_priv(dev);
|
|
|
|
unsigned int n = pi->rss_size;
|
|
|
|
|
2014-12-02 23:12:10 +07:00
|
|
|
if (hfunc)
|
|
|
|
*hfunc = ETH_RSS_HASH_TOP;
|
|
|
|
if (!p)
|
|
|
|
return 0;
|
2010-07-11 19:01:17 +07:00
|
|
|
while (n--)
|
2011-12-15 20:55:01 +07:00
|
|
|
p[n] = pi->rss[n];
|
2010-07-11 19:01:17 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-12-02 23:12:10 +07:00
|
|
|
static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key,
|
|
|
|
const u8 hfunc)
|
2010-07-11 19:01:17 +07:00
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
struct port_info *pi = netdev_priv(dev);
|
|
|
|
|
2014-12-02 23:12:10 +07:00
|
|
|
/* We require at least one supported parameter to be changed and no
|
|
|
|
* change in any of the unsupported parameters
|
|
|
|
*/
|
|
|
|
if (key ||
|
|
|
|
(hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
if (!p)
|
|
|
|
return 0;
|
|
|
|
|
2011-12-15 20:55:01 +07:00
|
|
|
for (i = 0; i < pi->rss_size; i++)
|
|
|
|
pi->rss[i] = p[i];
|
2010-07-11 19:01:17 +07:00
|
|
|
if (pi->adapter->flags & FULL_INIT_DONE)
|
|
|
|
return write_rss(pi, pi->rss);
|
|
|
|
return 0;
|
|
|
|
}
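
/* Usage note (added): the RSS indirection table is read with
 * "ethtool -x ethX" and reprogrammed with e.g. "ethtool -X ethX equal 4".
 * Only the indirection table can be changed here; requests that try to set
 * a hash key, or a hash function other than Toeplitz, get -EOPNOTSUPP.
 */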
|
|
|
|
|
|
|
|
static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
|
2011-09-06 20:49:12 +07:00
|
|
|
u32 *rules)
|
2010-07-11 19:01:17 +07:00
|
|
|
{
|
2010-07-11 19:01:18 +07:00
|
|
|
const struct port_info *pi = netdev_priv(dev);
|
|
|
|
|
2010-07-11 19:01:17 +07:00
|
|
|
switch (info->cmd) {
|
2010-07-11 19:01:18 +07:00
|
|
|
case ETHTOOL_GRXFH: {
|
|
|
|
unsigned int v = pi->rss_mode;
|
|
|
|
|
|
|
|
info->data = 0;
|
|
|
|
switch (info->flow_type) {
|
|
|
|
case TCP_V4_FLOW:
|
2014-11-21 14:22:05 +07:00
|
|
|
if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F)
|
2010-07-11 19:01:18 +07:00
|
|
|
info->data = RXH_IP_SRC | RXH_IP_DST |
|
|
|
|
RXH_L4_B_0_1 | RXH_L4_B_2_3;
|
2014-11-21 14:22:05 +07:00
|
|
|
else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
|
2010-07-11 19:01:18 +07:00
|
|
|
info->data = RXH_IP_SRC | RXH_IP_DST;
|
|
|
|
break;
|
|
|
|
case UDP_V4_FLOW:
|
2014-11-21 14:22:05 +07:00
|
|
|
if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F) &&
|
|
|
|
(v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
|
2010-07-11 19:01:18 +07:00
|
|
|
info->data = RXH_IP_SRC | RXH_IP_DST |
|
|
|
|
RXH_L4_B_0_1 | RXH_L4_B_2_3;
|
2014-11-21 14:22:05 +07:00
|
|
|
else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
|
2010-07-11 19:01:18 +07:00
|
|
|
info->data = RXH_IP_SRC | RXH_IP_DST;
|
|
|
|
break;
|
|
|
|
case SCTP_V4_FLOW:
|
|
|
|
case AH_ESP_V4_FLOW:
|
|
|
|
case IPV4_FLOW:
|
2014-11-21 14:22:05 +07:00
|
|
|
if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F)
|
2010-07-11 19:01:18 +07:00
|
|
|
info->data = RXH_IP_SRC | RXH_IP_DST;
|
|
|
|
break;
|
|
|
|
case TCP_V6_FLOW:
|
2014-11-21 14:22:05 +07:00
|
|
|
if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F)
|
2010-07-11 19:01:18 +07:00
|
|
|
info->data = RXH_IP_SRC | RXH_IP_DST |
|
|
|
|
RXH_L4_B_0_1 | RXH_L4_B_2_3;
|
2014-11-21 14:22:05 +07:00
|
|
|
else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
|
2010-07-11 19:01:18 +07:00
|
|
|
info->data = RXH_IP_SRC | RXH_IP_DST;
|
|
|
|
break;
|
|
|
|
case UDP_V6_FLOW:
|
2014-11-21 14:22:05 +07:00
|
|
|
if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F) &&
|
|
|
|
(v & FW_RSS_VI_CONFIG_CMD_UDPEN_F))
|
2010-07-11 19:01:18 +07:00
|
|
|
info->data = RXH_IP_SRC | RXH_IP_DST |
|
|
|
|
RXH_L4_B_0_1 | RXH_L4_B_2_3;
|
2014-11-21 14:22:05 +07:00
|
|
|
else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
|
2010-07-11 19:01:18 +07:00
|
|
|
info->data = RXH_IP_SRC | RXH_IP_DST;
|
|
|
|
break;
|
|
|
|
case SCTP_V6_FLOW:
|
|
|
|
case AH_ESP_V6_FLOW:
|
|
|
|
case IPV6_FLOW:
|
2014-11-21 14:22:05 +07:00
|
|
|
if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F)
|
2010-07-11 19:01:18 +07:00
|
|
|
info->data = RXH_IP_SRC | RXH_IP_DST;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
2010-07-11 19:01:17 +07:00
|
|
|
case ETHTOOL_GRXRINGS:
|
2010-07-11 19:01:18 +07:00
|
|
|
info->data = pi->nqsets;
|
2010-07-11 19:01:17 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
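
/* Usage note (added): get_rxnfc() answers ETHTOOL_GRXFH queries (e.g.
 * "ethtool -n ethX rx-flow-hash tcp4"), reporting which header fields feed
 * the RSS hash for each flow type based on the VI's rss_mode, and
 * ETHTOOL_GRXRINGS queries with the port's queue-set count.
 */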
|
|
|
|
|
2012-01-04 19:59:49 +07:00
|
|
|
static const struct ethtool_ops cxgb_ethtool_ops = {
|
2010-04-01 22:28:26 +07:00
|
|
|
.get_settings = get_settings,
|
|
|
|
.set_settings = set_settings,
|
|
|
|
.get_drvinfo = get_drvinfo,
|
|
|
|
.get_msglevel = get_msglevel,
|
|
|
|
.set_msglevel = set_msglevel,
|
|
|
|
.get_ringparam = get_sge_param,
|
|
|
|
.set_ringparam = set_sge_param,
|
|
|
|
.get_coalesce = get_coalesce,
|
|
|
|
.set_coalesce = set_coalesce,
|
|
|
|
.get_eeprom_len = get_eeprom_len,
|
|
|
|
.get_eeprom = get_eeprom,
|
|
|
|
.set_eeprom = set_eeprom,
|
|
|
|
.get_pauseparam = get_pauseparam,
|
|
|
|
.set_pauseparam = set_pauseparam,
|
|
|
|
.get_link = ethtool_op_get_link,
|
|
|
|
.get_strings = get_strings,
|
2011-04-09 03:06:25 +07:00
|
|
|
.set_phys_id = identify_port,
|
2010-04-01 22:28:26 +07:00
|
|
|
.nway_reset = restart_autoneg,
|
|
|
|
.get_sset_count = get_sset_count,
|
|
|
|
.get_ethtool_stats = get_stats,
|
|
|
|
.get_regs_len = get_regs_len,
|
|
|
|
.get_regs = get_regs,
|
|
|
|
.get_wol = get_wol,
|
|
|
|
.set_wol = set_wol,
|
2010-07-11 19:01:17 +07:00
|
|
|
.get_rxnfc = get_rxnfc,
|
2011-12-15 20:55:01 +07:00
|
|
|
.get_rxfh_indir_size = get_rss_table_size,
|
2014-05-15 07:25:27 +07:00
|
|
|
.get_rxfh = get_rss_table,
|
|
|
|
.set_rxfh = set_rss_table,
|
2010-04-01 22:28:26 +07:00
|
|
|
.flash_device = set_flash,
|
|
|
|
};

static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}

/*
 * upper-layer driver support
 */

/*
 * Allocate an active-open TID and set it to the supplied value.
 */
int cxgb4_alloc_atid(struct tid_info *t, void *data)
{
	int atid = -1;

	spin_lock_bh(&t->atid_lock);
	if (t->afree) {
		union aopen_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->data = data;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}
EXPORT_SYMBOL(cxgb4_alloc_atid);
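
/* Usage sketch (added, illustrative only): an upper-layer offload driver
 * typically pairs these exports roughly as
 *
 *	atid = cxgb4_alloc_atid(&adap->tids, my_conn);
 *	if (atid < 0)
 *		... fail or fall back ...
 *	... send the active-open request carrying atid ...
 *	cxgb4_free_atid(&adap->tids, atid);	// once the TID is retired
 *
 * where my_conn is whatever per-connection state the caller wants handed
 * back on later TID lookups.
 */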

/*
 * Release an active-open TID.
 */
void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
{
	union aopen_entry *p = &t->atid_tab[atid - t->atid_base];

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);
}
EXPORT_SYMBOL(cxgb4_free_atid);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Allocate a server TID and set it to the supplied value.
|
|
|
|
*/
|
|
|
|
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
|
|
|
|
{
|
|
|
|
int stid;
|
|
|
|
|
|
|
|
spin_lock_bh(&t->stid_lock);
|
|
|
|
if (family == PF_INET) {
|
|
|
|
stid = find_first_zero_bit(t->stid_bmap, t->nstids);
|
|
|
|
if (stid < t->nstids)
|
|
|
|
__set_bit(stid, t->stid_bmap);
|
|
|
|
else
|
|
|
|
stid = -1;
|
|
|
|
} else {
|
|
|
|
stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
|
|
|
|
if (stid < 0)
|
|
|
|
stid = -1;
|
|
|
|
}
|
|
|
|
if (stid >= 0) {
|
|
|
|
t->stid_tab[stid].data = data;
|
|
|
|
stid += t->stid_base;
|
2013-12-18 18:08:22 +07:00
|
|
|
/* IPv6 requires max of 520 bits or 16 cells in TCAM
|
|
|
|
* This is equivalent to 4 TIDs. With CLIP enabled it
|
|
|
|
* needs 2 TIDs.
|
|
|
|
*/
|
|
|
|
if (family == PF_INET)
|
|
|
|
t->stids_in_use++;
|
|
|
|
else
|
|
|
|
t->stids_in_use += 4;
|
2010-04-01 22:28:26 +07:00
|
|
|
}
|
|
|
|
spin_unlock_bh(&t->stid_lock);
|
|
|
|
return stid;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(cxgb4_alloc_stid);

/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
				t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);

/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 2);
	t->stid_tab[stid].data = NULL;
	if (family == PF_INET)
		t->stids_in_use--;
	else
		t->stids_in_use -= 4;
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);

/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;
		p = (void *)p - chan;

		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
{
	void *old;
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	old = t->tid_tab[tid];
	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		t->tid_tab[tid] = NULL;
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
	if (old)
		atomic_dec(&t->tids_in_use);
}
EXPORT_SYMBOL(cxgb4_remove_tid);
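
/* Sketch of the deferred-release encoding used above (illustration only):
 * a pending entry is the address of a tid_tab[] slot with the Tx channel
 * OR'ed into its low two bits, so one tagged pointer carries both facts:
 *
 *	void **p = &t->tid_tab[tid];
 *	head = (void **)((uintptr_t)p | chan);	// chan is 0..3
 *	...
 *	chan = (uintptr_t)head & 3;		// recover the channel
 *	p    = (void *)head - chan;		// recover the slot address
 *	tid  = p - adap->tids.tid_tab;		// recover the TID
 */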

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	size_t size;
	unsigned int stid_bmap_size;
	unsigned int natids = t->natids;
	struct adapter *adap = container_of(t, struct adapter, tids);

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       t->nftids * sizeof(*t->ftid_tab) +
	       t->nsftids * sizeof(*t->ftid_tab);

	t->tid_tab = t4_alloc_mem(size);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	t->stids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
	/* Reserve stid 0 for T4/T5 adapters */
	if (!t->stid_base &&
	    (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
		__set_bit(0, t->stid_bmap);

	return 0;
}
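
/* The single t4_alloc_mem() call above carves one contiguous block into all
 * of the TID tables, laid out back to back (element counts, not bytes):
 *
 *	tid_tab[ntids]
 *	atid_tab[natids]
 *	stid_tab[nstids + nsftids]
 *	stid_bmap[BITS_TO_LONGS(nstids + nsftids)]
 *	ftid_tab[nftids + nsftids]
 *
 * so freeing tid_tab releases every table at once.
 */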

/**
 *	cxgb4_create_server - create an IP server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IP address to bind server to
 *	@sport: the server's TCP port
 *	@vlan: the VLAN header information
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IP server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);
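
/* Illustrative usage sketch (hypothetical values): a ULD listening on
 * 0.0.0.0:80 and steering its messages to RX queue 0 would do roughly
 *
 *	stid = cxgb4_alloc_stid(&adap->tids, PF_INET, my_listener);
 *	if (stid >= 0)
 *		err = cxgb4_create_server(netdev, stid, htonl(INADDR_ANY),
 *					  htons(80), 0, 0);
 *
 * where "my_listener", the port and the queue are placeholders.  A negative
 * return is an error; otherwise it is one of the NET_XMIT_* codes from
 * queueing the firmware request.
 */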

/*	cxgb4_create_server6 - create an IPv6 server
 *	@dev: the device
 *	@stid: the server TID
 *	@sip: local IPv6 address to bind server to
 *	@sport: the server's TCP port
 *	@queue: queue to direct messages from this server to
 *
 *	Create an IPv6 server for the given port and address.
 *	Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);

int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);
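
/* Illustrative teardown sketch: a listener created with
 * cxgb4_create_server() or cxgb4_create_server6() is normally undone by the
 * same ULD with something like
 *
 *	cxgb4_remove_server(netdev, stid, queue, false);   // true for IPv6
 *	cxgb4_free_stid(&adap->tids, stid, PF_INET);       // PF_INET6 for IPv6
 *
 * with the stid freed only after the close has been issued; the exact
 * ordering is up to the ULD.
 */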

/**
 *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 *	@mtus: the HW MTU table
 *	@mtu: the target MTU
 *	@idx: index of selected entry in the MTU table
 *
 *	Returns the index and the value in the HW MTU table that is closest to
 *	but does not exceed @mtu, unless @mtu is smaller than any value in the
 *	table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
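
/* Worked example (hypothetical table contents): if mtus[] began
 * { 576, 1024, 1280, 1500, 9000, ... }, then cxgb4_best_mtu(mtus, 1400, &idx)
 * advances while the *next* entry still fits, stops at 1280 because
 * 1500 > 1400, sets idx = 2 and returns 1280.  A target smaller than every
 * entry, e.g. 500, yields mtus[0].
 */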

/**
 *	cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 *	@mtus: the HW MTU table
 *	@header_size: Header Size
 *	@data_size_max: maximum Data Segment Size
 *	@data_size_align: desired Data Segment Size Alignment (2^N)
 *	@mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 *	Similar to cxgb4_best_mtu() but instead of searching the Hardware
 *	MTU Table based solely on a Maximum MTU parameter, we break that
 *	parameter up into a Header Size and Maximum Data Segment Size, and
 *	provide a desired Data Segment Size Alignment.  If we find an MTU in
 *	the Hardware MTU Table which will result in a Data Segment Size with
 *	the requested alignment _and_ that MTU isn't "too far" from the
 *	closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table.  Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Length alignment and that's "not far" from the largest MTU which is
	 * less than or equal to the maximum MTU, then use that.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back.  Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
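
/* Worked example (hypothetical MTU table entries): with header_size = 48,
 * data_size_max = 1460 and data_size_align = 16, max_mtu is 1508.  If the
 * table holds ..., 1488, 1500, 2002, ..., the scan stops at 1500 because
 * 2002 exceeds max_mtu.  1500 - 48 = 1452 is not a multiple of 16, but
 * 1488 - 48 = 1440 is, and 1488 sits only one slot below the closest fit,
 * so 1488 is returned instead of 1500.
 */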

/**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
 *
 *	Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
	struct adapter *adap = netdev2adap(dev);
	u32 v1, v2, lp_count, hp_count;

	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
	if (is_t4(adap->params.chip)) {
		lp_count = LP_COUNT_G(v1);
		hp_count = HP_COUNT_G(v1);
	} else {
		lp_count = LP_COUNT_T5_G(v1);
		hp_count = HP_COUNT_T5_G(v2);
	}
	return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);

/**
 *	cxgb4_port_viid - get the VI id of a port
 *	@dev: the net device for the port
 *
 *	Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 *	cxgb4_port_idx - get the index of a port
 *	@dev: the net device for the port
 *
 *	Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);

void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
		     HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);

int cxgb4_flush_eq_cache(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);
	int ret;

	ret = t4_fwaddrspace_write(adap, adap->mbox,
				   0xe1000000 + SGE_CTXT_CMD_A, 0x20000000);
	return ret;
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);

static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
	u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
	__be64 indices;
	int ret;

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
			   sizeof(indices), (__be32 *)&indices,
			   T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	if (!ret) {
		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
	}
	return ret;
}

int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;

	if (pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(qid) | val);
	}
out:
	return ret;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
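
/* Example of the producer-index catch-up above (illustrative numbers): for
 * a queue of size 1024 with the hardware at hw_pidx = 1020 and the caller
 * at pidx = 4, the ring has wrapped, so delta = 1024 - 1020 + 4 = 8 and a
 * single doorbell write advertises all eight outstanding descriptors.
 */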

void cxgb4_disable_db_coalescing(struct net_device *dev)
{
	struct adapter *adap;

	adap = netdev2adap(dev);
	t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F,
			 NOCOALESCE_F);
}
EXPORT_SYMBOL(cxgb4_disable_db_coalescing);

void cxgb4_enable_db_coalescing(struct net_device *dev)
{
	struct adapter *adap;

	adap = netdev2adap(dev);
	t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0);
}
EXPORT_SYMBOL(cxgb4_enable_db_coalescing);

int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
	struct adapter *adap;
	u32 offset, memtype, memaddr;
	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
	u32 edc0_end, edc1_end, mc0_end, mc1_end;
	int ret;

	adap = netdev2adap(dev);

	offset = ((stag >> 8) * 32) + adap->vres.stag.start;

	/* Figure out where the offset lands in the Memory Type/Address scheme.
	 * This code assumes that the memory is laid out starting at offset 0
	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
	 * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
	 * MC0, and some have both MC0 and MC1.
	 */
	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
	edc0_size = EDRAM0_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
	edc1_size = EDRAM1_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
	mc0_size = EXT_MEM0_SIZE_G(size) << 20;

	edc0_end = edc0_size;
	edc1_end = edc0_end + edc1_size;
	mc0_end = edc1_end + mc0_size;

	if (offset < edc0_end) {
		memtype = MEM_EDC0;
		memaddr = offset;
	} else if (offset < edc1_end) {
		memtype = MEM_EDC1;
		memaddr = offset - edc0_end;
	} else {
		if (offset < mc0_end) {
			memtype = MEM_MC0;
			memaddr = offset - edc1_end;
		} else if (is_t4(adap->params.chip)) {
			/* T4 only has a single memory channel */
			goto err;
		} else {
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
			mc1_end = mc0_end + mc1_size;
			if (offset < mc1_end) {
				memtype = MEM_MC1;
				memaddr = offset - mc0_end;
			} else {
				/* offset beyond the end of any memory */
				goto err;
			}
		}
	}

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	return ret;

err:
	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
		stag, offset);
	return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
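
/* Example of the address decode above (hypothetical memory sizes): with
 * 256MB of EDC0, 256MB of EDC1 and 2GB of MC0, the boundaries become
 * edc0_end = 0x10000000, edc1_end = 0x20000000 and mc0_end = 0xa0000000.
 * An offset of 0x18000000 then falls in EDC1 and is read at
 * memaddr = 0x18000000 - 0x10000000 = 0x08000000 within that memory.
 */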

u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
	u32 hi, lo;
	struct adapter *adap;

	adap = netdev2adap(dev);
	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
	hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

	return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);

int cxgb4_bar2_sge_qregs(struct net_device *dev,
			 unsigned int qid,
			 enum cxgb4_bar2_qtype qtype,
			 u64 *pbar2_qoffset,
			 unsigned int *pbar2_qid)
{
	return cxgb4_t4_bar2_sge_qregs(netdev2adap(dev),
				       qid,
				       (qtype == CXGB4_BAR2_QTYPE_EGRESS
					? T4_BAR2_QTYPE_EGRESS
					: T4_BAR2_QTYPE_INGRESS),
				       pbar2_qoffset,
				       pbar2_qid);
}
EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);

static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (netdev->priv_flags & IFF_802_1Q_VLAN)
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};

static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v1, v2, lp_count, hp_count;

	do {
		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
		if (is_t4(adap->params.chip)) {
			lp_count = LP_COUNT_G(v1);
			hp_count = HP_COUNT_G(v1);
		} else {
			lp_count = LP_COUNT_T5_G(v1);
			hp_count = HP_COUNT_T5_G(v2);
		}

		if (lp_count == 0 && hp_count == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
	} while (1);
}

/* Doorbell drop avoidance: when the SGE doorbell FIFO threatens to overflow,
 * the interrupt handler calls directly into the ULDs so that user doorbell
 * writes can be stopped as quickly as possible.  While doorbells are
 * disabled, the driver accumulates the doorbell increments for each queue
 * and, once re-enabled, rings each queue's doorbell once with the
 * accumulated count, which keeps the FIFO from being flooded during
 * recovery.
 */
static void disable_txq_db(struct sge_txq *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
}

static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}
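
/* Sketch of the accumulate-then-flush behaviour (illustrative numbers):
 * while a queue is db_disabled, each would-be doorbell ring is added to
 * q->db_pidx_inc instead of hitting the register.  If three sends of 2, 5
 * and 1 descriptors happen while disabled, db_pidx_inc reaches 8 and
 * enable_txq_db() issues a single PIDX_V(8) doorbell for the queue, so the
 * doorbell FIFO is not flooded when doorbells are re-enabled.
 */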

static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
	for_each_ofldrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
	for_each_port(adap, i)
		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}

static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	if (adap->uld_handle[CXGB4_ULD_RDMA])
		ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
				cmd);
}

static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	drain_db_fifo(adap, dbfifo_drain_delay);
	enable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
			 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
			 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
}
|
|
|
|
|
|
|
|
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
|
|
|
|
{
|
|
|
|
u16 hw_pidx, hw_cidx;
|
|
|
|
int ret;
|
|
|
|
|
2014-03-14 23:22:08 +07:00
|
|
|
spin_lock_irq(&q->db_lock);
|
2012-05-18 16:59:26 +07:00
|
|
|
ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
|
|
|
|
if (ret)
|
|
|
|
goto out;
|
|
|
|
if (q->db_pidx != hw_pidx) {
|
|
|
|
u16 delta;
|
2015-01-05 18:00:43 +07:00
|
|
|
u32 val;
|
2012-05-18 16:59:26 +07:00
|
|
|
|
|
|
|
if (q->db_pidx >= hw_pidx)
|
|
|
|
delta = q->db_pidx - hw_pidx;
|
|
|
|
else
|
|
|
|
delta = q->size - hw_pidx + q->db_pidx;
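/* Example: with q->size = 1024, hw_pidx = 1000 and db_pidx = 16 the
 * producer index has wrapped, so delta = 1024 - 1000 + 16 = 40 doorbell
 * increments still have to be replayed to hardware below.
 */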
|
2015-01-05 18:00:43 +07:00
|
|
|
|
|
|
|
if (is_t4(adap->params.chip))
|
|
|
|
val = PIDX_V(delta);
|
|
|
|
else
|
|
|
|
val = PIDX_T5_V(delta);
|
2012-05-18 16:59:26 +07:00
|
|
|
wmb();
|
2015-01-05 18:00:43 +07:00
|
|
|
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
|
|
|
|
QID_V(q->cntxt_id) | val);
|
2012-05-18 16:59:26 +07:00
|
|
|
}
|
|
|
|
out:
|
|
|
|
q->db_disabled = 0;
|
2014-03-14 23:22:08 +07:00
|
|
|
q->db_pidx_inc = 0;
|
|
|
|
spin_unlock_irq(&q->db_lock);
|
2012-05-18 16:59:26 +07:00
|
|
|
if (ret)
|
|
|
|
CH_WARN(adap, "DB drop recovery failed.\n");
|
|
|
|
}
|
|
|
|
static void recover_all_queues(struct adapter *adap)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for_each_ethrxq(&adap->sge, i)
|
|
|
|
sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
|
|
|
|
for_each_ofldrxq(&adap->sge, i)
|
|
|
|
sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
|
|
|
|
for_each_port(adap, i)
|
|
|
|
sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
|
|
|
|
}
|
|
|
|
|
2012-05-18 16:59:24 +07:00
|
|
|
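/* Work-queue bottom half for a doorbell drop.  On T4 parts we drain the DB
 * FIFO, tell the RDMA ULD a doorbell was dropped, resynchronize every TX
 * queue's producer index with hardware (recover_all_queues), re-enable the
 * queue doorbells and report the FIFO empty again.  On T5 and later the
 * dropped doorbell is simply re-issued through the BAR2 user doorbell
 * region and BAR2 write-combining is re-enabled.
 */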
static void process_db_drop(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct adapter *adap;
|
|
|
|
|
2012-05-18 16:59:26 +07:00
|
|
|
adap = container_of(work, struct adapter, db_drop_task);
|
2012-05-18 16:59:24 +07:00
|
|
|
|
2013-12-03 18:35:56 +07:00
|
|
|
if (is_t4(adap->params.chip)) {
|
2014-03-14 23:22:08 +07:00
|
|
|
drain_db_fifo(adap, dbfifo_drain_delay);
|
2013-03-14 12:08:52 +07:00
|
|
|
notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
|
2014-03-14 23:22:08 +07:00
|
|
|
drain_db_fifo(adap, dbfifo_drain_delay);
|
2013-03-14 12:08:52 +07:00
|
|
|
recover_all_queues(adap);
|
2014-03-14 23:22:08 +07:00
|
|
|
drain_db_fifo(adap, dbfifo_drain_delay);
|
2013-03-14 12:08:52 +07:00
|
|
|
enable_dbs(adap);
|
2014-03-14 23:22:08 +07:00
|
|
|
notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
|
2013-03-14 12:08:52 +07:00
|
|
|
} else {
|
|
|
|
u32 dropped_db = t4_read_reg(adap, 0x010ac);
|
|
|
|
u16 qid = (dropped_db >> 15) & 0x1ffff;
|
|
|
|
u16 pidx_inc = dropped_db & 0x1fff;
|
2014-12-03 21:02:53 +07:00
|
|
|
u64 bar2_qoffset;
|
|
|
|
unsigned int bar2_qid;
|
|
|
|
int ret;
|
2013-03-14 12:08:52 +07:00
|
|
|
|
2014-12-10 15:48:02 +07:00
|
|
|
ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
|
2014-12-03 21:02:53 +07:00
|
|
|
&bar2_qoffset, &bar2_qid);
|
|
|
|
if (ret)
|
|
|
|
dev_err(adap->pdev_dev, "doorbell drop recovery: "
|
|
|
|
"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
|
|
|
|
else
|
2015-01-05 18:00:43 +07:00
|
|
|
writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
|
2014-12-03 21:02:53 +07:00
|
|
|
adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
|
2013-03-14 12:08:52 +07:00
|
|
|
|
|
|
|
/* Re-enable BAR2 WC */
|
|
|
|
t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
|
|
|
|
}
|
|
|
|
|
2015-01-05 18:00:44 +07:00
|
|
|
t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
|
2012-05-18 16:59:24 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
void t4_db_full(struct adapter *adap)
|
|
|
|
{
|
2013-12-03 18:35:56 +07:00
|
|
|
if (is_t4(adap->params.chip)) {
|
2014-03-14 23:22:08 +07:00
|
|
|
disable_dbs(adap);
|
|
|
|
notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
|
2015-01-05 18:00:43 +07:00
|
|
|
t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
|
|
|
|
DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
|
2014-08-21 03:44:06 +07:00
|
|
|
queue_work(adap->workq, &adap->db_full_task);
|
2013-03-14 12:08:52 +07:00
|
|
|
}
|
2012-05-18 16:59:24 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
void t4_db_dropped(struct adapter *adap)
|
|
|
|
{
|
2014-03-14 23:22:08 +07:00
|
|
|
if (is_t4(adap->params.chip)) {
|
|
|
|
disable_dbs(adap);
|
|
|
|
notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
|
|
|
|
}
|
2014-08-21 03:44:06 +07:00
|
|
|
queue_work(adap->workq, &adap->db_drop_task);
|
2012-05-18 16:59:24 +07:00
|
|
|
}
|
|
|
|
|
2010-04-01 22:28:26 +07:00
|
|
|
static void uld_attach(struct adapter *adap, unsigned int uld)
|
|
|
|
{
|
|
|
|
void *handle;
|
|
|
|
struct cxgb4_lld_info lli;
|
2012-12-10 16:30:53 +07:00
|
|
|
unsigned short i;
|
2010-04-01 22:28:26 +07:00
|
|
|
|
|
|
|
lli.pdev = adap->pdev;
|
2014-06-27 20:53:47 +07:00
|
|
|
lli.pf = adap->fn;
|
2010-04-01 22:28:26 +07:00
|
|
|
lli.l2t = adap->l2t;
|
|
|
|
lli.tids = &adap->tids;
|
|
|
|
lli.ports = adap->port;
|
|
|
|
lli.vr = &adap->vres;
|
|
|
|
lli.mtus = adap->params.mtus;
|
|
|
|
if (uld == CXGB4_ULD_RDMA) {
|
|
|
|
lli.rxq_ids = adap->sge.rdma_rxq;
|
2014-06-06 23:10:42 +07:00
|
|
|
lli.ciq_ids = adap->sge.rdma_ciq;
|
2010-04-01 22:28:26 +07:00
|
|
|
lli.nrxq = adap->sge.rdmaqs;
|
2014-06-06 23:10:42 +07:00
|
|
|
lli.nciq = adap->sge.rdmaciqs;
|
2010-04-01 22:28:26 +07:00
|
|
|
} else if (uld == CXGB4_ULD_ISCSI) {
|
|
|
|
lli.rxq_ids = adap->sge.ofld_rxq;
|
|
|
|
lli.nrxq = adap->sge.ofldqsets;
|
|
|
|
}
|
|
|
|
lli.ntxq = adap->sge.ofldqsets;
|
|
|
|
lli.nchan = adap->params.nports;
|
|
|
|
lli.nports = adap->params.nports;
|
|
|
|
lli.wr_cred = adap->params.ofldq_wr_cred;
|
2013-12-03 18:35:56 +07:00
|
|
|
lli.adapter_type = adap->params.chip;
|
2015-01-05 18:00:46 +07:00
|
|
|
lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
|
2014-07-14 23:04:54 +07:00
|
|
|
lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
|
2014-12-03 21:02:53 +07:00
|
|
|
lli.udb_density = 1 << adap->params.sge.eq_qpp;
|
|
|
|
lli.ucq_density = 1 << adap->params.sge.iq_qpp;
|
2013-12-18 18:08:23 +07:00
|
|
|
lli.filt_mode = adap->params.tp.vlan_pri_map;
|
2012-12-10 16:30:53 +07:00
|
|
|
/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
|
|
|
|
for (i = 0; i < NCHAN; i++)
|
|
|
|
lli.tx_modq[i] = i;
|
2015-01-05 18:00:43 +07:00
|
|
|
lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
|
|
|
|
lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
|
2010-04-01 22:28:26 +07:00
|
|
|
lli.fw_vers = adap->params.fw_vers;
|
2012-05-18 16:59:26 +07:00
|
|
|
lli.dbfifo_int_thresh = dbfifo_int_thresh;
|
2014-07-14 23:04:51 +07:00
|
|
|
lli.sge_ingpadboundary = adap->sge.fl_align;
|
|
|
|
lli.sge_egrstatuspagesize = adap->sge.stat_len;
|
2012-12-10 16:30:53 +07:00
|
|
|
lli.sge_pktshift = adap->sge.pktshift;
|
|
|
|
lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
|
2014-07-14 23:04:52 +07:00
|
|
|
lli.max_ordird_qp = adap->params.max_ordird_qp;
|
|
|
|
lli.max_ird_adapter = adap->params.max_ird_adapter;
|
2014-02-18 19:26:12 +07:00
|
|
|
lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
|
2010-04-01 22:28:26 +07:00
|
|
|
|
|
|
|
handle = ulds[uld].add(&lli);
|
|
|
|
if (IS_ERR(handle)) {
|
|
|
|
dev_warn(adap->pdev_dev,
|
|
|
|
"could not attach to the %s driver, error %ld\n",
|
|
|
|
uld_str[uld], PTR_ERR(handle));
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
adap->uld_handle[uld] = handle;
|
|
|
|
|
|
|
|
if (!netevent_registered) {
|
|
|
|
register_netevent_notifier(&cxgb4_netevent_nb);
|
|
|
|
netevent_registered = true;
|
|
|
|
}
|
2010-05-18 17:07:13 +07:00
|
|
|
|
|
|
|
if (adap->flags & FULL_INIT_DONE)
|
|
|
|
ulds[uld].state_change(handle, CXGB4_STATE_UP);
|
2010-04-01 22:28:26 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static void attach_ulds(struct adapter *adap)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
2013-07-04 17:40:46 +07:00
|
|
|
spin_lock(&adap_rcu_lock);
|
|
|
|
list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
|
|
|
|
spin_unlock(&adap_rcu_lock);
|
|
|
|
|
2010-04-01 22:28:26 +07:00
|
|
|
mutex_lock(&uld_mutex);
|
|
|
|
list_add_tail(&adap->list_node, &adapter_list);
|
|
|
|
for (i = 0; i < CXGB4_ULD_MAX; i++)
|
|
|
|
if (ulds[i].add)
|
|
|
|
uld_attach(adap, i);
|
|
|
|
mutex_unlock(&uld_mutex);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void detach_ulds(struct adapter *adap)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
mutex_lock(&uld_mutex);
|
|
|
|
list_del(&adap->list_node);
|
|
|
|
for (i = 0; i < CXGB4_ULD_MAX; i++)
|
|
|
|
if (adap->uld_handle[i]) {
|
|
|
|
ulds[i].state_change(adap->uld_handle[i],
|
|
|
|
CXGB4_STATE_DETACH);
|
|
|
|
adap->uld_handle[i] = NULL;
|
|
|
|
}
|
|
|
|
if (netevent_registered && list_empty(&adapter_list)) {
|
|
|
|
unregister_netevent_notifier(&cxgb4_netevent_nb);
|
|
|
|
netevent_registered = false;
|
|
|
|
}
|
|
|
|
mutex_unlock(&uld_mutex);
|
2013-07-04 17:40:46 +07:00
|
|
|
|
|
|
|
spin_lock(&adap_rcu_lock);
|
|
|
|
list_del_rcu(&adap->rcu_node);
|
|
|
|
spin_unlock(&adap_rcu_lock);
|
2010-04-01 22:28:26 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
mutex_lock(&uld_mutex);
|
|
|
|
for (i = 0; i < CXGB4_ULD_MAX; i++)
|
|
|
|
if (adap->uld_handle[i])
|
|
|
|
ulds[i].state_change(adap->uld_handle[i], new_state);
|
|
|
|
mutex_unlock(&uld_mutex);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* cxgb4_register_uld - register an upper-layer driver
|
|
|
|
* @type: the ULD type
|
|
|
|
* @p: the ULD methods
|
|
|
|
*
|
|
|
|
* Registers an upper-layer driver with this driver and notifies the ULD
|
|
|
|
* about any presently available devices that support its type. Returns
|
|
|
|
* %-EBUSY if a ULD of the same type is already registered.
|
|
|
|
*/
|
|
|
|
int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
|
|
|
|
{
|
|
|
|
int ret = 0;
|
|
|
|
struct adapter *adap;
|
|
|
|
|
|
|
|
if (type >= CXGB4_ULD_MAX)
|
|
|
|
return -EINVAL;
|
|
|
|
mutex_lock(&uld_mutex);
|
|
|
|
if (ulds[type].add) {
|
|
|
|
ret = -EBUSY;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
ulds[type] = *p;
|
|
|
|
list_for_each_entry(adap, &adapter_list, list_node)
|
|
|
|
uld_attach(adap, type);
|
|
|
|
out: mutex_unlock(&uld_mutex);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(cxgb4_register_uld);
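/* Illustrative only -- a minimal sketch of how an upper-layer driver might
 * hook into the registration interface above.  The .add and .state_change
 * callbacks mirror the ones this file invokes; the "my_uld_*" names, the
 * my_uld_dev structure and the choice of CXGB4_ULD_ISCSI are hypothetical,
 * and the prototypes are sketched from how the callbacks are used here, so
 * they may not match cxgb4_uld.h exactly.
 *
 *	static void *my_uld_add(const struct cxgb4_lld_info *lld)
 *	{
 *		struct my_uld_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *
 *		if (!dev)
 *			return ERR_PTR(-ENOMEM);
 *		dev->lldi = *lld;
 *		return dev;
 *	}
 *
 *	static int my_uld_state_change(void *handle, enum cxgb4_state state)
 *	{
 *		if (state == CXGB4_STATE_UP)
 *			my_uld_start_queues(handle);
 *		return 0;
 *	}
 *
 *	static const struct cxgb4_uld_info my_uld_info = {
 *		.add		= my_uld_add,
 *		.state_change	= my_uld_state_change,
 *	};
 *
 * and then, from the ULD's module init / exit:
 *
 *	err = cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
 *	...
 *	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
 */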
|
|
|
|
|
|
|
|
/**
|
|
|
|
* cxgb4_unregister_uld - unregister an upper-layer driver
|
|
|
|
* @type: the ULD type
|
|
|
|
*
|
|
|
|
* Unregisters an existing upper-layer driver.
|
|
|
|
*/
|
|
|
|
int cxgb4_unregister_uld(enum cxgb4_uld type)
|
|
|
|
{
|
|
|
|
struct adapter *adap;
|
|
|
|
|
|
|
|
if (type >= CXGB4_ULD_MAX)
|
|
|
|
return -EINVAL;
|
|
|
|
mutex_lock(&uld_mutex);
|
|
|
|
list_for_each_entry(adap, &adapter_list, list_node)
|
|
|
|
adap->uld_handle[type] = NULL;
|
|
|
|
ulds[type].add = NULL;
|
|
|
|
mutex_unlock(&uld_mutex);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(cxgb4_unregister_uld);
|
|
|
|
|
2014-10-15 10:07:22 +07:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
2015-01-15 06:17:34 +07:00
|
|
|
static int cxgb4_inet6addr_handler(struct notifier_block *this,
|
|
|
|
unsigned long event, void *data)
|
2013-07-04 17:40:46 +07:00
|
|
|
{
|
2015-01-15 06:17:34 +07:00
|
|
|
struct inet6_ifaddr *ifa = data;
|
|
|
|
struct net_device *event_dev = ifa->idev->dev;
|
|
|
|
const struct device *parent = NULL;
|
|
|
|
#if IS_ENABLED(CONFIG_BONDING)
|
2013-07-04 17:40:46 +07:00
|
|
|
struct adapter *adap;
|
2015-01-15 06:17:34 +07:00
|
|
|
#endif
|
|
|
|
if (event_dev->priv_flags & IFF_802_1Q_VLAN)
|
|
|
|
event_dev = vlan_dev_real_dev(event_dev);
|
|
|
|
#if IS_ENABLED(CONFIG_BONDING)
|
|
|
|
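/* For a bond master we do not know which slave sits on which adapter, so
 * apply the CLIP update to every adapter on the list; port[0] is used only
 * as a convenient net_device through which to reach each adapter's CLIP
 * table.
 */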
if (event_dev->flags & IFF_MASTER) {
|
|
|
|
list_for_each_entry(adap, &adapter_list, list_node) {
|
|
|
|
switch (event) {
|
|
|
|
case NETDEV_UP:
|
|
|
|
cxgb4_clip_get(adap->port[0],
|
|
|
|
(const u32 *)ifa, 1);
|
|
|
|
break;
|
|
|
|
case NETDEV_DOWN:
|
|
|
|
cxgb4_clip_release(adap->port[0],
|
|
|
|
(const u32 *)ifa, 1);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return NOTIFY_OK;
|
|
|
|
}
|
|
|
|
#endif
|
2013-07-04 17:40:46 +07:00
|
|
|
|
2015-01-15 06:17:34 +07:00
|
|
|
if (event_dev)
|
|
|
|
parent = event_dev->dev.parent;
|
2013-07-04 17:40:46 +07:00
|
|
|
|
2015-01-15 06:17:34 +07:00
|
|
|
if (parent && parent->driver == &cxgb4_driver.driver) {
|
2013-07-04 17:40:46 +07:00
|
|
|
switch (event) {
|
|
|
|
case NETDEV_UP:
|
2015-01-15 06:17:34 +07:00
|
|
|
cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
|
2013-07-04 17:40:46 +07:00
|
|
|
break;
|
|
|
|
case NETDEV_DOWN:
|
2015-01-15 06:17:34 +07:00
|
|
|
cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
|
2013-07-04 17:40:46 +07:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2015-01-15 06:17:34 +07:00
|
|
|
return NOTIFY_OK;
|
2013-07-04 17:40:46 +07:00
|
|
|
}
|
|
|
|
|
2015-01-15 06:17:34 +07:00
|
|
|
static bool inet6addr_registered;
|
2013-07-04 17:40:46 +07:00
|
|
|
static struct notifier_block cxgb4_inet6addr_notifier = {
|
|
|
|
.notifier_call = cxgb4_inet6addr_handler
|
|
|
|
};
|
|
|
|
|
|
|
|
static void update_clip(const struct adapter *adap)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
struct net_device *dev;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
|
|
|
|
for (i = 0; i < MAX_NPORTS; i++) {
|
|
|
|
dev = adap->port[i];
|
|
|
|
ret = 0;
|
|
|
|
|
|
|
|
if (dev)
|
2015-01-15 06:17:34 +07:00
|
|
|
ret = cxgb4_update_root_dev_clip(dev);
|
2013-07-04 17:40:46 +07:00
|
|
|
|
|
|
|
if (ret < 0)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
rcu_read_unlock();
|
|
|
|
}
|
2014-10-15 10:07:22 +07:00
|
|
|
#endif /* IS_ENABLED(CONFIG_IPV6) */
|
2013-07-04 17:40:46 +07:00
|
|
|
|
2010-04-01 22:28:26 +07:00
|
|
|
/**
|
|
|
|
* cxgb_up - enable the adapter
|
|
|
|
* @adap: adapter being enabled
|
|
|
|
*
|
|
|
|
* Called when the first port is enabled, this function performs the
|
|
|
|
* actions necessary to make an adapter operational, such as completing
|
|
|
|
* the initialization of HW modules, and enabling interrupts.
|
|
|
|
*
|
|
|
|
* Must be called with the rtnl lock held.
|
|
|
|
*/
|
|
|
|
static int cxgb_up(struct adapter *adap)
|
|
|
|
{
|
2010-05-18 17:07:12 +07:00
|
|
|
int err;
|
2010-04-01 22:28:26 +07:00
|
|
|
|
2010-05-18 17:07:12 +07:00
|
|
|
err = setup_sge_queues(adap);
|
|
|
|
if (err)
|
|
|
|
goto out;
|
|
|
|
err = setup_rss(adap);
|
|
|
|
if (err)
|
|
|
|
goto freeq;
|
2010-04-01 22:28:26 +07:00
|
|
|
|
|
|
|
if (adap->flags & USING_MSIX) {
|
2010-05-18 17:07:12 +07:00
|
|
|
name_msix_vecs(adap);
|
2010-04-01 22:28:26 +07:00
|
|
|
err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
|
|
|
|
adap->msix_info[0].desc, adap);
|
|
|
|
if (err)
|
|
|
|
goto irq_err;
|
|
|
|
|
|
|
|
err = request_msix_queue_irqs(adap);
|
|
|
|
if (err) {
|
|
|
|
free_irq(adap->msix_info[0].vec, adap);
|
|
|
|
goto irq_err;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
|
|
|
|
(adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
|
2010-12-15 04:36:51 +07:00
|
|
|
adap->port[0]->name, adap);
|
2010-04-01 22:28:26 +07:00
|
|
|
if (err)
|
|
|
|
goto irq_err;
|
|
|
|
}
|
|
|
|
enable_rx(adap);
|
|
|
|
t4_sge_start(adap);
|
|
|
|
t4_intr_enable(adap);
|
2010-05-18 17:07:12 +07:00
|
|
|
adap->flags |= FULL_INIT_DONE;
|
2010-04-01 22:28:26 +07:00
|
|
|
notify_ulds(adap, CXGB4_STATE_UP);
|
2014-10-15 10:07:22 +07:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
2013-07-04 17:40:46 +07:00
|
|
|
update_clip(adap);
|
2014-10-15 10:07:22 +07:00
|
|
|
#endif
|
2010-04-01 22:28:26 +07:00
|
|
|
out:
|
|
|
|
return err;
|
|
|
|
irq_err:
|
|
|
|
dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
|
2010-05-18 17:07:12 +07:00
|
|
|
freeq:
|
|
|
|
t4_free_sge_resources(adap);
|
2010-04-01 22:28:26 +07:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void cxgb_down(struct adapter *adapter)
|
|
|
|
{
|
|
|
|
t4_intr_disable(adapter);
|
|
|
|
cancel_work_sync(&adapter->tid_release_task);
|
2012-05-18 16:59:24 +07:00
|
|
|
cancel_work_sync(&adapter->db_full_task);
|
|
|
|
cancel_work_sync(&adapter->db_drop_task);
|
2010-04-01 22:28:26 +07:00
|
|
|
adapter->tid_release_task_busy = false;
|
2010-06-18 17:05:29 +07:00
|
|
|
adapter->tid_release_head = NULL;
|
2010-04-01 22:28:26 +07:00
|
|
|
|
|
|
|
if (adapter->flags & USING_MSIX) {
|
|
|
|
free_msix_queue_irqs(adapter);
|
|
|
|
free_irq(adapter->msix_info[0].vec, adapter);
|
|
|
|
} else
|
|
|
|
free_irq(adapter->pdev->irq, adapter);
|
|
|
|
quiesce_rx(adapter);
|
2010-05-18 17:07:12 +07:00
|
|
|
t4_sge_stop(adapter);
|
|
|
|
t4_free_sge_resources(adapter);
|
|
|
|
adapter->flags &= ~FULL_INIT_DONE;
|
2010-04-01 22:28:26 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* net_device operations
|
|
|
|
*/
|
|
|
|
static int cxgb_open(struct net_device *dev)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
struct port_info *pi = netdev_priv(dev);
|
|
|
|
struct adapter *adapter = pi->adapter;
|
|
|
|
|
2011-01-19 22:29:05 +07:00
|
|
|
netif_carrier_off(dev);
|
|
|
|
|
2010-05-18 17:07:12 +07:00
|
|
|
if (!(adapter->flags & FULL_INIT_DONE)) {
|
|
|
|
err = cxgb_up(adapter);
|
|
|
|
if (err < 0)
|
|
|
|
return err;
|
|
|
|
}
|
2010-04-01 22:28:26 +07:00
|
|
|
|
2010-06-18 17:05:32 +07:00
|
|
|
err = link_start(dev);
|
|
|
|
if (!err)
|
|
|
|
netif_tx_start_all_queues(dev);
|
|
|
|
return err;
|
2010-04-01 22:28:26 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static int cxgb_close(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct port_info *pi = netdev_priv(dev);
|
|
|
|
struct adapter *adapter = pi->adapter;
|
|
|
|
|
|
|
|
netif_tx_stop_all_queues(dev);
|
|
|
|
netif_carrier_off(dev);
|
2010-08-02 20:19:21 +07:00
|
|
|
return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
|
2010-04-01 22:28:26 +07:00
|
|
|
}
|
|
|
|
|
2012-12-10 16:30:52 +07:00
|
|
|
/* Return an error number if the indicated filter isn't writable ...
|
|
|
|
*/
|
|
|
|
static int writable_filter(struct filter_entry *f)
|
|
|
|
{
|
|
|
|
if (f->locked)
|
|
|
|
return -EPERM;
|
|
|
|
if (f->pending)
|
|
|
|
return -EBUSY;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Delete the filter at the specified index (if valid).  This checks for all
|
|
|
|
* the common problems with doing this, such as the filter being locked or
|
|
|
|
* currently pending in another operation, and returns an error if any apply.
|
|
|
|
*/
|
|
|
|
static int delete_filter(struct adapter *adapter, unsigned int fidx)
|
|
|
|
{
|
|
|
|
struct filter_entry *f;
|
|
|
|
int ret;
|
|
|
|
|
2012-12-10 16:30:53 +07:00
|
|
|
if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
|
2012-12-10 16:30:52 +07:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
f = &adapter->tids.ftid_tab[fidx];
|
|
|
|
ret = writable_filter(f);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
if (f->valid)
|
|
|
|
return del_filter_wr(adapter, fidx);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-12-10 16:30:53 +07:00
|
|
|
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
|
2012-12-10 16:30:56 +07:00
|
|
|
__be32 sip, __be16 sport, __be16 vlan,
|
|
|
|
unsigned int queue, unsigned char port, unsigned char mask)
|
2012-12-10 16:30:53 +07:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct filter_entry *f;
|
|
|
|
struct adapter *adap;
|
|
|
|
int i;
|
|
|
|
u8 *val;
|
|
|
|
|
|
|
|
adap = netdev2adap(dev);
|
|
|
|
|
2012-12-10 16:30:55 +07:00
|
|
|
/* Adjust stid to correct filter index */
|
2013-12-18 18:08:21 +07:00
|
|
|
stid -= adap->tids.sftid_base;
|
2012-12-10 16:30:55 +07:00
|
|
|
stid += adap->tids.nftids;
|
|
|
|
|
2012-12-10 16:30:53 +07:00
|
|
|
/* Check to make sure the filter requested is writable ...
|
|
|
|
*/
|
|
|
|
f = &adap->tids.ftid_tab[stid];
|
|
|
|
ret = writable_filter(f);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
/* Clear out any old resources being used by the filter before
|
|
|
|
* we start constructing the new filter.
|
|
|
|
*/
|
|
|
|
if (f->valid)
|
|
|
|
clear_filter(adap, f);
|
|
|
|
|
|
|
|
/* Clear out filter specifications */
|
|
|
|
memset(&f->fs, 0, sizeof(struct ch_filter_specification));
|
|
|
|
f->fs.val.lport = cpu_to_be16(sport);
|
|
|
|
f->fs.mask.lport = ~0;
|
|
|
|
val = (u8 *)&sip;
|
2012-12-10 16:30:56 +07:00
|
|
|
if ((val[0] | val[1] | val[2] | val[3]) != 0) {
|
2012-12-10 16:30:53 +07:00
|
|
|
for (i = 0; i < 4; i++) {
|
|
|
|
f->fs.val.lip[i] = val[i];
|
|
|
|
f->fs.mask.lip[i] = ~0;
|
|
|
|
}
|
2015-01-05 18:00:47 +07:00
|
|
|
if (adap->params.tp.vlan_pri_map & PORT_F) {
|
2012-12-10 16:30:56 +07:00
|
|
|
f->fs.val.iport = port;
|
|
|
|
f->fs.mask.iport = mask;
|
|
|
|
}
|
|
|
|
}
|
2012-12-10 16:30:53 +07:00
|
|
|
|
2015-01-05 18:00:47 +07:00
|
|
|
if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
|
2013-12-18 18:08:20 +07:00
|
|
|
f->fs.val.proto = IPPROTO_TCP;
|
|
|
|
f->fs.mask.proto = ~0;
|
|
|
|
}
|
|
|
|
|
2012-12-10 16:30:53 +07:00
|
|
|
f->fs.dirsteer = 1;
|
|
|
|
f->fs.iq = queue;
|
|
|
|
/* Mark filter as locked */
|
|
|
|
f->locked = 1;
|
|
|
|
f->fs.rpttid = 1;
|
|
|
|
|
|
|
|
ret = set_filter_wr(adap, stid);
|
|
|
|
if (ret) {
|
|
|
|
clear_filter(adap, f);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(cxgb4_create_server_filter);
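/* Illustrative only: a ULD that has set up a listening server could steer
 * its incoming connection requests to a chosen response queue with, e.g.,
 *
 *	cxgb4_create_server_filter(netdev, stid, sip, sport, 0, rspq_id, 0, 0);
 *
 * where the arguments are whatever the caller obtained when creating the
 * server; a zero "sip" skips the local-IP (and ingress-port) match, as the
 * checks above show.
 */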
|
|
|
|
|
|
|
|
int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
|
|
|
|
unsigned int queue, bool ipv6)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct filter_entry *f;
|
|
|
|
struct adapter *adap;
|
|
|
|
|
|
|
|
adap = netdev2adap(dev);
|
2012-12-10 16:30:55 +07:00
|
|
|
|
|
|
|
/* Adjust stid to correct filter index */
|
2013-12-18 18:08:21 +07:00
|
|
|
stid -= adap->tids.sftid_base;
|
2012-12-10 16:30:55 +07:00
|
|
|
stid += adap->tids.nftids;
|
|
|
|
|
2012-12-10 16:30:53 +07:00
|
|
|
f = &adap->tids.ftid_tab[stid];
|
|
|
|
/* Unlock the filter */
|
|
|
|
f->locked = 0;
|
|
|
|
|
|
|
|
ret = delete_filter(adap, stid);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(cxgb4_remove_server_filter);
|
|
|
|
|
2010-07-07 23:11:25 +07:00
|
|
|
static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
|
|
|
|
struct rtnl_link_stats64 *ns)
|
2010-04-01 22:28:26 +07:00
|
|
|
{
|
|
|
|
struct port_stats stats;
|
|
|
|
struct port_info *p = netdev_priv(dev);
|
|
|
|
struct adapter *adapter = p->adapter;
|
|
|
|
|
2014-01-23 11:27:35 +07:00
|
|
|
/* Block retrieving statistics during EEH error
|
|
|
|
* recovery. Otherwise, the recovery might fail
|
|
|
|
* and the PCI device will be removed permanently
|
|
|
|
*/
|
2010-04-01 22:28:26 +07:00
|
|
|
spin_lock(&adapter->stats_lock);
|
2014-01-23 11:27:35 +07:00
|
|
|
if (!netif_device_present(dev)) {
|
|
|
|
spin_unlock(&adapter->stats_lock);
|
|
|
|
return ns;
|
|
|
|
}
|
2010-04-01 22:28:26 +07:00
|
|
|
t4_get_port_stats(adapter, p->tx_chan, &stats);
|
|
|
|
spin_unlock(&adapter->stats_lock);
|
|
|
|
|
|
|
|
ns->tx_bytes = stats.tx_octets;
|
|
|
|
ns->tx_packets = stats.tx_frames;
|
|
|
|
ns->rx_bytes = stats.rx_octets;
|
|
|
|
ns->rx_packets = stats.rx_frames;
|
|
|
|
ns->multicast = stats.rx_mcast_frames;
|
|
|
|
|
|
|
|
/* detailed rx_errors */
|
|
|
|
ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
|
|
|
|
stats.rx_runt;
|
|
|
|
ns->rx_over_errors = 0;
|
|
|
|
ns->rx_crc_errors = stats.rx_fcs_err;
|
|
|
|
ns->rx_frame_errors = stats.rx_symbol_err;
|
|
|
|
ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
|
|
|
|
stats.rx_ovflow2 + stats.rx_ovflow3 +
|
|
|
|
stats.rx_trunc0 + stats.rx_trunc1 +
|
|
|
|
stats.rx_trunc2 + stats.rx_trunc3;
|
|
|
|
ns->rx_missed_errors = 0;
|
|
|
|
|
|
|
|
/* detailed tx_errors */
|
|
|
|
ns->tx_aborted_errors = 0;
|
|
|
|
ns->tx_carrier_errors = 0;
|
|
|
|
ns->tx_fifo_errors = 0;
|
|
|
|
ns->tx_heartbeat_errors = 0;
|
|
|
|
ns->tx_window_errors = 0;
|
|
|
|
|
|
|
|
ns->tx_errors = stats.tx_error_frames;
|
|
|
|
ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
|
|
|
|
ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
|
|
|
|
return ns;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
|
|
|
|
{
|
2010-08-02 20:19:21 +07:00
|
|
|
unsigned int mbox;
|
2010-04-01 22:28:26 +07:00
|
|
|
int ret = 0, prtad, devad;
|
|
|
|
struct port_info *pi = netdev_priv(dev);
|
|
|
|
struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
|
|
|
|
|
|
|
|
switch (cmd) {
|
|
|
|
case SIOCGMIIPHY:
|
|
|
|
if (pi->mdio_addr < 0)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
data->phy_id = pi->mdio_addr;
|
|
|
|
break;
|
|
|
|
case SIOCGMIIREG:
|
|
|
|
case SIOCSMIIREG:
|
|
|
|
if (mdio_phy_id_is_c45(data->phy_id)) {
|
|
|
|
prtad = mdio_phy_id_prtad(data->phy_id);
|
|
|
|
devad = mdio_phy_id_devad(data->phy_id);
|
|
|
|
} else if (data->phy_id < 32) {
|
|
|
|
prtad = data->phy_id;
|
|
|
|
devad = 0;
|
|
|
|
data->reg_num &= 0x1f;
|
|
|
|
} else
|
|
|
|
return -EINVAL;
|
|
|
|
|
2010-08-02 20:19:21 +07:00
|
|
|
mbox = pi->adapter->fn;
|
2010-04-01 22:28:26 +07:00
|
|
|
if (cmd == SIOCGMIIREG)
|
2010-08-02 20:19:21 +07:00
|
|
|
ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
|
2010-04-01 22:28:26 +07:00
|
|
|
data->reg_num, &data->val_out);
|
|
|
|
else
|
2010-08-02 20:19:21 +07:00
|
|
|
ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
|
2010-04-01 22:28:26 +07:00
|
|
|
data->reg_num, data->val_in);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void cxgb_set_rxmode(struct net_device *dev)
|
|
|
|
{
|
|
|
|
/* unfortunately we can't return errors to the stack */
|
|
|
|
set_rxmode(dev, -1, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct port_info *pi = netdev_priv(dev);
|
|
|
|
|
|
|
|
if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
|
|
|
|
return -EINVAL;
|
2010-08-02 20:19:21 +07:00
|
|
|
ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
|
|
|
|
-1, -1, -1, true);
|
2010-04-01 22:28:26 +07:00
|
|
|
if (!ret)
|
|
|
|
dev->mtu = new_mtu;
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
struct sockaddr *addr = p;
|
|
|
|
struct port_info *pi = netdev_priv(dev);
|
|
|
|
|
|
|
|
if (!is_valid_ether_addr(addr->sa_data))
|
2012-02-21 09:07:49 +07:00
|
|
|
return -EADDRNOTAVAIL;
|
2010-04-01 22:28:26 +07:00
|
|
|
|
2010-08-02 20:19:21 +07:00
|
|
|
ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
|
|
|
|
pi->xact_addr_filt, addr->sa_data, true, true);
|
2010-04-01 22:28:26 +07:00
|
|
|
if (ret < 0)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
|
|
|
|
pi->xact_addr_filt = ret;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_NET_POLL_CONTROLLER
|
|
|
|
static void cxgb_netpoll(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct port_info *pi = netdev_priv(dev);
|
|
|
|
struct adapter *adap = pi->adapter;
|
|
|
|
|
|
|
|
if (adap->flags & USING_MSIX) {
|
|
|
|
int i;
|
|
|
|
struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
|
|
|
|
|
|
|
|
for (i = pi->nqsets; i; i--, rx++)
|
|
|
|
t4_sge_intr_msix(0, &rx->rspq);
|
|
|
|
} else
|
|
|
|
t4_intr_handler(adap)(0, adap);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
static const struct net_device_ops cxgb4_netdev_ops = {
|
|
|
|
.ndo_open = cxgb_open,
|
|
|
|
.ndo_stop = cxgb_close,
|
|
|
|
.ndo_start_xmit = t4_eth_xmit,
|
2014-06-20 11:37:13 +07:00
|
|
|
.ndo_select_queue = cxgb_select_queue,
|
2010-06-18 17:05:31 +07:00
|
|
|
.ndo_get_stats64 = cxgb_get_stats,
|
2010-04-01 22:28:26 +07:00
|
|
|
.ndo_set_rx_mode = cxgb_set_rxmode,
|
|
|
|
.ndo_set_mac_address = cxgb_set_mac_addr,
|
2011-04-16 20:05:08 +07:00
|
|
|
.ndo_set_features = cxgb_set_features,
|
2010-04-01 22:28:26 +07:00
|
|
|
.ndo_validate_addr = eth_validate_addr,
|
|
|
|
.ndo_do_ioctl = cxgb_ioctl,
|
|
|
|
.ndo_change_mtu = cxgb_change_mtu,
|
|
|
|
#ifdef CONFIG_NET_POLL_CONTROLLER
|
|
|
|
.ndo_poll_controller = cxgb_netpoll,
|
|
|
|
#endif
|
2015-02-04 17:02:52 +07:00
|
|
|
#ifdef CONFIG_NET_RX_BUSY_POLL
|
|
|
|
.ndo_busy_poll = cxgb_busy_poll,
|
|
|
|
#endif
|
|
|
|
|
2010-04-01 22:28:26 +07:00
|
|
|
};
|
|
|
|
|
|
|
|
void t4_fatal_err(struct adapter *adap)
|
|
|
|
{
|
2015-01-05 18:00:43 +07:00
|
|
|
t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
|
2010-04-01 22:28:26 +07:00
|
|
|
t4_intr_disable(adap);
|
|
|
|
dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
|
|
|
|
}
|
|
|
|
|
2014-06-27 20:53:48 +07:00
|
|
|
/* Return the specified PCI-E Configuration Space register from our Physical
|
|
|
|
* Function. We try first via a Firmware LDST Command since we prefer to let
|
|
|
|
* the firmware own all of these registers, but if that fails we go for it
|
|
|
|
* directly ourselves.
|
|
|
|
*/
|
|
|
|
static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
|
|
|
|
{
|
|
|
|
struct fw_ldst_cmd ldst_cmd;
|
|
|
|
u32 val;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/* Construct and send the Firmware LDST Command to retrieve the
|
|
|
|
* specified PCI-E Configuration Space register.
|
|
|
|
*/
|
|
|
|
memset(&ldst_cmd, 0, sizeof(ldst_cmd));
|
|
|
|
ldst_cmd.op_to_addrspace =
|
2014-11-07 11:05:25 +07:00
|
|
|
htonl(FW_CMD_OP_V(FW_LDST_CMD) |
|
|
|
|
FW_CMD_REQUEST_F |
|
|
|
|
FW_CMD_READ_F |
|
2014-11-21 14:22:02 +07:00
|
|
|
FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE));
|
2014-06-27 20:53:48 +07:00
|
|
|
ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
|
2014-11-21 14:22:02 +07:00
|
|
|
ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
|
2014-06-27 20:53:48 +07:00
|
|
|
ldst_cmd.u.pcie.ctrl_to_fn =
|
2014-11-21 14:22:02 +07:00
|
|
|
(FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn));
|
2014-06-27 20:53:48 +07:00
|
|
|
ldst_cmd.u.pcie.r = reg;
|
|
|
|
ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
|
|
|
|
&ldst_cmd);
|
|
|
|
|
|
|
|
/* If the LDST Command succeeded, extract the returned register
|
|
|
|
* value. Otherwise read it directly ourselves.
|
|
|
|
*/
|
|
|
|
if (ret == 0)
|
|
|
|
val = ntohl(ldst_cmd.u.pcie.data[0]);
|
|
|
|
else
|
|
|
|
t4_hw_pci_read_cfg4(adap, reg, &val);
|
|
|
|
|
|
|
|
return val;
|
|
|
|
}
|
|
|
|
|
2010-04-01 22:28:26 +07:00
|
|
|
static void setup_memwin(struct adapter *adap)
|
|
|
|
{
|
2014-06-27 20:53:48 +07:00
|
|
|
u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
|
2010-04-01 22:28:26 +07:00
|
|
|
|
2013-12-03 18:35:56 +07:00
|
|
|
if (is_t4(adap->params.chip)) {
|
2014-06-27 20:53:48 +07:00
|
|
|
u32 bar0;
|
|
|
|
|
|
|
|
/* Truncation intentional: we only read the bottom 32-bits of
|
|
|
|
* the 64-bit BAR0/BAR1 ... We use the hardware backdoor
|
|
|
|
* mechanism to read BAR0 instead of using
|
|
|
|
* pci_resource_start() because we could be operating from
|
|
|
|
* within a Virtual Machine which is trapping our accesses to
|
|
|
|
* our Configuration Space and we need to set up the PCI-E
|
|
|
|
* Memory Window decoders with the actual addresses which will
|
|
|
|
* be coming across the PCI-E link.
|
|
|
|
*/
|
|
|
|
bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
|
|
|
|
bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
|
|
|
|
adap->t4_bar0 = bar0;
|
|
|
|
|
2013-03-14 12:08:53 +07:00
|
|
|
mem_win0_base = bar0 + MEMWIN0_BASE;
|
|
|
|
mem_win1_base = bar0 + MEMWIN1_BASE;
|
|
|
|
mem_win2_base = bar0 + MEMWIN2_BASE;
|
2014-06-27 20:53:48 +07:00
|
|
|
mem_win2_aperture = MEMWIN2_APERTURE;
|
2013-03-14 12:08:53 +07:00
|
|
|
} else {
|
|
|
|
/* For T5, only relative offset inside the PCIe BAR is passed */
|
|
|
|
mem_win0_base = MEMWIN0_BASE;
|
2014-06-27 20:53:48 +07:00
|
|
|
mem_win1_base = MEMWIN1_BASE;
|
2013-03-14 12:08:53 +07:00
|
|
|
mem_win2_base = MEMWIN2_BASE_T5;
|
2014-06-27 20:53:48 +07:00
|
|
|
mem_win2_aperture = MEMWIN2_APERTURE_T5;
|
2013-03-14 12:08:53 +07:00
|
|
|
}
|
2015-01-05 18:00:44 +07:00
|
|
|
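/* The WINDOW field holds the aperture as computed below, i.e.
 * ilog2(aperture in bytes) - 10, which amounts to a power-of-two size
 * expressed in 1KB units: a 64KB window, for example, would be encoded as 6.
 */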
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 0),
|
|
|
|
mem_win0_base | BIR_V(0) |
|
|
|
|
WINDOW_V(ilog2(MEMWIN0_APERTURE) - 10));
|
|
|
|
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 1),
|
|
|
|
mem_win1_base | BIR_V(0) |
|
|
|
|
WINDOW_V(ilog2(MEMWIN1_APERTURE) - 10));
|
|
|
|
t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2),
|
|
|
|
mem_win2_base | BIR_V(0) |
|
|
|
|
WINDOW_V(ilog2(mem_win2_aperture) - 10));
|
|
|
|
t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2));
|
2012-09-26 09:39:39 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static void setup_memwin_rdma(struct adapter *adap)
|
|
|
|
{
|
2010-08-02 20:19:19 +07:00
|
|
|
if (adap->vres.ocq.size) {
|
2014-06-27 20:53:48 +07:00
|
|
|
u32 start;
|
|
|
|
unsigned int sz_kb;
|
2010-08-02 20:19:19 +07:00
|
|
|
|
2014-06-27 20:53:48 +07:00
|
|
|
start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
|
|
|
|
start &= PCI_BASE_ADDRESS_MEM_MASK;
|
|
|
|
start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
|
2010-08-02 20:19:19 +07:00
|
|
|
sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
|
|
|
|
t4_write_reg(adap,
|
2015-01-05 18:00:44 +07:00
|
|
|
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
|
|
|
|
start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
|
2010-08-02 20:19:19 +07:00
|
|
|
t4_write_reg(adap,
|
2015-01-05 18:00:44 +07:00
|
|
|
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
|
2010-08-02 20:19:19 +07:00
|
|
|
adap->vres.ocq.start);
|
|
|
|
t4_read_reg(adap,
|
2015-01-05 18:00:44 +07:00
|
|
|
PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
|
2010-08-02 20:19:19 +07:00
|
|
|
}
|
2010-04-01 22:28:26 +07:00
|
|
|
}
|
|
|
|
|
2010-06-18 17:05:28 +07:00
|
|
|
static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
        u32 v;
        int ret;

        /* get device capabilities */
        memset(c, 0, sizeof(*c));
        c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
                               FW_CMD_REQUEST_F | FW_CMD_READ_F);
        c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
        ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
        if (ret < 0)
                return ret;

        /* select capabilities we'll be using */
        if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
                if (!vf_acls)
                        c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
                else
                        c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
        } else if (vf_acls) {
                dev_err(adap->pdev_dev, "virtualization ACLs not supported");
                return ret;
        }

        c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
                               FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
        ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
        if (ret < 0)
                return ret;

        ret = t4_config_glbl_rss(adap, adap->fn,
                                 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
                                 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
                                 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
        if (ret < 0)
                return ret;

        ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
                          0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
        if (ret < 0)
                return ret;

        t4_sge_init(adap);

        /* tweak some settings */
        t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
        t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
        t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
        v = t4_read_reg(adap, TP_PIO_DATA_A);
        t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);

        /* first 4 Tx modulation queues point to consecutive Tx channels */
        adap->params.tp.tx_modq_map = 0xE4;
        t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
                     TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));

        /* associate each Tx modulation queue with consecutive Tx channels */
        v = 0x84218421;
        t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
                          &v, 1, TP_TX_SCHED_HDR_A);
        t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
                          &v, 1, TP_TX_SCHED_FIFO_A);
        t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
                          &v, 1, TP_TX_SCHED_PCMD_A);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
        if (is_offload(adap)) {
                t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
                             TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
                             TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
                             TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
                             TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
                t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
                             TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
                             TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
                             TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
                             TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
        }

        /* get basic stuff going */
        return t4_early_init(adap, adap->fn);
}

/*
 * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration
 */

/*
 * Tweak configuration based on module parameters, etc. Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization. But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
        /*
         * Fix up various Host-Dependent Parameters like Page Size, Cache
         * Line Size, etc. The firmware default is for a 4KB Page Size and
         * 64B Cache Line Size ...
         */
        t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

        /*
         * Process module parameters which affect early initialization.
         */
        if (rx_dma_offset != 2 && rx_dma_offset != 0) {
                dev_err(&adapter->pdev->dev,
                        "Ignoring illegal rx_dma_offset=%d, using 2\n",
                        rx_dma_offset);
                rx_dma_offset = 2;
        }
        t4_set_reg_field(adapter, SGE_CONTROL_A,
                         PKTSHIFT_V(PKTSHIFT_M),
                         PKTSHIFT_V(rx_dma_offset));

        /*
         * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
         * adds the pseudo header itself.
         */
        t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
                               CSUM_HAS_PSEUDO_HDR_F, 0);

        return 0;
}

/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
        struct fw_caps_config_cmd caps_cmd;
        const struct firmware *cf;
        unsigned long mtype = 0, maddr = 0;
        u32 finiver, finicsum, cfcsum;
        int ret;
        int config_issued = 0;
        char *fw_config_file, fw_config_file_path[256];
        char *config_name = NULL;

        /*
         * Reset device if necessary.
         */
        if (reset) {
                ret = t4_fw_reset(adapter, adapter->mbox,
                                  PIORSTMODE_F | PIORST_F);
                if (ret < 0)
                        goto bye;
        }

        /*
         * If we have a T4 configuration file under /lib/firmware/cxgb4/,
         * then use that. Otherwise, use the configuration file stored
         * in the adapter flash ...
         */
        switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
        case CHELSIO_T4:
                fw_config_file = FW4_CFNAME;
                break;
        case CHELSIO_T5:
                fw_config_file = FW5_CFNAME;
                break;
        default:
                dev_err(adapter->pdev_dev, "Device %d is not supported\n",
                        adapter->pdev->device);
                ret = -EINVAL;
                goto bye;
        }

        ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
        if (ret < 0) {
                config_name = "On FLASH";
                mtype = FW_MEMTYPE_CF_FLASH;
                maddr = t4_flash_cfg_addr(adapter);
        } else {
                u32 params[7], val[7];

                sprintf(fw_config_file_path,
                        "/lib/firmware/%s", fw_config_file);
                config_name = fw_config_file_path;

                if (cf->size >= FLASH_CFG_MAX_SIZE)
                        ret = -ENOMEM;
                else {
                        params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
                                     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
                        ret = t4_query_params(adapter, adapter->mbox,
                                              adapter->fn, 0, 1, params, val);
                        if (ret == 0) {
                                /*
                                 * For t4_memory_rw() below addresses and
                                 * sizes have to be in terms of multiples of 4
                                 * bytes. So, if the Configuration File isn't
                                 * a multiple of 4 bytes in length we'll have
                                 * to write that out separately since we can't
                                 * guarantee that the bytes following the
                                 * residual byte in the buffer returned by
                                 * request_firmware() are zeroed out ...
                                 */
                                size_t resid = cf->size & 0x3;
                                size_t size = cf->size & ~0x3;
                                __be32 *data = (__be32 *)cf->data;

                                mtype = FW_PARAMS_PARAM_Y_G(val[0]);
                                maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;

                                spin_lock(&adapter->win0_lock);
                                ret = t4_memory_rw(adapter, 0, mtype, maddr,
                                                   size, data, T4_MEMORY_WRITE);
                                if (ret == 0 && resid != 0) {
                                        union {
                                                __be32 word;
                                                char buf[4];
                                        } last;
                                        int i;

                                        last.word = data[size >> 2];
                                        for (i = resid; i < 4; i++)
                                                last.buf[i] = 0;
                                        ret = t4_memory_rw(adapter, 0, mtype,
                                                           maddr + size,
                                                           4, &last.word,
                                                           T4_MEMORY_WRITE);
                                }
                                spin_unlock(&adapter->win0_lock);
                        }
                }

                release_firmware(cf);
                if (ret)
                        goto bye;
        }

        /*
         * Issue a Capability Configuration command to the firmware to get it
         * to parse the Configuration File. We don't use t4_fw_config_file()
         * because we want the ability to modify various features after we've
         * processed the configuration file ...
         */
        memset(&caps_cmd, 0, sizeof(caps_cmd));
        caps_cmd.op_to_write =
                htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
                      FW_CMD_REQUEST_F |
                      FW_CMD_READ_F);
        caps_cmd.cfvalid_to_len16 =
                htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
                      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
                      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
                      FW_LEN16(caps_cmd));
        ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
                         &caps_cmd);

        /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
         * Configuration File in FLASH), our last gasp effort is to use the
         * Firmware Configuration File which is embedded in the firmware. A
         * very few early versions of the firmware didn't have one embedded
         * but we can ignore those.
         */
        if (ret == -ENOENT) {
                memset(&caps_cmd, 0, sizeof(caps_cmd));
                caps_cmd.op_to_write =
                        htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
                              FW_CMD_REQUEST_F |
                              FW_CMD_READ_F);
                caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
                ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
                                 sizeof(caps_cmd), &caps_cmd);
                config_name = "Firmware Default";
        }

        config_issued = 1;
        if (ret < 0)
                goto bye;

        finiver = ntohl(caps_cmd.finiver);
        finicsum = ntohl(caps_cmd.finicsum);
        cfcsum = ntohl(caps_cmd.cfcsum);
        if (finicsum != cfcsum)
                dev_warn(adapter->pdev_dev, "Configuration File checksum "\
                         "mismatch: [fini] csum=%#x, computed csum=%#x\n",
                         finicsum, cfcsum);

        /*
         * And now tell the firmware to use the configuration we just loaded.
         */
        caps_cmd.op_to_write =
                htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
                      FW_CMD_REQUEST_F |
                      FW_CMD_WRITE_F);
        caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
        ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
                         NULL);
        if (ret < 0)
                goto bye;

        /*
         * Tweak configuration based on system architecture, module
         * parameters, etc.
         */
        ret = adap_init0_tweaks(adapter);
        if (ret < 0)
                goto bye;

        /*
         * And finally tell the firmware to initialize itself using the
         * parameters from the Configuration File.
         */
        ret = t4_fw_initialize(adapter, adapter->mbox);
        if (ret < 0)
                goto bye;

        /* Emit Firmware Configuration File information and return
         * successfully.
         */
        dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
                 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
                 config_name, finiver, cfcsum);
        return 0;

        /*
         * Something bad happened. Return the error ... (If the "error"
         * is that there's no Configuration File on the adapter we don't
         * want to issue a warning since this is fairly common.)
         */
bye:
        if (config_issued && ret != -ENOENT)
                dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
                         config_name, -ret);
        return ret;
}
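
/*
 * Per-chip table of firmware image names and the firmware header (version
 * and per-ULD interface versions) this driver was built against; used by
 * the firmware upgrade logic in adap_init0() below.
 */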
static struct fw_info fw_info_array[] = {
        {
                .chip = CHELSIO_T4,
                .fs_name = FW4_CFNAME,
                .fw_mod_name = FW4_FNAME,
                .fw_hdr = {
                        .chip = FW_HDR_CHIP_T4,
                        .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
                        .intfver_nic = FW_INTFVER(T4, NIC),
                        .intfver_vnic = FW_INTFVER(T4, VNIC),
                        .intfver_ri = FW_INTFVER(T4, RI),
                        .intfver_iscsi = FW_INTFVER(T4, ISCSI),
                        .intfver_fcoe = FW_INTFVER(T4, FCOE),
                },
        }, {
                .chip = CHELSIO_T5,
                .fs_name = FW5_CFNAME,
                .fw_mod_name = FW5_FNAME,
                .fw_hdr = {
                        .chip = FW_HDR_CHIP_T5,
                        .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
                        .intfver_nic = FW_INTFVER(T5, NIC),
                        .intfver_vnic = FW_INTFVER(T5, VNIC),
                        .intfver_ri = FW_INTFVER(T5, RI),
                        .intfver_iscsi = FW_INTFVER(T5, ISCSI),
                        .intfver_fcoe = FW_INTFVER(T5, FCOE),
                },
        }
};
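
/*
 * Return the fw_info entry for the given chip version, or NULL if the chip
 * is not known to this driver.
 */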
static struct fw_info *find_fw_info(int chip)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
                if (fw_info_array[i].chip == chip)
                        return &fw_info_array[i];
        }
        return NULL;
}

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
        int ret;
        u32 v, port_vec;
        enum dev_state state;
        u32 params[7], val[7];
        struct fw_caps_config_cmd caps_cmd;
        struct fw_devlog_cmd devlog_cmd;
        u32 devlog_meminfo;
        int reset = 1;

        /* Contact FW, advertising Master capability */
        ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
        if (ret < 0) {
                dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
                        ret);
                return ret;
        }
        if (ret == adap->mbox)
                adap->flags |= MASTER_PF;

        /*
         * If we're the Master PF Driver and the device is uninitialized,
         * then let's consider upgrading the firmware ... (We always want
         * to check the firmware version number in order to A. get it for
         * later reporting and B. to warn if the currently loaded firmware
         * is excessively mismatched relative to the driver.)
         */
        t4_get_fw_version(adap, &adap->params.fw_vers);
        t4_get_tp_version(adap, &adap->params.tp_vers);
        if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
                struct fw_info *fw_info;
                struct fw_hdr *card_fw;
                const struct firmware *fw;
                const u8 *fw_data = NULL;
                unsigned int fw_size = 0;

                /* This is the firmware whose headers the driver was compiled
                 * against
                 */
                fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
                if (fw_info == NULL) {
                        dev_err(adap->pdev_dev,
                                "unable to get firmware info for chip %d.\n",
                                CHELSIO_CHIP_VERSION(adap->params.chip));
                        return -EINVAL;
                }

                /* allocate memory to read the header of the firmware on the
                 * card
                 */
                card_fw = t4_alloc_mem(sizeof(*card_fw));

                /* Get FW from /lib/firmware/ */
                ret = request_firmware(&fw, fw_info->fw_mod_name,
                                       adap->pdev_dev);
                if (ret < 0) {
                        dev_err(adap->pdev_dev,
                                "unable to load firmware image %s, error %d\n",
                                fw_info->fw_mod_name, ret);
                } else {
                        fw_data = fw->data;
                        fw_size = fw->size;
                }

                /* upgrade FW logic */
                ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
                                 state, &reset);

                /* Cleaning up */
                release_firmware(fw);
                t4_free_mem(card_fw);

                if (ret < 0)
                        goto bye;
        }

        /*
         * Grab VPD parameters. This should be done after we establish a
         * connection to the firmware since some of the VPD parameters
         * (notably the Core Clock frequency) are retrieved via requests to
         * the firmware. On the other hand, we need these fairly early on
         * so we do this right after getting ahold of the firmware.
         */
        ret = get_vpd_params(adap, &adap->params.vpd);
        if (ret < 0)
                goto bye;

        /* Read firmware device log parameters. We really need to find a way
         * to get these parameters initialized with some default values (which
         * are likely to be correct) for the case where we either don't
         * attach to the firmware or it's crashed when we probe the adapter.
         * That way we'll still be able to perform early firmware startup
         * debugging ... If the request to get the Firmware's Device Log
         * parameters fails, we'll live so we don't make that a fatal error.
         */
        memset(&devlog_cmd, 0, sizeof(devlog_cmd));
        devlog_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_DEVLOG_CMD) |
                                       FW_CMD_REQUEST_F | FW_CMD_READ_F);
        devlog_cmd.retval_len16 = htonl(FW_LEN16(devlog_cmd));
        ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
                         &devlog_cmd);
        if (ret == 0) {
                devlog_meminfo =
                        ntohl(devlog_cmd.memtype_devlog_memaddr16_devlog);
                adap->params.devlog.memtype =
                        FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
                adap->params.devlog.start =
                        FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
                adap->params.devlog.size = ntohl(devlog_cmd.memsize_devlog);
        }

        /*
         * Find out what ports are available to us. Note that we need to do
         * this before calling adap_init0_no_config() since it needs nports
         * and portvec ...
         */
        v =
            FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
            FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
        ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
        if (ret < 0)
                goto bye;

        adap->params.nports = hweight32(port_vec);
        adap->params.portvec = port_vec;

        /* If the firmware is initialized already, emit a simple note to that
         * effect. Otherwise, it's time to try initializing the adapter.
         */
        if (state == DEV_STATE_INIT) {
                dev_info(adap->pdev_dev, "Coming up as %s: "\
                         "Adapter already initialized\n",
                         adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
        } else {
                dev_info(adap->pdev_dev, "Coming up as MASTER: "\
                         "Initializing adapter\n");

                /* Find out whether we're dealing with a version of the
                 * firmware which has configuration file support.
                 */
                params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
                             FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
                ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
                                      params, val);

                /* If the firmware doesn't support Configuration Files,
                 * return an error.
                 */
                if (ret < 0) {
                        dev_err(adap->pdev_dev, "firmware doesn't support "
                                "Firmware Configuration Files\n");
                        goto bye;
                }

                /* The firmware provides us with a memory buffer where we can
                 * load a Configuration File from the host if we want to
                 * override the Configuration File in flash.
                 */
                ret = adap_init0_config(adap, reset);
                if (ret == -ENOENT) {
                        dev_err(adap->pdev_dev, "no Configuration File "
                                "present on adapter.\n");
                        goto bye;
                }
                if (ret < 0) {
                        dev_err(adap->pdev_dev, "could not initialize "
                                "adapter, error %d\n", -ret);
                        goto bye;
                }
        }

        /* Give the SGE code a chance to pull in anything that it needs ...
         * Note that this must be called after we retrieve our VPD parameters
         * in order to know how to convert core ticks to seconds, etc.
         */
        ret = t4_sge_init(adap);
        if (ret < 0)
                goto bye;

        if (is_bypass_device(adap->pdev->device))
                adap->params.bypass = 1;

        /*
         * Grab some of our basic fundamental operating parameters.
         */
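        /* Firmware parameters are addressed by a mnemonic (device-global or
         * per-PF/VF) plus X/Y/Z sub-indices; the helpers below build the
         * parameter identifiers passed to t4_query_params().
         */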
#define FW_PARAM_DEV(param) \
        (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
         FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
        FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
        FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
        FW_PARAMS_PARAM_Y_V(0) | \
        FW_PARAMS_PARAM_Z_V(0)

        params[0] = FW_PARAM_PFVF(EQ_START);
        params[1] = FW_PARAM_PFVF(L2T_START);
        params[2] = FW_PARAM_PFVF(L2T_END);
        params[3] = FW_PARAM_PFVF(FILTER_START);
        params[4] = FW_PARAM_PFVF(FILTER_END);
        params[5] = FW_PARAM_PFVF(IQFLINT_START);
        ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
        if (ret < 0)
                goto bye;
        adap->sge.egr_start = val[0];
        adap->l2t_start = val[1];
        adap->l2t_end = val[2];
        adap->tids.ftid_base = val[3];
        adap->tids.nftids = val[4] - val[3] + 1;
        adap->sge.ingr_start = val[5];

        params[0] = FW_PARAM_PFVF(CLIP_START);
        params[1] = FW_PARAM_PFVF(CLIP_END);
        ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
        if (ret < 0)
                goto bye;
        adap->clipt_start = val[0];
        adap->clipt_end = val[1];

        /* query params related to active filter region */
        params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
        params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
        ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
        /* If Active filter size is set we enable establishing
         * offload connection through firmware work request
         */
        if ((val[0] != val[1]) && (ret >= 0)) {
                adap->flags |= FW_OFLD_CONN;
                adap->tids.aftid_base = val[0];
                adap->tids.aftid_end = val[1];
        }

        /* If we're running on newer firmware, let it know that we're
         * prepared to deal with encapsulated CPL messages. Older
         * firmware won't understand this and we'll just get
         * unencapsulated messages ...
         */
        params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
        val[0] = 1;
        (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);

        /*
         * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
         * capability. Earlier versions of the firmware didn't have the
         * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
         * permission to use ULPTX MEMWRITE DSGL.
         */
        if (is_t4(adap->params.chip)) {
                adap->params.ulptx_memwrite_dsgl = false;
        } else {
                params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
                ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
                                      1, params, val);
                adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
        }

        /*
         * Get device capabilities so we can determine what resources we need
         * to manage.
         */
        memset(&caps_cmd, 0, sizeof(caps_cmd));
        caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
                                     FW_CMD_REQUEST_F | FW_CMD_READ_F);
        caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
        ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
                         &caps_cmd);
        if (ret < 0)
                goto bye;

        if (caps_cmd.ofldcaps) {
                /* query offload-related parameters */
                params[0] = FW_PARAM_DEV(NTID);
                params[1] = FW_PARAM_PFVF(SERVER_START);
                params[2] = FW_PARAM_PFVF(SERVER_END);
                params[3] = FW_PARAM_PFVF(TDDP_START);
                params[4] = FW_PARAM_PFVF(TDDP_END);
                params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
                ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
                                      params, val);
                if (ret < 0)
                        goto bye;
                adap->tids.ntids = val[0];
                adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
                adap->tids.stid_base = val[1];
                adap->tids.nstids = val[2] - val[1] + 1;
                /*
                 * Setup server filter region. Divide the available filter
                 * region into two parts. Regular filters get 1/3rd and server
                 * filters get 2/3rd part. This is only enabled if the
                 * workaround path is enabled.
                 * 1. For regular filters.
                 * 2. Server filter: These are special filters which are used
                 * to redirect SYN packets to the offload queue.
                 */
                if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
                        adap->tids.sftid_base = adap->tids.ftid_base +
                                DIV_ROUND_UP(adap->tids.nftids, 3);
                        adap->tids.nsftids = adap->tids.nftids -
                                DIV_ROUND_UP(adap->tids.nftids, 3);
                        adap->tids.nftids = adap->tids.sftid_base -
                                adap->tids.ftid_base;
                }
                adap->vres.ddp.start = val[3];
                adap->vres.ddp.size = val[4] - val[3] + 1;
                adap->params.ofldq_wr_cred = val[5];

                adap->params.offload = 1;
        }
        if (caps_cmd.rdmacaps) {
                params[0] = FW_PARAM_PFVF(STAG_START);
                params[1] = FW_PARAM_PFVF(STAG_END);
                params[2] = FW_PARAM_PFVF(RQ_START);
                params[3] = FW_PARAM_PFVF(RQ_END);
                params[4] = FW_PARAM_PFVF(PBL_START);
                params[5] = FW_PARAM_PFVF(PBL_END);
                ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
                                      params, val);
                if (ret < 0)
                        goto bye;
                adap->vres.stag.start = val[0];
                adap->vres.stag.size = val[1] - val[0] + 1;
                adap->vres.rq.start = val[2];
                adap->vres.rq.size = val[3] - val[2] + 1;
                adap->vres.pbl.start = val[4];
                adap->vres.pbl.size = val[5] - val[4] + 1;

                params[0] = FW_PARAM_PFVF(SQRQ_START);
                params[1] = FW_PARAM_PFVF(SQRQ_END);
                params[2] = FW_PARAM_PFVF(CQ_START);
                params[3] = FW_PARAM_PFVF(CQ_END);
                params[4] = FW_PARAM_PFVF(OCQ_START);
                params[5] = FW_PARAM_PFVF(OCQ_END);
                ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
                                      val);
                if (ret < 0)
                        goto bye;
                adap->vres.qp.start = val[0];
                adap->vres.qp.size = val[1] - val[0] + 1;
                adap->vres.cq.start = val[2];
                adap->vres.cq.size = val[3] - val[2] + 1;
                adap->vres.ocq.start = val[4];
                adap->vres.ocq.size = val[5] - val[4] + 1;

                params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
                params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
                ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
                                      val);
                if (ret < 0) {
                        adap->params.max_ordird_qp = 8;
                        adap->params.max_ird_adapter = 32 * adap->tids.ntids;
                        ret = 0;
                } else {
                        adap->params.max_ordird_qp = val[0];
                        adap->params.max_ird_adapter = val[1];
                }
                dev_info(adap->pdev_dev,
                         "max_ordird_qp %d max_ird_adapter %d\n",
                         adap->params.max_ordird_qp,
                         adap->params.max_ird_adapter);
        }
        if (caps_cmd.iscsicaps) {
                params[0] = FW_PARAM_PFVF(ISCSI_START);
                params[1] = FW_PARAM_PFVF(ISCSI_END);
                ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
                                      params, val);
                if (ret < 0)
                        goto bye;
                adap->vres.iscsi.start = val[0];
                adap->vres.iscsi.size = val[1] - val[0] + 1;
        }
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV

        /* The MTU/MSS Table is initialized by now, so load its values. If
         * we're initializing the adapter, then we'll make any modifications
         * we want to the MTU/MSS Table and also initialize the congestion
         * parameters.
         */
        t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
        if (state != DEV_STATE_INIT) {
                int i;

                /* The default MTU Table contains values 1492 and 1500.
                 * However, for TCP, it's better to have two values which are
                 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
                 * This allows us to have a TCP Data Payload which is a
                 * multiple of 8 regardless of what combination of TCP Options
                 * are in use (always a multiple of 4 bytes) which is
                 * important for performance reasons. For instance, if no
                 * options are in use, then we have a 20-byte IP header and a
                 * 20-byte TCP header. In this case, a 1500-byte MSS would
                 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
                 * which is not a multiple of 8. So using an MSS of 1488 in
                 * this case results in a TCP Data Payload of 1448 bytes which
                 * is a multiple of 8. On the other hand, if 12-byte TCP Time
                 * Stamps have been negotiated, then an MTU of 1500 bytes
                 * results in a TCP Data Payload of 1448 bytes which, as
                 * above, is a multiple of 8 bytes ...
                 */
                for (i = 0; i < NMTUS; i++)
                        if (adap->params.mtus[i] == 1492) {
                                adap->params.mtus[i] = 1488;
                                break;
                        }

                t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
                             adap->params.b_wnd);
        }
        t4_init_sge_params(adap);
        t4_init_tp_params(adap);
        adap->flags |= FW_OK;
        return 0;

        /*
         * Something bad happened. If a command timed out or failed with EIO
         * FW does not operate within its spec or something catastrophic
         * happened to HW/FW, stop issuing commands.
         */
bye:
        if (ret != -ETIMEDOUT && ret != -EIO)
                t4_fw_bye(adap, adap->mbox);
        return ret;
}

/* EEH callbacks */

static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
                                         pci_channel_state_t state)
{
        int i;
        struct adapter *adap = pci_get_drvdata(pdev);

        if (!adap)
                goto out;

        rtnl_lock();
        adap->flags &= ~FW_OK;
        notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
        spin_lock(&adap->stats_lock);
        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];

                netif_device_detach(dev);
                netif_carrier_off(dev);
        }
        spin_unlock(&adap->stats_lock);
        if (adap->flags & FULL_INIT_DONE)
                cxgb_down(adap);
        rtnl_unlock();
        if ((adap->flags & DEV_ENABLED)) {
                pci_disable_device(pdev);
                adap->flags &= ~DEV_ENABLED;
        }
out:    return state == pci_channel_io_perm_failure ?
                PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
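
/*
 * Slot reset recovery: re-enable and restore the PCI device, re-establish
 * contact with the firmware, redo the basic adapter initialization and
 * re-allocate a virtual interface for each port before bringing the
 * adapter back up.
 */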
static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
        int i, ret;
        struct fw_caps_config_cmd c;
        struct adapter *adap = pci_get_drvdata(pdev);

        if (!adap) {
                pci_restore_state(pdev);
                pci_save_state(pdev);
                return PCI_ERS_RESULT_RECOVERED;
        }

        if (!(adap->flags & DEV_ENABLED)) {
                if (pci_enable_device(pdev)) {
                        dev_err(&pdev->dev, "Cannot reenable PCI "
                                "device after reset\n");
                        return PCI_ERS_RESULT_DISCONNECT;
                }
                adap->flags |= DEV_ENABLED;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);
        pci_cleanup_aer_uncorrect_error_status(pdev);

        if (t4_wait_dev_ready(adap->regs) < 0)
                return PCI_ERS_RESULT_DISCONNECT;
        if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
                return PCI_ERS_RESULT_DISCONNECT;
        adap->flags |= FW_OK;
        if (adap_init1(adap, &c))
                return PCI_ERS_RESULT_DISCONNECT;

        for_each_port(adap, i) {
                struct port_info *p = adap2pinfo(adap, i);

                ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
                                  NULL, NULL);
                if (ret < 0)
                        return PCI_ERS_RESULT_DISCONNECT;
                p->viid = ret;
                p->xact_addr_filt = -1;
        }

        t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
                     adap->params.b_wnd);
        setup_memwin(adap);
        if (cxgb_up(adap))
                return PCI_ERS_RESULT_DISCONNECT;
        return PCI_ERS_RESULT_RECOVERED;
}

static void eeh_resume(struct pci_dev *pdev)
{
        int i;
        struct adapter *adap = pci_get_drvdata(pdev);

        if (!adap)
                return;

        rtnl_lock();
        for_each_port(adap, i) {
                struct net_device *dev = adap->port[i];

                if (netif_running(dev)) {
                        link_start(dev);
                        cxgb_set_rxmode(dev);
                }
                netif_device_attach(dev);
        }
        rtnl_unlock();
}

static const struct pci_error_handlers cxgb4_eeh = {
        .error_detected = eeh_err_detected,
        .slot_reset = eeh_slot_reset,
        .resume = eeh_resume,
};

static inline bool is_x_10g_port(const struct link_config *lc)
{
        return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
               (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}
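
/*
 * Fill in the common response-queue defaults: owning adapter, interrupt
 * holdoff parameters (microseconds and packet count), queue size and
 * ingress queue entry size.
 */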
static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
                             unsigned int us, unsigned int cnt,
                             unsigned int size, unsigned int iqe_size)
{
        q->adap = adap;
        set_rspq_intr_params(q, us, cnt);
        q->iqe_len = iqe_size;
        q->size = size;
}

/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs. Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
        struct sge *s = &adap->sge;
        int i, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
        int q10g = 0;
#endif
        int ciq_size;

        for_each_port(adap, i)
                n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
#ifdef CONFIG_CHELSIO_T4_DCB
        /* For Data Center Bridging support we need to be able to support up
         * to 8 Traffic Priorities; each of which will be assigned to its
         * own TX Queue in order to prevent Head-Of-Line Blocking.
         */
        if (adap->params.nports * 8 > MAX_ETH_QSETS) {
                dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
                        MAX_ETH_QSETS, adap->params.nports * 8);
                BUG_ON(1);
        }

        for_each_port(adap, i) {
                struct port_info *pi = adap2pinfo(adap, i);

                pi->first_qset = qidx;
                pi->nqsets = 8;
                qidx += pi->nqsets;
        }
#else /* !CONFIG_CHELSIO_T4_DCB */
        /*
         * We default to 1 queue per non-10G port and up to # of cores queues
         * per 10G port.
         */
        if (n10g)
                q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
        if (q10g > netif_get_num_default_rss_queues())
                q10g = netif_get_num_default_rss_queues();

        for_each_port(adap, i) {
                struct port_info *pi = adap2pinfo(adap, i);

                pi->first_qset = qidx;
                pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
                qidx += pi->nqsets;
        }
#endif /* !CONFIG_CHELSIO_T4_DCB */

        s->ethqsets = qidx;
        s->max_ethqsets = qidx; /* MSI-X may lower it later */

        if (is_offload(adap)) {
                /*
                 * For offload we use 1 queue/channel if all ports are up to 1G,
                 * otherwise we divide all available queues amongst the channels
                 * capped by the number of available cores.
                 */
                if (n10g) {
                        i = min_t(int, ARRAY_SIZE(s->ofldrxq),
                                  num_online_cpus());
                        s->ofldqsets = roundup(i, adap->params.nports);
                } else
                        s->ofldqsets = adap->params.nports;
                /* For RDMA one Rx queue per channel suffices */
                s->rdmaqs = adap->params.nports;
                /* Try and allow at least 1 CIQ per cpu rounding down
                 * to the number of ports, with a minimum of 1 per port.
                 * A 2 port card in a 6 cpu system: 6 CIQs, 3 / port.
                 * A 4 port card in a 6 cpu system: 4 CIQs, 1 / port.
                 * A 4 port card in a 2 cpu system: 4 CIQs, 1 / port.
                 */
                s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus());
                s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
                              adap->params.nports;
                s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);
        }

        for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
                struct sge_eth_rxq *r = &s->ethrxq[i];

                init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
                r->fl.size = 72;
        }

        for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
                s->ethtxq[i].q.size = 1024;

        for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
                s->ctrlq[i].q.size = 512;

        for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
                s->ofldtxq[i].q.size = 1024;

        for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
                struct sge_ofld_rxq *r = &s->ofldrxq[i];

                init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
                r->rspq.uld = CXGB4_ULD_ISCSI;
                r->fl.size = 72;
        }

        for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
                struct sge_ofld_rxq *r = &s->rdmarxq[i];

                init_rspq(adap, &r->rspq, 5, 1, 511, 64);
                r->rspq.uld = CXGB4_ULD_RDMA;
                r->fl.size = 72;
        }
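
        /* Size each RDMA concentrator IQ to cover all CQ entries plus the
         * filter TIDs, capped at the largest ingress queue the SGE supports.
         */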
        ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
        if (ciq_size > SGE_MAX_IQ_SIZE) {
                CH_WARN(adap, "CIQ size too small for available IQs\n");
                ciq_size = SGE_MAX_IQ_SIZE;
        }

        for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
                struct sge_ofld_rxq *r = &s->rdmaciq[i];

                init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
                r->rspq.uld = CXGB4_ULD_RDMA;
        }

        init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
        init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
}

/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
        int i;
        struct port_info *pi;

        while (n < adap->sge.ethqsets)
                for_each_port(adap, i) {
                        pi = adap2pinfo(adap, i);
                        if (pi->nqsets > 1) {
                                pi->nqsets--;
                                adap->sge.ethqsets--;
                                if (adap->sge.ethqsets <= n)
                                        break;
                        }
                }
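
        /* Renumber each port's first queue set index now that the per-port
         * queue counts may have changed.
         */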
        n = 0;
        for_each_port(adap, i) {
                pi = adap2pinfo(adap, i);
                pi->first_qset = n;
                n += pi->nqsets;
        }
}

/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2
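
/*
 * Request MSI-X vectors: "want" is the ideal number covering every queue we
 * would like, "need" is the minimum we can operate with. On a partial
 * allocation the Ethernet, RDMA and offload queue counts are trimmed to fit
 * the vectors we actually received.
 */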
static int enable_msix(struct adapter *adap)
{
        int ofld_need = 0;
        int i, want, need, allocated;
        struct sge *s = &adap->sge;
        unsigned int nchan = adap->params.nports;
        struct msix_entry *entries;

        entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1),
                          GFP_KERNEL);
        if (!entries)
                return -ENOMEM;

        for (i = 0; i < MAX_INGQ + 1; ++i)
                entries[i].entry = i;

        want = s->max_ethqsets + EXTRA_VECS;
        if (is_offload(adap)) {
                want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
                /* need nchan for each possible ULD */
                ofld_need = 3 * nchan;
        }
#ifdef CONFIG_CHELSIO_T4_DCB
        /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
         * each port.
         */
        need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
#else
        need = adap->params.nports + EXTRA_VECS + ofld_need;
#endif
        allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
        if (allocated < 0) {
                dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
                         " not using MSI-X\n");
                kfree(entries);
                return allocated;
        }

        /* Distribute available vectors to the various queue groups.
         * Every group gets its minimum requirement and NIC gets top
         * priority for leftovers.
         */
        i = allocated - EXTRA_VECS - ofld_need;
        if (i < s->max_ethqsets) {
                s->max_ethqsets = i;
                if (i < s->ethqsets)
                        reduce_ethqs(adap, i);
        }
        if (is_offload(adap)) {
                if (allocated < want) {
                        s->rdmaqs = nchan;
                        s->rdmaciqs = nchan;
                }

                /* leftovers go to OFLD */
                i = allocated - EXTRA_VECS - s->max_ethqsets -
                    s->rdmaqs - s->rdmaciqs;
                s->ofldqsets = (i / nchan) * nchan; /* round down */
        }
        for (i = 0; i < allocated; ++i)
                adap->msix_info[i].vec = entries[i].vector;

        kfree(entries);
        return 0;
}

#undef EXTRA_VECS
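
/*
 * Allocate each port's RSS indirection table and fill it with the default
 * spreading across that port's queue sets.
 */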
static int init_rss(struct adapter *adap)
{
        unsigned int i, j;

        for_each_port(adap, i) {
                struct port_info *pi = adap2pinfo(adap, i);

                pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
                if (!pi->rss)
                        return -ENOMEM;
                for (j = 0; j < pi->rss_size; j++)
                        pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
        }
        return 0;
}
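
/*
 * Log a one-line summary of the port/adapter: chip, supported link speeds,
 * PCIe link width/speed and interrupt mode, followed by the adapter's
 * serial and part numbers.
 */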
static void print_port_info(const struct net_device *dev)
{
        char buf[80];
        char *bufp = buf;
        const char *spd = "";
        const struct port_info *pi = netdev_priv(dev);
        const struct adapter *adap = pi->adapter;

        if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
                spd = " 2.5 GT/s";
        else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
                spd = " 5 GT/s";
        else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
                spd = " 8 GT/s";

        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
                bufp += sprintf(bufp, "100/");
        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
                bufp += sprintf(bufp, "1000/");
        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
                bufp += sprintf(bufp, "10G/");
        if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
                bufp += sprintf(bufp, "40G/");
        if (bufp != buf)
                --bufp;
        sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));

        netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
                    adap->params.vpd.id,
                    CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
                    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
                    (adap->flags & USING_MSIX) ? " MSI-X" :
                    (adap->flags & USING_MSI) ? " MSI" : "");
        netdev_info(dev, "S/N: %s, P/N: %s\n",
                    adap->params.vpd.sn, adap->params.vpd.pn);
}

static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
{
        pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}

/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
        unsigned int i;

        t4_free_mem(adapter->l2t);
        t4_free_mem(adapter->tids.tid_tab);
        disable_msi(adapter);

        for_each_port(adapter, i)
                if (adapter->port[i]) {
                        kfree(adap2pinfo(adapter, i)->rss);
                        free_netdev(adapter->port[i]);
                }
        if (adapter->flags & FW_OK)
                t4_fw_bye(adapter, adapter->fn);
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
                   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128

static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        int func, i, err, s_qpp, qpp, num_seg;
        struct port_info *pi;
        bool highdma = false;
        struct adapter *adapter = NULL;
        void __iomem *regs;

        printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

        err = pci_request_regions(pdev, KBUILD_MODNAME);
        if (err) {
                /* Just info, some other driver may have claimed the device. */
                dev_info(&pdev->dev, "cannot obtain PCI resources\n");
                return err;
        }

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "cannot enable PCI device\n");
                goto out_release_regions;
        }

        regs = pci_ioremap_bar(pdev, 0);
        if (!regs) {
                dev_err(&pdev->dev, "cannot map device registers\n");
                err = -ENOMEM;
                goto out_disable_device;
        }

        err = t4_wait_dev_ready(regs);
        if (err < 0)
                goto out_unmap_bar0;

        /* We control everything through one PF */
        func = SOURCEPF_G(readl(regs + PL_WHOAMI_A));
        if (func != ent->driver_data) {
                iounmap(regs);
                pci_disable_device(pdev);
                pci_save_state(pdev); /* to restore SR-IOV later */
                goto sriov;
        }

        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                highdma = true;
                err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
                if (err) {
                        dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
                                "coherent allocations\n");
                        goto out_unmap_bar0;
                }
        } else {
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "no usable DMA configuration\n");
                        goto out_unmap_bar0;
                }
        }

        pci_enable_pcie_error_reporting(pdev);
        enable_pcie_relaxed_ordering(pdev);
        pci_set_master(pdev);
        pci_save_state(pdev);

        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
        if (!adapter) {
                err = -ENOMEM;
                goto out_unmap_bar0;
        }

        adapter->workq = create_singlethread_workqueue("cxgb4");
        if (!adapter->workq) {
                err = -ENOMEM;
                goto out_free_adapter;
        }

        /* PCI device has been enabled */
        adapter->flags |= DEV_ENABLED;

        adapter->regs = regs;
        adapter->pdev = pdev;
        adapter->pdev_dev = &pdev->dev;
        adapter->mbox = func;
        adapter->fn = func;
        adapter->msg_enable = dflt_msg_enable;
        memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

        spin_lock_init(&adapter->stats_lock);
        spin_lock_init(&adapter->tid_release_lock);
        spin_lock_init(&adapter->win0_lock);

        INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
        INIT_WORK(&adapter->db_full_task, process_db_full);
        INIT_WORK(&adapter->db_drop_task, process_db_drop);

        err = t4_prep_adapter(adapter);
        if (err)
                goto out_free_adapter;

        if (!is_t4(adapter->params.chip)) {
                s_qpp = (QUEUESPERPAGEPF0_S +
                        (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
                        adapter->fn);
                qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
                      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
                num_seg = PAGE_SIZE / SEGMENT_SIZE;

                /* Each segment size is 128B. Write coalescing is enabled only
                 * when the SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the
                 * queue is less than the number of segments that can be
                 * accommodated in a page.
                 */
                if (qpp > num_seg) {
                        dev_err(&pdev->dev,
                                "Incorrect number of egress queues per page\n");
                        err = -EINVAL;
                        goto out_free_adapter;
                }
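
                /* Map BAR2 write-combined; on T5 and later it holds the SGE
                 * doorbell and GTS registers, so write combining lets
                 * doorbell writes be coalesced.
                 */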
adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
|
|
|
|
pci_resource_len(pdev, 2));
|
|
|
|
if (!adapter->bar2) {
|
|
|
|
dev_err(&pdev->dev, "cannot map device bar2 region\n");
|
|
|
|
err = -ENOMEM;
|
2014-09-16 04:28:46 +07:00
|
|
|
goto out_free_adapter;
|
2013-03-14 12:08:51 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-09-26 09:39:39 +07:00
|
|
|
setup_memwin(adapter);
|
2010-04-01 22:28:26 +07:00
|
|
|
err = adap_init0(adapter);
|
2012-09-26 09:39:39 +07:00
|
|
|
setup_memwin_rdma(adapter);
|
2010-04-01 22:28:26 +07:00
|
|
|
if (err)
|
|
|
|
goto out_unmap_bar;
|
|
|
|
|
|
|
|
for_each_port(adapter, i) {
|
|
|
|
struct net_device *netdev;
|
|
|
|
|
|
|
|
netdev = alloc_etherdev_mq(sizeof(struct port_info),
|
|
|
|
MAX_ETH_QSETS);
|
|
|
|
if (!netdev) {
|
|
|
|
err = -ENOMEM;
|
|
|
|
goto out_free_dev;
|
|
|
|
}
|
|
|
|
|
|
|
|
SET_NETDEV_DEV(netdev, &pdev->dev);
|
|
|
|
|
|
|
|
adapter->port[i] = netdev;
|
|
|
|
pi = netdev_priv(netdev);
|
|
|
|
pi->adapter = adapter;
|
|
|
|
pi->xact_addr_filt = -1;
|
|
|
|
pi->port_id = i;
|
|
|
|
netdev->irq = pdev->irq;
|
|
|
|
|
2011-04-16 20:05:08 +07:00
|
|
|
netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
|
|
|
|
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
|
|
|
|
NETIF_F_RXCSUM | NETIF_F_RXHASH |
|
2013-04-19 09:04:27 +07:00
|
|
|
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
|
2011-11-15 22:29:55 +07:00
|
|
|
if (highdma)
|
|
|
|
netdev->hw_features |= NETIF_F_HIGHDMA;
|
|
|
|
netdev->features |= netdev->hw_features;
|
2010-04-01 22:28:26 +07:00
|
|
|
netdev->vlan_features = netdev->features & VLAN_FEAT;
|
|
|
|
|
2011-08-16 13:29:00 +07:00
|
|
|
netdev->priv_flags |= IFF_UNICAST_FLT;
|
|
|
|
|
2010-04-01 22:28:26 +07:00
|
|
|
netdev->netdev_ops = &cxgb4_netdev_ops;
|
2014-06-20 11:37:13 +07:00
|
|
|
#ifdef CONFIG_CHELSIO_T4_DCB
|
|
|
|
netdev->dcbnl_ops = &cxgb4_dcb_ops;
|
|
|
|
cxgb4_dcb_state_init(netdev);
|
|
|
|
#endif
|
2014-05-11 07:12:32 +07:00
|
|
|
netdev->ethtool_ops = &cxgb_ethtool_ops;
|
2010-04-01 22:28:26 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
pci_set_drvdata(pdev, adapter);
|
|
|
|
|
|
|
|
if (adapter->flags & FW_OK) {
|
2010-08-02 20:19:21 +07:00
|
|
|
err = t4_port_init(adapter, func, func, 0);
|
2010-04-01 22:28:26 +07:00
|
|
|
if (err)
|
|
|
|
goto out_free_dev;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Configure queues and allocate tables now; they can be needed as
|
|
|
|
* soon as the first register_netdev completes.
|
|
|
|
*/
|
|
|
|
cfg_queues(adapter);
|
|
|
|
|
|
|
|
adapter->l2t = t4_init_l2t();
|
|
|
|
if (!adapter->l2t) {
|
|
|
|
/* We tolerate a lack of L2T, giving up some functionality */
|
|
|
|
dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
|
|
|
|
adapter->params.offload = 0;
|
|
|
|
}
|
|
|
|
|
2015-01-15 06:17:34 +07:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
|
|
adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
|
|
|
|
adapter->clipt_end);
|
|
|
|
if (!adapter->clipt) {
|
|
|
|
/* We tolerate a lack of clip_table, giving up
|
|
|
|
* some functionality
|
|
|
|
*/
|
|
|
|
dev_warn(&pdev->dev,
|
|
|
|
"could not allocate Clip table, continuing\n");
|
|
|
|
adapter->params.offload = 0;
|
|
|
|
}
|
|
|
|
#endif
|
2010-04-01 22:28:26 +07:00
|
|
|
if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
|
|
|
|
dev_warn(&pdev->dev, "could not allocate TID table, "
|
|
|
|
"continuing\n");
|
|
|
|
adapter->params.offload = 0;
|
|
|
|
}
|
|
|
|
|
2010-07-11 19:01:15 +07:00
|
|
|
/* See what interrupts we'll be using */
|
|
|
|
if (msi > 1 && enable_msix(adapter) == 0)
|
|
|
|
adapter->flags |= USING_MSIX;
|
|
|
|
else if (msi > 0 && pci_enable_msi(pdev) == 0)
|
|
|
|
adapter->flags |= USING_MSI;
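/* Hedged note: "msi" is the driver's interrupt-mode module parameter
 * (assumed convention: 2 = try MSI-X first, 1 = MSI only, 0 = legacy
 * INTx), so the default falls back in the order MSI-X -> MSI -> INTx.
 * A hypothetical override at load time:
 *
 *	modprobe cxgb4 msi=1
 */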
|
|
|
|
|
2010-07-11 19:01:17 +07:00
|
|
|
err = init_rss(adapter);
|
|
|
|
if (err)
|
|
|
|
goto out_free_dev;
|
|
|
|
|
2010-04-01 22:28:26 +07:00
|
|
|
/*
|
|
|
|
* The card is now ready to go. If any errors occur during device
|
|
|
|
* registration we do not fail the whole card but rather proceed only
|
|
|
|
* with the ports we manage to register successfully. However, we must
|
|
|
|
* register at least one net device.
|
|
|
|
*/
|
|
|
|
for_each_port(adapter, i) {
|
2010-12-15 04:36:46 +07:00
|
|
|
pi = adap2pinfo(adapter, i);
|
|
|
|
netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
|
|
|
|
netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
|
|
|
|
|
2010-04-01 22:28:26 +07:00
|
|
|
err = register_netdev(adapter->port[i]);
|
|
|
|
if (err)
|
2010-12-15 04:36:51 +07:00
|
|
|
break;
|
|
|
|
adapter->chan_map[pi->tx_chan] = i;
|
|
|
|
print_port_info(adapter->port[i]);
|
2010-04-01 22:28:26 +07:00
|
|
|
}
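/* After the loop, "i" is the number of net devices that registered
 * successfully and "err" holds the first registration failure, if
 * any: i == 0 is fatal below, while err != 0 with i > 0 is reported
 * as a partial success and tolerated.
 */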
|
2010-12-15 04:36:51 +07:00
|
|
|
if (i == 0) {
|
2010-04-01 22:28:26 +07:00
|
|
|
dev_err(&pdev->dev, "could not register any net devices\n");
|
|
|
|
goto out_free_dev;
|
|
|
|
}
|
2010-12-15 04:36:51 +07:00
|
|
|
if (err) {
|
|
|
|
dev_warn(&pdev->dev, "only %d net devices registered\n", i);
|
|
|
|
err = 0;
|
2011-06-03 18:51:20 +07:00
|
|
|
}
|
2010-04-01 22:28:26 +07:00
|
|
|
|
|
|
|
if (cxgb4_debugfs_root) {
|
|
|
|
adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
|
|
|
|
cxgb4_debugfs_root);
|
|
|
|
setup_debugfs(adapter);
|
|
|
|
}
|
|
|
|
|
2011-09-24 13:11:31 +07:00
|
|
|
/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
|
|
|
|
pdev->needs_freset = 1;
|
|
|
|
|
2010-04-01 22:28:26 +07:00
|
|
|
if (is_offload(adapter))
|
|
|
|
attach_ulds(adapter);
|
|
|
|
|
2014-08-06 18:40:59 +07:00
|
|
|
sriov:
|
2010-04-01 22:28:26 +07:00
|
|
|
#ifdef CONFIG_PCI_IOV
|
2013-03-14 12:08:56 +07:00
|
|
|
if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
|
2010-04-01 22:28:26 +07:00
|
|
|
if (pci_enable_sriov(pdev, num_vf[func]) == 0)
|
|
|
|
dev_info(&pdev->dev,
|
|
|
|
"instantiated %u virtual functions\n",
|
|
|
|
num_vf[func]);
|
|
|
|
#endif
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
out_free_dev:
|
2010-07-11 19:01:16 +07:00
|
|
|
free_some_resources(adapter);
|
2010-04-01 22:28:26 +07:00
|
|
|
out_unmap_bar:
|
2013-12-03 18:35:56 +07:00
|
|
|
if (!is_t4(adapter->params.chip))
|
2013-03-14 12:08:51 +07:00
|
|
|
iounmap(adapter->bar2);
|
2010-04-01 22:28:26 +07:00
|
|
|
out_free_adapter:
|
2014-08-21 03:44:06 +07:00
|
|
|
if (adapter->workq)
|
|
|
|
destroy_workqueue(adapter->workq);
|
|
|
|
|
2010-04-01 22:28:26 +07:00
|
|
|
kfree(adapter);
|
2014-09-16 04:28:46 +07:00
|
|
|
out_unmap_bar0:
|
|
|
|
iounmap(regs);
|
2010-04-01 22:28:26 +07:00
|
|
|
out_disable_device:
|
|
|
|
pci_disable_pcie_error_reporting(pdev);
|
|
|
|
pci_disable_device(pdev);
|
|
|
|
out_release_regions:
|
|
|
|
pci_release_regions(pdev);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2012-12-03 21:23:02 +07:00
|
|
|
static void remove_one(struct pci_dev *pdev)
|
2010-04-01 22:28:26 +07:00
|
|
|
{
|
|
|
|
struct adapter *adapter = pci_get_drvdata(pdev);
|
|
|
|
|
2012-09-26 09:39:39 +07:00
|
|
|
#ifdef CONFIG_PCI_IOV
|
2010-04-01 22:28:26 +07:00
|
|
|
pci_disable_sriov(pdev);
|
|
|
|
|
2012-09-26 09:39:39 +07:00
|
|
|
#endif
|
|
|
|
|
2010-04-01 22:28:26 +07:00
|
|
|
if (adapter) {
|
|
|
|
int i;
|
|
|
|
|
2014-08-21 03:44:06 +07:00
|
|
|
/* Tear down per-adapter Work Queue first since it can contain
|
|
|
|
* references to our adapter data structure.
|
|
|
|
*/
|
|
|
|
destroy_workqueue(adapter->workq);
|
|
|
|
|
2010-04-01 22:28:26 +07:00
|
|
|
if (is_offload(adapter))
|
|
|
|
detach_ulds(adapter);
|
|
|
|
|
|
|
|
for_each_port(adapter, i)
|
2010-12-15 04:36:52 +07:00
|
|
|
if (adapter->port[i]->reg_state == NETREG_REGISTERED)
|
2010-04-01 22:28:26 +07:00
|
|
|
unregister_netdev(adapter->port[i]);
|
|
|
|
|
2014-06-28 03:51:52 +07:00
|
|
|
debugfs_remove_recursive(adapter->debugfs_root);
|
2010-04-01 22:28:26 +07:00
|
|
|
|
2012-12-10 16:30:52 +07:00
|
|
|
/* If we allocated filters, free up state associated with any
|
|
|
|
* valid filters ...
|
|
|
|
*/
|
|
|
|
if (adapter->tids.ftid_tab) {
|
|
|
|
struct filter_entry *f = &adapter->tids.ftid_tab[0];
|
2012-12-10 16:30:53 +07:00
|
|
|
for (i = 0; i < (adapter->tids.nftids +
|
|
|
|
adapter->tids.nsftids); i++, f++)
|
2012-12-10 16:30:52 +07:00
|
|
|
if (f->valid)
|
|
|
|
clear_filter(adapter, f);
|
|
|
|
}
|
|
|
|
|
2010-05-18 17:07:12 +07:00
|
|
|
if (adapter->flags & FULL_INIT_DONE)
|
|
|
|
cxgb_down(adapter);
|
2010-04-01 22:28:26 +07:00
|
|
|
|
2010-07-11 19:01:16 +07:00
|
|
|
free_some_resources(adapter);
|
2015-01-15 06:17:34 +07:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
|
|
t4_cleanup_clip_tbl(adapter);
|
|
|
|
#endif
|
2010-04-01 22:28:26 +07:00
|
|
|
iounmap(adapter->regs);
|
2013-12-03 18:35:56 +07:00
|
|
|
if (!is_t4(adapter->params.chip))
|
2013-03-14 12:08:51 +07:00
|
|
|
iounmap(adapter->bar2);
|
2010-04-01 22:28:26 +07:00
|
|
|
pci_disable_pcie_error_reporting(pdev);
|
2014-01-23 11:27:34 +07:00
|
|
|
if ((adapter->flags & DEV_ENABLED)) {
|
|
|
|
pci_disable_device(pdev);
|
|
|
|
adapter->flags &= ~DEV_ENABLED;
|
|
|
|
}
|
2010-04-01 22:28:26 +07:00
|
|
|
pci_release_regions(pdev);
|
cxgb4: Not need to hold the adap_rcu_lock lock when read adap_rcu_list
2014-06-20 16:32:36 +07:00
|
|
|
synchronize_rcu();
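/* Descriptive note: synchronize_rcu() above lets any RCU readers that
 * may still be walking this adapter (e.g. cxgb4_netdev() scanning
 * adap_rcu_list, per the commit that added the call) drain before the
 * kfree() below releases the memory they could be dereferencing.
 */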
|
2014-01-24 16:12:03 +07:00
|
|
|
kfree(adapter);
|
2010-09-30 16:17:12 +07:00
|
|
|
} else
|
2010-04-01 22:28:26 +07:00
|
|
|
pci_release_regions(pdev);
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct pci_driver cxgb4_driver = {
|
|
|
|
.name = KBUILD_MODNAME,
|
|
|
|
.id_table = cxgb4_pci_tbl,
|
|
|
|
.probe = init_one,
|
2012-12-03 21:23:02 +07:00
|
|
|
.remove = remove_one,
|
2014-02-25 03:04:52 +07:00
|
|
|
.shutdown = remove_one,
|
2010-06-18 17:05:29 +07:00
|
|
|
.err_handler = &cxgb4_eeh,
|
2010-04-01 22:28:26 +07:00
|
|
|
};
|
|
|
|
|
|
|
|
static int __init cxgb4_init_module(void)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/* Debugfs support is optional, just warn if this fails */
|
|
|
|
cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
|
|
|
|
if (!cxgb4_debugfs_root)
|
2013-01-06 20:34:49 +07:00
|
|
|
pr_warn("could not create debugfs entry, continuing\n");
|
2010-04-01 22:28:26 +07:00
|
|
|
|
|
|
|
ret = pci_register_driver(&cxgb4_driver);
|
2014-08-21 03:44:06 +07:00
|
|
|
if (ret < 0)
|
2010-04-01 22:28:26 +07:00
|
|
|
debugfs_remove(cxgb4_debugfs_root);
|
2013-07-04 17:40:46 +07:00
|
|
|
|
2014-10-15 10:07:22 +07:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
2015-01-15 06:17:34 +07:00
|
|
|
if (!inet6addr_registered) {
|
|
|
|
register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
|
|
|
|
inet6addr_registered = true;
|
|
|
|
}
|
2014-10-15 10:07:22 +07:00
|
|
|
#endif
|
2013-07-04 17:40:46 +07:00
|
|
|
|
2010-04-01 22:28:26 +07:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void __exit cxgb4_cleanup_module(void)
|
|
|
|
{
|
2014-10-15 10:07:22 +07:00
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
2015-01-21 22:27:52 +07:00
|
|
|
if (inet6addr_registered) {
|
2015-01-15 06:17:34 +07:00
|
|
|
unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
|
|
|
|
inet6addr_registered = false;
|
|
|
|
}
|
2014-10-15 10:07:22 +07:00
|
|
|
#endif
|
2010-04-01 22:28:26 +07:00
|
|
|
pci_unregister_driver(&cxgb4_driver);
|
|
|
|
debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
|
|
|
|
}
|
|
|
|
|
|
|
|
module_init(cxgb4_init_module);
|
|
|
|
module_exit(cxgb4_cleanup_module);
|