mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-30 11:56:43 +07:00
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (48 commits)
  RDMA/iwcm: Reject the connection when the cm_id is destroyed
  RDMA/cxgb3: Clean up properly on FW mismatch failures
  RDMA/cxgb3: Don't ignore insert_handle() failures
  MAINTAINERS: InfiniBand/RDMA mailing list transition to vger
  IB/mad: Allow tuning of QP0 and QP1 sizes
  IB/mad: Fix possible lock-lock-timer deadlock
  RDMA/nes: Map MTU to IB_MTU_* and correctly report link state
  RDMA/nes: Rework the disconn routine for terminate and flushing
  RDMA/nes: Use the flush code to fill in cqe error
  RDMA/nes: Make poll_cq return correct number of wqes during flush
  RDMA/nes: Use flush mechanism to set status for wqe in error
  RDMA/nes: Implement Terminate Packet
  RDMA/nes: Add CQ error handling
  RDMA/nes: Clean out CQ completions when QP is destroyed
  RDMA/nes: Change memory allocation for cqp request to GFP_ATOMIC
  RDMA/nes: Allocate work item for disconnect event handling
  RDMA/nes: Update refcnt during disconnect
  IB/mthca: Don't allow userspace open while recovering from catastrophic error
  IB/mthca: Distinguish multiple devices in /proc/interrupts
  IB/mthca: Annotate CQ locking
  ...
commit 2490138cb7
@@ -439,7 +439,7 @@ F:	drivers/hwmon/ams/

 AMSO1100 RNIC DRIVER
 M:	Tom Tucker <tom@opengridcomputing.com>
 M:	Steve Wise <swise@opengridcomputing.com>
-L:	general@lists.openfabrics.org
+L:	linux-rdma@vger.kernel.org
 S:	Maintained
 F:	drivers/infiniband/hw/amso1100/

@@ -1494,7 +1494,7 @@ F:	drivers/net/cxgb3/

 CXGB3 IWARP RNIC DRIVER (IW_CXGB3)
 M:	Steve Wise <swise@chelsio.com>
-L:	general@lists.openfabrics.org
+L:	linux-rdma@vger.kernel.org
 W:	http://www.openfabrics.org
 S:	Supported
 F:	drivers/infiniband/hw/cxgb3/

@@ -1868,7 +1868,7 @@ F:	fs/efs/
 EHCA (IBM GX bus InfiniBand adapter) DRIVER
 M:	Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 M:	Christoph Raisch <raisch@de.ibm.com>
-L:	general@lists.openfabrics.org
+L:	linux-rdma@vger.kernel.org
 S:	Supported
 F:	drivers/infiniband/hw/ehca/

@@ -2552,7 +2552,7 @@ INFINIBAND SUBSYSTEM
 M:	Roland Dreier <rolandd@cisco.com>
 M:	Sean Hefty <sean.hefty@intel.com>
 M:	Hal Rosenstock <hal.rosenstock@gmail.com>
-L:	general@lists.openfabrics.org (moderated for non-subscribers)
+L:	linux-rdma@vger.kernel.org
 W:	http://www.openib.org/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git
 S:	Supported

@@ -2729,7 +2729,7 @@ F:	drivers/net/ipg.c

 IPATH DRIVER
 M:	Ralph Campbell <infinipath@qlogic.com>
-L:	general@lists.openfabrics.org
+L:	linux-rdma@vger.kernel.org
 T:	git git://git.qlogic.com/ipath-linux-2.6
 S:	Supported
 F:	drivers/infiniband/hw/ipath/

@@ -3485,7 +3485,7 @@ F:	drivers/scsi/NCR_D700.*
 NETEFFECT IWARP RNIC DRIVER (IW_NES)
 M:	Faisal Latif <faisal.latif@intel.com>
 M:	Chien Tung <chien.tin.tung@intel.com>
-L:	general@lists.openfabrics.org
+L:	linux-rdma@vger.kernel.org
 W:	http://www.neteffect.com
 S:	Supported
 F:	drivers/infiniband/hw/nes/
@@ -362,6 +362,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
		 * In either case, must tell the provider to reject.
		 */
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
+		cm_id->device->iwcm->reject(cm_id, NULL, 0);
		break;
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_DESTROYING:
@@ -2,6 +2,7 @@
  * Copyright (c) 2004-2007 Voltaire, Inc.  All rights reserved.
  * Copyright (c) 2005 Intel Corporation.  All rights reserved.
  * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
+ * Copyright (c) 2009 HNR Consulting. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -45,14 +46,21 @@ MODULE_DESCRIPTION("kernel IB MAD API");
 MODULE_AUTHOR("Hal Rosenstock");
 MODULE_AUTHOR("Sean Hefty");

+int mad_sendq_size = IB_MAD_QP_SEND_SIZE;
+int mad_recvq_size = IB_MAD_QP_RECV_SIZE;
+
+module_param_named(send_queue_size, mad_sendq_size, int, 0444);
+MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
+module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
+MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
+
 static struct kmem_cache *ib_mad_cache;

 static struct list_head ib_mad_port_list;
 static u32 ib_mad_client_id = 0;

-/* Port list lock */
-static spinlock_t ib_mad_port_list_lock;
+static DEFINE_SPINLOCK(ib_mad_port_list_lock);

 /* Forward declarations */
 static int method_in_use(struct ib_mad_mgmt_method_table **method,
@@ -1974,7 +1982,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
	unsigned long delay;

	if (list_empty(&mad_agent_priv->wait_list)) {
-		cancel_delayed_work(&mad_agent_priv->timed_work);
+		__cancel_delayed_work(&mad_agent_priv->timed_work);
	} else {
		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,
					 struct ib_mad_send_wr_private,
@@ -1983,7 +1991,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)
		if (time_after(mad_agent_priv->timeout,
			       mad_send_wr->timeout)) {
			mad_agent_priv->timeout = mad_send_wr->timeout;
-			cancel_delayed_work(&mad_agent_priv->timed_work);
+			__cancel_delayed_work(&mad_agent_priv->timed_work);
			delay = mad_send_wr->timeout - jiffies;
			if ((long)delay <= 0)
				delay = 1;
@@ -2023,7 +2031,7 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)

	/* Reschedule a work item if we have a shorter timeout */
	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) {
-		cancel_delayed_work(&mad_agent_priv->timed_work);
+		__cancel_delayed_work(&mad_agent_priv->timed_work);
		queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
				   &mad_agent_priv->timed_work, delay);
	}
@@ -2736,8 +2744,8 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info,
	qp_init_attr.send_cq = qp_info->port_priv->cq;
	qp_init_attr.recv_cq = qp_info->port_priv->cq;
	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
-	qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE;
-	qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE;
+	qp_init_attr.cap.max_send_wr = mad_sendq_size;
+	qp_init_attr.cap.max_recv_wr = mad_recvq_size;
	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;
	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;
	qp_init_attr.qp_type = qp_type;
@@ -2752,8 +2760,8 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info,
		goto error;
	}
	/* Use minimum queue sizes unless the CQ is resized */
-	qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE;
-	qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE;
+	qp_info->send_queue.max_active = mad_sendq_size;
+	qp_info->recv_queue.max_active = mad_recvq_size;
	return 0;

error:
@@ -2792,7 +2800,7 @@ static int ib_mad_port_open(struct ib_device *device,
	init_mad_qp(port_priv, &port_priv->qp_info[0]);
	init_mad_qp(port_priv, &port_priv->qp_info[1]);

-	cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2;
+	cq_size = (mad_sendq_size + mad_recvq_size) * 2;
	port_priv->cq = ib_create_cq(port_priv->device,
				     ib_mad_thread_completion_handler,
				     NULL, port_priv, cq_size, 0);
@@ -2984,7 +2992,11 @@ static int __init ib_mad_init_module(void)
 {
	int ret;

-	spin_lock_init(&ib_mad_port_list_lock);
+	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE);
+	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE);
+
+	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
+	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);

	ib_mad_cache = kmem_cache_create("ib_mad",
					 sizeof(struct ib_mad_private),
@@ -3021,4 +3033,3 @@ static void __exit ib_mad_cleanup_module(void)

 module_init(ib_mad_init_module);
 module_exit(ib_mad_cleanup_module);
-
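The new send_queue_size/recv_queue_size parameters are registered with mode 0444, so they are set at module load (e.g. modprobe ib_mad send_queue_size=256 recv_queue_size=1024) and the clamped values can then be read back under /sys/module/ib_mad/parameters/. A minimal self-contained sketch of the same tunable-plus-clamp pattern; the demo_* names are illustrative, not from the patch:

    #include <linux/kernel.h>	/* clamp() */
    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static int demo_qsize = 128;	/* hypothetical tunable */
    module_param_named(queue_size, demo_qsize, int, 0444);	/* read-only via sysfs */
    MODULE_PARM_DESC(queue_size, "Illustrative queue size in work requests");

    static int __init demo_init(void)
    {
    	/* clamp to sane bounds at load time, as ib_mad now does */
    	demo_qsize = clamp(demo_qsize, 64, 8192);
    	return 0;
    }
    module_init(demo_init);

    static void __exit demo_exit(void) { }
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");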
@@ -2,6 +2,7 @@
  * Copyright (c) 2004, 2005, Voltaire, Inc. All rights reserved.
  * Copyright (c) 2005 Intel Corporation. All rights reserved.
  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2009 HNR Consulting. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -49,6 +50,8 @@
 /* QP and CQ parameters */
 #define IB_MAD_QP_SEND_SIZE	128
 #define IB_MAD_QP_RECV_SIZE	512
+#define IB_MAD_QP_MIN_SIZE	64
+#define IB_MAD_QP_MAX_SIZE	8192
 #define IB_MAD_SEND_REQ_MAX_SG	2
 #define IB_MAD_RECV_REQ_MAX_SG	1
@@ -106,6 +106,8 @@ struct mcast_group {
	struct ib_sa_query	*query;
	int			query_id;
	u16			pkey_index;
+	u8			leave_state;
+	int			retries;
 };

 struct mcast_member {
@@ -350,6 +352,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state)

	rec = group->rec;
	rec.join_state = leave_state;
+	group->leave_state = leave_state;

	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
				       port->port_num, IB_SA_METHOD_DELETE, &rec,
@@ -542,7 +545,11 @@ static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
 {
	struct mcast_group *group = context;

-	mcast_work_handler(&group->work);
+	if (status && group->retries > 0 &&
+	    !send_leave(group, group->leave_state))
+		group->retries--;
+	else
+		mcast_work_handler(&group->work);
 }

 static struct mcast_group *acquire_group(struct mcast_port *port,
@@ -565,6 +572,7 @@ static struct mcast_group *acquire_group(struct mcast_port *port,
	if (!group)
		return NULL;

+	group->retries = 3;
	group->port = port;
	group->rec.mgid = *mgid;
	group->pkey_index = MCAST_INVALID_PKEY_INDEX;
@@ -109,10 +109,10 @@ static struct ib_client sa_client = {
	.remove = ib_sa_remove_one
 };

-static spinlock_t idr_lock;
+static DEFINE_SPINLOCK(idr_lock);
 static DEFINE_IDR(query_idr);

-static spinlock_t tid_lock;
+static DEFINE_SPINLOCK(tid_lock);
 static u32 tid;

 #define PATH_REC_FIELD(field) \
@@ -1077,9 +1077,6 @@ static int __init ib_sa_init(void)
 {
	int ret;

-	spin_lock_init(&idr_lock);
-	spin_lock_init(&tid_lock);
-
	get_random_bytes(&tid, sizeof tid);

	ret = ib_register_client(&sa_client);
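The same conversion shows up in ib_mad above and ib_uverbs below: statically allocated locks move to compile-time initialization, and the runtime spin_lock_init() calls disappear from the init paths. A minimal sketch of the two forms (demo_* names are illustrative):

    #include <linux/spinlock.h>

    /* before: zeroed storage that must be initialized before first use */
    static spinlock_t demo_lock_old;

    /* after: fully initialized at compile time, usable immediately */
    static DEFINE_SPINLOCK(demo_lock_new);

    static void demo(void)
    {
    	spin_lock_init(&demo_lock_old);	/* the call the patches remove */

    	spin_lock(&demo_lock_new);	/* no init step needed */
    	spin_unlock(&demo_lock_new);
    }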
@@ -52,6 +52,10 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
	hop_cnt = smp->hop_cnt;

	/* See section 14.2.2.2, Vol 1 IB spec */
+	/* C14-6 -- valid hop_cnt values are from 0 to 63 */
+	if (hop_cnt >= IB_SMP_MAX_PATH_HOPS)
+		return IB_SMI_DISCARD;
+
	if (!ib_get_smp_direction(smp)) {
		/* C14-9:1 */
		if (hop_cnt && hop_ptr == 0) {
@@ -133,6 +137,10 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
	hop_cnt = smp->hop_cnt;

	/* See section 14.2.2.2, Vol 1 IB spec */
+	/* C14-6 -- valid hop_cnt values are from 0 to 63 */
+	if (hop_cnt >= IB_SMP_MAX_PATH_HOPS)
+		return IB_SMI_DISCARD;
+
	if (!ib_get_smp_direction(smp)) {
		/* C14-9:1 -- sender should have incremented hop_ptr */
		if (hop_cnt && hop_ptr == 0)
@@ -73,7 +73,7 @@ DEFINE_IDR(ib_uverbs_cq_idr);
 DEFINE_IDR(ib_uverbs_qp_idr);
 DEFINE_IDR(ib_uverbs_srq_idr);

-static spinlock_t map_lock;
+static DEFINE_SPINLOCK(map_lock);
 static struct ib_uverbs_device *dev_table[IB_UVERBS_MAX_DEVICES];
 static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);

@@ -584,14 +584,16 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,

	if (hdr.command < 0 ||
	    hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
-	    !uverbs_cmd_table[hdr.command] ||
-	    !(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
+	    !uverbs_cmd_table[hdr.command])
		return -EINVAL;

	if (!file->ucontext &&
	    hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT)
		return -EINVAL;

+	if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
+		return -ENOSYS;
+
	return uverbs_cmd_table[hdr.command](file, buf + sizeof hdr,
					     hdr.in_words * 4, hdr.out_words * 4);
 }
@@ -836,8 +838,6 @@ static int __init ib_uverbs_init(void)
 {
	int ret;

-	spin_lock_init(&map_lock);
-
	ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
				     "infiniband_verbs");
	if (ret) {
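Splitting the combined test changes the errno userspace sees: malformed or unknown commands still fail with -EINVAL, while a well-formed command that this particular device does not implement now reports -ENOSYS. A condensed sketch of the resulting order of checks (simplified from the hunk above, not a verbatim copy):

    /* 1. the command must exist and have a handler at all */
    if (hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
        !uverbs_cmd_table[hdr.command])
    	return -EINVAL;		/* malformed request */

    /* 2. a context is required for everything but GET_CONTEXT */
    if (!file->ucontext && hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT)
    	return -EINVAL;

    /* 3. only then: does this device support the command? */
    if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
    	return -ENOSYS;		/* valid command, unsupported by device */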
@@ -86,11 +86,7 @@ MODULE_DEVICE_TABLE(pci, c2_pci_table);

 static void c2_print_macaddr(struct net_device *netdev)
 {
-	pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, "
-		"IRQ %u\n", netdev->name,
-		netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
-		netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
-		netdev->irq);
+	pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name, netdev->dev_addr, netdev->irq);
 }

 static void c2_set_rxbufsize(struct c2_port *c2_port)
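%pM is the kernel printf extension for MAC addresses: it prints six colon-separated octets straight from a byte array, which is why five lines of hand-rolled %02X formatting collapse into one. A tiny illustration (the array contents are made up):

    static void demo_print_mac(void)
    {
    	u8 mac[6] = { 0x00, 0x1b, 0x21, 0xab, 0xcd, 0xef };

    	/* prints "MAC 00:1b:21:ab:cd:ef" */
    	printk(KERN_INFO "MAC %pM\n", mac);
    }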
@@ -780,11 +780,11 @@ int c2_register_device(struct c2_dev *dev)
	/* Register pseudo network device */
	dev->pseudo_netdev = c2_pseudo_netdev_init(dev);
	if (!dev->pseudo_netdev)
-		goto out3;
+		goto out;

	ret = register_netdev(dev->pseudo_netdev);
	if (ret)
-		goto out2;
+		goto out_free_netdev;

	pr_debug("%s:%u\n", __func__, __LINE__);
	strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
@@ -851,6 +851,10 @@ int c2_register_device(struct c2_dev *dev)
	dev->ibdev.post_recv = c2_post_receive;

	dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
+	if (dev->ibdev.iwcm == NULL) {
+		ret = -ENOMEM;
+		goto out_unregister_netdev;
+	}
	dev->ibdev.iwcm->add_ref = c2_add_ref;
	dev->ibdev.iwcm->rem_ref = c2_rem_ref;
	dev->ibdev.iwcm->get_qp = c2_get_qp;
@@ -862,23 +866,25 @@ int c2_register_device(struct c2_dev *dev)

	ret = ib_register_device(&dev->ibdev);
	if (ret)
-		goto out1;
+		goto out_free_iwcm;

	for (i = 0; i < ARRAY_SIZE(c2_dev_attributes); ++i) {
		ret = device_create_file(&dev->ibdev.dev,
					       c2_dev_attributes[i]);
		if (ret)
-			goto out0;
+			goto out_unregister_ibdev;
	}
-	goto out3;
+	goto out;

-out0:
+out_unregister_ibdev:
	ib_unregister_device(&dev->ibdev);
-out1:
+out_free_iwcm:
	kfree(dev->ibdev.iwcm);
+out_unregister_netdev:
	unregister_netdev(dev->pseudo_netdev);
-out2:
+out_free_netdev:
	free_netdev(dev->pseudo_netdev);
-out3:
+out:
	pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret);
	return ret;
 }
@@ -852,7 +852,9 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
	wqe->qpcaps = attr->qpcaps;
	wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
	wqe->rqe_count = cpu_to_be16(attr->rqe_count);
-	wqe->flags_rtr_type = cpu_to_be16(attr->flags|V_RTR_TYPE(attr->rtr_type));
+	wqe->flags_rtr_type = cpu_to_be16(attr->flags |
+					  V_RTR_TYPE(attr->rtr_type) |
+					  V_CHAN(attr->chan));
	wqe->ord = cpu_to_be32(attr->ord);
	wqe->ird = cpu_to_be32(attr->ird);
	wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
@@ -1032,6 +1034,7 @@ int cxio_rdev_open(struct cxio_rdev *rdev_p)
 err2:
	cxio_hal_destroy_ctrl_qp(rdev_p);
 err1:
	rdev_p->t3cdev_p->ulp = NULL;
+	list_del(&rdev_p->entry);
	return err;
 }
@@ -327,6 +327,11 @@ enum rdma_init_rtr_types {
 #define V_RTR_TYPE(x)	((x) << S_RTR_TYPE)
 #define G_RTR_TYPE(x)	((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)

+#define S_CHAN	4
+#define M_CHAN	0x3
+#define V_CHAN(x)	((x) << S_CHAN)
+#define G_CHAN(x)	((((x) >> S_CHAN)) & M_CHAN)
+
 struct t3_rdma_init_attr {
	u32 tid;
	u32 qpid;
@@ -346,6 +351,7 @@ struct t3_rdma_init_attr {
	u16 flags;
	u16 rqe_count;
	u32 irs;
+	u32 chan;
 };

 struct t3_rdma_init_wr {
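The S_/M_/V_/G_ quartet is the cxgb3 register-field convention: shift, mask, value-insert, and getter. With S_CHAN = 4 and M_CHAN = 0x3 the new channel field occupies bits 5:4, next to the RTR type. A short sketch of how the fields compose (values illustrative):

    u32 chan = 2;		/* hypothetical channel number */
    u32 rtr_type = 1;	/* hypothetical RTR type */
    u16 flags_rtr;

    flags_rtr = V_RTR_TYPE(rtr_type) | V_CHAN(chan);	/* pack both fields */
    WARN_ON(G_CHAN(flags_rtr) != chan);			/* unpack recovers 2 */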
@@ -51,7 +51,7 @@ cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];

 static void open_rnic_dev(struct t3cdev *);
 static void close_rnic_dev(struct t3cdev *);
-static void iwch_err_handler(struct t3cdev *, u32, u32);
+static void iwch_event_handler(struct t3cdev *, u32, u32);

 struct cxgb3_client t3c_client = {
	.name = "iw_cxgb3",
@@ -59,7 +59,7 @@ struct cxgb3_client t3c_client = {
	.remove = close_rnic_dev,
	.handlers = t3c_handlers,
	.redirect = iwch_ep_redirect,
-	.err_handler = iwch_err_handler
+	.event_handler = iwch_event_handler
 };

 static LIST_HEAD(dev_list);
@@ -105,11 +105,9 @@ static void rnic_init(struct iwch_dev *rnicp)
 static void open_rnic_dev(struct t3cdev *tdev)
 {
	struct iwch_dev *rnicp;
-	static int vers_printed;

	PDBG("%s t3cdev %p\n", __func__, tdev);
-	if (!vers_printed++)
-		printk(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
-		       DRV_VERSION);
+	printk_once(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
+		    DRV_VERSION);
	rnicp = (struct iwch_dev *)ib_alloc_device(sizeof(*rnicp));
	if (!rnicp) {
@@ -162,21 +160,36 @@ static void close_rnic_dev(struct t3cdev *tdev)
	mutex_unlock(&dev_mutex);
 }

-static void iwch_err_handler(struct t3cdev *tdev, u32 status, u32 error)
+static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
 {
	struct cxio_rdev *rdev = tdev->ulp;
-	struct iwch_dev *rnicp = rdev_to_iwch_dev(rdev);
+	struct iwch_dev *rnicp;
	struct ib_event event;
+	u32 portnum = port_id + 1;

-	if (status == OFFLOAD_STATUS_DOWN) {
+	if (!rdev)
+		return;
+	rnicp = rdev_to_iwch_dev(rdev);
+	switch (evt) {
+	case OFFLOAD_STATUS_DOWN: {
		rdev->flags = CXIO_ERROR_FATAL;
-
-		event.device = &rnicp->ibdev;
		event.event  = IB_EVENT_DEVICE_FATAL;
-		event.element.port_num = 0;
-		ib_dispatch_event(&event);
+		break;
+	}
+	case OFFLOAD_PORT_DOWN: {
+		event.event  = IB_EVENT_PORT_ERR;
+		break;
	}
+	case OFFLOAD_PORT_UP: {
+		event.event  = IB_EVENT_PORT_ACTIVE;
+		break;
+	}
+	}
+
+	event.device = &rnicp->ibdev;
+	event.element.port_num = portnum;
+	ib_dispatch_event(&event);

	return;
 }
@@ -286,7 +286,7 @@ void __free_ep(struct kref *kref)
	ep = container_of(container_of(kref, struct iwch_ep_common, kref),
			  struct iwch_ep, com);
	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
-	if (ep->com.flags & RELEASE_RESOURCES) {
+	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
		dst_release(ep->dst);
		l2t_release(L2DATA(ep->com.tdev), ep->l2t);
@@ -297,7 +297,7 @@ void __free_ep(struct kref *kref)
 static void release_ep_resources(struct iwch_ep *ep)
 {
	PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
-	ep->com.flags |= RELEASE_RESOURCES;
+	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	put_ep(&ep->com);
 }

@@ -786,10 +786,12 @@ static void connect_request_upcall(struct iwch_ep *ep)
	event.private_data_len = ep->plen;
	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	event.provider_data = ep;
-	if (state_read(&ep->parent_ep->com) != DEAD)
+	if (state_read(&ep->parent_ep->com) != DEAD) {
+		get_ep(&ep->com);
		ep->parent_ep->com.cm_id->event_handler(
						ep->parent_ep->com.cm_id,
						&event);
+	}
	put_ep(&ep->parent_ep->com);
	ep->parent_ep = NULL;
 }
@@ -1156,8 +1158,7 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
	 * We get 2 abort replies from the HW.  The first one must
	 * be ignored except for scribbling that we need one more.
	 */
-	if (!(ep->com.flags & ABORT_REQ_IN_PROGRESS)) {
-		ep->com.flags |= ABORT_REQ_IN_PROGRESS;
+	if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) {
		return CPL_RET_BUF_DONE;
	}

@@ -1477,10 +1478,14 @@ static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
-		 * rejects the CR.
+		 * rejects the CR. Also wake up anyone waiting
+		 * in rdma connection migration (see iwch_accept_cr()).
		 */
		__state_set(&ep->com, CLOSING);
+		get_ep(&ep->com);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MPA_REP_SENT:
		__state_set(&ep->com, CLOSING);
@@ -1561,8 +1566,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
	 * We get 2 peer aborts from the HW.  The first one must
	 * be ignored except for scribbling that we need one more.
	 */
-	if (!(ep->com.flags & PEER_ABORT_IN_PROGRESS)) {
-		ep->com.flags |= PEER_ABORT_IN_PROGRESS;
+	if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) {
		return CPL_RET_BUF_DONE;
	}

@@ -1589,9 +1593,13 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
-		 * rejects the CR.
+		 * rejects the CR. Also wake up anyone waiting
+		 * in rdma connection migration (see iwch_accept_cr()).
		 */
+		get_ep(&ep->com);
		ep->com.rpl_done = 1;
		ep->com.rpl_err = -ECONNRESET;
		PDBG("waking up ep %p\n", ep);
		wake_up(&ep->com.waitq);
		break;
	case MORIBUND:
	case CLOSING:
@@ -1797,6 +1805,7 @@ int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
		err = send_mpa_reject(ep, pdata, pdata_len);
		err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
	}
+	put_ep(&ep->com);
	return 0;
 }

@@ -1810,8 +1819,10 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
	struct iwch_qp *qp = get_qhp(h, conn_param->qpn);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-	if (state_read(&ep->com) == DEAD)
-		return -ECONNRESET;
+	if (state_read(&ep->com) == DEAD) {
+		err = -ECONNRESET;
+		goto err;
+	}

	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
	BUG_ON(!qp);
@@ -1819,15 +1830,14 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
	if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
	    (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
		abort_connection(ep, NULL, GFP_KERNEL);
-		return -EINVAL;
+		err = -EINVAL;
+		goto err;
	}

	cm_id->add_ref(cm_id);
	ep->com.cm_id = cm_id;
	ep->com.qp = qp;

-	ep->com.rpl_done = 0;
-	ep->com.rpl_err = 0;
	ep->ird = conn_param->ird;
	ep->ord = conn_param->ord;
@@ -1836,8 +1846,6 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)

	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);

-	get_ep(&ep->com);
-
	/* bind QP to EP and move to RTS */
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
@@ -1855,30 +1863,31 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
	err = iwch_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
-		goto err;
+		goto err1;

	/* if needed, wait for wr_ack */
	if (iwch_rqes_posted(qp)) {
		wait_event(ep->com.waitq, ep->com.rpl_done);
		err = ep->com.rpl_err;
		if (err)
-			goto err;
+			goto err1;
	}

	err = send_mpa_reply(ep, conn_param->private_data,
			     conn_param->private_data_len);
	if (err)
-		goto err;
+		goto err1;


	state_set(&ep->com, FPDU_MODE);
	established_upcall(ep);
	put_ep(&ep->com);
	return 0;
-err:
+err1:
	ep->com.cm_id = NULL;
	ep->com.qp = NULL;
	cm_id->rem_ref(cm_id);
+err:
+	put_ep(&ep->com);
	return err;
 }
@@ -2097,14 +2106,17 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
			ep->com.state = CLOSING;
			start_ep_timer(ep);
		}
+		set_bit(CLOSE_SENT, &ep->com.flags);
		break;
	case CLOSING:
-		close = 1;
-		if (abrupt) {
-			stop_ep_timer(ep);
-			ep->com.state = ABORTING;
-		} else
-			ep->com.state = MORIBUND;
+		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
+			close = 1;
+			if (abrupt) {
+				stop_ep_timer(ep);
+				ep->com.state = ABORTING;
+			} else
+				ep->com.state = MORIBUND;
+		}
		break;
	case MORIBUND:
	case ABORTING:
@@ -145,9 +145,10 @@ enum iwch_ep_state {
 };

 enum iwch_ep_flags {
-	PEER_ABORT_IN_PROGRESS	= (1 << 0),
-	ABORT_REQ_IN_PROGRESS	= (1 << 1),
-	RELEASE_RESOURCES	= (1 << 2),
+	PEER_ABORT_IN_PROGRESS	= 0,
+	ABORT_REQ_IN_PROGRESS	= 1,
+	RELEASE_RESOURCES	= 2,
+	CLOSE_SENT		= 3,
 };

 struct iwch_ep_common {
@@ -162,7 +163,7 @@ struct iwch_ep_common {
	wait_queue_head_t waitq;
	int rpl_done;
	int rpl_err;
-	u32 flags;
+	unsigned long flags;
 };

 struct iwch_listen_ep {
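The enum values change from bit masks to bit numbers because set_bit(), test_bit() and test_and_set_bit() take a bit index and operate atomically on an unsigned long, whereas the old `flags |= mask` read-modify-write was not atomic. A sketch of the converted idiom:

    #include <linux/bitops.h>

    static unsigned long demo_flags;	/* was: u32 flags manipulated with masks */

    static void demo(void)
    {
    	set_bit(RELEASE_RESOURCES, &demo_flags);	/* was: flags |= (1 << 2) */

    	if (test_bit(RELEASE_RESOURCES, &demo_flags))
    		;	/* resources marked for release */

    	/* atomically claim the bit; only the first caller sees it clear */
    	if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &demo_flags))
    		;	/* first of the two expected abort replies */
    }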
@@ -39,7 +39,7 @@
 #include "iwch.h"
 #include "iwch_provider.h"

-static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
+static int iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
 {
	u32 mmid;

@@ -47,14 +47,15 @@ static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
	mhp->attr.stag = stag;
	mmid = stag >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-	insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
	PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
 }

 int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
		      struct iwch_mr *mhp, int shift)
 {
	u32 stag;
+	int ret;

	if (cxio_register_phys_mem(&rhp->rdev,
				   &stag, mhp->attr.pdid,
@@ -66,9 +67,11 @@ int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
			   mhp->attr.pbl_size, mhp->attr.pbl_addr))
		return -ENOMEM;

-	iwch_finish_mem_reg(mhp, stag);
-
-	return 0;
+	ret = iwch_finish_mem_reg(mhp, stag);
+	if (ret)
+		cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+			       mhp->attr.pbl_addr);
+	return ret;
 }

 int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
@@ -77,6 +80,7 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
		      int npages)
 {
	u32 stag;
+	int ret;

	/* We could support this... */
	if (npages > mhp->attr.pbl_size)
@@ -93,9 +97,12 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
			   mhp->attr.pbl_size, mhp->attr.pbl_addr))
		return -ENOMEM;

-	iwch_finish_mem_reg(mhp, stag);
+	ret = iwch_finish_mem_reg(mhp, stag);
+	if (ret)
+		cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+			       mhp->attr.pbl_addr);

-	return 0;
+	return ret;
 }

 int iwch_alloc_pbl(struct iwch_mr *mhp, int npages)
@@ -195,7 +195,11 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
-	insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
+	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
+		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
+		kfree(chp);
+		return ERR_PTR(-ENOMEM);
+	}

	if (ucontext) {
		struct iwch_mm_entry *mm;
@@ -750,7 +754,11 @@ static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
	mhp->attr.stag = stag;
	mmid = (stag) >> 8;
	mhp->ibmw.rkey = stag;
-	insert_handle(rhp, &rhp->mmidr, mhp, mmid);
+	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
+		cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
+		kfree(mhp);
+		return ERR_PTR(-ENOMEM);
+	}
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmw);
 }
@@ -778,37 +786,43 @@ static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
	struct iwch_mr *mhp;
	u32 mmid;
	u32 stag = 0;
-	int ret;
+	int ret = 0;

	php = to_iwch_pd(pd);
	rhp = php->rhp;
	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
	if (!mhp)
-		return ERR_PTR(-ENOMEM);
+		goto err;

	mhp->rhp = rhp;
	ret = iwch_alloc_pbl(mhp, pbl_depth);
-	if (ret) {
-		kfree(mhp);
-		return ERR_PTR(ret);
-	}
+	if (ret)
+		goto err1;
	mhp->attr.pbl_size = pbl_depth;
	ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
				 mhp->attr.pbl_size, mhp->attr.pbl_addr);
-	if (ret) {
-		iwch_free_pbl(mhp);
-		kfree(mhp);
-		return ERR_PTR(ret);
-	}
+	if (ret)
+		goto err2;
	mhp->attr.pdid = php->pdid;
	mhp->attr.type = TPT_NON_SHARED_MR;
	mhp->attr.stag = stag;
	mhp->attr.state = 1;
	mmid = (stag) >> 8;
	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-	insert_handle(rhp, &rhp->mmidr, mhp, mmid);
+	if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
+		goto err3;
+
	PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
	return &(mhp->ibmr);
+err3:
+	cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
+		       mhp->attr.pbl_addr);
+err2:
+	iwch_free_pbl(mhp);
+err1:
+	kfree(mhp);
+err:
+	return ERR_PTR(ret);
 }

 static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl(
@@ -961,7 +975,13 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
	spin_lock_init(&qhp->lock);
	init_waitqueue_head(&qhp->wait);
	atomic_set(&qhp->refcnt, 1);
-	insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid);
+
+	if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
+		cxio_destroy_qp(&rhp->rdev, &qhp->wq,
+			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+		kfree(qhp);
+		return ERR_PTR(-ENOMEM);
+	}

	if (udata) {
@@ -1418,6 +1438,7 @@ int iwch_register_device(struct iwch_dev *dev)
 bail2:
	ib_unregister_device(&dev->ibdev);
 bail1:
+	kfree(dev->ibdev.iwcm);
	return ret;
 }

@@ -1430,5 +1451,6 @@ void iwch_unregister_device(struct iwch_dev *dev)
		device_remove_file(&dev->ibdev.dev,
				   iwch_class_attributes[i]);
	ib_unregister_device(&dev->ibdev);
+	kfree(dev->ibdev.iwcm);
	return;
 }
@@ -889,6 +889,7 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
	init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
	init_attr.rqe_count = iwch_rqes_posted(qhp);
	init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
+	init_attr.chan = qhp->ep->l2t->smt_idx;
	if (peer2peer) {
		init_attr.rtr_type = RTR_READ;
		if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
@@ -52,7 +52,7 @@
 #include "ehca_tools.h"
 #include "hcp_if.h"

-#define HCAD_VERSION "0028"
+#define HCAD_VERSION "0029"

 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
@@ -64,7 +64,7 @@ static int ehca_hw_level = 0;
 static int ehca_poll_all_eqs = 1;

 int ehca_debug_level   = 0;
-int ehca_nr_ports      = 2;
+int ehca_nr_ports      = -1;
 int ehca_use_hp_mr     = 0;
 int ehca_port_act_time = 30;
 int ehca_static_rate   = -1;
@@ -95,8 +95,8 @@ MODULE_PARM_DESC(hw_level,
		 "Hardware level (0: autosensing (default), "
		 "0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
 MODULE_PARM_DESC(nr_ports,
-		 "number of connected ports (-1: autodetect, 1: port one only, "
-		 "2: two ports (default)");
+		 "number of connected ports (-1: autodetect (default), "
+		 "1: port one only, 2: two ports)");
 MODULE_PARM_DESC(use_hp_mr,
		 "Use high performance MRs (default: no)");
 MODULE_PARM_DESC(port_act_time,
@@ -786,7 +786,11 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
	wc->slid = cqe->rlid;
	wc->dlid_path_bits = cqe->dlid;
	wc->src_qp = cqe->remote_qp_number;
-	wc->wc_flags = cqe->w_completion_flags;
+	/*
+	 * HW has "Immed data present" and "GRH present" in bits 6 and 5.
+	 * SW defines those in bits 1 and 0, so we can just shift and mask.
+	 */
+	wc->wc_flags = (cqe->w_completion_flags >> 5) & 3;
	wc->ex.imm_data = cpu_to_be32(cqe->immediate_data);
	wc->sl = cqe->service_level;
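The fix is a plain bit relocation: the hardware reports "immediate data present" and "GRH present" in bits 6 and 5, while the corresponding ib_wc flag bits sit at positions 1 and 0, so one shift and one mask translate both at once. Worked through (values illustrative):

    u32 hw_flags = 0x60;			/* bits 6 and 5 both set by HW */
    u32 wc_flags = (hw_flags >> 5) & 3;	/* 0x3: both SW flag bits set */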
@@ -125,14 +125,30 @@ struct ib_perf {
	u8 data[192];
 } __attribute__ ((packed));

+/* TC/SL/FL packed into 32 bits, as in ClassPortInfo */
+struct tcslfl {
+	u32 tc:8;
+	u32 sl:4;
+	u32 fl:20;
+} __attribute__ ((packed));
+
+/* IP Version/TC/FL packed into 32 bits, as in GRH */
+struct vertcfl {
+	u32 ver:4;
+	u32 tc:8;
+	u32 fl:20;
+} __attribute__ ((packed));
+
 static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
+			     struct ib_wc *in_wc, struct ib_grh *in_grh,
			     struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
	struct ib_perf *in_perf = (struct ib_perf *)in_mad;
	struct ib_perf *out_perf = (struct ib_perf *)out_mad;
	struct ib_class_port_info *poi =
		(struct ib_class_port_info *)out_perf->data;
+	struct tcslfl *tcslfl =
+		(struct tcslfl *)&poi->redirect_tcslfl;
	struct ehca_shca *shca =
		container_of(ibdev, struct ehca_shca, ib_device);
	struct ehca_sport *sport = &shca->sport[port_num - 1];
@@ -158,10 +174,29 @@ static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
	poi->base_version = 1;
	poi->class_version = 1;
	poi->resp_time_value = 18;
-	poi->redirect_lid = sport->saved_attr.lid;
-	poi->redirect_qp = sport->pma_qp_nr;
+
+	/* copy local routing information from WC where applicable */
+	tcslfl->sl = in_wc->sl;
+	poi->redirect_lid =
+		sport->saved_attr.lid | in_wc->dlid_path_bits;
+	poi->redirect_qp = sport->pma_qp_nr;
	poi->redirect_qkey = IB_QP1_QKEY;
-	poi->redirect_pkey = IB_DEFAULT_PKEY_FULL;
+
+	ehca_query_pkey(ibdev, port_num, in_wc->pkey_index,
+			&poi->redirect_pkey);
+
+	/* if request was globally routed, copy route info */
+	if (in_grh) {
+		struct vertcfl *vertcfl =
+			(struct vertcfl *)&in_grh->version_tclass_flow;
+		memcpy(poi->redirect_gid, in_grh->dgid.raw,
+		       sizeof(poi->redirect_gid));
+		tcslfl->tc = vertcfl->tc;
+		tcslfl->fl = vertcfl->fl;
+	} else
+		/* else only fill in default GID */
+		ehca_query_gid(ibdev, port_num, 0,
+			       (union ib_gid *)&poi->redirect_gid);

	ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
		 sport->saved_attr.lid, sport->pma_qp_nr);
@@ -183,8 +218,7 @@ static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,

 int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		     struct ib_wc *in_wc, struct ib_grh *in_grh,
-		     struct ib_mad *in_mad,
-		     struct ib_mad *out_mad)
+		     struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
	int ret;

@@ -196,7 +230,8 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
		return IB_MAD_RESULT_SUCCESS;

	ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);
-	ret = ehca_process_perf(ibdev, port_num, in_mad, out_mad);
+	ret = ehca_process_perf(ibdev, port_num, in_wc, in_grh,
+				in_mad, out_mad);

	return ret;
 }
@@ -1616,7 +1616,7 @@ static int try_alloc_port(struct ipath_devdata *dd, int port,
		pd->port_cnt = 1;
		port_fp(fp) = pd;
		pd->port_pid = get_pid(task_pid(current));
-		strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
+		strlcpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
		ipath_stats.sps_ports++;
		ret = 0;
	} else
@@ -60,7 +60,7 @@ static int recv_subn_get_nodedescription(struct ib_smp *smp,
	if (smp->attr_mod)
		smp->status |= IB_SMP_INVALID_FIELD;

-	strncpy(smp->data, ibdev->node_desc, sizeof(smp->data));
+	memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));

	return reply(smp);
 }
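Two different replacements for strncpy() here, matching two different destination semantics: port_comm is a C string, so strlcpy() guarantees NUL termination; smp->data is a fixed-size field that need not be terminated, so a full-width memcpy() is the honest operation. A compact sketch (buffers illustrative):

    #include <linux/string.h>

    static void demo_copies(const char *comm, const char *node_desc)
    {
    	char port_comm[16];
    	char smp_data[64];

    	/* C string destination: always NUL-terminated, truncates safely */
    	strlcpy(port_comm, comm, sizeof(port_comm));

    	/* fixed-width binary field: copy the whole thing, no terminator */
    	memcpy(smp_data, node_desc, sizeof(smp_data));
    }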
@@ -342,6 +342,9 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

+	if (!dev->ib_active)
+		return ERR_PTR(-EAGAIN);
+
	resp.qp_tab_size      = dev->dev->caps.num_qps;
	resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
	resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
@@ -540,15 +543,11 @@ static struct device_attribute *mlx4_class_attributes[] = {

 static void *mlx4_ib_add(struct mlx4_dev *dev)
 {
-	static int mlx4_ib_version_printed;
	struct mlx4_ib_dev *ibdev;
	int num_ports = 0;
	int i;

-	if (!mlx4_ib_version_printed) {
-		printk(KERN_INFO "%s", mlx4_ib_version);
-		++mlx4_ib_version_printed;
-	}
+	printk_once(KERN_INFO "%s", mlx4_ib_version);

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		num_ports++;
@@ -673,6 +672,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
		goto err_reg;
	}

+	ibdev->ib_active = true;
+
	return ibdev;

 err_reg:
@@ -729,6 +730,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
		break;

	case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
+		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

@@ -175,6 +175,7 @@ struct mlx4_ib_dev {
	spinlock_t		sm_lock;

	struct mutex		cap_mask_mutex;
+	bool			ib_active;
 };

 static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
@@ -615,10 +615,12 @@ static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
 }

 static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
+	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
 {
-	if (send_cq == recv_cq)
+	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
-	else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
+		__acquire(&recv_cq->lock);
+	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
@@ -628,10 +630,12 @@ static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv
 }

 static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
+	__releases(&send_cq->lock) __releases(&recv_cq->lock)
 {
-	if (send_cq == recv_cq)
+	if (send_cq == recv_cq) {
+		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
-	else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
+	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
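__acquires()/__releases() on the prototypes and the __acquire()/__release() statements in the send_cq == recv_cq branch are sparse static-analysis annotations, not runtime code: they keep the checker's lock balance correct when two logical locks are backed by one real spinlock. The same annotation is applied to mthca below. A minimal sketch of the idiom (demo function, not from the patch):

    static void demo_lock_pair(spinlock_t *a, spinlock_t *b)
    	__acquires(a) __acquires(b)
    {
    	if (a == b) {
    		spin_lock(a);
    		__acquire(b);	/* no-op at runtime; balances sparse's count */
    	} else {
    		spin_lock(a);
    		spin_lock_nested(b, SINGLE_DEPTH_NESTING);
    	}
    }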
@@ -88,6 +88,7 @@ static void handle_catas(struct mthca_dev *dev)
	event.device = &dev->ib_dev;
	event.event  = IB_EVENT_DEVICE_FATAL;
	event.element.port_num = 0;
+	dev->active = false;

	ib_dispatch_event(&event);
@@ -34,8 +34,6 @@
 #ifndef MTHCA_CONFIG_REG_H
 #define MTHCA_CONFIG_REG_H

-#include <asm/page.h>
-
 #define MTHCA_HCR_BASE		0x80680
 #define MTHCA_HCR_SIZE		0x0001c
 #define MTHCA_ECR_BASE		0x80700
@@ -357,6 +357,7 @@ struct mthca_dev {
	struct ib_ah	     *sm_ah[MTHCA_MAX_PORTS];
	spinlock_t	      sm_lock;
	u8		      rate[MTHCA_MAX_PORTS];
+	bool		      active;
 };

 #ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
@@ -829,27 +829,34 @@ int mthca_init_eq_table(struct mthca_dev *dev)

	if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
		static const char *eq_name[] = {
-			[MTHCA_EQ_COMP]  = DRV_NAME " (comp)",
-			[MTHCA_EQ_ASYNC] = DRV_NAME " (async)",
-			[MTHCA_EQ_CMD]   = DRV_NAME " (cmd)"
+			[MTHCA_EQ_COMP]  = DRV_NAME "-comp",
+			[MTHCA_EQ_ASYNC] = DRV_NAME "-async",
+			[MTHCA_EQ_CMD]   = DRV_NAME "-cmd"
		};

		for (i = 0; i < MTHCA_NUM_EQ; ++i) {
+			snprintf(dev->eq_table.eq[i].irq_name,
+				 IB_DEVICE_NAME_MAX,
+				 "%s@pci:%s", eq_name[i],
+				 pci_name(dev->pdev));
			err = request_irq(dev->eq_table.eq[i].msi_x_vector,
					  mthca_is_memfree(dev) ?
					  mthca_arbel_msi_x_interrupt :
					  mthca_tavor_msi_x_interrupt,
-					  0, eq_name[i], dev->eq_table.eq + i);
+					  0, dev->eq_table.eq[i].irq_name,
+					  dev->eq_table.eq + i);
			if (err)
				goto err_out_cmd;
			dev->eq_table.eq[i].have_irq = 1;
		}
	} else {
+		snprintf(dev->eq_table.eq[0].irq_name, IB_DEVICE_NAME_MAX,
+			 DRV_NAME "@pci:%s", pci_name(dev->pdev));
		err = request_irq(dev->pdev->irq,
				  mthca_is_memfree(dev) ?
				  mthca_arbel_interrupt :
				  mthca_tavor_interrupt,
-				  IRQF_SHARED, DRV_NAME, dev);
+				  IRQF_SHARED, dev->eq_table.eq[0].irq_name, dev);
		if (err)
			goto err_out_cmd;
		dev->eq_table.have_irq = 1;
@@ -1116,6 +1116,8 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
	pci_set_drvdata(pdev, mdev);
	mdev->hca_type = hca_type;

+	mdev->active = true;
+
	return 0;

 err_unregister:
@@ -1215,15 +1217,11 @@ int __mthca_restart_one(struct pci_dev *pdev)
 static int __devinit mthca_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *id)
 {
-	static int mthca_version_printed = 0;
	int ret;

	mutex_lock(&mthca_device_mutex);

-	if (!mthca_version_printed) {
-		printk(KERN_INFO "%s", mthca_version);
-		++mthca_version_printed;
-	}
+	printk_once(KERN_INFO "%s", mthca_version);

	if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) {
		printk(KERN_ERR PFX "%s has invalid driver data %lx\n",
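printk_once() hides the guard flag inside the macro, which is what the removed mthca_version_printed counter (and its twins in iw_cxgb3 and mlx4 above) open-coded by hand. Roughly how the macro expands (a sketch of the idea, not the exact kernel definition):

    {
    	static bool __print_once;

    	if (!__print_once) {
    		__print_once = true;
    		printk(KERN_INFO "%s", mthca_version);
    	}
    }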
@@ -334,6 +334,9 @@ static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
	struct mthca_ucontext           *context;
	int                              err;

+	if (!(to_mdev(ibdev)->active))
+		return ERR_PTR(-EAGAIN);
+
	memset(&uresp, 0, sizeof uresp);

	uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
@@ -113,6 +113,7 @@ struct mthca_eq {
	int                    nent;
	struct mthca_buf_list *page_list;
	struct mthca_mr        mr;
+	char		       irq_name[IB_DEVICE_NAME_MAX];
 };

 struct mthca_av;
@@ -1319,10 +1319,12 @@ int mthca_alloc_qp(struct mthca_dev *dev,
 }

 static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
 {
-	if (send_cq == recv_cq)
+	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
-	else if (send_cq->cqn < recv_cq->cqn) {
+		__acquire(&recv_cq->lock);
+	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
@@ -1332,10 +1334,12 @@ static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
 }

 static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+	__releases(&send_cq->lock) __releases(&recv_cq->lock)
 {
-	if (send_cq == recv_cq)
+	if (send_cq == recv_cq) {
+		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
-	else if (send_cq->cqn < recv_cq->cqn) {
+	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
@@ -30,7 +30,6 @@
  * SOFTWARE.
  */

-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
@@ -523,7 +523,7 @@ int nes_cm_disconn(struct nes_qp *);
 void nes_cm_disconn_worker(void *);

 /* nes_verbs.c */
-int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32);
+int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32, u32);
 int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
 struct nes_ib_device *nes_init_ofa_device(struct net_device *);
 void nes_destroy_ofa_device(struct nes_ib_device *);
@@ -2450,19 +2450,16 @@ static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_nod
  */
 int nes_cm_disconn(struct nes_qp *nesqp)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&nesqp->lock, flags);
-	if (nesqp->disconn_pending == 0) {
-		nesqp->disconn_pending++;
-		spin_unlock_irqrestore(&nesqp->lock, flags);
-		/* init our disconnect work element, to */
-		INIT_WORK(&nesqp->disconn_work, nes_disconnect_worker);
-
-		queue_work(g_cm_core->disconn_wq, &nesqp->disconn_work);
-	} else
-		spin_unlock_irqrestore(&nesqp->lock, flags);
+	struct disconn_work *work;
+
+	work = kzalloc(sizeof *work, GFP_ATOMIC);
+	if (!work)
+		return -ENOMEM; /* Timer will clean up */
+
+	nes_add_ref(&nesqp->ibqp);
+	work->nesqp = nesqp;
+	INIT_WORK(&work->work, nes_disconnect_worker);
+	queue_work(g_cm_core->disconn_wq, &work->work);
	return 0;
 }

@@ -2472,11 +2469,14 @@ int nes_cm_disconn(struct nes_qp *nesqp)
  */
 static void nes_disconnect_worker(struct work_struct *work)
 {
-	struct nes_qp *nesqp = container_of(work, struct nes_qp, disconn_work);
+	struct disconn_work *dwork = container_of(work, struct disconn_work, work);
+	struct nes_qp *nesqp = dwork->nesqp;

+	kfree(dwork);
	nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n",
			nesqp->last_aeq, nesqp->hwqp.qp_id);
	nes_cm_disconn_true(nesqp);
+	nes_rem_ref(&nesqp->ibqp);
 }

@@ -2493,7 +2493,12 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
	u16 last_ae;
	u8 original_hw_tcp_state;
	u8 original_ibqp_state;
-	u8 issued_disconnect_reset = 0;
+	enum iw_cm_event_type disconn_status = IW_CM_EVENT_STATUS_OK;
+	int issue_disconn = 0;
+	int issue_close = 0;
+	int issue_flush = 0;
+	u32 flush_q = NES_CQP_FLUSH_RQ;
+	struct ib_event ibevent;

	if (!nesqp) {
		nes_debug(NES_DBG_CM, "disconnect_worker nesqp is NULL\n");
@@ -2517,24 +2522,55 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
	original_ibqp_state   = nesqp->ibqp_state;
	last_ae = nesqp->last_aeq;

+	if (nesqp->term_flags) {
+		issue_disconn = 1;
+		issue_close = 1;
+		nesqp->cm_id = NULL;
+		if (nesqp->flush_issued == 0) {
+			nesqp->flush_issued = 1;
+			issue_flush = 1;
+		}
+	} else if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
+			((original_ibqp_state == IB_QPS_RTS) &&
+			(last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+		issue_disconn = 1;
+		if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET)
+			disconn_status = IW_CM_EVENT_STATUS_RESET;
+	}
+
-	nes_debug(NES_DBG_CM, "set ibqp_state=%u\n", nesqp->ibqp_state);
+	if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
+		 (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
+		 (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
+		 (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+		issue_close = 1;
+		nesqp->cm_id = NULL;
+		if (nesqp->flush_issued == 0) {
+			nesqp->flush_issued = 1;
+			issue_flush = 1;
+		}
+	}

-	if ((nesqp->cm_id) && (cm_id->event_handler)) {
-		if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
-				((original_ibqp_state == IB_QPS_RTS) &&
-				(last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+	spin_unlock_irqrestore(&nesqp->lock, flags);
+
+	if ((issue_flush) && (nesqp->destroyed == 0)) {
+		/* Flush the queue(s) */
+		if (nesqp->hw_iwarp_state >= NES_AEQE_IWARP_STATE_TERMINATE)
+			flush_q |= NES_CQP_FLUSH_SQ;
+		flush_wqes(nesvnic->nesdev, nesqp, flush_q, 1);
+
+		if (nesqp->term_flags) {
+			ibevent.device = nesqp->ibqp.device;
+			ibevent.event = nesqp->terminate_eventtype;
+			ibevent.element.qp = &nesqp->ibqp;
+			nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+		}
+	}
+
+	if ((cm_id) && (cm_id->event_handler)) {
+		if (issue_disconn) {
			atomic_inc(&cm_disconnects);
			cm_event.event = IW_CM_EVENT_DISCONNECT;
-			if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) {
-				cm_event.status = IW_CM_EVENT_STATUS_RESET;
-				nes_debug(NES_DBG_CM, "Generating a CM "
-					"Disconnect Event (status reset) for "
-					"QP%u, cm_id = %p. \n",
-					nesqp->hwqp.qp_id, cm_id);
-			} else
-				cm_event.status = IW_CM_EVENT_STATUS_OK;
-
+			cm_event.status = disconn_status;
			cm_event.local_addr = cm_id->local_addr;
			cm_event.remote_addr = cm_id->remote_addr;
			cm_event.private_data = NULL;
@@ -2547,29 +2583,14 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
				nesqp->hwqp.sq_tail, cm_id,
				atomic_read(&nesqp->refcount));

-			spin_unlock_irqrestore(&nesqp->lock, flags);
			ret = cm_id->event_handler(cm_id, &cm_event);
			if (ret)
				nes_debug(NES_DBG_CM, "OFA CM event_handler "
						"returned, ret=%d\n", ret);
-			spin_lock_irqsave(&nesqp->lock, flags);
		}

-		nesqp->disconn_pending = 0;
-		/* There might have been another AE while the lock was released */
-		original_hw_tcp_state = nesqp->hw_tcp_state;
-		original_ibqp_state   = nesqp->ibqp_state;
-		last_ae = nesqp->last_aeq;
-
-		if ((issued_disconnect_reset == 0) && (nesqp->cm_id) &&
-			((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
-			 (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
-			 (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
-			 (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+		if (issue_close) {
			atomic_inc(&cm_closes);
-			nesqp->cm_id = NULL;
-			nesqp->in_disconnect = 0;
-			spin_unlock_irqrestore(&nesqp->lock, flags);
			nes_disconnect(nesqp, 1);

			cm_id->provider_data = nesqp;
@@ -2588,28 +2609,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
		}

		cm_id->rem_ref(cm_id);
-
-		spin_lock_irqsave(&nesqp->lock, flags);
-		if (nesqp->flush_issued == 0) {
-			nesqp->flush_issued = 1;
-			spin_unlock_irqrestore(&nesqp->lock, flags);
-			flush_wqes(nesvnic->nesdev, nesqp,
-					NES_CQP_FLUSH_RQ, 1);
-		} else
-			spin_unlock_irqrestore(&nesqp->lock, flags);
-	} else {
-		cm_id = nesqp->cm_id;
-		spin_unlock_irqrestore(&nesqp->lock, flags);
-		/* check to see if the inbound reset beat the outbound reset */
-		if ((!cm_id) && (last_ae == NES_AEQE_AEID_RESET_SENT)) {
-			nes_debug(NES_DBG_CM, "QP%u: Decing refcount "
-				"due to inbound reset beating the "
-				"outbound reset.\n", nesqp->hwqp.qp_id);
-		}
-	}
-	} else {
-		nesqp->disconn_pending = 0;
-		spin_unlock_irqrestore(&nesqp->lock, flags);
	}

	return 0;
@@ -410,8 +410,6 @@ struct nes_cm_ops {
 int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *,
		enum nes_timer_type, int, int);

-int nes_cm_disconn(struct nes_qp *);
-
 int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
 int nes_reject(struct iw_cm_id *, const void *, u8);
 int nes_connect(struct iw_cm_id *, struct iw_cm_conn_param *);
@ -74,6 +74,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
|
||||
static void process_critical_error(struct nes_device *nesdev);
|
||||
static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
|
||||
static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
|
||||
static void nes_terminate_timeout(unsigned long context);
|
||||
static void nes_terminate_start_timer(struct nes_qp *nesqp);
|
||||
|
||||
#ifdef CONFIG_INFINIBAND_NES_DEBUG
|
||||
static unsigned char *nes_iwarp_state_str[] = {
|
||||
@ -2903,6 +2905,417 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
}


static u8 *locate_mpa(u8 *pkt, u32 aeq_info)
{
	u16 pkt_len;

	if (aeq_info & NES_AEQE_Q2_DATA_ETHERNET) {
		/* skip over ethernet header */
		pkt_len = be16_to_cpu(*(u16 *)(pkt + ETH_HLEN - 2));
		pkt += ETH_HLEN;

		/* Skip over IP and TCP headers */
		pkt += 4 * (pkt[0] & 0x0f);
		pkt += 4 * ((pkt[12] >> 4) & 0x0f);
	}
	return pkt;
}

/* Determine if incoming error pkt is rdma layer */
static u32 iwarp_opcode(struct nes_qp *nesqp, u32 aeq_info)
{
	u8 *pkt;
	u16 *mpa;
	u32 opcode = 0xffffffff;

	if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
		pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
		mpa = (u16 *)locate_mpa(pkt, aeq_info);
		opcode = be16_to_cpu(mpa[1]) & 0xf;
	}

	return opcode;
}

/* Build iWARP terminate header */
static int nes_bld_terminate_hdr(struct nes_qp *nesqp, u16 async_event_id, u32 aeq_info)
{
	u8 *pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
	u16 ddp_seg_len;
	int copy_len = 0;
	u8 is_tagged = 0;
	u8 flush_code = 0;
	struct nes_terminate_hdr *termhdr;

	termhdr = (struct nes_terminate_hdr *)nesqp->hwqp.q2_vbase;
	memset(termhdr, 0, 64);

	if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {

		/* Use data from offending packet to fill in ddp & rdma hdrs */
		pkt = locate_mpa(pkt, aeq_info);
		ddp_seg_len = be16_to_cpu(*(u16 *)pkt);
		if (ddp_seg_len) {
			copy_len = 2;
			termhdr->hdrct = DDP_LEN_FLAG;
			if (pkt[2] & 0x80) {
				is_tagged = 1;
				if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
					copy_len += TERM_DDP_LEN_TAGGED;
					termhdr->hdrct |= DDP_HDR_FLAG;
				}
			} else {
				if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
					copy_len += TERM_DDP_LEN_UNTAGGED;
					termhdr->hdrct |= DDP_HDR_FLAG;
				}

				if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {
					if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
						copy_len += TERM_RDMA_LEN;
						termhdr->hdrct |= RDMA_HDR_FLAG;
					}
				}
			}
		}
	}

	switch (async_event_id) {
	case NES_AEQE_AEID_AMP_UNALLOCATED_STAG:
		switch (iwarp_opcode(nesqp, aeq_info)) {
		case IWARP_OPCODE_WRITE:
			flush_code = IB_WC_LOC_PROT_ERR;
			termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
			termhdr->error_code = DDP_TAGGED_INV_STAG;
			break;
		default:
			flush_code = IB_WC_REM_ACCESS_ERR;
			termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
			termhdr->error_code = RDMAP_INV_STAG;
		}
		break;
	case NES_AEQE_AEID_AMP_INVALID_STAG:
		flush_code = IB_WC_REM_ACCESS_ERR;
		termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
		termhdr->error_code = RDMAP_INV_STAG;
		break;
	case NES_AEQE_AEID_AMP_BAD_QP:
		flush_code = IB_WC_LOC_QP_OP_ERR;
		termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
		termhdr->error_code = DDP_UNTAGGED_INV_QN;
		break;
	case NES_AEQE_AEID_AMP_BAD_STAG_KEY:
	case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
		switch (iwarp_opcode(nesqp, aeq_info)) {
		case IWARP_OPCODE_SEND_INV:
		case IWARP_OPCODE_SEND_SE_INV:
			flush_code = IB_WC_REM_OP_ERR;
			termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
			termhdr->error_code = RDMAP_CANT_INV_STAG;
			break;
		default:
			flush_code = IB_WC_REM_ACCESS_ERR;
			termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
			termhdr->error_code = RDMAP_INV_STAG;
		}
		break;
	case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
		if (aeq_info & (NES_AEQE_Q2_DATA_ETHERNET | NES_AEQE_Q2_DATA_MPA)) {
			flush_code = IB_WC_LOC_PROT_ERR;
			termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
			termhdr->error_code = DDP_TAGGED_BOUNDS;
		} else {
			flush_code = IB_WC_REM_ACCESS_ERR;
			termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
			termhdr->error_code = RDMAP_INV_BOUNDS;
		}
		break;
	case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION:
	case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
	case NES_AEQE_AEID_PRIV_OPERATION_DENIED:
		flush_code = IB_WC_REM_ACCESS_ERR;
		termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
		termhdr->error_code = RDMAP_ACCESS;
		break;
	case NES_AEQE_AEID_AMP_TO_WRAP:
		flush_code = IB_WC_REM_ACCESS_ERR;
		termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
		termhdr->error_code = RDMAP_TO_WRAP;
		break;
	case NES_AEQE_AEID_AMP_BAD_PD:
		switch (iwarp_opcode(nesqp, aeq_info)) {
		case IWARP_OPCODE_WRITE:
			flush_code = IB_WC_LOC_PROT_ERR;
			termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
			termhdr->error_code = DDP_TAGGED_UNASSOC_STAG;
			break;
		case IWARP_OPCODE_SEND_INV:
		case IWARP_OPCODE_SEND_SE_INV:
			flush_code = IB_WC_REM_ACCESS_ERR;
			termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
			termhdr->error_code = RDMAP_CANT_INV_STAG;
			break;
		default:
			flush_code = IB_WC_REM_ACCESS_ERR;
			termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
			termhdr->error_code = RDMAP_UNASSOC_STAG;
		}
		break;
	case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
		flush_code = IB_WC_LOC_LEN_ERR;
		termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP;
		termhdr->error_code = MPA_MARKER;
		break;
	case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
		flush_code = IB_WC_GENERAL_ERR;
		termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP;
		termhdr->error_code = MPA_CRC;
		break;
	case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE:
	case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL:
		flush_code = IB_WC_LOC_LEN_ERR;
		termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC;
		termhdr->error_code = DDP_CATASTROPHIC_LOCAL;
		break;
	case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC:
	case NES_AEQE_AEID_DDP_NO_L_BIT:
		flush_code = IB_WC_FATAL_ERR;
		termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC;
		termhdr->error_code = DDP_CATASTROPHIC_LOCAL;
		break;
	case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN:
	case NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
		flush_code = IB_WC_GENERAL_ERR;
		termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
		termhdr->error_code = DDP_UNTAGGED_INV_MSN_RANGE;
		break;
	case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
		flush_code = IB_WC_LOC_LEN_ERR;
		termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
		termhdr->error_code = DDP_UNTAGGED_INV_TOO_LONG;
		break;
	case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION:
		flush_code = IB_WC_GENERAL_ERR;
		if (is_tagged) {
			termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
			termhdr->error_code = DDP_TAGGED_INV_DDP_VER;
		} else {
			termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
			termhdr->error_code = DDP_UNTAGGED_INV_DDP_VER;
		}
		break;
	case NES_AEQE_AEID_DDP_UBE_INVALID_MO:
		flush_code = IB_WC_GENERAL_ERR;
		termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
		termhdr->error_code = DDP_UNTAGGED_INV_MO;
		break;
	case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
		flush_code = IB_WC_REM_OP_ERR;
		termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
		termhdr->error_code = DDP_UNTAGGED_INV_MSN_NO_BUF;
		break;
	case NES_AEQE_AEID_DDP_UBE_INVALID_QN:
		flush_code = IB_WC_GENERAL_ERR;
		termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
		termhdr->error_code = DDP_UNTAGGED_INV_QN;
		break;
	case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION:
		flush_code = IB_WC_GENERAL_ERR;
		termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
		termhdr->error_code = RDMAP_INV_RDMAP_VER;
		break;
	case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE:
		flush_code = IB_WC_LOC_QP_OP_ERR;
		termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
		termhdr->error_code = RDMAP_UNEXPECTED_OP;
		break;
	default:
		flush_code = IB_WC_FATAL_ERR;
		termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
		termhdr->error_code = RDMAP_UNSPECIFIED;
		break;
	}

	if (copy_len)
		memcpy(termhdr + 1, pkt, copy_len);

	if ((flush_code) && ((NES_AEQE_INBOUND_RDMA & aeq_info) == 0)) {
		if (aeq_info & NES_AEQE_SQ)
			nesqp->term_sq_flush_code = flush_code;
		else
			nesqp->term_rq_flush_code = flush_code;
	}

	return sizeof(struct nes_terminate_hdr) + copy_len;
}

static void nes_terminate_connection(struct nes_device *nesdev, struct nes_qp *nesqp,
		struct nes_hw_aeqe *aeqe, enum ib_event_type eventtype)
{
	u64 context;
	unsigned long flags;
	u32 aeq_info;
	u16 async_event_id;
	u8 tcp_state;
	u8 iwarp_state;
	u32 termlen = 0;
	u32 mod_qp_flags = NES_CQP_QP_IWARP_STATE_TERMINATE |
			NES_CQP_QP_TERM_DONT_SEND_FIN;
	struct nes_adapter *nesadapter = nesdev->nesadapter;

	if (nesqp->term_flags & NES_TERM_SENT)
		return; /* Sanity check */

	aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
	tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
	iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
	async_event_id = (u16)aeq_info;

	context = (unsigned long)nesadapter->qp_table[le32_to_cpu(
		aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN];
	if (!context) {
		WARN_ON(!context);
		return;
	}

	nesqp = (struct nes_qp *)(unsigned long)context;
	spin_lock_irqsave(&nesqp->lock, flags);
	nesqp->hw_iwarp_state = iwarp_state;
	nesqp->hw_tcp_state = tcp_state;
	nesqp->last_aeq = async_event_id;
	nesqp->terminate_eventtype = eventtype;
	spin_unlock_irqrestore(&nesqp->lock, flags);

	if (nesadapter->send_term_ok)
		termlen = nes_bld_terminate_hdr(nesqp, async_event_id, aeq_info);
	else
		mod_qp_flags |= NES_CQP_QP_TERM_DONT_SEND_TERM_MSG;

	nes_terminate_start_timer(nesqp);
	nesqp->term_flags |= NES_TERM_SENT;
	nes_hw_modify_qp(nesdev, nesqp, mod_qp_flags, termlen, 0);
}

static void nes_terminate_send_fin(struct nes_device *nesdev,
		struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe)
{
	u32 aeq_info;
	u16 async_event_id;
	u8 tcp_state;
	u8 iwarp_state;
	unsigned long flags;

	aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
	tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
	iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
	async_event_id = (u16)aeq_info;

	spin_lock_irqsave(&nesqp->lock, flags);
	nesqp->hw_iwarp_state = iwarp_state;
	nesqp->hw_tcp_state = tcp_state;
	nesqp->last_aeq = async_event_id;
	spin_unlock_irqrestore(&nesqp->lock, flags);

	/* Send the fin only */
	nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_TERMINATE |
		NES_CQP_QP_TERM_DONT_SEND_TERM_MSG, 0, 0);
}

/* Cleanup after a terminate sent or received */
static void nes_terminate_done(struct nes_qp *nesqp, int timeout_occurred)
{
	u32 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
	unsigned long flags;
	struct nes_vnic *nesvnic = to_nesvnic(nesqp->ibqp.device);
	struct nes_device *nesdev = nesvnic->nesdev;
	u8 first_time = 0;

	spin_lock_irqsave(&nesqp->lock, flags);
	if (nesqp->hte_added) {
		nesqp->hte_added = 0;
		next_iwarp_state |= NES_CQP_QP_DEL_HTE;
	}

	first_time = (nesqp->term_flags & NES_TERM_DONE) == 0;
	nesqp->term_flags |= NES_TERM_DONE;
	spin_unlock_irqrestore(&nesqp->lock, flags);

	/* Make sure we go through this only once */
	if (first_time) {
		if (timeout_occurred == 0)
			del_timer(&nesqp->terminate_timer);
		else
			next_iwarp_state |= NES_CQP_QP_RESET;

		nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
		nes_cm_disconn(nesqp);
	}
}

static void nes_terminate_received(struct nes_device *nesdev,
		struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe)
{
	u32 aeq_info;
	u8 *pkt;
	u32 *mpa;
	u8 ddp_ctl;
	u8 rdma_ctl;
	u16 aeq_id = 0;

	aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
	if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
		/* Terminate is not a performance path so the silicon */
		/* did not validate the frame - do it now */
		pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
		mpa = (u32 *)locate_mpa(pkt, aeq_info);
		ddp_ctl = (be32_to_cpu(mpa[0]) >> 8) & 0xff;
		rdma_ctl = be32_to_cpu(mpa[0]) & 0xff;
		if ((ddp_ctl & 0xc0) != 0x40)
			aeq_id = NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC;
		else if ((ddp_ctl & 0x03) != 1)
			aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION;
		else if (be32_to_cpu(mpa[2]) != 2)
			aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_QN;
		else if (be32_to_cpu(mpa[3]) != 1)
			aeq_id = NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN;
		else if (be32_to_cpu(mpa[4]) != 0)
			aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_MO;
		else if ((rdma_ctl & 0xc0) != 0x40)
			aeq_id = NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION;

		if (aeq_id) {
			/* Bad terminate recvd - send back a terminate */
			aeq_info = (aeq_info & 0xffff0000) | aeq_id;
			aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info);
			nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
			return;
		}
	}

	nesqp->term_flags |= NES_TERM_RCVD;
	nesqp->terminate_eventtype = IB_EVENT_QP_FATAL;
	nes_terminate_start_timer(nesqp);
	nes_terminate_send_fin(nesdev, nesqp, aeqe);
}

/* Timeout routine in case terminate fails to complete */
static void nes_terminate_timeout(unsigned long context)
{
	struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context;

	nes_terminate_done(nesqp, 1);
}

/* Set a timer in case hw cannot complete the terminate sequence */
static void nes_terminate_start_timer(struct nes_qp *nesqp)
{
	init_timer(&nesqp->terminate_timer);
	nesqp->terminate_timer.function = nes_terminate_timeout;
	nesqp->terminate_timer.expires = jiffies + HZ;
	nesqp->terminate_timer.data = (unsigned long)nesqp;
	add_timer(&nesqp->terminate_timer);
}

/**
 * nes_process_iwarp_aeqe
 */
@ -2910,28 +3323,27 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
		struct nes_hw_aeqe *aeqe)
{
	u64 context;
	u64 aeqe_context = 0;
	unsigned long flags;
	struct nes_qp *nesqp;
	struct nes_hw_cq *hw_cq;
	struct nes_cq *nescq;
	int resource_allocated;
	/* struct iw_cm_id *cm_id; */
	struct nes_adapter *nesadapter = nesdev->nesadapter;
	struct ib_event ibevent;
	/* struct iw_cm_event cm_event; */
	u32 aeq_info;
	u32 next_iwarp_state = 0;
	u16 async_event_id;
	u8 tcp_state;
	u8 iwarp_state;
	int must_disconn = 1;
	int must_terminate = 0;
	struct ib_event ibevent;

	nes_debug(NES_DBG_AEQ, "\n");
	aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
	if ((NES_AEQE_INBOUND_RDMA&aeq_info) || (!(NES_AEQE_QP&aeq_info))) {
	if ((NES_AEQE_INBOUND_RDMA & aeq_info) || (!(NES_AEQE_QP & aeq_info))) {
		context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
		context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
	} else {
		aeqe_context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
		aeqe_context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
		context = (unsigned long)nesadapter->qp_table[le32_to_cpu(
			aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN];
		BUG_ON(!context);
@ -2948,7 +3360,11 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,

	switch (async_event_id) {
	case NES_AEQE_AEID_LLP_FIN_RECEIVED:
		nesqp = *((struct nes_qp **)&context);
		nesqp = (struct nes_qp *)(unsigned long)context;

		if (nesqp->term_flags)
			return; /* Ignore it, wait for close complete */

		if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
			nesqp->cm_id->add_ref(nesqp->cm_id);
			schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
@ -2959,18 +3375,24 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
				nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
				async_event_id, nesqp->last_aeq, tcp_state);
		}

		if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
				(nesqp->ibqp_state != IB_QPS_RTS)) {
			/* FIN Received but tcp state or IB state moved on,
			   should expect a close complete */
			return;
		}

	case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
		nesqp = (struct nes_qp *)(unsigned long)context;
		if (nesqp->term_flags) {
			nes_terminate_done(nesqp, 0);
			return;
		}

	case NES_AEQE_AEID_LLP_CONNECTION_RESET:
	case NES_AEQE_AEID_TERMINATE_SENT:
	case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE:
	case NES_AEQE_AEID_RESET_SENT:
		nesqp = *((struct nes_qp **)&context);
		nesqp = (struct nes_qp *)(unsigned long)context;
		if (async_event_id == NES_AEQE_AEID_RESET_SENT) {
			tcp_state = NES_AEQE_TCP_STATE_CLOSED;
		}
@ -2982,12 +3404,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
		if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
				(tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) {
			nesqp->hte_added = 0;
			spin_unlock_irqrestore(&nesqp->lock, flags);
			nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u to remove hte\n",
				nesqp->hwqp.qp_id);
			nes_hw_modify_qp(nesdev, nesqp,
				NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE, 0);
			spin_lock_irqsave(&nesqp->lock, flags);
			next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE;
		}

		if ((nesqp->ibqp_state == IB_QPS_RTS) &&
@ -2999,151 +3416,106 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
				nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
				break;
			case NES_AEQE_IWARP_STATE_TERMINATE:
				next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE;
				nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_TERMINATE;
				if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) {
					next_iwarp_state |= 0x02000000;
					nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
				}
				must_disconn = 0; /* terminate path takes care of disconn */
				if (nesqp->term_flags == 0)
					must_terminate = 1;
				break;
			default:
				next_iwarp_state = 0;
			}
			spin_unlock_irqrestore(&nesqp->lock, flags);
			if (next_iwarp_state) {
				nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
					" also added another reference\n",
					nesqp->hwqp.qp_id, next_iwarp_state);
				nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
			}
			nes_cm_disconn(nesqp);
		} else {
			if (async_event_id == NES_AEQE_AEID_LLP_FIN_RECEIVED) {
				/* FIN Received but ib state not RTS,
				   close complete will be on its way */
				spin_unlock_irqrestore(&nesqp->lock, flags);
				return;
				must_disconn = 0;
			}
			spin_unlock_irqrestore(&nesqp->lock, flags);
			if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) {
				next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000;
				nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
				nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
					" also added another reference\n",
					nesqp->hwqp.qp_id, next_iwarp_state);
				nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
			}
			spin_unlock_irqrestore(&nesqp->lock, flags);

			if (must_terminate)
				nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
			else if (must_disconn) {
				if (next_iwarp_state) {
					nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X\n",
						nesqp->hwqp.qp_id, next_iwarp_state);
					nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
				}
				nes_cm_disconn(nesqp);
			}
		break;

	case NES_AEQE_AEID_TERMINATE_SENT:
		nesqp = (struct nes_qp *)(unsigned long)context;
		nes_terminate_send_fin(nesdev, nesqp, aeqe);
		break;

	case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
		nesqp = *((struct nes_qp **)&context);
		spin_lock_irqsave(&nesqp->lock, flags);
		nesqp->hw_iwarp_state = iwarp_state;
		nesqp->hw_tcp_state = tcp_state;
		nesqp->last_aeq = async_event_id;
		spin_unlock_irqrestore(&nesqp->lock, flags);
		nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TERMINATE_RECEIVED"
			" event on QP%u \n Q2 Data:\n",
			nesqp->hwqp.qp_id);
		if (nesqp->ibqp.event_handler) {
			ibevent.device = nesqp->ibqp.device;
			ibevent.element.qp = &nesqp->ibqp;
			ibevent.event = IB_EVENT_QP_FATAL;
			nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
		}
		if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
				((nesqp->ibqp_state == IB_QPS_RTS)&&
				(async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
			nes_cm_disconn(nesqp);
		} else {
			nesqp->in_disconnect = 0;
			wake_up(&nesqp->kick_waitq);
		}
		break;
	case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES:
		nesqp = *((struct nes_qp **)&context);
		spin_lock_irqsave(&nesqp->lock, flags);
		nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_ERROR;
		nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
		nesqp->last_aeq = async_event_id;
		if (nesqp->cm_id) {
			nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES"
				" event on QP%u, remote IP = 0x%08X \n",
				nesqp->hwqp.qp_id,
				ntohl(nesqp->cm_id->remote_addr.sin_addr.s_addr));
		} else {
			nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES"
				" event on QP%u \n",
				nesqp->hwqp.qp_id);
		}
		spin_unlock_irqrestore(&nesqp->lock, flags);
		next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_RESET;
		nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
		if (nesqp->ibqp.event_handler) {
			ibevent.device = nesqp->ibqp.device;
			ibevent.element.qp = &nesqp->ibqp;
			ibevent.event = IB_EVENT_QP_FATAL;
			nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
		}
		nesqp = (struct nes_qp *)(unsigned long)context;
		nes_terminate_received(nesdev, nesqp, aeqe);
		break;

	case NES_AEQE_AEID_AMP_BAD_STAG_KEY:
	case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
		if (NES_AEQE_INBOUND_RDMA&aeq_info) {
			nesqp = nesadapter->qp_table[le32_to_cpu(
				aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
		} else {
			/* TODO: get the actual WQE and mask off wqe index */
			context &= ~((u64)511);
			nesqp = *((struct nes_qp **)&context);
		}
		spin_lock_irqsave(&nesqp->lock, flags);
		nesqp->hw_iwarp_state = iwarp_state;
		nesqp->hw_tcp_state = tcp_state;
		nesqp->last_aeq = async_event_id;
		spin_unlock_irqrestore(&nesqp->lock, flags);
		nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_BAD_STAG_INDEX event on QP%u\n",
			nesqp->hwqp.qp_id);
		if (nesqp->ibqp.event_handler) {
			ibevent.device = nesqp->ibqp.device;
			ibevent.element.qp = &nesqp->ibqp;
			ibevent.event = IB_EVENT_QP_ACCESS_ERR;
			nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
		}
		break;
	case NES_AEQE_AEID_AMP_UNALLOCATED_STAG:
		nesqp = *((struct nes_qp **)&context);
		spin_lock_irqsave(&nesqp->lock, flags);
		nesqp->hw_iwarp_state = iwarp_state;
		nesqp->hw_tcp_state = tcp_state;
		nesqp->last_aeq = async_event_id;
		spin_unlock_irqrestore(&nesqp->lock, flags);
		nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_UNALLOCATED_STAG event on QP%u\n",
			nesqp->hwqp.qp_id);
		if (nesqp->ibqp.event_handler) {
			ibevent.device = nesqp->ibqp.device;
			ibevent.element.qp = &nesqp->ibqp;
			ibevent.event = IB_EVENT_QP_ACCESS_ERR;
			nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
		}
		break;
	case NES_AEQE_AEID_AMP_INVALID_STAG:
	case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION:
	case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
	case NES_AEQE_AEID_PRIV_OPERATION_DENIED:
		nesqp = nesadapter->qp_table[le32_to_cpu(aeqe->aeqe_words
			[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
		spin_lock_irqsave(&nesqp->lock, flags);
		nesqp->hw_iwarp_state = iwarp_state;
		nesqp->hw_tcp_state = tcp_state;
		nesqp->last_aeq = async_event_id;
		spin_unlock_irqrestore(&nesqp->lock, flags);
		nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_PRIV_OPERATION_DENIED event on QP%u,"
			" nesqp = %p, AE reported %p\n",
			nesqp->hwqp.qp_id, nesqp, *((struct nes_qp **)&context));
		if (nesqp->ibqp.event_handler) {
			ibevent.device = nesqp->ibqp.device;
			ibevent.element.qp = &nesqp->ibqp;
			ibevent.event = IB_EVENT_QP_ACCESS_ERR;
			nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
		}
	case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
	case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
	case NES_AEQE_AEID_AMP_TO_WRAP:
		nesqp = (struct nes_qp *)(unsigned long)context;
		nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_ACCESS_ERR);
		break;

	case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE:
	case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL:
	case NES_AEQE_AEID_DDP_UBE_INVALID_MO:
	case NES_AEQE_AEID_DDP_UBE_INVALID_QN:
		nesqp = (struct nes_qp *)(unsigned long)context;
		if (iwarp_opcode(nesqp, aeq_info) > IWARP_OPCODE_TERM) {
			aeq_info &= 0xffff0000;
			aeq_info |= NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE;
			aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info);
		}

	case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE:
	case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES:
	case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
	case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
	case NES_AEQE_AEID_AMP_BAD_QP:
	case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
	case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC:
	case NES_AEQE_AEID_DDP_NO_L_BIT:
	case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN:
	case NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
	case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION:
	case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION:
	case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE:
	case NES_AEQE_AEID_AMP_BAD_PD:
	case NES_AEQE_AEID_AMP_FASTREG_SHARED:
	case NES_AEQE_AEID_AMP_FASTREG_VALID_STAG:
	case NES_AEQE_AEID_AMP_FASTREG_MW_STAG:
	case NES_AEQE_AEID_AMP_FASTREG_INVALID_RIGHTS:
	case NES_AEQE_AEID_AMP_FASTREG_PBL_TABLE_OVERFLOW:
	case NES_AEQE_AEID_AMP_FASTREG_INVALID_LENGTH:
	case NES_AEQE_AEID_AMP_INVALIDATE_SHARED:
	case NES_AEQE_AEID_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS:
	case NES_AEQE_AEID_AMP_MWBIND_VALID_STAG:
	case NES_AEQE_AEID_AMP_MWBIND_OF_MR_STAG:
	case NES_AEQE_AEID_AMP_MWBIND_TO_ZERO_BASED_STAG:
	case NES_AEQE_AEID_AMP_MWBIND_TO_MW_STAG:
	case NES_AEQE_AEID_AMP_MWBIND_INVALID_RIGHTS:
	case NES_AEQE_AEID_AMP_MWBIND_INVALID_BOUNDS:
	case NES_AEQE_AEID_AMP_MWBIND_TO_INVALID_PARENT:
	case NES_AEQE_AEID_AMP_MWBIND_BIND_DISABLED:
	case NES_AEQE_AEID_BAD_CLOSE:
	case NES_AEQE_AEID_RDMA_READ_WHILE_ORD_ZERO:
	case NES_AEQE_AEID_STAG_ZERO_INVALID:
	case NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST:
	case NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
		nesqp = (struct nes_qp *)(unsigned long)context;
		nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
		break;

	case NES_AEQE_AEID_CQ_OPERATION_ERROR:
		context <<= 1;
		nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u, %p\n",
@ -3153,83 +3525,19 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
		if (resource_allocated) {
			printk(KERN_ERR PFX "%s: Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u\n",
				__func__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
			hw_cq = (struct nes_hw_cq *)(unsigned long)context;
			if (hw_cq) {
				nescq = container_of(hw_cq, struct nes_cq, hw_cq);
				if (nescq->ibcq.event_handler) {
					ibevent.device = nescq->ibcq.device;
					ibevent.event = IB_EVENT_CQ_ERR;
					ibevent.element.cq = &nescq->ibcq;
					nescq->ibcq.event_handler(&ibevent, nescq->ibcq.cq_context);
				}
			}
		}
		break;
	case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
		nesqp = nesadapter->qp_table[le32_to_cpu(
			aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
		spin_lock_irqsave(&nesqp->lock, flags);
		nesqp->hw_iwarp_state = iwarp_state;
		nesqp->hw_tcp_state = tcp_state;
		nesqp->last_aeq = async_event_id;
		spin_unlock_irqrestore(&nesqp->lock, flags);
		nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG"
			"_FOR_AVAILABLE_BUFFER event on QP%u\n",
			nesqp->hwqp.qp_id);
		if (nesqp->ibqp.event_handler) {
			ibevent.device = nesqp->ibqp.device;
			ibevent.element.qp = &nesqp->ibqp;
			ibevent.event = IB_EVENT_QP_ACCESS_ERR;
			nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
		}
		/* tell cm to disconnect, cm will queue work to thread */
		nes_cm_disconn(nesqp);
		break;
	case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
		nesqp = *((struct nes_qp **)&context);
		spin_lock_irqsave(&nesqp->lock, flags);
		nesqp->hw_iwarp_state = iwarp_state;
		nesqp->hw_tcp_state = tcp_state;
		nesqp->last_aeq = async_event_id;
		spin_unlock_irqrestore(&nesqp->lock, flags);
		nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_INVALID_MSN"
			"_NO_BUFFER_AVAILABLE event on QP%u\n",
			nesqp->hwqp.qp_id);
		if (nesqp->ibqp.event_handler) {
			ibevent.device = nesqp->ibqp.device;
			ibevent.element.qp = &nesqp->ibqp;
			ibevent.event = IB_EVENT_QP_FATAL;
			nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
		}
		/* tell cm to disconnect, cm will queue work to thread */
		nes_cm_disconn(nesqp);
		break;
	case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
		nesqp = *((struct nes_qp **)&context);
		spin_lock_irqsave(&nesqp->lock, flags);
		nesqp->hw_iwarp_state = iwarp_state;
		nesqp->hw_tcp_state = tcp_state;
		nesqp->last_aeq = async_event_id;
		spin_unlock_irqrestore(&nesqp->lock, flags);
		nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR"
			" event on QP%u \n Q2 Data:\n",
			nesqp->hwqp.qp_id);
		if (nesqp->ibqp.event_handler) {
			ibevent.device = nesqp->ibqp.device;
			ibevent.element.qp = &nesqp->ibqp;
			ibevent.event = IB_EVENT_QP_FATAL;
			nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
		}
		/* tell cm to disconnect, cm will queue work to thread */
		nes_cm_disconn(nesqp);
		break;
		/* TODO: additional AEs need to be here */
	case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
		nesqp = *((struct nes_qp **)&context);
		spin_lock_irqsave(&nesqp->lock, flags);
		nesqp->hw_iwarp_state = iwarp_state;
		nesqp->hw_tcp_state = tcp_state;
		nesqp->last_aeq = async_event_id;
		spin_unlock_irqrestore(&nesqp->lock, flags);
		if (nesqp->ibqp.event_handler) {
			ibevent.device = nesqp->ibqp.device;
			ibevent.element.qp = &nesqp->ibqp;
			ibevent.event = IB_EVENT_QP_ACCESS_ERR;
			nesqp->ibqp.event_handler(&ibevent,
				nesqp->ibqp.qp_context);
		}
		nes_cm_disconn(nesqp);
		break;

	default:
		nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n",
			async_event_id);
@ -3238,7 +3546,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,

	}


/**
 * nes_iwarp_ce_handler
 */
@ -3373,6 +3680,8 @@ void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp,
{
	struct nes_cqp_request *cqp_request;
	struct nes_hw_cqp_wqe *cqp_wqe;
	u32 sq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH;
	u32 rq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH;
	int ret;

	cqp_request = nes_get_cqp_request(nesdev);
@ -3389,6 +3698,24 @@ void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp,
	cqp_wqe = &cqp_request->cqp_wqe;
	nes_fill_init_cqp_wqe(cqp_wqe, nesdev);

	/* If wqe in error was identified, set code to be put into cqe */
	if ((nesqp->term_sq_flush_code) && (which_wq & NES_CQP_FLUSH_SQ)) {
		which_wq |= NES_CQP_FLUSH_MAJ_MIN;
		sq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_sq_flush_code;
		nesqp->term_sq_flush_code = 0;
	}

	if ((nesqp->term_rq_flush_code) && (which_wq & NES_CQP_FLUSH_RQ)) {
		which_wq |= NES_CQP_FLUSH_MAJ_MIN;
		rq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_rq_flush_code;
		nesqp->term_rq_flush_code = 0;
	}

	if (which_wq & NES_CQP_FLUSH_MAJ_MIN) {
		cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_SQ_CODE] = cpu_to_le32(sq_code);
		cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_RQ_CODE] = cpu_to_le32(rq_code);
	}

	cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] =
		cpu_to_le32(NES_CQP_FLUSH_WQES | which_wq);
	cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesqp->hwqp.qp_id);

@ -241,6 +241,7 @@ enum nes_cqp_stag_wqeword_idx {
};

#define NES_CQP_OP_IWARP_STATE_SHIFT 28
#define NES_CQP_OP_TERMLEN_SHIFT 28

enum nes_cqp_qp_bits {
	NES_CQP_QP_ARP_VALID = (1<<8),
@ -265,12 +266,16 @@ enum nes_cqp_qp_bits {
	NES_CQP_QP_IWARP_STATE_TERMINATE = (5<<NES_CQP_OP_IWARP_STATE_SHIFT),
	NES_CQP_QP_IWARP_STATE_ERROR = (6<<NES_CQP_OP_IWARP_STATE_SHIFT),
	NES_CQP_QP_IWARP_STATE_MASK = (7<<NES_CQP_OP_IWARP_STATE_SHIFT),
	NES_CQP_QP_TERM_DONT_SEND_FIN = (1<<24),
	NES_CQP_QP_TERM_DONT_SEND_TERM_MSG = (1<<25),
	NES_CQP_QP_RESET = (1<<31),
};

enum nes_cqp_qp_wqe_word_idx {
	NES_CQP_QP_WQE_CONTEXT_LOW_IDX = 6,
	NES_CQP_QP_WQE_CONTEXT_HIGH_IDX = 7,
	NES_CQP_QP_WQE_FLUSH_SQ_CODE = 8,
	NES_CQP_QP_WQE_FLUSH_RQ_CODE = 9,
	NES_CQP_QP_WQE_NEW_MSS_IDX = 15,
};

@ -361,6 +366,7 @@ enum nes_cqp_arp_bits {
enum nes_cqp_flush_bits {
	NES_CQP_FLUSH_SQ = (1<<30),
	NES_CQP_FLUSH_RQ = (1<<31),
	NES_CQP_FLUSH_MAJ_MIN = (1<<28),
};

enum nes_cqe_opcode_bits {
@ -633,11 +639,14 @@ enum nes_aeqe_bits {
	NES_AEQE_INBOUND_RDMA = (1<<19),
	NES_AEQE_IWARP_STATE_MASK = (7<<20),
	NES_AEQE_TCP_STATE_MASK = (0xf<<24),
	NES_AEQE_Q2_DATA_WRITTEN = (0x3<<28),
	NES_AEQE_VALID = (1<<31),
};

#define NES_AEQE_IWARP_STATE_SHIFT 20
#define NES_AEQE_TCP_STATE_SHIFT 24
#define NES_AEQE_Q2_DATA_ETHERNET (1<<28)
#define NES_AEQE_Q2_DATA_MPA (1<<29)

enum nes_aeqe_iwarp_state {
	NES_AEQE_IWARP_STATE_NON_EXISTANT = 0,
@ -751,6 +760,15 @@ enum nes_iwarp_sq_wqe_bits {
	NES_IWARP_SQ_OP_NOP = 12,
};

enum nes_iwarp_cqe_major_code {
	NES_IWARP_CQE_MAJOR_FLUSH = 1,
	NES_IWARP_CQE_MAJOR_DRV = 0x8000
};

enum nes_iwarp_cqe_minor_code {
	NES_IWARP_CQE_MINOR_FLUSH = 1
};

#define NES_EEPROM_READ_REQUEST (1<<16)
#define NES_MAC_ADDR_VALID (1<<20)

@ -1119,6 +1137,7 @@ struct nes_adapter {
	u8 netdev_max;	/* from host nic address count in EEPROM */
	u8 port_count;
	u8 virtwq;
	u8 send_term_ok;
	u8 et_use_adaptive_rx_coalesce;
	u8 adapter_fcn_count;
	u8 pft_mcast_map[NES_PFT_SIZE];
@ -1217,6 +1236,90 @@ struct nes_ib_device {
	u32 num_pd;
};

enum nes_hdrct_flags {
	DDP_LEN_FLAG = 0x80,
	DDP_HDR_FLAG = 0x40,
	RDMA_HDR_FLAG = 0x20
};

enum nes_term_layers {
	LAYER_RDMA = 0,
	LAYER_DDP = 1,
	LAYER_MPA = 2
};

enum nes_term_error_types {
	RDMAP_CATASTROPHIC = 0,
	RDMAP_REMOTE_PROT = 1,
	RDMAP_REMOTE_OP = 2,
	DDP_CATASTROPHIC = 0,
	DDP_TAGGED_BUFFER = 1,
	DDP_UNTAGGED_BUFFER = 2,
	DDP_LLP = 3
};

enum nes_term_rdma_errors {
	RDMAP_INV_STAG = 0x00,
	RDMAP_INV_BOUNDS = 0x01,
	RDMAP_ACCESS = 0x02,
	RDMAP_UNASSOC_STAG = 0x03,
	RDMAP_TO_WRAP = 0x04,
	RDMAP_INV_RDMAP_VER = 0x05,
	RDMAP_UNEXPECTED_OP = 0x06,
	RDMAP_CATASTROPHIC_LOCAL = 0x07,
	RDMAP_CATASTROPHIC_GLOBAL = 0x08,
	RDMAP_CANT_INV_STAG = 0x09,
	RDMAP_UNSPECIFIED = 0xff
};

enum nes_term_ddp_errors {
	DDP_CATASTROPHIC_LOCAL = 0x00,
	DDP_TAGGED_INV_STAG = 0x00,
	DDP_TAGGED_BOUNDS = 0x01,
	DDP_TAGGED_UNASSOC_STAG = 0x02,
	DDP_TAGGED_TO_WRAP = 0x03,
	DDP_TAGGED_INV_DDP_VER = 0x04,
	DDP_UNTAGGED_INV_QN = 0x01,
	DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
	DDP_UNTAGGED_INV_MSN_RANGE = 0x03,
	DDP_UNTAGGED_INV_MO = 0x04,
	DDP_UNTAGGED_INV_TOO_LONG = 0x05,
	DDP_UNTAGGED_INV_DDP_VER = 0x06
};

enum nes_term_mpa_errors {
	MPA_CLOSED = 0x01,
	MPA_CRC = 0x02,
	MPA_MARKER = 0x03,
	MPA_REQ_RSP = 0x04,
};

struct nes_terminate_hdr {
	u8 layer_etype;
	u8 error_code;
	u8 hdrct;
	u8 rsvd;
};

/* Used to determine how to fill in terminate error codes */
#define IWARP_OPCODE_WRITE 0
#define IWARP_OPCODE_READREQ 1
#define IWARP_OPCODE_READRSP 2
#define IWARP_OPCODE_SEND 3
#define IWARP_OPCODE_SEND_INV 4
#define IWARP_OPCODE_SEND_SE 5
#define IWARP_OPCODE_SEND_SE_INV 6
#define IWARP_OPCODE_TERM 7

/* These values are used only during terminate processing */
#define TERM_DDP_LEN_TAGGED 14
#define TERM_DDP_LEN_UNTAGGED 18
#define TERM_RDMA_LEN 28
#define RDMA_OPCODE_MASK 0x0f
#define RDMA_READ_REQ_OPCODE 1
#define BAD_FRAME_OFFSET 64
#define CQE_MAJOR_DRV 0x8000

#define nes_vlan_rx vlan_hwaccel_receive_skb
#define nes_netif_rx netif_receive_skb

@ -183,6 +183,9 @@ int nes_read_eeprom_values(struct nes_device *nesdev, struct nes_adapter *nesada
	} else if (((major_ver == 2) && (minor_ver > 21)) || ((major_ver > 2) && (major_ver != 255))) {
		nesadapter->virtwq = 1;
	}
	if (((major_ver == 3) && (minor_ver >= 16)) || (major_ver > 3))
		nesadapter->send_term_ok = 1;

	nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8)) << 16) +
		(u32)((u8)eeprom_data);

@ -548,7 +551,7 @@ struct nes_cqp_request *nes_get_cqp_request(struct nes_device *nesdev)
		spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
	}
	if (cqp_request == NULL) {
		cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_KERNEL);
		cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_ATOMIC);
		if (cqp_request) {
			cqp_request->dynamic = 1;
			INIT_LIST_HEAD(&cqp_request->list);

@ -667,15 +667,32 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop
 */
static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props)
{
	struct nes_vnic *nesvnic = to_nesvnic(ibdev);
	struct net_device *netdev = nesvnic->netdev;

	memset(props, 0, sizeof(*props));

	props->max_mtu = IB_MTU_2048;
	props->active_mtu = IB_MTU_2048;
	props->max_mtu = IB_MTU_4096;

	if (netdev->mtu >= 4096)
		props->active_mtu = IB_MTU_4096;
	else if (netdev->mtu >= 2048)
		props->active_mtu = IB_MTU_2048;
	else if (netdev->mtu >= 1024)
		props->active_mtu = IB_MTU_1024;
	else if (netdev->mtu >= 512)
		props->active_mtu = IB_MTU_512;
	else
		props->active_mtu = IB_MTU_256;

	props->lid = 1;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	props->state = IB_PORT_ACTIVE;
	if (nesvnic->linkup)
		props->state = IB_PORT_ACTIVE;
	else
		props->state = IB_PORT_DOWN;
	props->phys_state = 0;
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
		IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
@ -1505,13 +1522,46 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
}


/**
 * nes_clean_cq
 */
static void nes_clean_cq(struct nes_qp *nesqp, struct nes_cq *nescq)
{
	u32 cq_head;
	u32 lo;
	u32 hi;
	u64 u64temp;
	unsigned long flags = 0;

	spin_lock_irqsave(&nescq->lock, flags);

	cq_head = nescq->hw_cq.cq_head;
	while (le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) {
		rmb();
		lo = le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
		hi = le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX]);
		u64temp = (((u64)hi) << 32) | ((u64)lo);
		u64temp &= ~(NES_SW_CONTEXT_ALIGN-1);
		if (u64temp == (u64)(unsigned long)nesqp) {
			/* Zero the context value so cqe will be ignored */
			nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] = 0;
			nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX] = 0;
		}

		if (++cq_head >= nescq->hw_cq.cq_size)
			cq_head = 0;
	}

	spin_unlock_irqrestore(&nescq->lock, flags);
}


/**
 * nes_destroy_qp
 */
static int nes_destroy_qp(struct ib_qp *ibqp)
{
	struct nes_qp *nesqp = to_nesqp(ibqp);
	/* struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); */
	struct nes_ucontext *nes_ucontext;
	struct ib_qp_attr attr;
	struct iw_cm_id *cm_id;
@ -1548,7 +1598,6 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
		nes_debug(NES_DBG_QP, "OFA CM event_handler returned, ret=%d\n", ret);
	}


	if (nesqp->user_mode) {
		if ((ibqp->uobject)&&(ibqp->uobject->context)) {
			nes_ucontext = to_nesucontext(ibqp->uobject->context);
@ -1560,6 +1609,13 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
		}
		if (nesqp->pbl_pbase)
			kunmap(nesqp->page);
	} else {
		/* Clean any pending completions from the cq(s) */
		if (nesqp->nesscq)
			nes_clean_cq(nesqp, nesqp->nesscq);

		if ((nesqp->nesrcq) && (nesqp->nesrcq != nesqp->nesscq))
			nes_clean_cq(nesqp, nesqp->nesrcq);
	}

	nes_rem_ref(&nesqp->ibqp);
@ -2884,7 +2940,7 @@ static int nes_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 * nes_hw_modify_qp
 */
int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
		u32 next_iwarp_state, u32 wait_completion)
		u32 next_iwarp_state, u32 termlen, u32 wait_completion)
{
	struct nes_hw_cqp_wqe *cqp_wqe;
	/* struct iw_cm_id *cm_id = nesqp->cm_id; */
@ -2916,6 +2972,13 @@ int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
	set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
	set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, (u64)nesqp->nesqp_context_pbase);

	/* If sending a terminate message, fill in the length (in words) */
	if (((next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) == NES_CQP_QP_IWARP_STATE_TERMINATE) &&
			!(next_iwarp_state & NES_CQP_QP_TERM_DONT_SEND_TERM_MSG)) {
		termlen = ((termlen + 3) >> 2) << NES_CQP_OP_TERMLEN_SHIFT;
		set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_NEW_MSS_IDX, termlen);
	}

	atomic_set(&cqp_request->refcount, 2);
	nes_post_cqp_request(nesdev, cqp_request);

@ -3086,6 +3149,9 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			}
			nes_debug(NES_DBG_MOD_QP, "QP%u: new state = error\n",
				nesqp->hwqp.qp_id);
			if (nesqp->term_flags)
				del_timer(&nesqp->terminate_timer);

			next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
			/* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */
			if (nesqp->hte_added) {
@ -3163,7 +3229,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,

	if (issue_modify_qp) {
		nes_debug(NES_DBG_MOD_QP, "call nes_hw_modify_qp\n");
		ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 1);
		ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 1);
		if (ret)
			nes_debug(NES_DBG_MOD_QP, "nes_hw_modify_qp (next_iwarp_state = 0x%08X)"
				" failed for QP%u.\n",
@ -3328,6 +3394,12 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
	head = nesqp->hwqp.sq_head;

	while (ib_wr) {
		/* Check for QP error */
		if (nesqp->term_flags) {
			err = -EINVAL;
			break;
		}

		/* Check for SQ overflow */
		if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
			err = -EINVAL;
@ -3484,6 +3556,12 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
	head = nesqp->hwqp.rq_head;

	while (ib_wr) {
		/* Check for QP error */
		if (nesqp->term_flags) {
			err = -EINVAL;
			break;
		}

		if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
			err = -EINVAL;
			break;
@ -3547,7 +3625,6 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
{
	u64 u64temp;
	u64 wrid;
	/* u64 u64temp; */
	unsigned long flags = 0;
	struct nes_vnic *nesvnic = to_nesvnic(ibcq->device);
	struct nes_device *nesdev = nesvnic->nesdev;
@ -3555,12 +3632,13 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
	struct nes_qp *nesqp;
	struct nes_hw_cqe cqe;
	u32 head;
	u32 wq_tail;
	u32 wq_tail = 0;
	u32 cq_size;
	u32 cqe_count = 0;
	u32 wqe_index;
	u32 u32temp;
	/* u32 counter; */
	u32 move_cq_head = 1;
	u32 err_code;

	nes_debug(NES_DBG_CQ, "\n");

@ -3570,29 +3648,40 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
	cq_size = nescq->hw_cq.cq_size;

	while (cqe_count < num_entries) {
		if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
				NES_CQE_VALID) {
			/*
			 * Make sure we read CQ entry contents *after*
			 * we've checked the valid bit.
			 */
			rmb();
		if ((le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
				NES_CQE_VALID) == 0)
			break;

			cqe = nescq->hw_cq.cq_vbase[head];
			nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
			u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
			wqe_index = u32temp &
				(nesdev->nesadapter->max_qp_wr - 1);
			u32temp &= ~(NES_SW_CONTEXT_ALIGN-1);
			/* parse CQE, get completion context from WQE (either rq or sq */
			u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) |
				((u64)u32temp);
			nesqp = *((struct nes_qp **)&u64temp);
		/*
		 * Make sure we read CQ entry contents *after*
		 * we've checked the valid bit.
		 */
		rmb();

		cqe = nescq->hw_cq.cq_vbase[head];
		u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
		wqe_index = u32temp & (nesdev->nesadapter->max_qp_wr - 1);
		u32temp &= ~(NES_SW_CONTEXT_ALIGN-1);
		/* parse CQE, get completion context from WQE (either rq or sq) */
		u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) |
			((u64)u32temp);

		if (u64temp) {
			nesqp = (struct nes_qp *)(unsigned long)u64temp;
			memset(entry, 0, sizeof *entry);
			if (cqe.cqe_words[NES_CQE_ERROR_CODE_IDX] == 0) {
				entry->status = IB_WC_SUCCESS;
			} else {
				entry->status = IB_WC_WR_FLUSH_ERR;
				err_code = le32_to_cpu(cqe.cqe_words[NES_CQE_ERROR_CODE_IDX]);
				if (NES_IWARP_CQE_MAJOR_DRV == (err_code >> 16)) {
					entry->status = err_code & 0x0000ffff;

					/* The rest of the cqe's will be marked as flushed */
					nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_ERROR_CODE_IDX] =
						cpu_to_le32((NES_IWARP_CQE_MAJOR_FLUSH << 16) |
							NES_IWARP_CQE_MINOR_FLUSH);
				} else
					entry->status = IB_WC_WR_FLUSH_ERR;
			}

			entry->qp = &nesqp->ibqp;
@ -3601,20 +3690,18 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
			if (le32_to_cpu(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) {
				if (nesqp->skip_lsmm) {
					nesqp->skip_lsmm = 0;
					wq_tail = nesqp->hwqp.sq_tail++;
					nesqp->hwqp.sq_tail++;
				}

				/* Working on a SQ Completion*/
				wq_tail = wqe_index;
				nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
				wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail].
				wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wqe_index].
					wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) |
					((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail].
					((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wqe_index].
					wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX])));
				entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
				entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
					wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]);

				switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
				switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
						wqe_words[NES_IWARP_SQ_WQE_MISC_IDX]) & 0x3f) {
				case NES_IWARP_SQ_OP_RDMAW:
					nes_debug(NES_DBG_CQ, "Operation = RDMA WRITE.\n");
@ -3623,7 +3710,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
				case NES_IWARP_SQ_OP_RDMAR:
					nes_debug(NES_DBG_CQ, "Operation = RDMA READ.\n");
					entry->opcode = IB_WC_RDMA_READ;
					entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
					entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
						wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX]);
					break;
				case NES_IWARP_SQ_OP_SENDINV:
@ -3634,33 +3721,54 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
					entry->opcode = IB_WC_SEND;
					break;
				}

				nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
				if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.sq_tail != nesqp->hwqp.sq_head)) {
					move_cq_head = 0;
					wq_tail = nesqp->hwqp.sq_tail;
				}
			} else {
				/* Working on a RQ Completion*/
				wq_tail = wqe_index;
				nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1);
				entry->byte_len = le32_to_cpu(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]);
				wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) |
					((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32);
				wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) |
					((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32);
				entry->opcode = IB_WC_RECV;
			}
			entry->wr_id = wrid;

				nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1);
				if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.rq_tail != nesqp->hwqp.rq_head)) {
					move_cq_head = 0;
					wq_tail = nesqp->hwqp.rq_tail;
				}
			}

			entry->wr_id = wrid;
			entry++;
			cqe_count++;
		}

		if (move_cq_head) {
			nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
			if (++head >= cq_size)
				head = 0;
			cqe_count++;
			nescq->polled_completions++;

			if ((nescq->polled_completions > (cq_size / 2)) ||
					(nescq->polled_completions == 255)) {
				nes_debug(NES_DBG_CQ, "CQ%u Issuing CQE Allocate since more than half of cqes"
					" are pending %u of %u.\n",
					nescq->hw_cq.cq_number, nescq->polled_completions, cq_size);
					" are pending %u of %u.\n",
					nescq->hw_cq.cq_number, nescq->polled_completions, cq_size);
				nes_write32(nesdev->regs+NES_CQE_ALLOC,
					nescq->hw_cq.cq_number | (nescq->polled_completions << 16));
					nescq->hw_cq.cq_number | (nescq->polled_completions << 16));
				nescq->polled_completions = 0;
			}
			entry++;
		} else
			break;
		} else {
			/* Update the wqe index and set status to flush */
			wqe_index = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
			wqe_index = (wqe_index & (~(nesdev->nesadapter->max_qp_wr - 1))) | wq_tail;
			nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] =
				cpu_to_le32(wqe_index);
			move_cq_head = 1; /* ready for next pass */
		}
	}

	if (nescq->polled_completions) {

@ -40,6 +40,10 @@ struct nes_device;
#define NES_MAX_USER_DB_REGIONS 4096
#define NES_MAX_USER_WQ_REGIONS 4096

#define NES_TERM_SENT 0x01
#define NES_TERM_RCVD 0x02
#define NES_TERM_DONE 0x04

struct nes_ucontext {
	struct ib_ucontext ibucontext;
	struct nes_device *nesdev;
@ -119,6 +123,11 @@ struct nes_wq {
	spinlock_t lock;
};

struct disconn_work {
	struct work_struct work;
	struct nes_qp *nesqp;
};

struct iw_cm_id;
struct ietf_mpa_frame;

@ -127,7 +136,6 @@ struct nes_qp {
	void *allocated_buffer;
	struct iw_cm_id *cm_id;
	struct workqueue_struct *wq;
	struct work_struct disconn_work;
	struct nes_cq *nesscq;
	struct nes_cq *nesrcq;
	struct nes_pd *nespd;
@ -155,9 +163,13 @@ struct nes_qp {
	void *pbl_vbase;
	dma_addr_t pbl_pbase;
	struct page *page;
	struct timer_list terminate_timer;
	enum ib_event_type terminate_eventtype;
	wait_queue_head_t kick_waitq;
	u16 in_disconnect;
	u16 private_data_len;
	u16 term_sq_flush_code;
	u16 term_rq_flush_code;
	u8 active_conn;
	u8 skip_lsmm;
	u8 user_mode;
@ -165,7 +177,7 @@ struct nes_qp {
	u8 hw_iwarp_state;
	u8 flush_issued;
	u8 hw_tcp_state;
	u8 disconn_pending;
	u8 term_flags;
	u8 destroyed;
};
#endif /* NES_VERBS_H */

@ -31,7 +31,6 @@
 */

#include <rdma/ib_cm.h>
#include <rdma/ib_cache.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
@ -36,7 +36,6 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>

#include <rdma/ib_cache.h>
#include <linux/ip.h>
#include <linux/tcp.h>

@ -604,8 +604,11 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
				skb_queue_len(&neigh->queue));
			goto err_drop;
		}
		} else
		} else {
			spin_unlock_irqrestore(&priv->lock, flags);
			ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha));
			return;
		}
	} else {
		neigh->ah = NULL;

@ -688,7 +691,9 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
			be16_to_cpu(path->pathrec.dlid));

		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
		return;
	} else if ((path->query || !path_rec_start(dev, path)) &&
			skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		/* put pseudoheader back on for next time */

@ -720,7 +720,9 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb)
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
|
||||
return;
|
||||
}
|
||||
|
||||
unlock:
|
||||
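The three hunks above make the same fix in three transmit paths: priv->lock is released before calling ipoib_send(), and the function returns early instead of falling through to the shared unlock. A minimal sketch of the pattern, with ah and qpn standing in for the per-destination values:

/* Sketch: do not call into the send path while holding priv->lock. */
spin_lock_irqsave(&priv->lock, flags);
/* ... look up the address handle (ah) under the lock ... */
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_send(dev, skb, ah, qpn);	/* may contend for the TX lock internally */
return;				/* skip the common unlock tail below */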
@@ -758,6 +760,20 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
 	}
 }
 
+static int ipoib_mcast_addr_is_valid(const u8 *addr, unsigned int addrlen,
+				     const u8 *broadcast)
+{
+	if (addrlen != INFINIBAND_ALEN)
+		return 0;
+	/* reserved QPN, prefix, scope */
+	if (memcmp(addr, broadcast, 6))
+		return 0;
+	/* signature lower, pkey */
+	if (memcmp(addr + 7, broadcast + 7, 3))
+		return 0;
+	return 1;
+}
+
 void ipoib_mcast_restart_task(struct work_struct *work)
 {
 	struct ipoib_dev_priv *priv =
@@ -791,6 +807,11 @@ void ipoib_mcast_restart_task(struct work_struct *work)
 	for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
 		union ib_gid mgid;
 
+		if (!ipoib_mcast_addr_is_valid(mclist->dmi_addr,
+					       mclist->dmi_addrlen,
+					       dev->broadcast))
+			continue;
+
 		memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);
 
 		mcast = __ipoib_mcast_find(dev, &mgid);
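ipoib_mcast_addr_is_valid() accepts a 20-byte hardware multicast address that matches the device broadcast address in bytes 0-5 (reserved QPN, GID prefix, scope) and bytes 7-9 (signature low byte, P_Key), deliberately skipping byte 6 so addresses differing only in flag bits still pass. A hypothetical self-check built on that behaviour:

/* Hypothetical check: flipping the skipped byte must not affect validity. */
u8 addr[INFINIBAND_ALEN];

memcpy(addr, dev->broadcast, INFINIBAND_ALEN);
addr[6] ^= 0xff;	/* byte 6 is intentionally ignored by the comparison */
WARN_ON(!ipoib_mcast_addr_is_valid(addr, INFINIBAND_ALEN, dev->broadcast));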
@@ -1286,6 +1286,7 @@ static int cxgb_open(struct net_device *dev)
 	if (!other_ports)
 		schedule_chk_task(adapter);
 
+	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
 	return 0;
 }
 
@@ -1318,6 +1319,7 @@ static int cxgb_close(struct net_device *dev)
 	if (!adapter->open_device_map)
 		cxgb_down(adapter);
 
+	cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
 	return 0;
 }
 
@@ -2717,7 +2719,7 @@ static int t3_adapter_error(struct adapter *adapter, int reset)
 
 	if (is_offload(adapter) &&
 	    test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
-		cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
+		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
 		offload_close(&adapter->tdev);
 	}
 
@@ -2782,7 +2784,7 @@ static void t3_resume_ports(struct adapter *adapter)
 	}
 
 	if (is_offload(adapter) && !ofld_disable)
-		cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
+		cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
 }
 
 /*
@@ -153,14 +153,14 @@ void cxgb3_remove_clients(struct t3cdev *tdev)
 	mutex_unlock(&cxgb3_db_lock);
 }
 
-void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error)
+void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
 {
 	struct cxgb3_client *client;
 
 	mutex_lock(&cxgb3_db_lock);
 	list_for_each_entry(client, &client_list, client_list) {
-		if (client->err_handler)
-			client->err_handler(tdev, status, error);
+		if (client->event_handler)
+			client->event_handler(tdev, event, port);
 	}
 	mutex_unlock(&cxgb3_db_lock);
 }
@@ -64,14 +64,16 @@ void cxgb3_register_client(struct cxgb3_client *client);
 void cxgb3_unregister_client(struct cxgb3_client *client);
 void cxgb3_add_clients(struct t3cdev *tdev);
 void cxgb3_remove_clients(struct t3cdev *tdev);
-void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error);
+void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port);
 
 typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev,
 				      struct sk_buff *skb, void *ctx);
 
 enum {
 	OFFLOAD_STATUS_UP,
-	OFFLOAD_STATUS_DOWN
+	OFFLOAD_STATUS_DOWN,
+	OFFLOAD_PORT_DOWN,
+	OFFLOAD_PORT_UP
 };
 
 struct cxgb3_client {
@@ -82,7 +84,7 @@ struct cxgb3_client {
 	int (*redirect)(void *ctx, struct dst_entry *old,
 			struct dst_entry *new, struct l2t_entry *l2t);
 	struct list_head client_list;
-	void (*err_handler)(struct t3cdev *tdev, u32 status, u32 error);
+	void (*event_handler)(struct t3cdev *tdev, u32 event, u32 port);
 };
 
 /*
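With err_handler generalized to event_handler, a single callback now delivers both adapter-wide status changes and the new per-port events. A hedged sketch of a client hooking the renamed interface; the handler body and client name are invented for illustration:

/* Illustrative cxgb3 client using the renamed callback. */
static void my_event_handler(struct t3cdev *tdev, u32 event, u32 port)
{
	switch (event) {
	case OFFLOAD_STATUS_DOWN:	/* adapter-wide fault: quiesce */
		break;
	case OFFLOAD_PORT_UP:		/* 'port' carries the port index */
	case OFFLOAD_PORT_DOWN:
		break;
	}
}

static struct cxgb3_client my_client = {
	.name		= "my_client",
	.event_handler	= my_event_handler,
};
/* registered at module init via cxgb3_register_client(&my_client); */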
@@ -34,7 +34,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/hardirq.h>
 
 #include <linux/mlx4/cmd.h>
@@ -31,7 +31,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
@@ -41,6 +40,10 @@
 #include "mlx4.h"
 #include "fw.h"
 
+enum {
+	MLX4_IRQNAME_SIZE	= 64
+};
+
 enum {
 	MLX4_NUM_ASYNC_EQE	= 0x100,
 	MLX4_NUM_SPARE_EQE	= 0x80,
@@ -526,48 +529,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
 	iounmap(priv->clr_base);
 }
 
-int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
-{
-	struct mlx4_priv *priv = mlx4_priv(dev);
-	int ret;
-
-	/*
-	 * We assume that mapping one page is enough for the whole EQ
-	 * context table.  This is fine with all current HCAs, because
-	 * we only use 32 EQs and each EQ uses 64 bytes of context
-	 * memory, or 1 KB total.
-	 */
-	priv->eq_table.icm_virt = icm_virt;
-	priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
-	if (!priv->eq_table.icm_page)
-		return -ENOMEM;
-	priv->eq_table.icm_dma  = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
-					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
-		__free_page(priv->eq_table.icm_page);
-		return -ENOMEM;
-	}
-
-	ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
-	if (ret) {
-		pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
-			       PCI_DMA_BIDIRECTIONAL);
-		__free_page(priv->eq_table.icm_page);
-	}
-
-	return ret;
-}
-
-void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
-{
-	struct mlx4_priv *priv = mlx4_priv(dev);
-
-	mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
-	pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
-		       PCI_DMA_BIDIRECTIONAL);
-	__free_page(priv->eq_table.icm_page);
-}
-
 int mlx4_alloc_eq_table(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -615,7 +576,9 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 		priv->eq_table.clr_int  = priv->clr_base +
 			(priv->eq_table.inta_pin < 32 ? 4 : 0);
 
-	priv->eq_table.irq_names = kmalloc(16 * dev->caps.num_comp_vectors, GFP_KERNEL);
+	priv->eq_table.irq_names =
+		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
+			GFP_KERNEL);
 	if (!priv->eq_table.irq_names) {
 		err = -ENOMEM;
 		goto err_out_bitmap;
@@ -638,17 +601,25 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 		goto err_out_comp;
 
 	if (dev->flags & MLX4_FLAG_MSI_X) {
-		static const char async_eq_name[] = "mlx4-async";
 		const char *eq_name;
 
 		for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
 			if (i < dev->caps.num_comp_vectors) {
-				snprintf(priv->eq_table.irq_names + i * 16, 16,
-					 "mlx4-comp-%d", i);
-				eq_name = priv->eq_table.irq_names + i * 16;
-			} else
-				eq_name = async_eq_name;
+				snprintf(priv->eq_table.irq_names +
+					 i * MLX4_IRQNAME_SIZE,
+					 MLX4_IRQNAME_SIZE,
+					 "mlx4-comp-%d@pci:%s", i,
+					 pci_name(dev->pdev));
+			} else {
+				snprintf(priv->eq_table.irq_names +
+					 i * MLX4_IRQNAME_SIZE,
+					 MLX4_IRQNAME_SIZE,
+					 "mlx4-async@pci:%s",
+					 pci_name(dev->pdev));
+			}
+
+			eq_name = priv->eq_table.irq_names +
+				  i * MLX4_IRQNAME_SIZE;
 			err = request_irq(priv->eq_table.eq[i].irq,
 					  mlx4_msi_x_interrupt, 0, eq_name,
 					  priv->eq_table.eq + i);
@@ -658,8 +629,12 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 			priv->eq_table.eq[i].have_irq = 1;
 		}
 	} else {
+		snprintf(priv->eq_table.irq_names,
+			 MLX4_IRQNAME_SIZE,
+			 DRV_NAME "@pci:%s",
+			 pci_name(dev->pdev));
 		err = request_irq(dev->pdev->irq, mlx4_interrupt,
-				  IRQF_SHARED, DRV_NAME, dev);
+				  IRQF_SHARED, priv->eq_table.irq_names, dev);
 		if (err)
 			goto err_out_async;
 
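Sizing every slot at MLX4_IRQNAME_SIZE and appending pci_name() makes interrupts from two mlx4 HCAs distinguishable in /proc/interrupts. A sketch of the resulting fixed-stride name table, with the bus address invented for the example:

/* For two completion vectors on a device at 0000:04:00.0:
 *   irq_names + 0 * MLX4_IRQNAME_SIZE: "mlx4-comp-0@pci:0000:04:00.0"
 *   irq_names + 1 * MLX4_IRQNAME_SIZE: "mlx4-comp-1@pci:0000:04:00.0"
 *   irq_names + 2 * MLX4_IRQNAME_SIZE: "mlx4-async@pci:0000:04:00.0"
 * and slot i is addressed as: */
const char *eq_name = priv->eq_table.irq_names + i * MLX4_IRQNAME_SIZE;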
@@ -31,7 +31,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
@@ -525,7 +525,10 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 		goto err_unmap_aux;
 	}
 
-	err = mlx4_map_eq_icm(dev, init_hca->eqc_base);
+	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
+				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
+				  dev->caps.num_eqs, dev->caps.num_eqs,
+				  0, 0);
 	if (err) {
 		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
 		goto err_unmap_cmpt;
@@ -668,7 +671,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
 	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
 
 err_unmap_eq:
-	mlx4_unmap_eq_icm(dev);
+	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
 
 err_unmap_cmpt:
 	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
@@ -698,11 +701,11 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
 	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
 	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
 	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
+	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
 	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
 	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
 	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
 	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
-	mlx4_unmap_eq_icm(dev);
 
 	mlx4_UNMAP_ICM_AUX(dev);
 	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
@@ -786,7 +789,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 	return 0;
 
 err_close:
-	mlx4_close_hca(dev);
+	mlx4_CLOSE_HCA(dev, 0);
 
 err_free_icm:
 	mlx4_free_icms(dev);
@@ -1070,18 +1073,12 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_disable_pdev;
 	}
 
-	err = pci_request_region(pdev, 0, DRV_NAME);
+	err = pci_request_regions(pdev, DRV_NAME);
 	if (err) {
-		dev_err(&pdev->dev, "Cannot request control region, aborting.\n");
+		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
 		goto err_disable_pdev;
 	}
 
-	err = pci_request_region(pdev, 2, DRV_NAME);
-	if (err) {
-		dev_err(&pdev->dev, "Cannot request UAR region, aborting.\n");
-		goto err_release_bar0;
-	}
-
 	pci_set_master(pdev);
 
 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -1090,7 +1087,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
 			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
-			goto err_release_bar2;
+			goto err_release_regions;
 		}
 	}
 	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -1101,7 +1098,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		if (err) {
 			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
 				"aborting.\n");
-			goto err_release_bar2;
+			goto err_release_regions;
 		}
 	}
 
@@ -1110,7 +1107,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		dev_err(&pdev->dev, "Device struct alloc failed, "
 			"aborting.\n");
 		err = -ENOMEM;
-		goto err_release_bar2;
+		goto err_release_regions;
 	}
 
 	dev       = &priv->dev;
@@ -1205,11 +1202,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 err_free_dev:
 	kfree(priv);
 
-err_release_bar2:
-	pci_release_region(pdev, 2);
-
-err_release_bar0:
-	pci_release_region(pdev, 0);
+err_release_regions:
+	pci_release_regions(pdev);
 
 err_disable_pdev:
 	pci_disable_device(pdev);
@@ -1265,8 +1259,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
 		pci_disable_msix(pdev);
 
 		kfree(priv);
-		pci_release_region(pdev, 2);
-		pci_release_region(pdev, 0);
+		pci_release_regions(pdev);
 		pci_disable_device(pdev);
 		pci_set_drvdata(pdev, NULL);
 	}
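pci_request_regions() claims all of the device's BARs in one call, which is why the separate bar0/bar2 labels above collapse into a single err_release_regions unwind step. A minimal probe-skeleton sketch of the pairing; the driver name and function are invented:

/* Sketch: one call claims every BAR, one call releases them all. */
static int example_probe(struct pci_dev *pdev)
{
	int err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "example");	/* replaces per-BAR pci_request_region() */
	if (err)
		goto err_disable_pdev;

	pci_set_master(pdev);
	return 0;

err_disable_pdev:
	pci_disable_device(pdev);
	return err;
}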
@@ -31,7 +31,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/string.h>
 #include <linux/slab.h>
 
@@ -205,9 +205,7 @@ struct mlx4_eq_table {
 	void __iomem	      **uar_map;
 	u32			clr_mask;
 	struct mlx4_eq	       *eq;
-	u64			icm_virt;
-	struct page	       *icm_page;
-	dma_addr_t		icm_dma;
+	struct mlx4_icm_table	table;
 	struct mlx4_icm_table	cmpt_table;
 	int			have_irq;
 	u8			inta_pin;
@@ -373,9 +371,6 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 		      struct mlx4_dev_cap *dev_cap,
 		      struct mlx4_init_hca_param *init_hca);
 
-int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt);
-void mlx4_unmap_eq_icm(struct mlx4_dev *dev);
-
 int mlx4_cmd_init(struct mlx4_dev *dev);
 void mlx4_cmd_cleanup(struct mlx4_dev *dev);
 void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
@@ -32,7 +32,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/errno.h>
 
 #include <linux/mlx4/cmd.h>
@@ -31,7 +31,6 @@
  * SOFTWARE.
 */
 
-#include <linux/init.h>
 #include <linux/errno.h>
 
 #include <asm/page.h>
@@ -32,8 +32,6 @@
  * SOFTWARE.
 */
 
-#include <linux/init.h>
-
 #include "mlx4.h"
 #include "fw.h"
 
@@ -33,8 +33,6 @@
 * SOFTWARE.
 */
 
-#include <linux/init.h>
-
 #include <linux/mlx4/cmd.h>
 #include <linux/mlx4/qp.h>
 
@@ -31,7 +31,6 @@
 * SOFTWARE.
 */
 
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
@@ -31,8 +31,6 @@
 * SOFTWARE.
 */
 
-#include <linux/init.h>
-
 #include <linux/mlx4/cmd.h>
 
 #include "mlx4.h"
@@ -26,7 +26,7 @@ MODULE_VERSION(DRV_MODULE_VERSION);
 
 static void open_s3_dev(struct t3cdev *);
 static void close_s3_dev(struct t3cdev *);
-static void s3_err_handler(struct t3cdev *tdev, u32 status, u32 error);
+static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port);
 
 static cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];
 static struct cxgb3_client t3c_client = {
@@ -34,7 +34,7 @@ static struct cxgb3_client t3c_client = {
 	.handlers = cxgb3i_cpl_handlers,
 	.add = open_s3_dev,
 	.remove = close_s3_dev,
-	.err_handler = s3_err_handler,
+	.event_handler = s3_event_handler,
 };
 
 /**
@@ -66,16 +66,16 @@ static void close_s3_dev(struct t3cdev *t3dev)
 	cxgb3i_ddp_cleanup(t3dev);
 }
 
-static void s3_err_handler(struct t3cdev *tdev, u32 status, u32 error)
+static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port)
 {
 	struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(tdev);
 
-	cxgb3i_log_info("snic 0x%p, tdev 0x%p, status 0x%x, err 0x%x.\n",
-			snic, tdev, status, error);
+	cxgb3i_log_info("snic 0x%p, tdev 0x%p, event 0x%x, port 0x%x.\n",
+			snic, tdev, event, port);
 	if (!snic)
 		return;
 
-	switch (status) {
+	switch (event) {
 	case OFFLOAD_STATUS_DOWN:
 		snic->flags |= CXGB3I_ADAPTER_FLAG_RESET;
 		break;