Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-11-24 08:40:53 +07:00
Updates for 4.15 kernel merge window
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull rdma updates from Doug Ledford:
 "This is a fairly plain pull request. Lots of driver updates across the
  stack, a huge number of static analysis cleanups including a close to 50
  patch series from Bart Van Assche, and a number of new features inside
  the stack such as general CQ moderation support.

  Nothing really stands out, but there might be a few conflicts as you
  take things in. In particular, the cleanups touched some of the same
  lines as the new timer_setup changes.

  Everything in this pull request has been through 0day and at least two
  days of linux-next (since Stephen doesn't necessarily flag new
  errors/warnings until day2). A few more items (about 30 patches) from
  Intel and Mellanox showed up on the list on Tuesday. I've excluded those
  from this pull request, and I'm sure some of them qualify as fixes
  suitable to send any time, but I still have to review them fully. If
  they contain mostly fixes and little or no new development, then I will
  probably send them through by the end of the week just to get them out
  of the way.

  There was a break in my acceptance of patches which coincides with the
  computer problems I had, and then when I got things mostly back under
  control I had a backlog of patches to process, which I did mostly last
  Friday and Monday. So there is a larger number of patches processed in
  that timeframe than I was striving for.

  Summary:

   - Add iWARP support to qedr driver
   - Lots of misc fixes across subsystem
   - Multiple update series to hns roce driver
   - Multiple update series to hfi1 driver
   - Updates to vnic driver
   - Add kref to wait struct in cxgb4 driver
   - Updates to i40iw driver
   - Mellanox shared pull request
   - timer_setup changes
   - massive cleanup series from Bart Van Assche
   - Two series of SRP/SRPT changes from Bart Van Assche
   - Core updates from Mellanox
   - i40iw updates
   - IPoIB updates
   - mlx5 updates
   - mlx4 updates
   - hns updates
   - bnxt_re fixes
   - PCI write padding support
   - Sparse/Smatch/warning cleanups/fixes
   - CQ moderation support
   - SRQ support in vmw_pvrdma"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (296 commits)
  RDMA/core: Rename kernel modify_cq to better describe its usage
  IB/mlx5: Add CQ moderation capability to query_device
  IB/mlx4: Add CQ moderation capability to query_device
  IB/uverbs: Add CQ moderation capability to query_device
  IB/mlx5: Exposing modify CQ callback to uverbs layer
  IB/mlx4: Exposing modify CQ callback to uverbs layer
  IB/uverbs: Allow CQ moderation with modify CQ
  iw_cxgb4: atomically flush the qp
  iw_cxgb4: only call the cq comp_handler when the cq is armed
  iw_cxgb4: Fix possible circular dependency locking warning
  RDMA/bnxt_re: report vlan_id and sl in qp1 recv completion
  IB/core: Only maintain real QPs in the security lists
  IB/ocrdma_hw: remove unnecessary code in ocrdma_mbx_dealloc_lkey
  RDMA/core: Make function rdma_copy_addr return void
  RDMA/vmw_pvrdma: Add shared receive queue support
  RDMA/core: avoid uninitialized variable warning in create_udata
  RDMA/bnxt_re: synchronize poll_cq and req_notify_cq verbs
  RDMA/bnxt_re: Flush CQ notification Work Queue before destroying QP
  RDMA/bnxt_re: Set QP state in case of response completion errors
  RDMA/bnxt_re: Add memory barriers when processing CQ/EQ entries
  ...
This commit is contained in: commit ad0835a930
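One of the headline items above is the CQ moderation support. As a rough illustration only (not part of this diff), an in-kernel consumer could cap completion interrupts through rdma_set_cq_moderation(), the same helper the new ib_uverbs_ex_modify_cq() handler below calls; the count/period values and their units here are assumptions for the sketch:

	#include <rdma/ib_verbs.h>

	/* Hypothetical caller of the CQ moderation helper merged in this series. */
	static int example_moderate_cq(struct ib_cq *cq)
	{
		/* Coalesce up to 16 completions or wait ~64 usec before raising an event. */
		return rdma_set_cq_moderation(cq, 16, 64);
	}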
@@ -6805,8 +6805,6 @@ F: drivers/ipack/
INFINIBAND SUBSYSTEM
M: Doug Ledford <dledford@redhat.com>
M: Sean Hefty <sean.hefty@intel.com>
M: Hal Rosenstock <hal.rosenstock@gmail.com>
L: linux-rdma@vger.kernel.org
W: http://www.openfabrics.org/
Q: http://patchwork.kernel.org/project/linux-rdma/list/
@@ -11116,6 +11114,7 @@ F: drivers/net/ethernet/qlogic/qede/
QLOGIC QL4xxx RDMA DRIVER
M: Ram Amrani <Ram.Amrani@cavium.com>
M: Michal Kalderon <Michal.Kalderon@cavium.com>
M: Ariel Elior <Ariel.Elior@cavium.com>
L: linux-rdma@vger.kernel.org
S: Supported
@@ -1,6 +1,5 @@
menuconfig INFINIBAND
tristate "InfiniBand support"
depends on PCI || BROKEN
depends on HAS_IOMEM
depends on NET
depends on INET
@@ -46,6 +45,7 @@ config INFINIBAND_EXP_USER_ACCESS
config INFINIBAND_USER_MEM
bool
depends on INFINIBAND_USER_ACCESS != n
depends on MMU
default y

config INFINIBAND_ON_DEMAND_PAGING
@@ -15,7 +15,7 @@ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \
security.o nldev.o

ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o
ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o
ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o
ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o

ib_cm-y := cm.o
@@ -229,8 +229,9 @@ void rdma_addr_unregister_client(struct rdma_addr_client *client)
}
EXPORT_SYMBOL(rdma_addr_unregister_client);

int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
const unsigned char *dst_dev_addr)
void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
const struct net_device *dev,
const unsigned char *dst_dev_addr)
{
dev_addr->dev_type = dev->type;
memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
@@ -238,7 +239,6 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
if (dst_dev_addr)
memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN);
dev_addr->bound_dev_if = dev->ifindex;
return 0;
}
EXPORT_SYMBOL(rdma_copy_addr);

@@ -247,15 +247,14 @@ int rdma_translate_ip(const struct sockaddr *addr,
u16 *vlan_id)
{
struct net_device *dev;
int ret = -EADDRNOTAVAIL;

if (dev_addr->bound_dev_if) {
dev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
if (!dev)
return -ENODEV;
ret = rdma_copy_addr(dev_addr, dev, NULL);
rdma_copy_addr(dev_addr, dev, NULL);
dev_put(dev);
return ret;
return 0;
}

switch (addr->sa_family) {
@@ -264,9 +263,9 @@ int rdma_translate_ip(const struct sockaddr *addr,
((const struct sockaddr_in *)addr)->sin_addr.s_addr);

if (!dev)
return ret;
return -EADDRNOTAVAIL;

ret = rdma_copy_addr(dev_addr, dev, NULL);
rdma_copy_addr(dev_addr, dev, NULL);
dev_addr->bound_dev_if = dev->ifindex;
if (vlan_id)
*vlan_id = rdma_vlan_dev_vlan_id(dev);
@@ -279,7 +278,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
if (ipv6_chk_addr(dev_addr->net,
&((const struct sockaddr_in6 *)addr)->sin6_addr,
dev, 1)) {
ret = rdma_copy_addr(dev_addr, dev, NULL);
rdma_copy_addr(dev_addr, dev, NULL);
dev_addr->bound_dev_if = dev->ifindex;
if (vlan_id)
*vlan_id = rdma_vlan_dev_vlan_id(dev);
@@ -290,7 +289,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
break;
#endif
}
return ret;
return 0;
}
EXPORT_SYMBOL(rdma_translate_ip);

@@ -336,7 +335,7 @@ static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
const void *daddr)
{
struct neighbour *n;
int ret;
int ret = 0;

n = dst_neigh_lookup(dst, daddr);

@@ -346,7 +345,7 @@ static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *dev_addr,
neigh_event_send(n, NULL);
ret = -ENODATA;
} else {
ret = rdma_copy_addr(dev_addr, dst->dev, n->ha);
rdma_copy_addr(dev_addr, dst->dev, n->ha);
}
rcu_read_unlock();

@@ -494,7 +493,9 @@ static int addr_resolve_neigh(struct dst_entry *dst,
if (!(dst->dev->flags & IFF_NOARP))
return fetch_ha(dst, addr, dst_in, seq);

return rdma_copy_addr(addr, dst->dev, NULL);
rdma_copy_addr(addr, dst->dev, NULL);

return 0;
}

static int addr_resolve(struct sockaddr *src_in,
@@ -852,7 +853,7 @@ static struct notifier_block nb = {

int addr_init(void)
{
addr_wq = alloc_ordered_workqueue("ib_addr", WQ_MEM_RECLAIM);
addr_wq = alloc_ordered_workqueue("ib_addr", 0);
if (!addr_wq)
return -ENOMEM;
@@ -1472,31 +1472,29 @@ static void cm_format_path_lid_from_req(struct cm_req_msg *req_msg,

if (primary_path->rec_type != SA_PATH_REC_TYPE_OPA) {
sa_path_set_dlid(primary_path,
htonl(ntohs(req_msg->primary_local_lid)));
ntohs(req_msg->primary_local_lid));
sa_path_set_slid(primary_path,
htonl(ntohs(req_msg->primary_remote_lid)));
ntohs(req_msg->primary_remote_lid));
} else {
lid = opa_get_lid_from_gid(&req_msg->primary_local_gid);
sa_path_set_dlid(primary_path, cpu_to_be32(lid));
sa_path_set_dlid(primary_path, lid);

lid = opa_get_lid_from_gid(&req_msg->primary_remote_gid);
sa_path_set_slid(primary_path, cpu_to_be32(lid));
sa_path_set_slid(primary_path, lid);
}

if (!cm_req_has_alt_path(req_msg))
return;

if (alt_path->rec_type != SA_PATH_REC_TYPE_OPA) {
sa_path_set_dlid(alt_path,
htonl(ntohs(req_msg->alt_local_lid)));
sa_path_set_slid(alt_path,
htonl(ntohs(req_msg->alt_remote_lid)));
sa_path_set_dlid(alt_path, ntohs(req_msg->alt_local_lid));
sa_path_set_slid(alt_path, ntohs(req_msg->alt_remote_lid));
} else {
lid = opa_get_lid_from_gid(&req_msg->alt_local_gid);
sa_path_set_dlid(alt_path, cpu_to_be32(lid));
sa_path_set_dlid(alt_path, lid);

lid = opa_get_lid_from_gid(&req_msg->alt_remote_gid);
sa_path_set_slid(alt_path, cpu_to_be32(lid));
sa_path_set_slid(alt_path, lid);
}
}

@@ -1575,7 +1573,7 @@ static void cm_format_req_event(struct cm_work *work,
param->bth_pkey = cm_get_bth_pkey(work);
param->port = cm_id_priv->av.port->port_num;
param->primary_path = &work->path[0];
if (req_msg->alt_local_lid)
if (cm_req_has_alt_path(req_msg))
param->alternate_path = &work->path[1];
else
param->alternate_path = NULL;
@@ -1856,7 +1854,8 @@ static int cm_req_handler(struct cm_work *work)
cm_process_routed_req(req_msg, work->mad_recv_wc->wc);

memset(&work->path[0], 0, sizeof(work->path[0]));
memset(&work->path[1], 0, sizeof(work->path[1]));
if (cm_req_has_alt_path(req_msg))
memset(&work->path[1], 0, sizeof(work->path[1]));
grh = rdma_ah_read_grh(&cm_id_priv->av.ah_attr);
ret = ib_get_cached_gid(work->port->cm_dev->ib_device,
work->port->port_num,
@@ -2810,6 +2809,7 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
msg_response = CM_MSG_RESPONSE_OTHER;
break;
}
/* fall through */
default:
ret = -EINVAL;
goto error1;
@@ -3037,14 +3037,14 @@ static void cm_format_path_lid_from_lap(struct cm_lap_msg *lap_msg,
u32 lid;

if (path->rec_type != SA_PATH_REC_TYPE_OPA) {
sa_path_set_dlid(path, htonl(ntohs(lap_msg->alt_local_lid)));
sa_path_set_slid(path, htonl(ntohs(lap_msg->alt_remote_lid)));
sa_path_set_dlid(path, ntohs(lap_msg->alt_local_lid));
sa_path_set_slid(path, ntohs(lap_msg->alt_remote_lid));
} else {
lid = opa_get_lid_from_gid(&lap_msg->alt_local_gid);
sa_path_set_dlid(path, cpu_to_be32(lid));
sa_path_set_dlid(path, lid);

lid = opa_get_lid_from_gid(&lap_msg->alt_remote_gid);
sa_path_set_slid(path, cpu_to_be32(lid));
sa_path_set_slid(path, lid);
}
}

@@ -3817,14 +3817,16 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
struct cm_port *port = mad_agent->context;
struct cm_work *work;
enum ib_cm_event_type event;
bool alt_path = false;
u16 attr_id;
int paths = 0;
int going_down = 0;

switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
case CM_REQ_ATTR_ID:
paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
alt_local_lid != 0);
alt_path = cm_req_has_alt_path((struct cm_req_msg *)
mad_recv_wc->recv_buf.mad);
paths = 1 + (alt_path != 0);
event = IB_CM_REQ_RECEIVED;
break;
case CM_MRA_ATTR_ID:
@@ -1540,7 +1540,7 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
return id_priv;
}

static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv)
{
return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
}
@@ -1846,9 +1846,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

if (net_dev) {
ret = rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL);
if (ret)
goto err;
rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL);
} else {
if (!cma_protocol_roce(listen_id) &&
cma_any_addr(cma_src_addr(id_priv))) {
@@ -1894,9 +1892,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
goto err;

if (net_dev) {
ret = rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL);
if (ret)
goto err;
rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL);
} else {
if (!cma_any_addr(cma_src_addr(id_priv))) {
ret = cma_translate_addr(cma_src_addr(id_priv),
@@ -1942,7 +1938,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
struct rdma_id_private *listen_id, *conn_id = NULL;
struct rdma_cm_event event;
struct net_device *net_dev;
int offset, ret;
u8 offset;
int ret;

listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
if (IS_ERR(listen_id))
@@ -3440,7 +3437,8 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
struct ib_cm_sidr_req_param req;
struct ib_cm_id *id;
void *private_data;
int offset, ret;
u8 offset;
int ret;

memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv);
@@ -3497,7 +3495,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
struct rdma_route *route;
void *private_data;
struct ib_cm_id *id;
int offset, ret;
u8 offset;
int ret;

memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv);
@@ -447,9 +447,6 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
*/
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;

cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
destroy_cm_id(cm_id);
}
EXPORT_SYMBOL(iw_destroy_cm_id);
@@ -1974,14 +1974,15 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
unsigned long flags;
int ret;

INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
ret = ib_mad_enforce_security(mad_agent_priv,
mad_recv_wc->wc->pkey_index);
if (ret) {
ib_free_recv_mad(mad_recv_wc);
deref_mad_agent(mad_agent_priv);
return;
}

INIT_LIST_HEAD(&mad_recv_wc->rmpp_list);
list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list);
if (ib_mad_kernel_rmpp_agent(&mad_agent_priv->agent)) {
mad_recv_wc = ib_process_rmpp_recv_wc(mad_agent_priv,
@@ -384,21 +384,17 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
count += ret;
prev_wr = &ctx->sig->data.reg_wr.wr;

if (prot_sg_cnt) {
ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->prot,
prot_sg, prot_sg_cnt, 0);
if (ret < 0)
goto out_destroy_data_mr;
count += ret;
ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->prot,
prot_sg, prot_sg_cnt, 0);
if (ret < 0)
goto out_destroy_data_mr;
count += ret;

if (ctx->sig->prot.inv_wr.next)
prev_wr->next = &ctx->sig->prot.inv_wr;
else
prev_wr->next = &ctx->sig->prot.reg_wr.wr;
prev_wr = &ctx->sig->prot.reg_wr.wr;
} else {
ctx->sig->prot.mr = NULL;
}
if (ctx->sig->prot.inv_wr.next)
prev_wr->next = &ctx->sig->prot.inv_wr;
else
prev_wr->next = &ctx->sig->prot.reg_wr.wr;
prev_wr = &ctx->sig->prot.reg_wr.wr;

ctx->sig->sig_mr = ib_mr_pool_get(qp, &qp->sig_mrs);
if (!ctx->sig->sig_mr) {
@ -87,16 +87,14 @@ static int enforce_qp_pkey_security(u16 pkey,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (qp_sec->qp == qp_sec->qp->real_qp) {
|
||||
list_for_each_entry(shared_qp_sec,
|
||||
&qp_sec->shared_qp_list,
|
||||
shared_qp_list) {
|
||||
ret = security_ib_pkey_access(shared_qp_sec->security,
|
||||
subnet_prefix,
|
||||
pkey);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
list_for_each_entry(shared_qp_sec,
|
||||
&qp_sec->shared_qp_list,
|
||||
shared_qp_list) {
|
||||
ret = security_ib_pkey_access(shared_qp_sec->security,
|
||||
subnet_prefix,
|
||||
pkey);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@ -560,15 +558,22 @@ int ib_security_modify_qp(struct ib_qp *qp,
|
||||
int ret = 0;
|
||||
struct ib_ports_pkeys *tmp_pps;
|
||||
struct ib_ports_pkeys *new_pps;
|
||||
bool special_qp = (qp->qp_type == IB_QPT_SMI ||
|
||||
qp->qp_type == IB_QPT_GSI ||
|
||||
qp->qp_type >= IB_QPT_RESERVED1);
|
||||
struct ib_qp *real_qp = qp->real_qp;
|
||||
bool special_qp = (real_qp->qp_type == IB_QPT_SMI ||
|
||||
real_qp->qp_type == IB_QPT_GSI ||
|
||||
real_qp->qp_type >= IB_QPT_RESERVED1);
|
||||
bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
|
||||
(qp_attr_mask & IB_QP_ALT_PATH));
|
||||
|
||||
/* The port/pkey settings are maintained only for the real QP. Open
|
||||
* handles on the real QP will be in the shared_qp_list. When
|
||||
* enforcing security on the real QP all the shared QPs will be
|
||||
* checked as well.
|
||||
*/
|
||||
|
||||
if (pps_change && !special_qp) {
|
||||
mutex_lock(&qp->qp_sec->mutex);
|
||||
new_pps = get_new_pps(qp,
|
||||
mutex_lock(&real_qp->qp_sec->mutex);
|
||||
new_pps = get_new_pps(real_qp,
|
||||
qp_attr,
|
||||
qp_attr_mask);
|
||||
|
||||
@ -586,14 +591,14 @@ int ib_security_modify_qp(struct ib_qp *qp,
|
||||
|
||||
if (!ret)
|
||||
ret = check_qp_port_pkey_settings(new_pps,
|
||||
qp->qp_sec);
|
||||
real_qp->qp_sec);
|
||||
}
|
||||
|
||||
if (!ret)
|
||||
ret = qp->device->modify_qp(qp->real_qp,
|
||||
qp_attr,
|
||||
qp_attr_mask,
|
||||
udata);
|
||||
ret = real_qp->device->modify_qp(real_qp,
|
||||
qp_attr,
|
||||
qp_attr_mask,
|
||||
udata);
|
||||
|
||||
if (pps_change && !special_qp) {
|
||||
/* Clean up the lists and free the appropriate
|
||||
@ -602,8 +607,8 @@ int ib_security_modify_qp(struct ib_qp *qp,
|
||||
if (ret) {
|
||||
tmp_pps = new_pps;
|
||||
} else {
|
||||
tmp_pps = qp->qp_sec->ports_pkeys;
|
||||
qp->qp_sec->ports_pkeys = new_pps;
|
||||
tmp_pps = real_qp->qp_sec->ports_pkeys;
|
||||
real_qp->qp_sec->ports_pkeys = new_pps;
|
||||
}
|
||||
|
||||
if (tmp_pps) {
|
||||
@ -611,7 +616,7 @@ int ib_security_modify_qp(struct ib_qp *qp,
|
||||
port_pkey_list_remove(&tmp_pps->alt);
|
||||
}
|
||||
kfree(tmp_pps);
|
||||
mutex_unlock(&qp->qp_sec->mutex);
|
||||
mutex_unlock(&real_qp->qp_sec->mutex);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
@ -692,20 +697,13 @@ void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent)
|
||||
|
||||
int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed)
|
||||
return -EACCES;
|
||||
|
||||
ret = ib_security_pkey_access(map->agent.device,
|
||||
map->agent.port_num,
|
||||
pkey_index,
|
||||
map->agent.security);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
return ib_security_pkey_access(map->agent.device,
|
||||
map->agent.port_num,
|
||||
pkey_index,
|
||||
map->agent.security);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_SECURITY_INFINIBAND */
|
||||
|
@ -108,8 +108,22 @@ static ssize_t port_attr_show(struct kobject *kobj,
|
||||
return port_attr->show(p, port_attr, buf);
|
||||
}
|
||||
|
||||
static ssize_t port_attr_store(struct kobject *kobj,
|
||||
struct attribute *attr,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
struct port_attribute *port_attr =
|
||||
container_of(attr, struct port_attribute, attr);
|
||||
struct ib_port *p = container_of(kobj, struct ib_port, kobj);
|
||||
|
||||
if (!port_attr->store)
|
||||
return -EIO;
|
||||
return port_attr->store(p, port_attr, buf, count);
|
||||
}
|
||||
|
||||
static const struct sysfs_ops port_sysfs_ops = {
|
||||
.show = port_attr_show
|
||||
.show = port_attr_show,
|
||||
.store = port_attr_store
|
||||
};
|
||||
|
||||
static ssize_t gid_attr_show(struct kobject *kobj,
|
||||
|
@ -39,11 +39,44 @@
|
||||
#include <linux/export.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/hugetlb.h>
|
||||
#include <linux/interval_tree_generic.h>
|
||||
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include <rdma/ib_umem.h>
|
||||
#include <rdma/ib_umem_odp.h>
|
||||
|
||||
/*
|
||||
* The ib_umem list keeps track of memory regions for which the HW
|
||||
* device request to receive notification when the related memory
|
||||
* mapping is changed.
|
||||
*
|
||||
* ib_umem_lock protects the list.
|
||||
*/
|
||||
|
||||
static u64 node_start(struct umem_odp_node *n)
|
||||
{
|
||||
struct ib_umem_odp *umem_odp =
|
||||
container_of(n, struct ib_umem_odp, interval_tree);
|
||||
|
||||
return ib_umem_start(umem_odp->umem);
|
||||
}
|
||||
|
||||
/* Note that the representation of the intervals in the interval tree
|
||||
* considers the ending point as contained in the interval, while the
|
||||
* function ib_umem_end returns the first address which is not contained
|
||||
* in the umem.
|
||||
*/
|
||||
static u64 node_last(struct umem_odp_node *n)
|
||||
{
|
||||
struct ib_umem_odp *umem_odp =
|
||||
container_of(n, struct ib_umem_odp, interval_tree);
|
||||
|
||||
return ib_umem_end(umem_odp->umem) - 1;
|
||||
}
|
||||
|
||||
INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
|
||||
node_start, node_last, static, rbt_ib_umem)
|
||||
|
||||
static void ib_umem_notifier_start_account(struct ib_umem *item)
|
||||
{
|
||||
mutex_lock(&item->odp_data->umem_mutex);
|
||||
@ -754,3 +787,42 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
|
||||
mutex_unlock(&umem->odp_data->umem_mutex);
|
||||
}
|
||||
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
|
||||
|
||||
/* @last is not a part of the interval. See comment for function
|
||||
* node_last.
|
||||
*/
|
||||
int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
|
||||
u64 start, u64 last,
|
||||
umem_call_back cb,
|
||||
void *cookie)
|
||||
{
|
||||
int ret_val = 0;
|
||||
struct umem_odp_node *node, *next;
|
||||
struct ib_umem_odp *umem;
|
||||
|
||||
if (unlikely(start == last))
|
||||
return ret_val;
|
||||
|
||||
for (node = rbt_ib_umem_iter_first(root, start, last - 1);
|
||||
node; node = next) {
|
||||
next = rbt_ib_umem_iter_next(node, start, last - 1);
|
||||
umem = container_of(node, struct ib_umem_odp, interval_tree);
|
||||
ret_val = cb(umem->umem, start, last, cookie) || ret_val;
|
||||
}
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
EXPORT_SYMBOL(rbt_ib_umem_for_each_in_range);
|
||||
|
||||
struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
|
||||
u64 addr, u64 length)
|
||||
{
|
||||
struct umem_odp_node *node;
|
||||
|
||||
node = rbt_ib_umem_iter_first(root, addr, addr + length - 1);
|
||||
if (node)
|
||||
return container_of(node, struct ib_umem_odp, interval_tree);
|
||||
return NULL;
|
||||
|
||||
}
|
||||
EXPORT_SYMBOL(rbt_ib_umem_lookup);
|
||||
|
@ -1,109 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2014 Mellanox Technologies. All rights reserved.
|
||||
*
|
||||
* This software is available to you under a choice of one of two
|
||||
* licenses. You may choose to be licensed under the terms of the GNU
|
||||
* General Public License (GPL) Version 2, available from the file
|
||||
* COPYING in the main directory of this source tree, or the
|
||||
* OpenIB.org BSD license below:
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or
|
||||
* without modification, are permitted provided that the following
|
||||
* conditions are met:
|
||||
*
|
||||
* - Redistributions of source code must retain the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer.
|
||||
*
|
||||
* - Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following
|
||||
* disclaimer in the documentation and/or other materials
|
||||
* provided with the distribution.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
||||
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/interval_tree_generic.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <rdma/ib_umem_odp.h>
|
||||
|
||||
/*
|
||||
* The ib_umem list keeps track of memory regions for which the HW
|
||||
* device request to receive notification when the related memory
|
||||
* mapping is changed.
|
||||
*
|
||||
* ib_umem_lock protects the list.
|
||||
*/
|
||||
|
||||
static inline u64 node_start(struct umem_odp_node *n)
|
||||
{
|
||||
struct ib_umem_odp *umem_odp =
|
||||
container_of(n, struct ib_umem_odp, interval_tree);
|
||||
|
||||
return ib_umem_start(umem_odp->umem);
|
||||
}
|
||||
|
||||
/* Note that the representation of the intervals in the interval tree
|
||||
* considers the ending point as contained in the interval, while the
|
||||
* function ib_umem_end returns the first address which is not contained
|
||||
* in the umem.
|
||||
*/
|
||||
static inline u64 node_last(struct umem_odp_node *n)
|
||||
{
|
||||
struct ib_umem_odp *umem_odp =
|
||||
container_of(n, struct ib_umem_odp, interval_tree);
|
||||
|
||||
return ib_umem_end(umem_odp->umem) - 1;
|
||||
}
|
||||
|
||||
INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
|
||||
node_start, node_last, , rbt_ib_umem)
|
||||
|
||||
/* @last is not a part of the interval. See comment for function
|
||||
* node_last.
|
||||
*/
|
||||
int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
|
||||
u64 start, u64 last,
|
||||
umem_call_back cb,
|
||||
void *cookie)
|
||||
{
|
||||
int ret_val = 0;
|
||||
struct umem_odp_node *node, *next;
|
||||
struct ib_umem_odp *umem;
|
||||
|
||||
if (unlikely(start == last))
|
||||
return ret_val;
|
||||
|
||||
for (node = rbt_ib_umem_iter_first(root, start, last - 1);
|
||||
node; node = next) {
|
||||
next = rbt_ib_umem_iter_next(node, start, last - 1);
|
||||
umem = container_of(node, struct ib_umem_odp, interval_tree);
|
||||
ret_val = cb(umem->umem, start, last, cookie) || ret_val;
|
||||
}
|
||||
|
||||
return ret_val;
|
||||
}
|
||||
EXPORT_SYMBOL(rbt_ib_umem_for_each_in_range);
|
||||
|
||||
struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
|
||||
u64 addr, u64 length)
|
||||
{
|
||||
struct umem_odp_node *node;
|
||||
|
||||
node = rbt_ib_umem_iter_first(root, addr, addr + length - 1);
|
||||
if (node)
|
||||
return container_of(node, struct ib_umem_odp, interval_tree);
|
||||
return NULL;
|
||||
|
||||
}
|
||||
EXPORT_SYMBOL(rbt_ib_umem_lookup);
|
@ -229,7 +229,16 @@ static void recv_handler(struct ib_mad_agent *agent,
|
||||
packet->mad.hdr.status = 0;
|
||||
packet->mad.hdr.length = hdr_size(file) + mad_recv_wc->mad_len;
|
||||
packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp);
|
||||
packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid);
|
||||
/*
|
||||
* On OPA devices it is okay to lose the upper 16 bits of LID as this
|
||||
* information is obtained elsewhere. Mask off the upper 16 bits.
|
||||
*/
|
||||
if (agent->device->port_immutable[agent->port_num].core_cap_flags &
|
||||
RDMA_CORE_PORT_INTEL_OPA)
|
||||
packet->mad.hdr.lid = ib_lid_be16(0xFFFF &
|
||||
mad_recv_wc->wc->slid);
|
||||
else
|
||||
packet->mad.hdr.lid = ib_lid_be16(mad_recv_wc->wc->slid);
|
||||
packet->mad.hdr.sl = mad_recv_wc->wc->sl;
|
||||
packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
|
||||
packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index;
|
||||
@ -506,7 +515,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
|
||||
rdma_ah_set_dgid_raw(&ah_attr, packet->mad.hdr.gid);
|
||||
}
|
||||
|
||||
ah = rdma_create_ah(agent->qp->pd, &ah_attr);
|
||||
ah = rdma_create_user_ah(agent->qp->pd, &ah_attr, NULL);
|
||||
if (IS_ERR(ah)) {
|
||||
ret = PTR_ERR(ah);
|
||||
goto err_up;
|
||||
|
@ -47,21 +47,28 @@
|
||||
#include <rdma/ib_umem.h>
|
||||
#include <rdma/ib_user_verbs.h>
|
||||
|
||||
#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
|
||||
do { \
|
||||
(udata)->inbuf = (const void __user *) (ibuf); \
|
||||
(udata)->outbuf = (void __user *) (obuf); \
|
||||
(udata)->inlen = (ilen); \
|
||||
(udata)->outlen = (olen); \
|
||||
} while (0)
|
||||
static inline void
|
||||
ib_uverbs_init_udata(struct ib_udata *udata,
|
||||
const void __user *ibuf,
|
||||
void __user *obuf,
|
||||
size_t ilen, size_t olen)
|
||||
{
|
||||
udata->inbuf = ibuf;
|
||||
udata->outbuf = obuf;
|
||||
udata->inlen = ilen;
|
||||
udata->outlen = olen;
|
||||
}
|
||||
|
||||
#define INIT_UDATA_BUF_OR_NULL(udata, ibuf, obuf, ilen, olen) \
|
||||
do { \
|
||||
(udata)->inbuf = (ilen) ? (const void __user *) (ibuf) : NULL; \
|
||||
(udata)->outbuf = (olen) ? (void __user *) (obuf) : NULL; \
|
||||
(udata)->inlen = (ilen); \
|
||||
(udata)->outlen = (olen); \
|
||||
} while (0)
|
||||
static inline void
|
||||
ib_uverbs_init_udata_buf_or_null(struct ib_udata *udata,
|
||||
const void __user *ibuf,
|
||||
void __user *obuf,
|
||||
size_t ilen, size_t olen)
|
||||
{
|
||||
ib_uverbs_init_udata(udata,
|
||||
ilen ? ibuf : NULL, olen ? obuf : NULL,
|
||||
ilen, olen);
|
||||
}
|
||||
|
||||
/*
|
||||
* Our lifetime rules for these structs are the following:
|
||||
@ -299,5 +306,6 @@ IB_UVERBS_DECLARE_EX_CMD(destroy_wq);
|
||||
IB_UVERBS_DECLARE_EX_CMD(create_rwq_ind_table);
|
||||
IB_UVERBS_DECLARE_EX_CMD(destroy_rwq_ind_table);
|
||||
IB_UVERBS_DECLARE_EX_CMD(modify_qp);
|
||||
IB_UVERBS_DECLARE_EX_CMD(modify_cq);
|
||||
|
||||
#endif /* UVERBS_H */
|
||||
|
@ -91,8 +91,8 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
|
||||
goto err;
|
||||
}
|
||||
|
||||
INIT_UDATA(&udata, buf + sizeof(cmd),
|
||||
(unsigned long) cmd.response + sizeof(resp),
|
||||
ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
|
||||
u64_to_user_ptr(cmd.response) + sizeof(resp),
|
||||
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
|
||||
out_len - sizeof(resp));
|
||||
|
||||
@ -141,8 +141,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
|
||||
goto err_fd;
|
||||
}
|
||||
|
||||
if (copy_to_user((void __user *) (unsigned long) cmd.response,
|
||||
&resp, sizeof resp)) {
|
||||
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
|
||||
ret = -EFAULT;
|
||||
goto err_file;
|
||||
}
|
||||
@ -238,8 +237,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
|
||||
memset(&resp, 0, sizeof resp);
|
||||
copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);
|
||||
|
||||
if (copy_to_user((void __user *) (unsigned long) cmd.response,
|
||||
&resp, sizeof resp))
|
||||
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
|
||||
return -EFAULT;
|
||||
|
||||
return in_len;
|
||||
@ -295,8 +293,7 @@ ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
|
||||
resp.link_layer = rdma_port_get_link_layer(ib_dev,
|
||||
cmd.port_num);
|
||||
|
||||
if (copy_to_user((void __user *) (unsigned long) cmd.response,
|
||||
&resp, sizeof resp))
|
||||
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
|
||||
return -EFAULT;
|
||||
|
||||
return in_len;
|
||||
@ -320,8 +317,8 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
|
||||
if (copy_from_user(&cmd, buf, sizeof cmd))
|
||||
return -EFAULT;
|
||||
|
||||
INIT_UDATA(&udata, buf + sizeof(cmd),
|
||||
(unsigned long) cmd.response + sizeof(resp),
|
||||
ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
|
||||
u64_to_user_ptr(cmd.response) + sizeof(resp),
|
||||
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
|
||||
out_len - sizeof(resp));
|
||||
|
||||
@ -344,8 +341,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
|
||||
memset(&resp, 0, sizeof resp);
|
||||
resp.pd_handle = uobj->id;
|
||||
|
||||
if (copy_to_user((void __user *) (unsigned long) cmd.response,
|
||||
&resp, sizeof resp)) {
|
||||
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
|
||||
ret = -EFAULT;
|
||||
goto err_copy;
|
||||
}
|
||||
@ -490,8 +486,8 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
|
||||
if (copy_from_user(&cmd, buf, sizeof cmd))
|
||||
return -EFAULT;
|
||||
|
||||
INIT_UDATA(&udata, buf + sizeof(cmd),
|
||||
(unsigned long) cmd.response + sizeof(resp),
|
||||
ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
|
||||
u64_to_user_ptr(cmd.response) + sizeof(resp),
|
||||
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
|
||||
out_len - sizeof(resp));
|
||||
|
||||
@ -556,8 +552,7 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
|
||||
atomic_inc(&xrcd->usecnt);
|
||||
}
|
||||
|
||||
if (copy_to_user((void __user *) (unsigned long) cmd.response,
|
||||
&resp, sizeof resp)) {
|
||||
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
|
||||
ret = -EFAULT;
|
||||
goto err_copy;
|
||||
}
|
||||
@ -655,8 +650,8 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
|
||||
if (copy_from_user(&cmd, buf, sizeof cmd))
|
||||
return -EFAULT;
|
||||
|
||||
INIT_UDATA(&udata, buf + sizeof(cmd),
|
||||
(unsigned long) cmd.response + sizeof(resp),
|
||||
ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
|
||||
u64_to_user_ptr(cmd.response) + sizeof(resp),
|
||||
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
|
||||
out_len - sizeof(resp));
|
||||
|
||||
@ -705,8 +700,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
|
||||
resp.rkey = mr->rkey;
|
||||
resp.mr_handle = uobj->id;
|
||||
|
||||
if (copy_to_user((void __user *) (unsigned long) cmd.response,
|
||||
&resp, sizeof resp)) {
|
||||
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
|
||||
ret = -EFAULT;
|
||||
goto err_copy;
|
||||
}
|
||||
@ -748,8 +742,8 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
|
||||
if (copy_from_user(&cmd, buf, sizeof(cmd)))
|
||||
return -EFAULT;
|
||||
|
||||
INIT_UDATA(&udata, buf + sizeof(cmd),
|
||||
(unsigned long) cmd.response + sizeof(resp),
|
||||
ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
|
||||
u64_to_user_ptr(cmd.response) + sizeof(resp),
|
||||
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
|
||||
out_len - sizeof(resp));
|
||||
|
||||
@ -800,8 +794,7 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
|
||||
resp.lkey = mr->lkey;
|
||||
resp.rkey = mr->rkey;
|
||||
|
||||
if (copy_to_user((void __user *)(unsigned long)cmd.response,
|
||||
&resp, sizeof(resp)))
|
||||
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
|
||||
ret = -EFAULT;
|
||||
else
|
||||
ret = in_len;
|
||||
@ -867,8 +860,8 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
|
||||
goto err_free;
|
||||
}
|
||||
|
||||
INIT_UDATA(&udata, buf + sizeof(cmd),
|
||||
(unsigned long)cmd.response + sizeof(resp),
|
||||
ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
|
||||
u64_to_user_ptr(cmd.response) + sizeof(resp),
|
||||
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
|
||||
out_len - sizeof(resp));
|
||||
|
||||
@ -889,8 +882,7 @@ ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
|
||||
resp.rkey = mw->rkey;
|
||||
resp.mw_handle = uobj->id;
|
||||
|
||||
if (copy_to_user((void __user *)(unsigned long)cmd.response,
|
||||
&resp, sizeof(resp))) {
|
||||
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp))) {
|
||||
ret = -EFAULT;
|
||||
goto err_copy;
|
||||
}
|
||||
@ -956,8 +948,7 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
|
||||
uobj_file.uobj);
|
||||
ib_uverbs_init_event_queue(&ev_file->ev_queue);
|
||||
|
||||
if (copy_to_user((void __user *) (unsigned long) cmd.response,
|
||||
&resp, sizeof resp)) {
|
||||
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
|
||||
uobj_alloc_abort(uobj);
|
||||
return -EFAULT;
|
||||
}
|
||||
@ -1087,10 +1078,11 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
|
||||
if (copy_from_user(&cmd, buf, sizeof(cmd)))
|
||||
return -EFAULT;
|
||||
|
||||
INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd), sizeof(resp));
|
||||
ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
|
||||
sizeof(cmd), sizeof(resp));
|
||||
|
||||
INIT_UDATA(&uhw, buf + sizeof(cmd),
|
||||
(unsigned long)cmd.response + sizeof(resp),
|
||||
ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
|
||||
u64_to_user_ptr(cmd.response) + sizeof(resp),
|
||||
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
|
||||
out_len - sizeof(resp));
|
||||
|
||||
@ -1173,8 +1165,8 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
|
||||
if (copy_from_user(&cmd, buf, sizeof cmd))
|
||||
return -EFAULT;
|
||||
|
||||
INIT_UDATA(&udata, buf + sizeof(cmd),
|
||||
(unsigned long) cmd.response + sizeof(resp),
|
||||
ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
|
||||
u64_to_user_ptr(cmd.response) + sizeof(resp),
|
||||
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
|
||||
out_len - sizeof(resp));
|
||||
|
||||
@ -1188,8 +1180,7 @@ ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
|
||||
|
||||
resp.cqe = cq->cqe;
|
||||
|
||||
if (copy_to_user((void __user *) (unsigned long) cmd.response,
|
||||
&resp, sizeof resp.cqe))
|
||||
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp.cqe))
|
||||
ret = -EFAULT;
|
||||
|
||||
out:
|
||||
@ -1249,7 +1240,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
|
||||
return -EINVAL;
|
||||
|
||||
/* we copy a struct ib_uverbs_poll_cq_resp to user space */
|
||||
header_ptr = (void __user *)(unsigned long) cmd.response;
|
||||
header_ptr = u64_to_user_ptr(cmd.response);
|
||||
data_ptr = header_ptr + sizeof resp;
|
||||
|
||||
memset(&resp, 0, sizeof resp);
|
||||
@ -1343,8 +1334,7 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
|
||||
resp.async_events_reported = obj->async_events_reported;
|
||||
|
||||
uverbs_uobject_put(uobj);
|
||||
if (copy_to_user((void __user *) (unsigned long) cmd.response,
|
||||
&resp, sizeof resp))
|
||||
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
|
||||
return -EFAULT;
|
||||
|
||||
return in_len;
|
||||
@ -1501,7 +1491,8 @@ static int create_qp(struct ib_uverbs_file *file,
|
||||
IB_QP_CREATE_MANAGED_RECV |
|
||||
IB_QP_CREATE_SCATTER_FCS |
|
||||
IB_QP_CREATE_CVLAN_STRIPPING |
|
||||
IB_QP_CREATE_SOURCE_QPN)) {
|
||||
IB_QP_CREATE_SOURCE_QPN |
|
||||
IB_QP_CREATE_PCI_WRITE_END_PADDING)) {
|
||||
ret = -EINVAL;
|
||||
goto err_put;
|
||||
}
|
||||
@ -1650,10 +1641,10 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
|
||||
if (copy_from_user(&cmd, buf, sizeof(cmd)))
|
||||
return -EFAULT;
|
||||
|
||||
INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
|
||||
resp_size);
|
||||
INIT_UDATA(&uhw, buf + sizeof(cmd),
|
||||
(unsigned long)cmd.response + resp_size,
|
||||
ib_uverbs_init_udata(&ucore, buf, u64_to_user_ptr(cmd.response),
|
||||
sizeof(cmd), resp_size);
|
||||
ib_uverbs_init_udata(&uhw, buf + sizeof(cmd),
|
||||
u64_to_user_ptr(cmd.response) + resp_size,
|
||||
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
|
||||
out_len - resp_size);
|
||||
|
||||
@ -1750,8 +1741,8 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
|
||||
if (copy_from_user(&cmd, buf, sizeof cmd))
|
||||
return -EFAULT;
|
||||
|
||||
INIT_UDATA(&udata, buf + sizeof(cmd),
|
||||
(unsigned long) cmd.response + sizeof(resp),
|
||||
ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
|
||||
u64_to_user_ptr(cmd.response) + sizeof(resp),
|
||||
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
|
||||
out_len - sizeof(resp));
|
||||
|
||||
@ -1795,8 +1786,7 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
|
||||
resp.qpn = qp->qp_num;
|
||||
resp.qp_handle = obj->uevent.uobject.id;
|
||||
|
||||
if (copy_to_user((void __user *) (unsigned long) cmd.response,
|
||||
&resp, sizeof resp)) {
|
||||
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
|
||||
ret = -EFAULT;
|
||||
goto err_destroy;
|
||||
}
|
||||
@ -1911,8 +1901,7 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
|
||||
resp.max_inline_data = init_attr->cap.max_inline_data;
|
||||
resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
|
||||
|
||||
if (copy_to_user((void __user *) (unsigned long) cmd.response,
|
||||
&resp, sizeof resp))
|
||||
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
|
||||
ret = -EFAULT;
|
||||
|
||||
out:
|
||||
@ -2042,7 +2031,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
|
||||
~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
INIT_UDATA(&udata, buf + sizeof(cmd.base), NULL,
|
||||
ib_uverbs_init_udata(&udata, buf + sizeof(cmd.base), NULL,
|
||||
in_len - sizeof(cmd.base) - sizeof(struct ib_uverbs_cmd_hdr),
|
||||
out_len);
|
||||
|
||||
@ -2126,8 +2115,7 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
|
||||
resp.events_reported = obj->uevent.events_reported;
|
||||
uverbs_uobject_put(uobj);
|
||||
|
||||
if (copy_to_user((void __user *) (unsigned long) cmd.response,
|
||||
&resp, sizeof resp))
|
||||
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
|
||||
return -EFAULT;
|
||||
|
||||
return in_len;
|
||||
@ -2311,8 +2299,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
|
||||
break;
|
||||
}
|
||||
|
||||
if (copy_to_user((void __user *) (unsigned long) cmd.response,
|
||||
&resp, sizeof resp))
|
||||
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
|
||||
ret = -EFAULT;
|
||||
|
||||
out_put:
|
||||
@ -2460,8 +2447,7 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
|
||||
}
|
||||
}
|
||||
|
||||
if (copy_to_user((void __user *) (unsigned long) cmd.response,
|
||||
&resp, sizeof resp))
|
||||
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
|
||||
ret = -EFAULT;
|
||||
|
||||
out:
|
||||
@ -2510,8 +2496,7 @@ ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
|
||||
break;
|
||||
}
|
||||
|
||||
if (copy_to_user((void __user *) (unsigned long) cmd.response,
|
||||
&resp, sizeof resp))
|
||||
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
|
||||
ret = -EFAULT;
|
||||
|
||||
out:
|
||||
@ -2537,7 +2522,6 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
|
||||
struct rdma_ah_attr attr;
|
||||
int ret;
|
||||
struct ib_udata udata;
|
||||
u8 *dmac;
|
||||
|
||||
if (out_len < sizeof resp)
|
||||
return -ENOSPC;
|
||||
@ -2548,8 +2532,8 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
|
||||
if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
|
||||
return -EINVAL;
|
||||
|
||||
INIT_UDATA(&udata, buf + sizeof(cmd),
|
||||
(unsigned long)cmd.response + sizeof(resp),
|
||||
ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
|
||||
u64_to_user_ptr(cmd.response) + sizeof(resp),
|
||||
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
|
||||
out_len - sizeof(resp));
|
||||
|
||||
@ -2580,28 +2564,20 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
|
||||
} else {
|
||||
rdma_ah_set_ah_flags(&attr, 0);
|
||||
}
|
||||
dmac = rdma_ah_retrieve_dmac(&attr);
|
||||
if (dmac)
|
||||
memset(dmac, 0, ETH_ALEN);
|
||||
|
||||
ah = pd->device->create_ah(pd, &attr, &udata);
|
||||
|
||||
ah = rdma_create_user_ah(pd, &attr, &udata);
|
||||
if (IS_ERR(ah)) {
|
||||
ret = PTR_ERR(ah);
|
||||
goto err_put;
|
||||
}
|
||||
|
||||
ah->device = pd->device;
|
||||
ah->pd = pd;
|
||||
atomic_inc(&pd->usecnt);
|
||||
ah->uobject = uobj;
|
||||
uobj->user_handle = cmd.user_handle;
|
||||
uobj->object = ah;
|
||||
|
||||
resp.ah_handle = uobj->id;
|
||||
|
||||
if (copy_to_user((void __user *) (unsigned long) cmd.response,
|
||||
&resp, sizeof resp)) {
|
||||
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp)) {
|
||||
ret = -EFAULT;
|
||||
goto err_copy;
|
||||
}
|
||||
@ -3627,8 +3603,8 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
|
||||
xcmd.max_sge = cmd.max_sge;
|
||||
xcmd.srq_limit = cmd.srq_limit;
|
||||
|
||||
INIT_UDATA(&udata, buf + sizeof(cmd),
|
||||
(unsigned long) cmd.response + sizeof(resp),
|
||||
ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
|
||||
u64_to_user_ptr(cmd.response) + sizeof(resp),
|
||||
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
|
||||
out_len - sizeof(resp));
|
||||
|
||||
@ -3654,8 +3630,8 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
|
||||
if (copy_from_user(&cmd, buf, sizeof cmd))
|
||||
return -EFAULT;
|
||||
|
||||
INIT_UDATA(&udata, buf + sizeof(cmd),
|
||||
(unsigned long) cmd.response + sizeof(resp),
|
||||
ib_uverbs_init_udata(&udata, buf + sizeof(cmd),
|
||||
u64_to_user_ptr(cmd.response) + sizeof(resp),
|
||||
in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
|
||||
out_len - sizeof(resp));
|
||||
|
||||
@ -3680,7 +3656,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
|
||||
if (copy_from_user(&cmd, buf, sizeof cmd))
|
||||
return -EFAULT;
|
||||
|
||||
INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
|
||||
ib_uverbs_init_udata(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
|
||||
out_len);
|
||||
|
||||
srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
|
||||
@ -3731,8 +3707,7 @@ ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
|
||||
resp.max_sge = attr.max_sge;
|
||||
resp.srq_limit = attr.srq_limit;
|
||||
|
||||
if (copy_to_user((void __user *) (unsigned long) cmd.response,
|
||||
&resp, sizeof resp))
|
||||
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof resp))
|
||||
return -EFAULT;
|
||||
|
||||
return in_len;
|
||||
@ -3773,8 +3748,7 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
|
||||
}
|
||||
resp.events_reported = obj->events_reported;
|
||||
uverbs_uobject_put(uobj);
|
||||
if (copy_to_user((void __user *)(unsigned long)cmd.response,
|
||||
&resp, sizeof(resp)))
|
||||
if (copy_to_user(u64_to_user_ptr(cmd.response), &resp, sizeof(resp)))
|
||||
return -EFAULT;
|
||||
|
||||
return in_len;
|
||||
@ -3878,7 +3852,58 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
|
||||
resp.tm_caps.max_sge = attr.tm_caps.max_sge;
|
||||
resp.tm_caps.flags = attr.tm_caps.flags;
|
||||
resp.response_length += sizeof(resp.tm_caps);
|
||||
|
||||
if (ucore->outlen < resp.response_length + sizeof(resp.cq_moderation_caps))
|
||||
goto end;
|
||||
|
||||
resp.cq_moderation_caps.max_cq_moderation_count =
|
||||
attr.cq_caps.max_cq_moderation_count;
|
||||
resp.cq_moderation_caps.max_cq_moderation_period =
|
||||
attr.cq_caps.max_cq_moderation_period;
|
||||
resp.response_length += sizeof(resp.cq_moderation_caps);
|
||||
end:
|
||||
err = ib_copy_to_udata(ucore, &resp, resp.response_length);
|
||||
return err;
|
||||
}
|
||||
|
||||
int ib_uverbs_ex_modify_cq(struct ib_uverbs_file *file,
|
||||
struct ib_device *ib_dev,
|
||||
struct ib_udata *ucore,
|
||||
struct ib_udata *uhw)
|
||||
{
|
||||
struct ib_uverbs_ex_modify_cq cmd = {};
|
||||
struct ib_cq *cq;
|
||||
size_t required_cmd_sz;
|
||||
int ret;
|
||||
|
||||
required_cmd_sz = offsetof(typeof(cmd), reserved) +
|
||||
sizeof(cmd.reserved);
|
||||
if (ucore->inlen < required_cmd_sz)
|
||||
return -EINVAL;
|
||||
|
||||
/* sanity checks */
|
||||
if (ucore->inlen > sizeof(cmd) &&
|
||||
!ib_is_udata_cleared(ucore, sizeof(cmd),
|
||||
ucore->inlen - sizeof(cmd)))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (!cmd.attr_mask || cmd.reserved)
|
||||
return -EINVAL;
|
||||
|
||||
if (cmd.attr_mask > IB_CQ_MODERATE)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
|
||||
if (!cq)
|
||||
return -EINVAL;
|
||||
|
||||
ret = rdma_set_cq_moderation(cq, cmd.attr.cq_count, cmd.attr.cq_period);
|
||||
|
||||
uobj_put_obj_read(cq);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -241,9 +241,7 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
|
||||
struct uverbs_attr *curr_attr;
|
||||
unsigned long *curr_bitmap;
|
||||
size_t ctx_size;
|
||||
#ifdef UVERBS_OPTIMIZE_USING_STACK_SZ
|
||||
uintptr_t data[UVERBS_OPTIMIZE_USING_STACK_SZ / sizeof(uintptr_t)];
|
||||
#endif
|
||||
|
||||
if (hdr->reserved)
|
||||
return -EINVAL;
|
||||
@ -269,13 +267,10 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
|
||||
(method_spec->num_child_attrs / BITS_PER_LONG +
|
||||
method_spec->num_buckets);
|
||||
|
||||
#ifdef UVERBS_OPTIMIZE_USING_STACK_SZ
|
||||
if (ctx_size <= UVERBS_OPTIMIZE_USING_STACK_SZ)
|
||||
ctx = (void *)data;
|
||||
|
||||
if (!ctx)
|
||||
#endif
|
||||
ctx = kmalloc(ctx_size, GFP_KERNEL);
|
||||
ctx = kmalloc(ctx_size, GFP_KERNEL);
|
||||
if (!ctx)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -311,10 +306,8 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
|
||||
err = uverbs_handle_method(buf, ctx->uattrs, hdr->num_attrs, ib_dev,
|
||||
file, method_spec, ctx->uverbs_attr_bundle);
|
||||
out:
|
||||
#ifdef UVERBS_OPTIMIZE_USING_STACK_SZ
|
||||
if (ctx_size > UVERBS_OPTIMIZE_USING_STACK_SZ)
|
||||
#endif
|
||||
kfree(ctx);
|
||||
if (ctx != (void *)data)
|
||||
kfree(ctx);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -376,7 +376,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me
|
||||
min_id) ||
|
||||
WARN(attr_obj_with_special_access &&
|
||||
!(attr->flags & UVERBS_ATTR_SPEC_F_MANDATORY),
|
||||
"ib_uverbs: Tried to merge attr (%d) but it's an object with new/destroy aceess but isn't mandatory\n",
|
||||
"ib_uverbs: Tried to merge attr (%d) but it's an object with new/destroy access but isn't mandatory\n",
|
||||
min_id) ||
|
||||
WARN(IS_ATTR_OBJECT(attr) &&
|
||||
attr->flags & UVERBS_ATTR_SPEC_F_MIN_SZ,
|
||||
|
@ -128,6 +128,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
|
||||
[IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL] = ib_uverbs_ex_create_rwq_ind_table,
|
||||
[IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL] = ib_uverbs_ex_destroy_rwq_ind_table,
|
||||
[IB_USER_VERBS_EX_CMD_MODIFY_QP] = ib_uverbs_ex_modify_qp,
|
||||
[IB_USER_VERBS_EX_CMD_MODIFY_CQ] = ib_uverbs_ex_modify_cq,
|
||||
};
|
||||
|
||||
static void ib_uverbs_add_one(struct ib_device *device);
|
||||
@ -763,7 +764,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
|
||||
}
|
||||
|
||||
if (!access_ok(VERIFY_WRITE,
|
||||
(void __user *) (unsigned long) ex_hdr.response,
|
||||
u64_to_user_ptr(ex_hdr.response),
|
||||
(hdr.out_words + ex_hdr.provider_out_words) * 8)) {
|
||||
ret = -EFAULT;
|
||||
goto out;
|
||||
@ -775,19 +776,17 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
|
||||
}
|
||||
}
|
||||
|
||||
INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
|
||||
hdr.in_words * 8, hdr.out_words * 8);
|
||||
ib_uverbs_init_udata_buf_or_null(&ucore, buf,
|
||||
u64_to_user_ptr(ex_hdr.response),
|
||||
hdr.in_words * 8, hdr.out_words * 8);
|
||||
|
||||
INIT_UDATA_BUF_OR_NULL(&uhw,
|
||||
buf + ucore.inlen,
|
||||
(unsigned long) ex_hdr.response + ucore.outlen,
|
||||
ex_hdr.provider_in_words * 8,
|
||||
ex_hdr.provider_out_words * 8);
|
||||
ib_uverbs_init_udata_buf_or_null(&uhw,
|
||||
buf + ucore.inlen,
|
||||
u64_to_user_ptr(ex_hdr.response) + ucore.outlen,
|
||||
ex_hdr.provider_in_words * 8,
|
||||
ex_hdr.provider_out_words * 8);
|
||||
|
||||
ret = uverbs_ex_cmd_table[command](file,
|
||||
ib_dev,
|
||||
&ucore,
|
||||
&uhw);
|
||||
ret = uverbs_ex_cmd_table[command](file, ib_dev, &ucore, &uhw);
|
||||
if (!ret)
|
||||
ret = written_count;
|
||||
} else {
|
||||
|
@ -69,8 +69,7 @@ void ib_copy_ah_attr_to_user(struct ib_device *device,
|
||||
memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
|
||||
|
||||
if ((ah_attr->type == RDMA_AH_ATTR_TYPE_OPA) &&
|
||||
(rdma_ah_get_dlid(ah_attr) >=
|
||||
be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
|
||||
(rdma_ah_get_dlid(ah_attr) > be16_to_cpu(IB_LID_PERMISSIVE)) &&
|
||||
(!rdma_ah_conv_opa_to_ib(device, &conv_ah, ah_attr)))
|
||||
src = &conv_ah;
|
||||
|
||||
@ -176,18 +175,18 @@ EXPORT_SYMBOL(ib_copy_path_rec_to_user);
|
||||
void ib_copy_path_rec_from_user(struct sa_path_rec *dst,
|
||||
struct ib_user_path_rec *src)
|
||||
{
|
||||
__be32 slid, dlid;
|
||||
u32 slid, dlid;
|
||||
|
||||
memset(dst, 0, sizeof(*dst));
|
||||
if ((ib_is_opa_gid((union ib_gid *)src->sgid)) ||
|
||||
(ib_is_opa_gid((union ib_gid *)src->dgid))) {
|
||||
dst->rec_type = SA_PATH_REC_TYPE_OPA;
|
||||
slid = htonl(opa_get_lid_from_gid((union ib_gid *)src->sgid));
|
||||
dlid = htonl(opa_get_lid_from_gid((union ib_gid *)src->dgid));
|
||||
slid = opa_get_lid_from_gid((union ib_gid *)src->sgid);
|
||||
dlid = opa_get_lid_from_gid((union ib_gid *)src->dgid);
|
||||
} else {
|
||||
dst->rec_type = SA_PATH_REC_TYPE_IB;
|
||||
slid = htonl(ntohs(src->slid));
|
||||
dlid = htonl(ntohs(src->dlid));
|
||||
slid = ntohs(src->slid);
|
||||
dlid = ntohs(src->dlid);
|
||||
}
|
||||
memcpy(dst->dgid.raw, src->dgid, sizeof dst->dgid);
|
||||
memcpy(dst->sgid.raw, src->sgid, sizeof dst->sgid);
|
||||
|
@ -227,26 +227,26 @@ static void create_udata(struct uverbs_attr_bundle *ctx,
|
||||
* to use uverbs_attr_bundle instead of ib_udata.
|
||||
* Assume attr == 0 is input and attr == 1 is output.
|
||||
*/
|
||||
void __user *inbuf;
|
||||
size_t inbuf_len = 0;
|
||||
void __user *outbuf;
|
||||
size_t outbuf_len = 0;
|
||||
const struct uverbs_attr *uhw_in =
|
||||
uverbs_attr_get(ctx, UVERBS_UHW_IN);
|
||||
const struct uverbs_attr *uhw_out =
|
||||
uverbs_attr_get(ctx, UVERBS_UHW_OUT);
|
||||
|
||||
if (!IS_ERR(uhw_in)) {
|
||||
inbuf = uhw_in->ptr_attr.ptr;
|
||||
inbuf_len = uhw_in->ptr_attr.len;
|
||||
udata->inbuf = uhw_in->ptr_attr.ptr;
|
||||
udata->inlen = uhw_in->ptr_attr.len;
|
||||
} else {
|
||||
udata->inbuf = NULL;
|
||||
udata->inlen = 0;
|
||||
}
|
||||
|
||||
if (!IS_ERR(uhw_out)) {
|
||||
outbuf = uhw_out->ptr_attr.ptr;
|
||||
outbuf_len = uhw_out->ptr_attr.len;
|
||||
udata->outbuf = uhw_out->ptr_attr.ptr;
|
||||
udata->outlen = uhw_out->ptr_attr.len;
|
||||
} else {
|
||||
udata->outbuf = NULL;
|
||||
udata->outlen = 0;
|
||||
}
|
||||
|
||||
INIT_UDATA_BUF_OR_NULL(udata, inbuf, outbuf, inbuf_len, outbuf_len);
|
||||
}
|
||||
|
||||
static int uverbs_create_cq_handler(struct ib_device *ib_dev,
|
||||
|
@ -53,6 +53,9 @@
|
||||
|
||||
#include "core_priv.h"
|
||||
|
||||
static int ib_resolve_eth_dmac(struct ib_device *device,
|
||||
struct rdma_ah_attr *ah_attr);
|
||||
|
||||
static const char * const ib_events[] = {
|
||||
[IB_EVENT_CQ_ERR] = "CQ error",
|
||||
[IB_EVENT_QP_FATAL] = "QP fatal error",
|
||||
@ -302,11 +305,13 @@ EXPORT_SYMBOL(ib_dealloc_pd);
|
||||
|
||||
/* Address handles */
|
||||
|
||||
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
|
||||
static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
|
||||
struct rdma_ah_attr *ah_attr,
|
||||
struct ib_udata *udata)
|
||||
{
|
||||
struct ib_ah *ah;
|
||||
|
||||
ah = pd->device->create_ah(pd, ah_attr, NULL);
|
||||
ah = pd->device->create_ah(pd, ah_attr, udata);
|
||||
|
||||
if (!IS_ERR(ah)) {
|
||||
ah->device = pd->device;
|
||||
@ -318,8 +323,42 @@ struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
|
||||
|
||||
return ah;
|
||||
}
|
||||
|
||||
struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr)
|
||||
{
|
||||
return _rdma_create_ah(pd, ah_attr, NULL);
|
||||
}
|
||||
EXPORT_SYMBOL(rdma_create_ah);
|
||||
|
||||
/**
 * rdma_create_user_ah - Creates an address handle for the
 * given address vector.
 * It resolves destination mac address for ah attribute of RoCE type.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 * @udata: pointer to the user's input/output buffer information needed by
 *         the provider driver.
 *
 * It returns a pointer to the new address handle on success and an
 * ERR_PTR-encoded error code on failure.
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
				  struct rdma_ah_attr *ah_attr,
				  struct ib_udata *udata)
{
	int err;

	if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
		err = ib_resolve_eth_dmac(pd->device, ah_attr);
		if (err)
			return ERR_PTR(err);
	}

	return _rdma_create_ah(pd, ah_attr, udata);
}
EXPORT_SYMBOL(rdma_create_user_ah);
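A hedged usage sketch for the new export: a caller that already holds user udata (a uverbs handler, for instance) passes it straight through so the provider driver can consume the user's AH input/output blobs. Names and the error handling below are illustrative only:

#include <rdma/ib_verbs.h>
#include <linux/err.h>
#include <linux/printk.h>

static struct ib_ah *example_user_ah(struct ib_pd *pd,
				     struct rdma_ah_attr *attr,
				     struct ib_udata *udata)
{
	struct ib_ah *ah;

	/* RoCE DMAC resolution happens inside rdma_create_user_ah() */
	ah = rdma_create_user_ah(pd, attr, udata);
	if (IS_ERR(ah))
		pr_debug("AH creation failed: %ld\n", PTR_ERR(ah));
	return ah;
}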
|
||||
int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
|
||||
{
|
||||
const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
|
||||
@ -1221,8 +1260,8 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
|
||||
}
|
||||
EXPORT_SYMBOL(ib_modify_qp_is_ok);
|
||||
|
||||
int ib_resolve_eth_dmac(struct ib_device *device,
|
||||
struct rdma_ah_attr *ah_attr)
|
||||
static int ib_resolve_eth_dmac(struct ib_device *device,
|
||||
struct rdma_ah_attr *ah_attr)
|
||||
{
|
||||
int ret = 0;
|
||||
struct ib_global_route *grh;
|
||||
@ -1281,7 +1320,6 @@ int ib_resolve_eth_dmac(struct ib_device *device,
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(ib_resolve_eth_dmac);
|
||||
|
||||
/**
|
||||
* ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
|
||||
@@ -1512,12 +1550,12 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
}
EXPORT_SYMBOL(ib_create_cq);

-int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
-EXPORT_SYMBOL(ib_modify_cq);
+EXPORT_SYMBOL(rdma_set_cq_moderation);

int ib_destroy_cq(struct ib_cq *cq)
{
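A hedged usage sketch for the renamed helper: the caller asks the provider to coalesce CQ events. The exact units are provider specific (commonly a completion count and a period in microseconds), and the values below are purely illustrative:

#include <rdma/ib_verbs.h>
#include <linux/printk.h>

static int example_moderate_cq(struct ib_cq *cq)
{
	/* fire an event after ~16 completions or ~128 usec, whichever first */
	int ret = rdma_set_cq_moderation(cq, 16, 128);

	if (ret == -ENOSYS)
		pr_debug("provider has no modify_cq support\n");
	return ret;
}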
@ -394,6 +394,7 @@ int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
|
||||
ctx->idx = tbl_idx;
|
||||
ctx->refcnt = 1;
|
||||
ctx_tbl[tbl_idx] = ctx;
|
||||
*context = ctx;
|
||||
|
||||
return rc;
|
||||
}
|
||||
@ -665,7 +666,6 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
|
||||
struct bnxt_re_ah *ah;
|
||||
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
|
||||
int rc;
|
||||
u16 vlan_tag;
|
||||
u8 nw_type;
|
||||
|
||||
struct ib_gid_attr sgid_attr;
|
||||
@ -711,11 +711,8 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
|
||||
grh->sgid_index);
|
||||
goto fail;
|
||||
}
|
||||
if (sgid_attr.ndev) {
|
||||
if (is_vlan_dev(sgid_attr.ndev))
|
||||
vlan_tag = vlan_dev_vlan_id(sgid_attr.ndev);
|
||||
if (sgid_attr.ndev)
|
||||
dev_put(sgid_attr.ndev);
|
||||
}
|
||||
/* Get network header type for this GID */
|
||||
nw_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
|
||||
switch (nw_type) {
|
||||
@ -729,14 +726,6 @@ struct ib_ah *bnxt_re_create_ah(struct ib_pd *ib_pd,
|
||||
ah->qplib_ah.nw_type = CMDQ_CREATE_AH_TYPE_V1;
|
||||
break;
|
||||
}
|
||||
rc = rdma_addr_find_l2_eth_by_grh(&sgid, &grh->dgid,
|
||||
ah_attr->roce.dmac, &vlan_tag,
|
||||
&sgid_attr.ndev->ifindex,
|
||||
NULL);
|
||||
if (rc) {
|
||||
dev_err(rdev_to_dev(rdev), "Failed to get dmac\n");
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
|
||||
memcpy(ah->qplib_ah.dmac, ah_attr->roce.dmac, ETH_ALEN);
|
||||
@ -796,6 +785,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
|
||||
struct bnxt_re_dev *rdev = qp->rdev;
|
||||
int rc;
|
||||
|
||||
bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
|
||||
bnxt_qplib_del_flush_qp(&qp->qplib_qp);
|
||||
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
|
||||
if (rc) {
|
||||
@ -1643,7 +1633,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
|
||||
u8 ip_version = 0;
|
||||
u16 vlan_id = 0xFFFF;
|
||||
void *buf;
|
||||
int i, rc = 0, size;
|
||||
int i, rc = 0;
|
||||
|
||||
memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
|
||||
|
||||
@ -1760,7 +1750,7 @@ static int bnxt_re_build_qp1_send_v2(struct bnxt_re_qp *qp,
|
||||
/* Pack the QP1 to the transmit buffer */
|
||||
buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
|
||||
if (buf) {
|
||||
size = ib_ud_header_pack(&qp->qp1_hdr, buf);
|
||||
ib_ud_header_pack(&qp->qp1_hdr, buf);
|
||||
for (i = wqe->num_sge; i; i--) {
|
||||
wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
|
||||
wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
|
||||
@ -2216,7 +2206,7 @@ static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
|
||||
struct ib_recv_wr *wr)
|
||||
{
|
||||
struct bnxt_qplib_swqe wqe;
|
||||
int rc = 0, payload_sz = 0;
|
||||
int rc = 0;
|
||||
|
||||
memset(&wqe, 0, sizeof(wqe));
|
||||
while (wr) {
|
||||
@ -2231,8 +2221,7 @@ static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
|
||||
rc = -EINVAL;
|
||||
break;
|
||||
}
|
||||
payload_sz = bnxt_re_build_sgl(wr->sg_list, wqe.sg_list,
|
||||
wr->num_sge);
|
||||
bnxt_re_build_sgl(wr->sg_list, wqe.sg_list, wr->num_sge);
|
||||
wqe.wr_id = wr->wr_id;
|
||||
wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
|
||||
|
||||
@ -2569,7 +2558,7 @@ static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
|
||||
static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
|
||||
u16 raweth_qp1_flags2)
|
||||
{
|
||||
bool is_udp = false, is_ipv6 = false, is_ipv4 = false;
|
||||
bool is_ipv6 = false, is_ipv4 = false;
|
||||
|
||||
/* raweth_qp1_flags Bit 9-6 indicates itype */
|
||||
if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
|
||||
@ -2580,7 +2569,6 @@ static int bnxt_re_check_packet_type(u16 raweth_qp1_flags,
|
||||
CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
|
||||
raweth_qp1_flags2 &
|
||||
CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
|
||||
is_udp = true;
|
||||
/* raweth_qp1_flags2 Bit 8 indicates ip_type. 0-v4 1 - v6 */
|
||||
(raweth_qp1_flags2 &
|
||||
CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
|
||||
@ -2781,6 +2769,32 @@ static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
|
||||
wc->wc_flags |= IB_WC_GRH;
|
||||
}
|
||||
|
||||
static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
|
||||
u16 *vid, u8 *sl)
|
||||
{
|
||||
bool ret = false;
|
||||
u32 metadata;
|
||||
u16 tpid;
|
||||
|
||||
metadata = orig_cqe->raweth_qp1_metadata;
|
||||
if (orig_cqe->raweth_qp1_flags2 &
|
||||
CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
|
||||
tpid = ((metadata &
|
||||
CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
|
||||
CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
|
||||
if (tpid == ETH_P_8021Q) {
|
||||
*vid = metadata &
|
||||
CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
|
||||
*sl = (metadata &
|
||||
CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
|
||||
CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
|
||||
ret = true;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
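For context, a hedged sketch of what bnxt_re_is_vlan_pkt() is doing: it splits a 32-bit completion metadata word into TPID, VLAN ID and priority fields. The mask/shift constants below are illustrative stand-ins, not the hardware's real layout:

#include <linux/types.h>
#include <linux/if_ether.h>	/* ETH_P_8021Q */

#define EX_META_VID_MASK	0x00000fffU	/* assumed layout */
#define EX_META_PRI_MASK	0x0000e000U
#define EX_META_PRI_SFT		13
#define EX_META_TPID_MASK	0xffff0000U
#define EX_META_TPID_SFT	16

static bool example_decode_vlan(u32 metadata, u16 *vid, u8 *prio)
{
	u16 tpid = (metadata & EX_META_TPID_MASK) >> EX_META_TPID_SFT;

	if (tpid != ETH_P_8021Q)
		return false;
	*vid = metadata & EX_META_VID_MASK;
	*prio = (metadata & EX_META_PRI_MASK) >> EX_META_PRI_SFT;
	return true;
}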
|
||||
static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
|
||||
struct bnxt_qplib_cqe *cqe)
|
||||
{
|
||||
@ -2800,12 +2814,14 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
|
||||
struct ib_wc *wc,
|
||||
struct bnxt_qplib_cqe *cqe)
|
||||
{
|
||||
u32 tbl_idx;
|
||||
struct bnxt_re_dev *rdev = qp->rdev;
|
||||
struct bnxt_re_qp *qp1_qp = NULL;
|
||||
struct bnxt_qplib_cqe *orig_cqe = NULL;
|
||||
struct bnxt_re_sqp_entries *sqp_entry = NULL;
|
||||
int nw_type;
|
||||
u32 tbl_idx;
|
||||
u16 vlan_id;
|
||||
u8 sl;
|
||||
|
||||
tbl_idx = cqe->wr_id;
|
||||
|
||||
@ -2820,6 +2836,11 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *qp,
|
||||
wc->ex.imm_data = orig_cqe->immdata;
|
||||
wc->src_qp = orig_cqe->src_qp;
|
||||
memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
|
||||
if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
|
||||
wc->vlan_id = vlan_id;
|
||||
wc->sl = sl;
|
||||
wc->wc_flags |= IB_WC_WITH_VLAN;
|
||||
}
|
||||
wc->port_num = 1;
|
||||
wc->vendor_err = orig_cqe->status;
|
||||
|
||||
@ -3008,8 +3029,10 @@ int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
|
||||
enum ib_cq_notify_flags ib_cqn_flags)
|
||||
{
|
||||
struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
|
||||
int type = 0;
|
||||
int type = 0, rc = 0;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&cq->cq_lock, flags);
|
||||
/* Trigger on the very next completion */
|
||||
if (ib_cqn_flags & IB_CQ_NEXT_COMP)
|
||||
type = DBR_DBR_TYPE_CQ_ARMALL;
|
||||
@ -3019,12 +3042,15 @@ int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
|
||||
|
||||
/* Poll to see if there are missed events */
|
||||
if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
|
||||
!(bnxt_qplib_is_cq_empty(&cq->qplib_cq)))
|
||||
return 1;
|
||||
|
||||
!(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) {
|
||||
rc = 1;
|
||||
goto exit;
|
||||
}
|
||||
bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
|
||||
|
||||
return 0;
|
||||
exit:
|
||||
spin_unlock_irqrestore(&cq->cq_lock, flags);
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* Memory Regions */
|
||||
|
@ -78,6 +78,7 @@ static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
|
||||
/* Mutex to protect the list of bnxt_re devices added */
|
||||
static DEFINE_MUTEX(bnxt_re_dev_lock);
|
||||
static struct workqueue_struct *bnxt_re_wq;
|
||||
static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait);
|
||||
|
||||
/* for handling bnxt_en callbacks later */
|
||||
static void bnxt_re_stop(void *p)
|
||||
@ -92,11 +93,22 @@ static void bnxt_re_sriov_config(void *p, int num_vfs)
|
||||
{
|
||||
}
|
||||
|
||||
static void bnxt_re_shutdown(void *p)
|
||||
{
|
||||
struct bnxt_re_dev *rdev = p;
|
||||
|
||||
if (!rdev)
|
||||
return;
|
||||
|
||||
bnxt_re_ib_unreg(rdev, false);
|
||||
}
|
||||
|
||||
static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
|
||||
.ulp_async_notifier = NULL,
|
||||
.ulp_stop = bnxt_re_stop,
|
||||
.ulp_start = bnxt_re_start,
|
||||
.ulp_sriov_config = bnxt_re_sriov_config
|
||||
.ulp_sriov_config = bnxt_re_sriov_config,
|
||||
.ulp_shutdown = bnxt_re_shutdown
|
||||
};
|
||||
|
||||
/* RoCE -> Net driver */
|
||||
@ -1071,9 +1083,10 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
|
||||
*/
|
||||
rc = bnxt_qplib_alloc_rcfw_channel(rdev->en_dev->pdev, &rdev->rcfw,
|
||||
BNXT_RE_MAX_QPC_COUNT);
|
||||
if (rc)
|
||||
if (rc) {
|
||||
pr_err("Failed to allocate RCFW Channel: %#x\n", rc);
|
||||
goto fail;
|
||||
|
||||
}
|
||||
rc = bnxt_re_net_ring_alloc
|
||||
(rdev, rdev->rcfw.creq.pbl[PBL_LVL_0].pg_map_arr,
|
||||
rdev->rcfw.creq.pbl[rdev->rcfw.creq.level].pg_count,
|
||||
|
@ -160,11 +160,6 @@ void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
|
||||
|
||||
static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
|
||||
{
|
||||
struct bnxt_qplib_cq *scq, *rcq;
|
||||
|
||||
scq = qp->scq;
|
||||
rcq = qp->rcq;
|
||||
|
||||
if (qp->sq.flushed) {
|
||||
qp->sq.flushed = false;
|
||||
list_del(&qp->sq_flush);
|
||||
@ -297,6 +292,12 @@ static void bnxt_qplib_service_nq(unsigned long data)
|
||||
if (!NQE_CMP_VALID(nqe, raw_cons, hwq->max_elements))
|
||||
break;
|
||||
|
||||
/*
|
||||
* The valid test of the entry must be done first before
|
||||
* reading any further.
|
||||
*/
|
||||
dma_rmb();
|
||||
|
||||
type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
|
||||
switch (type) {
|
||||
case NQ_BASE_TYPE_CQ_NOTIFICATION:
|
||||
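Several hunks in this driver add the same ordering rule, so a generic, hedged sketch of the pattern may help: the consumer checks the descriptor's valid bit first and only then issues dma_rmb() before reading the rest of the entry, otherwise the CPU may load stale fields ahead of the valid test. Structure and field names below are illustrative:

#include <asm/barrier.h>	/* dma_rmb() */
#include <linux/types.h>

struct example_cqe {
	u32 flags;		/* bit 0: valid, toggles each pass */
	u32 payload;
};

static bool example_consume(struct example_cqe *cqe, u32 expected_valid,
			    u32 *payload)
{
	if ((cqe->flags & 0x1) != expected_valid)
		return false;		/* entry not posted yet */

	/* The valid test must complete before any other field is read. */
	dma_rmb();

	*payload = cqe->payload;
	return true;
}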
@ -1118,6 +1119,11 @@ static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
|
||||
hw_cqe = &hw_cqe_ptr[CQE_PG(i)][CQE_IDX(i)];
|
||||
if (!CQE_CMP_VALID(hw_cqe, i, cq_hwq->max_elements))
|
||||
continue;
|
||||
/*
|
||||
* The valid test of the entry must be done first before
|
||||
* reading any further.
|
||||
*/
|
||||
dma_rmb();
|
||||
switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
|
||||
case CQ_BASE_CQE_TYPE_REQ:
|
||||
case CQ_BASE_CQE_TYPE_TERMINAL:
|
||||
@ -1360,7 +1366,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
|
||||
|
||||
break;
|
||||
}
|
||||
/* else, just fall thru */
|
||||
/* fall thru */
|
||||
case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
|
||||
case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
|
||||
{
|
||||
@ -1901,6 +1907,11 @@ static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
|
||||
/* If the next hwcqe is VALID */
|
||||
if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
|
||||
cq->hwq.max_elements)) {
|
||||
/*
|
||||
* The valid test of the entry must be done first before
|
||||
* reading any further.
|
||||
*/
|
||||
dma_rmb();
|
||||
/* If the next hwcqe is a REQ */
|
||||
if ((peek_hwcqe->cqe_type_toggle &
|
||||
CQ_BASE_CQE_TYPE_MASK) ==
|
||||
@ -2107,6 +2118,7 @@ static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
|
||||
*pcqe = cqe;
|
||||
|
||||
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
|
||||
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
|
||||
/* Add qp to flush list of the CQ */
|
||||
bnxt_qplib_lock_buddy_cq(qp, cq);
|
||||
__bnxt_qplib_add_flush_qp(qp);
|
||||
@ -2170,6 +2182,7 @@ static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
|
||||
*pcqe = cqe;
|
||||
|
||||
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
|
||||
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
|
||||
/* Add qp to flush list of the CQ */
|
||||
bnxt_qplib_lock_buddy_cq(qp, cq);
|
||||
__bnxt_qplib_add_flush_qp(qp);
|
||||
@ -2241,6 +2254,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
|
||||
|
||||
cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
|
||||
cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
|
||||
cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
|
||||
|
||||
rq = &qp->rq;
|
||||
if (wr_id_idx > rq->hwq.max_elements) {
|
||||
@ -2257,6 +2271,7 @@ static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
|
||||
*pcqe = cqe;
|
||||
|
||||
if (hwcqe->status != CQ_RES_RC_STATUS_OK) {
|
||||
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
|
||||
/* Add qp to flush list of the CQ */
|
||||
bnxt_qplib_lock_buddy_cq(qp, cq);
|
||||
__bnxt_qplib_add_flush_qp(qp);
|
||||
@ -2445,6 +2460,11 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
|
||||
if (!CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements))
|
||||
break;
|
||||
|
||||
/*
|
||||
* The valid test of the entry must be done first before
|
||||
* reading any further.
|
||||
*/
|
||||
dma_rmb();
|
||||
/* From the device's respective CQE format to qplib_wc*/
|
||||
switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
|
||||
case CQ_BASE_CQE_TYPE_REQ:
|
||||
@ -2518,3 +2538,10 @@ void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
|
||||
atomic_set(&cq->arm_state, 1);
|
||||
spin_unlock_irqrestore(&cq->hwq.lock, flags);
|
||||
}
|
||||
|
||||
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
|
||||
{
|
||||
flush_workqueue(qp->scq->nq->cqn_wq);
|
||||
if (qp->scq != qp->rcq)
|
||||
flush_workqueue(qp->rcq->nq->cqn_wq);
|
||||
}
|
||||
|
@ -478,4 +478,5 @@ void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
|
||||
int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
|
||||
struct bnxt_qplib_cqe *cqe,
|
||||
int num_cqes);
|
||||
void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp);
|
||||
#endif /* __BNXT_QPLIB_FP_H__ */
|
||||
|
@ -88,7 +88,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
|
||||
unsigned long flags;
|
||||
u32 size, opcode;
|
||||
u16 cookie, cbit;
|
||||
int pg, idx;
|
||||
u8 *preq;
|
||||
|
||||
opcode = req->opcode;
|
||||
@ -149,9 +148,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
|
||||
preq = (u8 *)req;
|
||||
size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
|
||||
do {
|
||||
pg = 0;
|
||||
idx = 0;
|
||||
|
||||
/* Locate the next cmdq slot */
|
||||
sw_prod = HWQ_CMP(cmdq->prod, cmdq);
|
||||
cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod)][get_cmdq_idx(sw_prod)];
|
||||
@ -172,14 +168,14 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
|
||||
rcfw->seq_num++;
|
||||
|
||||
cmdq_prod = cmdq->prod;
|
||||
if (rcfw->flags & FIRMWARE_FIRST_FLAG) {
|
||||
if (test_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags)) {
|
||||
/* The very first doorbell write
|
||||
* is required to set this flag
|
||||
* which prompts the FW to reset
|
||||
* its internal pointers
|
||||
*/
|
||||
cmdq_prod |= FIRMWARE_FIRST_FLAG;
|
||||
rcfw->flags &= ~FIRMWARE_FIRST_FLAG;
|
||||
cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
|
||||
clear_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
|
||||
}
|
||||
|
||||
/* ring CMDQ DB */
|
||||
@ -306,6 +302,8 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
|
||||
"QPLIB: qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
|
||||
qp_id, err_event->req_err_state_reason,
|
||||
err_event->res_err_state_reason);
|
||||
if (!qp)
|
||||
break;
|
||||
bnxt_qplib_acquire_cq_locks(qp, &flags);
|
||||
bnxt_qplib_mark_qp_error(qp);
|
||||
bnxt_qplib_release_cq_locks(qp, &flags);
|
||||
@ -361,6 +359,10 @@ static void bnxt_qplib_service_creq(unsigned long data)
|
||||
creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
|
||||
if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements))
|
||||
break;
|
||||
/* The valid test of the entry must be done first before
|
||||
* reading any further.
|
||||
*/
|
||||
dma_rmb();
|
||||
|
||||
type = creqe->type & CREQ_BASE_TYPE_MASK;
|
||||
switch (type) {
|
||||
@ -622,7 +624,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
|
||||
|
||||
/* General */
|
||||
rcfw->seq_num = 0;
|
||||
rcfw->flags = FIRMWARE_FIRST_FLAG;
|
||||
set_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
|
||||
bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD *
|
||||
sizeof(unsigned long));
|
||||
rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
|
||||
|
@ -162,9 +162,9 @@ struct bnxt_qplib_rcfw {
|
||||
unsigned long *cmdq_bitmap;
|
||||
u32 bmap_size;
|
||||
unsigned long flags;
|
||||
#define FIRMWARE_INITIALIZED_FLAG BIT(0)
|
||||
#define FIRMWARE_FIRST_FLAG BIT(31)
|
||||
#define FIRMWARE_TIMED_OUT BIT(3)
|
||||
#define FIRMWARE_INITIALIZED_FLAG 0
|
||||
#define FIRMWARE_FIRST_FLAG 31
|
||||
#define FIRMWARE_TIMED_OUT 3
|
||||
wait_queue_head_t waitq;
|
||||
int (*aeq_handler)(struct bnxt_qplib_rcfw *,
|
||||
struct creq_func_event *);
|
||||
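A hedged aside on the flags rework just above: set_bit()/test_bit()/clear_bit() take a bit number and an unsigned long *, which is why the BIT() mask definitions become plain bit positions. A generic sketch with illustrative names:

#include <linux/bitops.h>
#include <linux/types.h>

#define EX_FW_INITIALIZED	0	/* bit numbers, not masks */
#define EX_FW_TIMED_OUT		3
#define EX_FW_FIRST		31

static void example_mark_first(unsigned long *flags)
{
	set_bit(EX_FW_FIRST, flags);
}

static bool example_consume_first(unsigned long *flags)
{
	/* atomically test and clear, as the first-doorbell path needs once */
	return test_and_clear_bit(EX_FW_FIRST, flags);
}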
|
@ -169,7 +169,7 @@ struct bnxt_qplib_ctx {
|
||||
u32 cq_count;
|
||||
struct bnxt_qplib_hwq cq_tbl;
|
||||
struct bnxt_qplib_hwq tim_tbl;
|
||||
#define MAX_TQM_ALLOC_REQ 32
|
||||
#define MAX_TQM_ALLOC_REQ 48
|
||||
#define MAX_TQM_ALLOC_BLK_SIZE 8
|
||||
u8 tqm_count[MAX_TQM_ALLOC_REQ];
|
||||
struct bnxt_qplib_hwq tqm_pde;
|
||||
|
@ -720,13 +720,12 @@ int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
|
||||
struct cmdq_map_tc_to_cos req;
|
||||
struct creq_map_tc_to_cos_resp resp;
|
||||
u16 cmd_flags = 0;
|
||||
int rc = 0;
|
||||
|
||||
RCFW_CMD_PREP(req, MAP_TC_TO_COS, cmd_flags);
|
||||
req.cos0 = cpu_to_le16(cids[0]);
|
||||
req.cos1 = cpu_to_le16(cids[1]);
|
||||
|
||||
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
|
||||
(void *)&resp, NULL, 0);
|
||||
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL,
|
||||
0);
|
||||
return 0;
|
||||
}
|
||||
|
@ -2644,7 +2644,7 @@ struct creq_query_func_resp_sb {
|
||||
u8 l2_db_space_size;
|
||||
__le16 max_srq;
|
||||
__le32 max_gid;
|
||||
__le32 tqm_alloc_reqs[8];
|
||||
__le32 tqm_alloc_reqs[12];
|
||||
};
|
||||
|
||||
/* Set resources command response (16 bytes) */
|
||||
|
@ -1,6 +1,6 @@
|
||||
config INFINIBAND_CXGB3
|
||||
tristate "Chelsio RDMA Driver"
|
||||
depends on CHELSIO_T3 && INET
|
||||
depends on CHELSIO_T3
|
||||
select GENERIC_ALLOCATOR
|
||||
---help---
|
||||
This is an iWARP/RDMA driver for the Chelsio T3 1GbE and
|
||||
|
@ -404,12 +404,10 @@ static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
|
||||
|
||||
int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
|
||||
{
|
||||
__u32 ptr;
|
||||
__u32 ptr = wq->sq_rptr + count;
|
||||
int flushed = 0;
|
||||
struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2);
|
||||
struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
|
||||
|
||||
ptr = wq->sq_rptr + count;
|
||||
sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
|
||||
while (ptr != wq->sq_wptr) {
|
||||
sqp->signaled = 0;
|
||||
insert_sq_cqe(wq, cq, sqp);
|
||||
|
@ -107,7 +107,7 @@ static struct workqueue_struct *workq;
|
||||
static struct sk_buff_head rxq;
|
||||
|
||||
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
|
||||
static void ep_timeout(unsigned long arg);
|
||||
static void ep_timeout(struct timer_list *t);
|
||||
static void connect_reply_upcall(struct iwch_ep *ep, int status);
|
||||
|
||||
static void start_ep_timer(struct iwch_ep *ep)
|
||||
@ -119,8 +119,6 @@ static void start_ep_timer(struct iwch_ep *ep)
|
||||
} else
|
||||
get_ep(&ep->com);
|
||||
ep->timer.expires = jiffies + ep_timeout_secs * HZ;
|
||||
ep->timer.data = (unsigned long)ep;
|
||||
ep->timer.function = ep_timeout;
|
||||
add_timer(&ep->timer);
|
||||
}
|
||||
|
||||
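These cxgb3 hunks are part of the tree-wide timer_setup() conversion mentioned in the summary; a hedged sketch of the before/after pattern, with illustrative names:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_ep {
	struct timer_list timer;
	/* ... */
};

/* New-style callback: the timer_list pointer comes in and the containing
 * object is recovered with from_timer() instead of a casted .data field. */
static void example_ep_timeout(struct timer_list *t)
{
	struct example_ep *ep = from_timer(ep, t, timer);

	/* handle the timeout for ep */
	(void)ep;
}

static void example_start_timer(struct example_ep *ep)
{
	timer_setup(&ep->timer, example_ep_timeout, 0);
	mod_timer(&ep->timer, jiffies + 60 * HZ);
}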
@ -1399,7 +1397,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
|
||||
child_ep->l2t = l2t;
|
||||
child_ep->dst = dst;
|
||||
child_ep->hwtid = hwtid;
|
||||
init_timer(&child_ep->timer);
|
||||
timer_setup(&child_ep->timer, ep_timeout, 0);
|
||||
cxgb3_insert_tid(tdev, &t3c_client, child_ep, hwtid);
|
||||
accept_cr(child_ep, req->peer_ip, skb);
|
||||
goto out;
|
||||
@ -1719,9 +1717,9 @@ static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
|
||||
return CPL_RET_BUF_DONE;
|
||||
}
|
||||
|
||||
static void ep_timeout(unsigned long arg)
|
||||
static void ep_timeout(struct timer_list *t)
|
||||
{
|
||||
struct iwch_ep *ep = (struct iwch_ep *)arg;
|
||||
struct iwch_ep *ep = from_timer(ep, t, timer);
|
||||
struct iwch_qp_attributes attrs;
|
||||
unsigned long flags;
|
||||
int abort = 1;
|
||||
@ -1760,8 +1758,8 @@ static void ep_timeout(unsigned long arg)
|
||||
|
||||
int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
|
||||
{
|
||||
int err;
|
||||
struct iwch_ep *ep = to_ep(cm_id);
|
||||
|
||||
pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
|
||||
|
||||
if (state_read(&ep->com) == DEAD) {
|
||||
@ -1772,8 +1770,8 @@ int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
|
||||
if (mpa_rev == 0)
|
||||
abort_connection(ep, NULL, GFP_KERNEL);
|
||||
else {
|
||||
err = send_mpa_reject(ep, pdata, pdata_len);
|
||||
err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
|
||||
send_mpa_reject(ep, pdata, pdata_len);
|
||||
iwch_ep_disconnect(ep, 0, GFP_KERNEL);
|
||||
}
|
||||
put_ep(&ep->com);
|
||||
return 0;
|
||||
@ -1899,7 +1897,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
init_timer(&ep->timer);
|
||||
timer_setup(&ep->timer, ep_timeout, 0);
|
||||
ep->plen = conn_param->private_data_len;
|
||||
if (ep->plen)
|
||||
memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
|
||||
|
@ -969,7 +969,6 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
|
||||
insert_mmap(ucontext, mm2);
|
||||
}
|
||||
qhp->ibqp.qp_num = qhp->wq.qpid;
|
||||
init_timer(&(qhp->timer));
|
||||
pr_debug("%s sq_num_entries %d, rq_num_entries %d qpid 0x%0x qhp %p dma_addr 0x%llx size %d rq_addr 0x%x\n",
|
||||
__func__, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
|
||||
qhp->wq.qpid, qhp, (unsigned long long)qhp->wq.dma_addr,
|
||||
|
@ -168,7 +168,6 @@ struct iwch_qp {
|
||||
atomic_t refcnt;
|
||||
wait_queue_head_t wait;
|
||||
enum IWCH_QP_FLAGS flags;
|
||||
struct timer_list timer;
|
||||
};
|
||||
|
||||
static inline int qp_quiesced(struct iwch_qp *qhp)
|
||||
|
@ -722,10 +722,13 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
|
||||
*/
|
||||
static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
|
||||
struct iwch_cq *schp)
|
||||
__releases(&qhp->lock)
|
||||
__acquires(&qhp->lock)
|
||||
{
|
||||
int count;
|
||||
int flushed;
|
||||
|
||||
lockdep_assert_held(&qhp->lock);
|
||||
|
||||
pr_debug("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
|
||||
/* take a ref on the qhp since we must release the lock */
|
||||
|
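The __releases/__acquires annotations and lockdep_assert_held() added above follow a generic pattern worth a hedged sketch: the annotations tell sparse that the function is entered with the lock held, drops it internally, and re-takes it before returning, while lockdep_assert_held() makes the runtime expectation explicit. Names below are illustrative:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

static void example_flush(spinlock_t *lock)
	__releases(lock)
	__acquires(lock)
{
	lockdep_assert_held(lock);

	spin_unlock(lock);
	/* ... work that must run without the lock ... */
	spin_lock(lock);
}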
@ -1,6 +1,6 @@
|
||||
config INFINIBAND_CXGB4
|
||||
tristate "Chelsio T4/T5 RDMA Driver"
|
||||
depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
|
||||
depends on CHELSIO_T4 && INET
|
||||
select CHELSIO_LIB
|
||||
select GENERIC_ALLOCATOR
|
||||
---help---
|
||||
|
[one file's diff suppressed because it is too large]
@ -33,12 +33,12 @@
|
||||
#include "iw_cxgb4.h"
|
||||
|
||||
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
|
||||
struct c4iw_dev_ucontext *uctx, struct sk_buff *skb)
|
||||
struct c4iw_dev_ucontext *uctx, struct sk_buff *skb,
|
||||
struct c4iw_wr_wait *wr_waitp)
|
||||
{
|
||||
struct fw_ri_res_wr *res_wr;
|
||||
struct fw_ri_res *res;
|
||||
int wr_len;
|
||||
struct c4iw_wr_wait wr_wait;
|
||||
int ret;
|
||||
|
||||
wr_len = sizeof *res_wr + sizeof *res;
|
||||
@ -50,17 +50,14 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
|
||||
FW_RI_RES_WR_NRES_V(1) |
|
||||
FW_WR_COMPL_F);
|
||||
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
|
||||
res_wr->cookie = (uintptr_t)&wr_wait;
|
||||
res_wr->cookie = (uintptr_t)wr_waitp;
|
||||
res = res_wr->res;
|
||||
res->u.cq.restype = FW_RI_RES_TYPE_CQ;
|
||||
res->u.cq.op = FW_RI_RES_OP_RESET;
|
||||
res->u.cq.iqid = cpu_to_be32(cq->cqid);
|
||||
|
||||
c4iw_init_wr_wait(&wr_wait);
|
||||
ret = c4iw_ofld_send(rdev, skb);
|
||||
if (!ret) {
|
||||
ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
|
||||
}
|
||||
c4iw_init_wr_wait(wr_waitp);
|
||||
ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
|
||||
|
||||
kfree(cq->sw_queue);
|
||||
dma_free_coherent(&(rdev->lldi.pdev->dev),
|
||||
@ -71,13 +68,13 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
|
||||
}
|
||||
|
||||
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
|
||||
struct c4iw_dev_ucontext *uctx)
|
||||
struct c4iw_dev_ucontext *uctx,
|
||||
struct c4iw_wr_wait *wr_waitp)
|
||||
{
|
||||
struct fw_ri_res_wr *res_wr;
|
||||
struct fw_ri_res *res;
|
||||
int wr_len;
|
||||
int user = (uctx != &rdev->uctx);
|
||||
struct c4iw_wr_wait wr_wait;
|
||||
int ret;
|
||||
struct sk_buff *skb;
|
||||
|
||||
@ -119,7 +116,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
|
||||
FW_RI_RES_WR_NRES_V(1) |
|
||||
FW_WR_COMPL_F);
|
||||
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
|
||||
res_wr->cookie = (uintptr_t)&wr_wait;
|
||||
res_wr->cookie = (uintptr_t)wr_waitp;
|
||||
res = res_wr->res;
|
||||
res->u.cq.restype = FW_RI_RES_TYPE_CQ;
|
||||
res->u.cq.op = FW_RI_RES_OP_WRITE;
|
||||
@ -139,13 +136,8 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
|
||||
res->u.cq.iqsize = cpu_to_be16(cq->size);
|
||||
res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);
|
||||
|
||||
c4iw_init_wr_wait(&wr_wait);
|
||||
|
||||
ret = c4iw_ofld_send(rdev, skb);
|
||||
if (ret)
|
||||
goto err4;
|
||||
pr_debug("%s wait_event wr_wait %p\n", __func__, &wr_wait);
|
||||
ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
|
||||
c4iw_init_wr_wait(wr_waitp);
|
||||
ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
|
||||
if (ret)
|
||||
goto err4;
|
||||
|
||||
@ -178,7 +170,7 @@ static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
|
||||
{
|
||||
struct t4_cqe cqe;
|
||||
|
||||
pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
|
||||
pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
|
||||
wq, cq, cq->sw_cidx, cq->sw_pidx);
|
||||
memset(&cqe, 0, sizeof(cqe));
|
||||
cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
|
||||
@ -196,8 +188,7 @@ int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
|
||||
int flushed = 0;
|
||||
int in_use = wq->rq.in_use - count;
|
||||
|
||||
BUG_ON(in_use < 0);
|
||||
pr_debug("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
|
||||
pr_debug("wq %p cq %p rq.in_use %u skip count %u\n",
|
||||
wq, cq, wq->rq.in_use, count);
|
||||
while (in_use--) {
|
||||
insert_recv_cqe(wq, cq);
|
||||
@ -211,7 +202,7 @@ static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
|
||||
{
|
||||
struct t4_cqe cqe;
|
||||
|
||||
pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
|
||||
pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
|
||||
wq, cq, cq->sw_cidx, cq->sw_pidx);
|
||||
memset(&cqe, 0, sizeof(cqe));
|
||||
cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
|
||||
@ -239,14 +230,11 @@ int c4iw_flush_sq(struct c4iw_qp *qhp)
|
||||
if (wq->sq.flush_cidx == -1)
|
||||
wq->sq.flush_cidx = wq->sq.cidx;
|
||||
idx = wq->sq.flush_cidx;
|
||||
BUG_ON(idx >= wq->sq.size);
|
||||
while (idx != wq->sq.pidx) {
|
||||
swsqe = &wq->sq.sw_sq[idx];
|
||||
BUG_ON(swsqe->flushed);
|
||||
swsqe->flushed = 1;
|
||||
insert_sq_cqe(wq, cq, swsqe);
|
||||
if (wq->sq.oldest_read == swsqe) {
|
||||
BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
|
||||
advance_oldest_read(wq);
|
||||
}
|
||||
flushed++;
|
||||
@ -267,7 +255,6 @@ static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
|
||||
if (wq->sq.flush_cidx == -1)
|
||||
wq->sq.flush_cidx = wq->sq.cidx;
|
||||
cidx = wq->sq.flush_cidx;
|
||||
BUG_ON(cidx > wq->sq.size);
|
||||
|
||||
while (cidx != wq->sq.pidx) {
|
||||
swsqe = &wq->sq.sw_sq[cidx];
|
||||
@ -276,13 +263,11 @@ static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
|
||||
cidx = 0;
|
||||
} else if (swsqe->complete) {
|
||||
|
||||
BUG_ON(swsqe->flushed);
|
||||
|
||||
/*
|
||||
* Insert this completed cqe into the swcq.
|
||||
*/
|
||||
pr_debug("%s moving cqe into swcq sq idx %u cq idx %u\n",
|
||||
__func__, cidx, cq->sw_pidx);
|
||||
pr_debug("moving cqe into swcq sq idx %u cq idx %u\n",
|
||||
cidx, cq->sw_pidx);
|
||||
swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
|
||||
cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
|
||||
t4_swcq_produce(cq);
|
||||
@ -337,7 +322,7 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
|
||||
struct t4_swsqe *swsqe;
|
||||
int ret;
|
||||
|
||||
pr_debug("%s cqid 0x%x\n", __func__, chp->cq.cqid);
|
||||
pr_debug("cqid 0x%x\n", chp->cq.cqid);
|
||||
ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
|
||||
|
||||
/*
|
||||
@ -430,7 +415,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
|
||||
u32 ptr;
|
||||
|
||||
*count = 0;
|
||||
pr_debug("%s count zero %d\n", __func__, *count);
|
||||
pr_debug("count zero %d\n", *count);
|
||||
ptr = cq->sw_cidx;
|
||||
while (ptr != cq->sw_pidx) {
|
||||
cqe = &cq->sw_queue[ptr];
|
||||
@ -440,7 +425,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
|
||||
if (++ptr == cq->size)
|
||||
ptr = 0;
|
||||
}
|
||||
pr_debug("%s cq %p count %d\n", __func__, cq, *count);
|
||||
pr_debug("cq %p count %d\n", cq, *count);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -471,8 +456,8 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
pr_debug("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
|
||||
__func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
|
||||
pr_debug("CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
|
||||
CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
|
||||
CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
|
||||
CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
|
||||
CQE_WRID_LOW(hw_cqe));
|
||||
@ -603,8 +588,8 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
|
||||
if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
|
||||
struct t4_swsqe *swsqe;
|
||||
|
||||
pr_debug("%s out of order completion going in sw_sq at idx %u\n",
|
||||
__func__, CQE_WRID_SQ_IDX(hw_cqe));
|
||||
pr_debug("out of order completion going in sw_sq at idx %u\n",
|
||||
CQE_WRID_SQ_IDX(hw_cqe));
|
||||
swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
|
||||
swsqe->cqe = *hw_cqe;
|
||||
swsqe->complete = 1;
|
||||
@ -621,7 +606,6 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
|
||||
*/
|
||||
if (SQ_TYPE(hw_cqe)) {
|
||||
int idx = CQE_WRID_SQ_IDX(hw_cqe);
|
||||
BUG_ON(idx >= wq->sq.size);
|
||||
|
||||
/*
|
||||
* Account for any unsignaled completions completed by
|
||||
@ -635,18 +619,16 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
|
||||
wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
|
||||
else
|
||||
wq->sq.in_use -= idx - wq->sq.cidx;
|
||||
BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);
|
||||
|
||||
wq->sq.cidx = (uint16_t)idx;
|
||||
pr_debug("%s completing sq idx %u\n", __func__, wq->sq.cidx);
|
||||
pr_debug("completing sq idx %u\n", wq->sq.cidx);
|
||||
*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
|
||||
if (c4iw_wr_log)
|
||||
c4iw_log_wr_stats(wq, hw_cqe);
|
||||
t4_sq_consume(wq);
|
||||
} else {
|
||||
pr_debug("%s completing rq idx %u\n", __func__, wq->rq.cidx);
|
||||
pr_debug("completing rq idx %u\n", wq->rq.cidx);
|
||||
*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
|
||||
BUG_ON(t4_rq_empty(wq));
|
||||
if (c4iw_wr_log)
|
||||
c4iw_log_wr_stats(wq, hw_cqe);
|
||||
t4_rq_consume(wq);
|
||||
@ -661,12 +643,12 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
|
||||
|
||||
skip_cqe:
|
||||
if (SW_CQE(hw_cqe)) {
|
||||
pr_debug("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
|
||||
__func__, cq, cq->cqid, cq->sw_cidx);
|
||||
pr_debug("cq %p cqid 0x%x skip sw cqe cidx %u\n",
|
||||
cq, cq->cqid, cq->sw_cidx);
|
||||
t4_swcq_consume(cq);
|
||||
} else {
|
||||
pr_debug("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
|
||||
__func__, cq, cq->cqid, cq->cidx);
|
||||
pr_debug("cq %p cqid 0x%x skip hw cqe cidx %u\n",
|
||||
cq, cq->cqid, cq->cidx);
|
||||
t4_hwcq_consume(cq);
|
||||
}
|
||||
return ret;
|
||||
@ -712,8 +694,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
|
||||
wc->vendor_err = CQE_STATUS(&cqe);
|
||||
wc->wc_flags = 0;
|
||||
|
||||
pr_debug("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
|
||||
__func__, CQE_QPID(&cqe),
|
||||
pr_debug("qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
|
||||
CQE_QPID(&cqe),
|
||||
CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
|
||||
CQE_STATUS(&cqe), CQE_LEN(&cqe),
|
||||
CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
|
||||
@ -857,7 +839,7 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
|
||||
struct c4iw_cq *chp;
|
||||
struct c4iw_ucontext *ucontext;
|
||||
|
||||
pr_debug("%s ib_cq %p\n", __func__, ib_cq);
|
||||
pr_debug("ib_cq %p\n", ib_cq);
|
||||
chp = to_c4iw_cq(ib_cq);
|
||||
|
||||
remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
|
||||
@ -868,8 +850,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
|
||||
: NULL;
|
||||
destroy_cq(&chp->rhp->rdev, &chp->cq,
|
||||
ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx,
|
||||
chp->destroy_skb);
|
||||
chp->destroy_skb = NULL;
|
||||
chp->destroy_skb, chp->wr_waitp);
|
||||
c4iw_put_wr_wait(chp->wr_waitp);
|
||||
kfree(chp);
|
||||
return 0;
|
||||
}
|
||||
@ -889,7 +871,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
|
||||
size_t memsize, hwentries;
|
||||
struct c4iw_mm_entry *mm, *mm2;
|
||||
|
||||
pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
|
||||
pr_debug("ib_dev %p entries %d\n", ibdev, entries);
|
||||
if (attr->flags)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
@ -901,12 +883,18 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
|
||||
chp = kzalloc(sizeof(*chp), GFP_KERNEL);
|
||||
if (!chp)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
chp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
|
||||
if (!chp->wr_waitp) {
|
||||
ret = -ENOMEM;
|
||||
goto err_free_chp;
|
||||
}
|
||||
c4iw_init_wr_wait(chp->wr_waitp);
|
||||
|
||||
wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
|
||||
chp->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
|
||||
if (!chp->destroy_skb) {
|
||||
ret = -ENOMEM;
|
||||
goto err1;
|
||||
goto err_free_wr_wait;
|
||||
}
|
||||
|
||||
if (ib_context)
|
||||
@ -947,9 +935,10 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
|
||||
chp->cq.vector = vector;
|
||||
|
||||
ret = create_cq(&rhp->rdev, &chp->cq,
|
||||
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
|
||||
ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
|
||||
chp->wr_waitp);
|
||||
if (ret)
|
||||
goto err2;
|
||||
goto err_free_skb;
|
||||
|
||||
chp->rhp = rhp;
|
||||
chp->cq.size--; /* status page */
|
||||
@ -960,16 +949,16 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
|
||||
init_waitqueue_head(&chp->wait);
|
||||
ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
|
||||
if (ret)
|
||||
goto err3;
|
||||
goto err_destroy_cq;
|
||||
|
||||
if (ucontext) {
|
||||
ret = -ENOMEM;
|
||||
mm = kmalloc(sizeof *mm, GFP_KERNEL);
|
||||
if (!mm)
|
||||
goto err4;
|
||||
goto err_remove_handle;
|
||||
mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
|
||||
if (!mm2)
|
||||
goto err5;
|
||||
goto err_free_mm;
|
||||
|
||||
uresp.qid_mask = rhp->rdev.cqmask;
|
||||
uresp.cqid = chp->cq.cqid;
|
||||
@ -984,7 +973,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
|
||||
ret = ib_copy_to_udata(udata, &uresp,
|
||||
sizeof(uresp) - sizeof(uresp.reserved));
|
||||
if (ret)
|
||||
goto err6;
|
||||
goto err_free_mm2;
|
||||
|
||||
mm->key = uresp.key;
|
||||
mm->addr = virt_to_phys(chp->cq.queue);
|
||||
@ -996,23 +985,25 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
|
||||
mm2->len = PAGE_SIZE;
|
||||
insert_mmap(ucontext, mm2);
|
||||
}
|
||||
pr_debug("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
|
||||
__func__, chp->cq.cqid, chp, chp->cq.size,
|
||||
pr_debug("cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
|
||||
chp->cq.cqid, chp, chp->cq.size,
|
||||
chp->cq.memsize, (unsigned long long)chp->cq.dma_addr);
|
||||
return &chp->ibcq;
|
||||
err6:
|
||||
err_free_mm2:
|
||||
kfree(mm2);
|
||||
err5:
|
||||
err_free_mm:
|
||||
kfree(mm);
|
||||
err4:
|
||||
err_remove_handle:
|
||||
remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
|
||||
err3:
|
||||
err_destroy_cq:
|
||||
destroy_cq(&chp->rhp->rdev, &chp->cq,
|
||||
ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
|
||||
chp->destroy_skb);
|
||||
err2:
|
||||
chp->destroy_skb, chp->wr_waitp);
|
||||
err_free_skb:
|
||||
kfree_skb(chp->destroy_skb);
|
||||
err1:
|
||||
err_free_wr_wait:
|
||||
c4iw_put_wr_wait(chp->wr_waitp);
|
||||
err_free_chp:
|
||||
kfree(chp);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
@ -64,14 +64,9 @@ module_param(c4iw_wr_log_size_order, int, 0444);
|
||||
MODULE_PARM_DESC(c4iw_wr_log_size_order,
|
||||
"Number of entries (log2) in the work request timing log.");
|
||||
|
||||
struct uld_ctx {
|
||||
struct list_head entry;
|
||||
struct cxgb4_lld_info lldi;
|
||||
struct c4iw_dev *dev;
|
||||
};
|
||||
|
||||
static LIST_HEAD(uld_ctx_list);
|
||||
static DEFINE_MUTEX(dev_mutex);
|
||||
struct workqueue_struct *reg_workq;
|
||||
|
||||
#define DB_FC_RESUME_SIZE 64
|
||||
#define DB_FC_RESUME_DELAY 1
|
||||
@ -811,8 +806,8 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
|
||||
|
||||
rdev->qpmask = rdev->lldi.udb_density - 1;
|
||||
rdev->cqmask = rdev->lldi.ucq_density - 1;
|
||||
pr_debug("%s dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u\n",
|
||||
__func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
|
||||
pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u\n",
|
||||
pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
|
||||
rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
|
||||
rdev->lldi.vr->pbl.start,
|
||||
rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
|
||||
@ -912,7 +907,7 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
|
||||
c4iw_destroy_resource(&rdev->resource);
|
||||
}
|
||||
|
||||
static void c4iw_dealloc(struct uld_ctx *ctx)
|
||||
void c4iw_dealloc(struct uld_ctx *ctx)
|
||||
{
|
||||
c4iw_rdev_close(&ctx->dev->rdev);
|
||||
WARN_ON_ONCE(!idr_is_empty(&ctx->dev->cqidr));
|
||||
@ -935,7 +930,7 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
|
||||
|
||||
static void c4iw_remove(struct uld_ctx *ctx)
|
||||
{
|
||||
pr_debug("%s c4iw_dev %p\n", __func__, ctx->dev);
|
||||
pr_debug("c4iw_dev %p\n", ctx->dev);
|
||||
c4iw_unregister_device(ctx->dev);
|
||||
c4iw_dealloc(ctx);
|
||||
}
|
||||
@ -969,8 +964,8 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
|
||||
devp->rdev.lldi = *infop;
|
||||
|
||||
/* init various hw-queue params based on lld info */
|
||||
pr_debug("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
|
||||
__func__, devp->rdev.lldi.sge_ingpadboundary,
|
||||
pr_debug("Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
|
||||
devp->rdev.lldi.sge_ingpadboundary,
|
||||
devp->rdev.lldi.sge_egrstatuspagesize);
|
||||
|
||||
devp->rdev.hw_queue.t4_eq_status_entries =
|
||||
@ -1069,8 +1064,8 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
|
||||
}
|
||||
ctx->lldi = *infop;
|
||||
|
||||
pr_debug("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
|
||||
__func__, pci_name(ctx->lldi.pdev),
|
||||
pr_debug("found device %s nchan %u nrxq %u ntxq %u nports %u\n",
|
||||
pci_name(ctx->lldi.pdev),
|
||||
ctx->lldi.nchan, ctx->lldi.nrxq,
|
||||
ctx->lldi.ntxq, ctx->lldi.nports);
|
||||
|
||||
@ -1102,8 +1097,8 @@ static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
|
||||
if (unlikely(!skb))
|
||||
return NULL;
|
||||
|
||||
__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
|
||||
sizeof(struct rss_header) - pktshift);
|
||||
__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +
|
||||
sizeof(struct rss_header) - pktshift);
|
||||
|
||||
/*
|
||||
* This skb will contain:
|
||||
@ -1203,13 +1198,11 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
|
||||
{
|
||||
struct uld_ctx *ctx = handle;
|
||||
|
||||
pr_debug("%s new_state %u\n", __func__, new_state);
|
||||
pr_debug("new_state %u\n", new_state);
|
||||
switch (new_state) {
|
||||
case CXGB4_STATE_UP:
|
||||
pr_info("%s: Up\n", pci_name(ctx->lldi.pdev));
|
||||
if (!ctx->dev) {
|
||||
int ret;
|
||||
|
||||
ctx->dev = c4iw_alloc(&ctx->lldi);
|
||||
if (IS_ERR(ctx->dev)) {
|
||||
pr_err("%s: initialization failed: %ld\n",
|
||||
@ -1218,12 +1211,9 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
|
||||
ctx->dev = NULL;
|
||||
break;
|
||||
}
|
||||
ret = c4iw_register_device(ctx->dev);
|
||||
if (ret) {
|
||||
pr_err("%s: RDMA registration failed: %d\n",
|
||||
pci_name(ctx->lldi.pdev), ret);
|
||||
c4iw_dealloc(ctx);
|
||||
}
|
||||
|
||||
INIT_WORK(&ctx->reg_work, c4iw_register_device);
|
||||
queue_work(reg_workq, &ctx->reg_work);
|
||||
}
|
||||
break;
|
||||
case CXGB4_STATE_DOWN:
|
||||
@ -1518,6 +1508,27 @@ static struct cxgb4_uld_info c4iw_uld_info = {
|
||||
.control = c4iw_uld_control,
|
||||
};
|
||||
|
||||
void _c4iw_free_wr_wait(struct kref *kref)
|
||||
{
|
||||
struct c4iw_wr_wait *wr_waitp;
|
||||
|
||||
wr_waitp = container_of(kref, struct c4iw_wr_wait, kref);
|
||||
pr_debug("Free wr_wait %p\n", wr_waitp);
|
||||
kfree(wr_waitp);
|
||||
}
|
||||
|
||||
struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp)
|
||||
{
|
||||
struct c4iw_wr_wait *wr_waitp;
|
||||
|
||||
wr_waitp = kzalloc(sizeof(*wr_waitp), gfp);
|
||||
if (wr_waitp) {
|
||||
kref_init(&wr_waitp->kref);
|
||||
pr_debug("wr_wait %p\n", wr_waitp);
|
||||
}
|
||||
return wr_waitp;
|
||||
}
|
||||
|
||||
static int __init c4iw_init_module(void)
|
||||
{
|
||||
int err;
|
||||
@ -1530,6 +1541,12 @@ static int __init c4iw_init_module(void)
|
||||
if (!c4iw_debugfs_root)
|
||||
pr_warn("could not create debugfs entry, continuing\n");
|
||||
|
||||
reg_workq = create_singlethread_workqueue("Register_iWARP_device");
|
||||
if (!reg_workq) {
|
||||
pr_err("Failed creating workqueue to register iwarp device\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
|
||||
|
||||
return 0;
|
||||
@ -1546,6 +1563,8 @@ static void __exit c4iw_exit_module(void)
|
||||
kfree(ctx);
|
||||
}
|
||||
mutex_unlock(&dev_mutex);
|
||||
flush_workqueue(reg_workq);
|
||||
destroy_workqueue(reg_workq);
|
||||
cxgb4_unregister_uld(CXGB4_ULD_RDMA);
|
||||
c4iw_cm_term();
|
||||
debugfs_remove_recursive(c4iw_debugfs_root);
|
||||
|
@ -109,9 +109,11 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
|
||||
if (qhp->ibqp.event_handler)
|
||||
(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
|
||||
|
||||
spin_lock_irqsave(&chp->comp_handler_lock, flag);
|
||||
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
|
||||
spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
|
||||
if (t4_clear_cq_armed(&chp->cq)) {
|
||||
spin_lock_irqsave(&chp->comp_handler_lock, flag);
|
||||
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
|
||||
spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
|
||||
}
|
||||
}
|
||||
|
||||
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
|
||||
@ -234,7 +236,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
|
||||
if (atomic_dec_and_test(&chp->refcnt))
|
||||
wake_up(&chp->wait);
|
||||
} else {
|
||||
pr_debug("%s unknown cqid 0x%x\n", __func__, qid);
|
||||
pr_warn("%s unknown cqid 0x%x\n", __func__, qid);
|
||||
spin_unlock_irqrestore(&dev->lock, flag);
|
||||
}
|
||||
return 0;
|
||||
|
@ -73,7 +73,6 @@ void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj)
|
||||
unsigned long flags;
|
||||
|
||||
obj -= alloc->start;
|
||||
BUG_ON((int)obj < 0);
|
||||
|
||||
spin_lock_irqsave(&alloc->lock, flags);
|
||||
clear_bit(obj, alloc->table);
|
||||
|
@ -202,18 +202,50 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
|
||||
struct c4iw_wr_wait {
|
||||
struct completion completion;
|
||||
int ret;
|
||||
struct kref kref;
|
||||
};
|
||||
|
||||
void _c4iw_free_wr_wait(struct kref *kref);
|
||||
|
||||
static inline void c4iw_put_wr_wait(struct c4iw_wr_wait *wr_waitp)
|
||||
{
|
||||
pr_debug("wr_wait %p ref before put %u\n", wr_waitp,
|
||||
kref_read(&wr_waitp->kref));
|
||||
WARN_ON(kref_read(&wr_waitp->kref) == 0);
|
||||
kref_put(&wr_waitp->kref, _c4iw_free_wr_wait);
|
||||
}
|
||||
|
||||
static inline void c4iw_get_wr_wait(struct c4iw_wr_wait *wr_waitp)
|
||||
{
|
||||
pr_debug("wr_wait %p ref before get %u\n", wr_waitp,
|
||||
kref_read(&wr_waitp->kref));
|
||||
WARN_ON(kref_read(&wr_waitp->kref) == 0);
|
||||
kref_get(&wr_waitp->kref);
|
||||
}
|
||||
|
||||
static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
|
||||
{
|
||||
wr_waitp->ret = 0;
|
||||
init_completion(&wr_waitp->completion);
|
||||
}
|
||||
|
||||
static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
|
||||
static inline void _c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret,
|
||||
bool deref)
|
||||
{
|
||||
wr_waitp->ret = ret;
|
||||
complete(&wr_waitp->completion);
|
||||
if (deref)
|
||||
c4iw_put_wr_wait(wr_waitp);
|
||||
}
|
||||
|
||||
static inline void c4iw_wake_up_noref(struct c4iw_wr_wait *wr_waitp, int ret)
|
||||
{
|
||||
_c4iw_wake_up(wr_waitp, ret, false);
|
||||
}
|
||||
|
||||
static inline void c4iw_wake_up_deref(struct c4iw_wr_wait *wr_waitp, int ret)
|
||||
{
|
||||
_c4iw_wake_up(wr_waitp, ret, true);
|
||||
}
|
||||
|
||||
static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
|
||||
@ -230,18 +262,40 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
|
||||
|
||||
ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
|
||||
if (!ret) {
|
||||
pr_debug("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
|
||||
func, pci_name(rdev->lldi.pdev), hwtid, qpid);
|
||||
pr_err("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
|
||||
func, pci_name(rdev->lldi.pdev), hwtid, qpid);
|
||||
rdev->flags |= T4_FATAL_ERROR;
|
||||
wr_waitp->ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
out:
|
||||
if (wr_waitp->ret)
|
||||
pr_debug("%s: FW reply %d tid %u qpid %u\n",
|
||||
pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
|
||||
out:
|
||||
return wr_waitp->ret;
|
||||
}
|
||||
|
||||
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
|
||||
|
||||
static inline int c4iw_ref_send_wait(struct c4iw_rdev *rdev,
|
||||
struct sk_buff *skb,
|
||||
struct c4iw_wr_wait *wr_waitp,
|
||||
u32 hwtid, u32 qpid,
|
||||
const char *func)
|
||||
{
|
||||
int ret;
|
||||
|
||||
pr_debug("%s wr_wait %p hwtid %u qpid %u\n", func, wr_waitp, hwtid,
|
||||
qpid);
|
||||
c4iw_get_wr_wait(wr_waitp);
|
||||
ret = c4iw_ofld_send(rdev, skb);
|
||||
if (ret) {
|
||||
c4iw_put_wr_wait(wr_waitp);
|
||||
return ret;
|
||||
}
|
||||
return c4iw_wait_for_reply(rdev, wr_waitp, hwtid, qpid, func);
|
||||
}
|
||||
|
||||
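The new c4iw_wr_wait helpers above follow a general kref-managed wait-object pattern; the following is a hedged, generic sketch (names are made up) of how the issuer and the completion path share ownership so that a timed-out issuer cannot race a late reply into a use-after-free:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/slab.h>

struct example_wait {
	struct completion done;
	struct kref kref;
	int ret;
};

static void example_wait_release(struct kref *kref)
{
	kfree(container_of(kref, struct example_wait, kref));
}

static struct example_wait *example_wait_alloc(gfp_t gfp)
{
	struct example_wait *w = kzalloc(sizeof(*w), gfp);

	if (w) {
		kref_init(&w->kref);	/* issuer's reference */
		init_completion(&w->done);
	}
	return w;
}

/* The issuer takes an extra reference before handing the object to the
 * hardware path; whoever finishes last drops the final reference. */
static void example_complete(struct example_wait *w, int ret)
{
	w->ret = ret;
	complete(&w->done);
	kref_put(&w->kref, example_wait_release);
}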
enum db_state {
|
||||
NORMAL = 0,
|
||||
FLOW_CONTROL = 1,
|
||||
@ -268,6 +322,13 @@ struct c4iw_dev {
|
||||
wait_queue_head_t wait;
|
||||
};
|
||||
|
||||
struct uld_ctx {
|
||||
struct list_head entry;
|
||||
struct cxgb4_lld_info lldi;
|
||||
struct c4iw_dev *dev;
|
||||
struct work_struct reg_work;
|
||||
};
|
||||
|
||||
static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
|
||||
{
|
||||
return container_of(ibdev, struct c4iw_dev, ibdev);
|
||||
@ -310,7 +371,6 @@ static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
|
||||
idr_preload_end();
|
||||
}
|
||||
|
||||
BUG_ON(ret == -ENOSPC);
|
||||
return ret < 0 ? ret : 0;
|
||||
}
|
||||
|
||||
@ -394,6 +454,7 @@ struct c4iw_mr {
|
||||
dma_addr_t mpl_addr;
|
||||
u32 max_mpl_len;
|
||||
u32 mpl_len;
|
||||
struct c4iw_wr_wait *wr_waitp;
|
||||
};
|
||||
|
||||
static inline struct c4iw_mr *to_c4iw_mr(struct ib_mr *ibmr)
|
||||
@ -407,6 +468,7 @@ struct c4iw_mw {
|
||||
struct sk_buff *dereg_skb;
|
||||
u64 kva;
|
||||
struct tpt_attributes attr;
|
||||
struct c4iw_wr_wait *wr_waitp;
|
||||
};
|
||||
|
||||
static inline struct c4iw_mw *to_c4iw_mw(struct ib_mw *ibmw)
|
||||
@ -423,6 +485,7 @@ struct c4iw_cq {
|
||||
spinlock_t comp_handler_lock;
|
||||
atomic_t refcnt;
|
||||
wait_queue_head_t wait;
|
||||
struct c4iw_wr_wait *wr_waitp;
|
||||
};
|
||||
|
||||
static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq)
|
||||
@ -480,10 +543,10 @@ struct c4iw_qp {
|
||||
struct mutex mutex;
|
||||
struct kref kref;
|
||||
wait_queue_head_t wait;
|
||||
struct timer_list timer;
|
||||
int sq_sig_all;
|
||||
struct work_struct free_work;
|
||||
struct c4iw_ucontext *ucontext;
|
||||
struct c4iw_wr_wait *wr_waitp;
|
||||
};
|
||||
|
||||
static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
|
||||
@ -537,8 +600,7 @@ static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
|
||||
if (mm->key == key && mm->len == len) {
|
||||
list_del_init(&mm->entry);
|
||||
spin_unlock(&ucontext->mmap_lock);
|
||||
pr_debug("%s key 0x%x addr 0x%llx len %d\n",
|
||||
__func__, key,
|
||||
pr_debug("key 0x%x addr 0x%llx len %d\n", key,
|
||||
(unsigned long long)mm->addr, mm->len);
|
||||
return mm;
|
||||
}
|
||||
@ -551,8 +613,8 @@ static inline void insert_mmap(struct c4iw_ucontext *ucontext,
|
||||
struct c4iw_mm_entry *mm)
|
||||
{
|
||||
spin_lock(&ucontext->mmap_lock);
|
||||
pr_debug("%s key 0x%x addr 0x%llx len %d\n",
|
||||
__func__, mm->key, (unsigned long long)mm->addr, mm->len);
|
||||
pr_debug("key 0x%x addr 0x%llx len %d\n",
|
||||
mm->key, (unsigned long long)mm->addr, mm->len);
|
||||
list_add_tail(&mm->entry, &ucontext->mmaps);
|
||||
spin_unlock(&ucontext->mmap_lock);
|
||||
}
|
||||
@ -671,16 +733,14 @@ enum c4iw_mmid_state {
|
||||
#define MPA_V2_IRD_ORD_MASK 0x3FFF
|
||||
|
||||
#define c4iw_put_ep(ep) { \
|
||||
pr_debug("put_ep (via %s:%u) ep %p refcnt %d\n", \
|
||||
__func__, __LINE__, \
|
||||
pr_debug("put_ep ep %p refcnt %d\n", \
|
||||
ep, kref_read(&((ep)->kref))); \
|
||||
WARN_ON(kref_read(&((ep)->kref)) < 1); \
|
||||
kref_put(&((ep)->kref), _c4iw_free_ep); \
|
||||
}
|
||||
|
||||
#define c4iw_get_ep(ep) { \
|
||||
pr_debug("get_ep (via %s:%u) ep %p, refcnt %d\n", \
|
||||
__func__, __LINE__, \
|
||||
pr_debug("get_ep ep %p, refcnt %d\n", \
|
||||
ep, kref_read(&((ep)->kref))); \
|
||||
kref_get(&((ep)->kref)); \
|
||||
}
|
||||
@ -841,7 +901,7 @@ struct c4iw_ep_common {
|
||||
struct mutex mutex;
|
||||
struct sockaddr_storage local_addr;
|
||||
struct sockaddr_storage remote_addr;
|
||||
struct c4iw_wr_wait wr_wait;
|
||||
struct c4iw_wr_wait *wr_waitp;
|
||||
unsigned long flags;
|
||||
unsigned long history;
|
||||
};
|
||||
@ -935,7 +995,7 @@ void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
|
||||
void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev);
|
||||
void c4iw_destroy_resource(struct c4iw_resource *rscp);
|
||||
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
|
||||
int c4iw_register_device(struct c4iw_dev *dev);
|
||||
void c4iw_register_device(struct work_struct *work);
|
||||
void c4iw_unregister_device(struct c4iw_dev *dev);
|
||||
int __init c4iw_cm_init(void);
|
||||
void c4iw_cm_term(void);
|
||||
@ -961,6 +1021,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
|
||||
int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
|
||||
unsigned int *sg_offset);
|
||||
int c4iw_dealloc_mw(struct ib_mw *mw);
|
||||
void c4iw_dealloc(struct uld_ctx *ctx);
|
||||
struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
|
||||
struct ib_udata *udata);
|
||||
struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,
|
||||
@ -990,7 +1051,6 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
|
||||
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
|
||||
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
|
||||
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
|
||||
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
|
||||
void c4iw_flush_hw_cq(struct c4iw_cq *chp);
|
||||
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
|
||||
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
|
||||
@ -1018,5 +1078,6 @@ extern int db_fc_threshold;
|
||||
extern int db_coalescing_threshold;
|
||||
extern int use_dsgl;
|
||||
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
|
||||
struct c4iw_wr_wait *c4iw_alloc_wr_wait(gfp_t gfp);
|
||||
|
||||
#endif
|
||||
|
@ -60,18 +60,18 @@ static int mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
|
||||
|
||||
static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
|
||||
u32 len, dma_addr_t data,
|
||||
int wait, struct sk_buff *skb)
|
||||
struct sk_buff *skb,
|
||||
struct c4iw_wr_wait *wr_waitp)
|
||||
{
|
||||
struct ulp_mem_io *req;
|
||||
struct ulptx_sgl *sgl;
|
||||
u8 wr_len;
|
||||
int ret = 0;
|
||||
struct c4iw_wr_wait wr_wait;
|
||||
|
||||
addr &= 0x7FFFFFF;
|
||||
|
||||
if (wait)
|
||||
c4iw_init_wr_wait(&wr_wait);
|
||||
if (wr_waitp)
|
||||
c4iw_init_wr_wait(wr_waitp);
|
||||
wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);
|
||||
|
||||
if (!skb) {
|
||||
@ -84,8 +84,8 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
|
||||
req = __skb_put_zero(skb, wr_len);
|
||||
INIT_ULPTX_WR(req, wr_len, 0, 0);
|
||||
req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
|
||||
(wait ? FW_WR_COMPL_F : 0));
|
||||
req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
|
||||
(wr_waitp ? FW_WR_COMPL_F : 0));
|
||||
req->wr.wr_lo = wr_waitp ? (__force __be64)(unsigned long)wr_waitp : 0L;
|
||||
req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
|
||||
req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE) |
|
||||
T5_ULP_MEMIO_ORDER_V(1) |
|
||||
@ -100,22 +100,21 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
|
||||
sgl->len0 = cpu_to_be32(len);
|
||||
sgl->addr0 = cpu_to_be64(data);
|
||||
|
||||
ret = c4iw_ofld_send(rdev, skb);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (wait)
|
||||
ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
|
||||
if (wr_waitp)
|
||||
ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
|
||||
else
|
||||
ret = c4iw_ofld_send(rdev, skb);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
|
||||
void *data, struct sk_buff *skb)
|
||||
void *data, struct sk_buff *skb,
|
||||
struct c4iw_wr_wait *wr_waitp)
|
||||
{
|
||||
struct ulp_mem_io *req;
|
||||
struct ulptx_idata *sc;
|
||||
u8 wr_len, *to_dp, *from_dp;
|
||||
int copy_len, num_wqe, i, ret = 0;
|
||||
struct c4iw_wr_wait wr_wait;
|
||||
__be32 cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
|
||||
|
||||
if (is_t4(rdev->lldi.adapter_type))
|
||||
@ -124,9 +123,9 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
|
||||
cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F);
|
||||
|
||||
addr &= 0x7FFFFFF;
|
||||
pr_debug("%s addr 0x%x len %u\n", __func__, addr, len);
|
||||
pr_debug("addr 0x%x len %u\n", addr, len);
|
||||
num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
|
||||
c4iw_init_wr_wait(&wr_wait);
|
||||
c4iw_init_wr_wait(wr_waitp);
|
||||
for (i = 0; i < num_wqe; i++) {
|
||||
|
||||
copy_len = len > C4IW_MAX_INLINE_SIZE ? C4IW_MAX_INLINE_SIZE :
|
||||
@ -147,7 +146,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
|
||||
if (i == (num_wqe-1)) {
|
||||
req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
|
||||
FW_WR_COMPL_F);
|
||||
req->wr.wr_lo = (__force __be64)(unsigned long)&wr_wait;
|
||||
req->wr.wr_lo = (__force __be64)(unsigned long)wr_waitp;
|
||||
} else
|
||||
req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
|
||||
req->wr.wr_mid = cpu_to_be32(
|
||||
@ -173,19 +172,23 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
|
||||
if (copy_len % T4_ULPTX_MIN_IO)
|
||||
memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
|
||||
(copy_len % T4_ULPTX_MIN_IO));
|
||||
ret = c4iw_ofld_send(rdev, skb);
|
||||
skb = NULL;
|
||||
if (i == (num_wqe-1))
|
||||
ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0,
|
||||
__func__);
|
||||
else
|
||||
ret = c4iw_ofld_send(rdev, skb);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
skb = NULL;
|
||||
len -= C4IW_MAX_INLINE_SIZE;
|
||||
}
|
||||
|
||||
ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
|
||||
void *data, struct sk_buff *skb)
|
||||
void *data, struct sk_buff *skb,
|
||||
struct c4iw_wr_wait *wr_waitp)
|
||||
{
|
||||
u32 remain = len;
|
||||
u32 dmalen;
|
||||
@ -208,7 +211,7 @@ static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
|
||||
dmalen = T4_ULPTX_MAX_DMA;
|
||||
remain -= dmalen;
|
||||
ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
|
||||
!remain, skb);
|
||||
skb, remain ? NULL : wr_waitp);
|
||||
if (ret)
|
||||
goto out;
|
||||
addr += dmalen >> 5;
|
||||
@ -216,7 +219,8 @@ static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
|
||||
daddr += dmalen;
|
||||
}
|
||||
if (remain)
|
||||
ret = _c4iw_write_mem_inline(rdev, addr, remain, data, skb);
|
||||
ret = _c4iw_write_mem_inline(rdev, addr, remain, data, skb,
|
||||
wr_waitp);
|
||||
out:
|
||||
dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
|
||||
return ret;
|
||||
@ -227,23 +231,33 @@ static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len,
|
||||
* If data is NULL, clear len byte of memory to zero.
|
||||
*/
|
||||
static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
|
||||
void *data, struct sk_buff *skb)
|
||||
void *data, struct sk_buff *skb,
|
||||
struct c4iw_wr_wait *wr_waitp)
|
||||
{
|
||||
if (rdev->lldi.ulptx_memwrite_dsgl && use_dsgl) {
|
||||
if (len > inline_threshold) {
|
||||
if (_c4iw_write_mem_dma(rdev, addr, len, data, skb)) {
|
||||
pr_warn_ratelimited("%s: dma map failure (non fatal)\n",
|
||||
pci_name(rdev->lldi.pdev));
|
||||
return _c4iw_write_mem_inline(rdev, addr, len,
|
||||
data, skb);
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
} else
|
||||
return _c4iw_write_mem_inline(rdev, addr,
|
||||
len, data, skb);
|
||||
} else
|
||||
return _c4iw_write_mem_inline(rdev, addr, len, data, skb);
|
||||
int ret;
|
||||
|
||||
if (!rdev->lldi.ulptx_memwrite_dsgl || !use_dsgl) {
|
||||
ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
|
||||
wr_waitp);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (len <= inline_threshold) {
|
||||
ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
|
||||
wr_waitp);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = _c4iw_write_mem_dma(rdev, addr, len, data, skb, wr_waitp);
|
||||
if (ret) {
|
||||
pr_warn_ratelimited("%s: dma map failure (non fatal)\n",
|
||||
pci_name(rdev->lldi.pdev));
|
||||
ret = _c4iw_write_mem_inline(rdev, addr, len, data, skb,
|
||||
wr_waitp);
|
||||
}
|
||||
out:
|
||||
return ret;
|
||||
|
||||
}
|
||||
|
||||
/*
|
||||
@ -257,7 +271,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
|
||||
enum fw_ri_stag_type type, enum fw_ri_mem_perms perm,
|
||||
int bind_enabled, u32 zbva, u64 to,
|
||||
u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr,
|
||||
struct sk_buff *skb)
|
||||
struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp)
|
||||
{
|
||||
int err;
|
||||
struct fw_ri_tpte tpt;
|
||||
@ -285,8 +299,8 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
|
||||
mutex_unlock(&rdev->stats.lock);
|
||||
*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
|
||||
}
|
||||
pr_debug("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
|
||||
__func__, stag_state, type, pdid, stag_idx);
|
||||
pr_debug("stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
|
||||
stag_state, type, pdid, stag_idx);
|
||||
|
||||
/* write TPT entry */
|
||||
if (reset_tpt_entry)
|
||||
@ -311,7 +325,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
|
||||
}
|
||||
err = write_adapter_mem(rdev, stag_idx +
|
||||
(rdev->lldi.vr->stag.start >> 5),
|
||||
sizeof(tpt), &tpt, skb);
|
||||
sizeof(tpt), &tpt, skb, wr_waitp);
|
||||
|
||||
if (reset_tpt_entry) {
|
||||
c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
|
||||
@ -323,45 +337,50 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
|
||||
}
|
||||
|
||||
static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
|
||||
u32 pbl_addr, u32 pbl_size)
|
||||
u32 pbl_addr, u32 pbl_size, struct c4iw_wr_wait *wr_waitp)
|
||||
{
|
||||
int err;
|
||||
|
||||
pr_debug("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
|
||||
__func__, pbl_addr, rdev->lldi.vr->pbl.start,
|
||||
pr_debug("*pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
|
||||
pbl_addr, rdev->lldi.vr->pbl.start,
|
||||
pbl_size);
|
||||
|
||||
err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL);
|
||||
err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL,
|
||||
wr_waitp);
|
||||
return err;
|
||||
}
|
||||
|
||||
static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size,
|
||||
u32 pbl_addr, struct sk_buff *skb)
|
||||
u32 pbl_addr, struct sk_buff *skb,
|
||||
struct c4iw_wr_wait *wr_waitp)
|
||||
{
|
||||
return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0,
|
||||
pbl_size, pbl_addr, skb);
|
||||
pbl_size, pbl_addr, skb, wr_waitp);
|
||||
}
|
||||
|
||||
static int allocate_window(struct c4iw_rdev *rdev, u32 * stag, u32 pdid)
|
||||
static int allocate_window(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
|
||||
struct c4iw_wr_wait *wr_waitp)
|
||||
{
|
||||
*stag = T4_STAG_UNSET;
|
||||
return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0,
|
||||
0UL, 0, 0, 0, 0, NULL);
|
||||
0UL, 0, 0, 0, 0, NULL, wr_waitp);
|
||||
}
|
||||
|
||||
static int deallocate_window(struct c4iw_rdev *rdev, u32 stag,
|
||||
struct sk_buff *skb)
|
||||
struct sk_buff *skb,
|
||||
struct c4iw_wr_wait *wr_waitp)
|
||||
{
|
||||
return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0,
|
||||
0, skb);
|
||||
0, skb, wr_waitp);
|
||||
}
|
||||
|
||||
static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid,
|
||||
u32 pbl_size, u32 pbl_addr)
|
||||
u32 pbl_size, u32 pbl_addr,
|
||||
struct c4iw_wr_wait *wr_waitp)
|
||||
{
|
||||
*stag = T4_STAG_UNSET;
|
||||
return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0,
|
||||
0UL, 0, 0, pbl_size, pbl_addr, NULL);
|
||||
0UL, 0, 0, pbl_size, pbl_addr, NULL, wr_waitp);
|
||||
}
|
||||
|
||||
static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
|
||||
@ -372,7 +391,7 @@ static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
|
||||
mhp->attr.stag = stag;
|
||||
mmid = stag >> 8;
|
||||
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
|
||||
pr_debug("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
|
||||
pr_debug("mmid 0x%x mhp %p\n", mmid, mhp);
|
||||
return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
|
||||
}
|
||||
|
||||
@ -388,14 +407,15 @@ static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php,
|
||||
mhp->attr.mw_bind_enable, mhp->attr.zbva,
|
||||
mhp->attr.va_fbo, mhp->attr.len ?
|
||||
mhp->attr.len : -1, shift - 12,
|
||||
mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL);
|
||||
mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL,
|
||||
mhp->wr_waitp);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = finish_mem_reg(mhp, stag);
|
||||
if (ret) {
|
||||
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
|
||||
mhp->attr.pbl_addr, mhp->dereg_skb);
|
||||
mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
|
||||
mhp->dereg_skb = NULL;
|
||||
}
|
||||
return ret;
|
||||
@ -422,18 +442,24 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
|
||||
int ret;
|
||||
u32 stag = T4_STAG_UNSET;
|
||||
|
||||
pr_debug("%s ib_pd %p\n", __func__, pd);
|
||||
pr_debug("ib_pd %p\n", pd);
|
||||
php = to_c4iw_pd(pd);
|
||||
rhp = php->rhp;
|
||||
|
||||
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
|
||||
if (!mhp)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
|
||||
if (!mhp->wr_waitp) {
|
||||
ret = -ENOMEM;
|
||||
goto err_free_mhp;
|
||||
}
|
||||
c4iw_init_wr_wait(mhp->wr_waitp);
|
||||
|
||||
mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
|
||||
if (!mhp->dereg_skb) {
|
||||
ret = -ENOMEM;
|
||||
goto err0;
|
||||
goto err_free_wr_wait;
|
||||
}
|
||||
|
||||
mhp->rhp = rhp;
|
||||
@ -449,20 +475,22 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
|
||||
ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
|
||||
FW_RI_STAG_NSMR, mhp->attr.perms,
|
||||
mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0,
|
||||
NULL);
|
||||
NULL, mhp->wr_waitp);
|
||||
if (ret)
|
||||
goto err1;
|
||||
goto err_free_skb;
|
||||
|
||||
ret = finish_mem_reg(mhp, stag);
|
||||
if (ret)
|
||||
goto err2;
|
||||
goto err_dereg_mem;
|
||||
return &mhp->ibmr;
|
||||
err2:
|
||||
err_dereg_mem:
|
||||
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
|
||||
mhp->attr.pbl_addr, mhp->dereg_skb);
|
||||
err1:
|
||||
mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
|
||||
err_free_wr_wait:
|
||||
c4iw_put_wr_wait(mhp->wr_waitp);
|
||||
err_free_skb:
|
||||
kfree_skb(mhp->dereg_skb);
|
||||
err0:
|
||||
err_free_mhp:
|
||||
kfree(mhp);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
@ -473,13 +501,13 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
|
||||
__be64 *pages;
|
||||
int shift, n, len;
|
||||
int i, k, entry;
|
||||
int err = 0;
|
||||
int err = -ENOMEM;
|
||||
struct scatterlist *sg;
|
||||
struct c4iw_dev *rhp;
|
||||
struct c4iw_pd *php;
|
||||
struct c4iw_mr *mhp;
|
||||
|
||||
pr_debug("%s ib_pd %p\n", __func__, pd);
|
||||
pr_debug("ib_pd %p\n", pd);
|
||||
|
||||
if (length == ~0ULL)
|
||||
return ERR_PTR(-EINVAL);
|
||||
@ -496,34 +524,31 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
|
||||
mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
|
||||
if (!mhp)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
|
||||
if (!mhp->wr_waitp)
|
||||
goto err_free_mhp;
|
||||
|
||||
mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
|
||||
if (!mhp->dereg_skb) {
|
||||
kfree(mhp);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
if (!mhp->dereg_skb)
|
||||
goto err_free_wr_wait;
|
||||
|
||||
mhp->rhp = rhp;
|
||||
|
||||
mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
|
||||
if (IS_ERR(mhp->umem)) {
|
||||
err = PTR_ERR(mhp->umem);
|
||||
kfree_skb(mhp->dereg_skb);
|
||||
kfree(mhp);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
if (IS_ERR(mhp->umem))
|
||||
goto err_free_skb;
|
||||
|
||||
shift = mhp->umem->page_shift;
|
||||
|
||||
n = mhp->umem->nmap;
|
||||
err = alloc_pbl(mhp, n);
|
||||
if (err)
|
||||
goto err;
|
||||
goto err_umem_release;
|
||||
|
||||
pages = (__be64 *) __get_free_page(GFP_KERNEL);
|
||||
if (!pages) {
|
||||
err = -ENOMEM;
|
||||
goto err_pbl;
|
||||
goto err_pbl_free;
|
||||
}
|
||||
|
||||
i = n = 0;
|
||||
@ -536,7 +561,8 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
|
||||
if (i == PAGE_SIZE / sizeof *pages) {
|
||||
err = write_pbl(&mhp->rhp->rdev,
|
||||
pages,
|
||||
mhp->attr.pbl_addr + (n << 3), i);
|
||||
mhp->attr.pbl_addr + (n << 3), i,
|
||||
mhp->wr_waitp);
|
||||
if (err)
|
||||
goto pbl_done;
|
||||
n += i;
|
||||
@ -547,12 +573,13 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
|
||||
|
||||
if (i)
|
||||
err = write_pbl(&mhp->rhp->rdev, pages,
|
||||
mhp->attr.pbl_addr + (n << 3), i);
|
||||
mhp->attr.pbl_addr + (n << 3), i,
|
||||
mhp->wr_waitp);
|
||||
|
||||
pbl_done:
|
||||
free_page((unsigned long) pages);
|
||||
if (err)
|
||||
goto err_pbl;
|
||||
goto err_pbl_free;
|
||||
|
||||
mhp->attr.pdid = php->pdid;
|
||||
mhp->attr.zbva = 0;
|
||||
@ -563,17 +590,20 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
|
||||
|
||||
err = register_mem(rhp, php, mhp, shift);
|
||||
if (err)
|
||||
goto err_pbl;
|
||||
goto err_pbl_free;
|
||||
|
||||
return &mhp->ibmr;
|
||||
|
||||
err_pbl:
|
||||
err_pbl_free:
|
||||
c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
|
||||
mhp->attr.pbl_size << 3);
|
||||
|
||||
err:
|
||||
err_umem_release:
|
||||
ib_umem_release(mhp->umem);
|
||||
err_free_skb:
|
||||
kfree_skb(mhp->dereg_skb);
|
||||
err_free_wr_wait:
|
||||
c4iw_put_wr_wait(mhp->wr_waitp);
|
||||
err_free_mhp:
|
||||
kfree(mhp);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
@ -597,13 +627,19 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
|
||||
if (!mhp)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
|
||||
if (!mhp->dereg_skb) {
|
||||
mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
|
||||
if (!mhp->wr_waitp) {
|
||||
ret = -ENOMEM;
|
||||
goto free_mhp;
|
||||
}
|
||||
|
||||
ret = allocate_window(&rhp->rdev, &stag, php->pdid);
|
||||
mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
|
||||
if (!mhp->dereg_skb) {
|
||||
ret = -ENOMEM;
|
||||
goto free_wr_wait;
|
||||
}
|
||||
|
||||
ret = allocate_window(&rhp->rdev, &stag, php->pdid, mhp->wr_waitp);
|
||||
if (ret)
|
||||
goto free_skb;
|
||||
mhp->rhp = rhp;
|
||||
@ -616,13 +652,16 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
|
||||
ret = -ENOMEM;
|
||||
goto dealloc_win;
|
||||
}
|
||||
pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
|
||||
pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
|
||||
return &(mhp->ibmw);
|
||||
|
||||
dealloc_win:
|
||||
deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
|
||||
deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
|
||||
mhp->wr_waitp);
|
||||
free_skb:
|
||||
kfree_skb(mhp->dereg_skb);
|
||||
free_wr_wait:
|
||||
c4iw_put_wr_wait(mhp->wr_waitp);
|
||||
free_mhp:
|
||||
kfree(mhp);
|
||||
return ERR_PTR(ret);
|
||||
@ -638,10 +677,12 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
|
||||
rhp = mhp->rhp;
|
||||
mmid = (mw->rkey) >> 8;
|
||||
remove_handle(rhp, &rhp->mmidr, mmid);
|
||||
deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
|
||||
deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb,
|
||||
mhp->wr_waitp);
|
||||
kfree_skb(mhp->dereg_skb);
|
||||
c4iw_put_wr_wait(mhp->wr_waitp);
|
||||
kfree(mhp);
|
||||
pr_debug("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
|
||||
pr_debug("ib_mw %p mmid 0x%x ptr %p\n", mw, mmid, mhp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -671,23 +712,31 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
|
||||
goto err;
|
||||
}
|
||||
|
||||
mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
|
||||
if (!mhp->wr_waitp) {
|
||||
ret = -ENOMEM;
|
||||
goto err_free_mhp;
|
||||
}
|
||||
c4iw_init_wr_wait(mhp->wr_waitp);
|
||||
|
||||
mhp->mpl = dma_alloc_coherent(&rhp->rdev.lldi.pdev->dev,
|
||||
length, &mhp->mpl_addr, GFP_KERNEL);
|
||||
if (!mhp->mpl) {
|
||||
ret = -ENOMEM;
|
||||
goto err_mpl;
|
||||
goto err_free_wr_wait;
|
||||
}
|
||||
mhp->max_mpl_len = length;
|
||||
|
||||
mhp->rhp = rhp;
|
||||
ret = alloc_pbl(mhp, max_num_sg);
|
||||
if (ret)
|
||||
goto err1;
|
||||
goto err_free_dma;
|
||||
mhp->attr.pbl_size = max_num_sg;
|
||||
ret = allocate_stag(&rhp->rdev, &stag, php->pdid,
|
||||
mhp->attr.pbl_size, mhp->attr.pbl_addr);
|
||||
mhp->attr.pbl_size, mhp->attr.pbl_addr,
|
||||
mhp->wr_waitp);
|
||||
if (ret)
|
||||
goto err2;
|
||||
goto err_free_pbl;
|
||||
mhp->attr.pdid = php->pdid;
|
||||
mhp->attr.type = FW_RI_STAG_NSMR;
|
||||
mhp->attr.stag = stag;
|
||||
@ -696,21 +745,23 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
|
||||
mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
|
||||
if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
|
||||
ret = -ENOMEM;
|
||||
goto err3;
|
||||
goto err_dereg;
|
||||
}
|
||||
|
||||
pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
|
||||
pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
|
||||
return &(mhp->ibmr);
|
||||
err3:
|
||||
err_dereg:
|
||||
dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
|
||||
mhp->attr.pbl_addr, mhp->dereg_skb);
|
||||
err2:
|
||||
mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
|
||||
err_free_pbl:
|
||||
c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
|
||||
mhp->attr.pbl_size << 3);
|
||||
err1:
|
||||
err_free_dma:
|
||||
dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
|
||||
mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
|
||||
err_mpl:
|
||||
err_free_wr_wait:
|
||||
c4iw_put_wr_wait(mhp->wr_waitp);
|
||||
err_free_mhp:
|
||||
kfree(mhp);
|
||||
err:
|
||||
return ERR_PTR(ret);
|
||||
@ -744,7 +795,7 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
|
||||
struct c4iw_mr *mhp;
|
||||
u32 mmid;
|
||||
|
||||
pr_debug("%s ib_mr %p\n", __func__, ib_mr);
|
||||
pr_debug("ib_mr %p\n", ib_mr);
|
||||
|
||||
mhp = to_c4iw_mr(ib_mr);
|
||||
rhp = mhp->rhp;
|
||||
@ -754,7 +805,7 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
|
||||
dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
|
||||
mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
|
||||
dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
|
||||
mhp->attr.pbl_addr, mhp->dereg_skb);
|
||||
mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
|
||||
if (mhp->attr.pbl_size)
|
||||
c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
|
||||
mhp->attr.pbl_size << 3);
|
||||
@ -762,7 +813,8 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
|
||||
kfree((void *) (unsigned long) mhp->kva);
|
||||
if (mhp->umem)
|
||||
ib_umem_release(mhp->umem);
|
||||
pr_debug("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
|
||||
pr_debug("mmid 0x%x ptr %p\n", mmid, mhp);
|
||||
c4iw_put_wr_wait(mhp->wr_waitp);
|
||||
kfree(mhp);
|
||||
return 0;
|
||||
}
|
||||
|
@ -102,7 +102,7 @@ void _c4iw_free_ucontext(struct kref *kref)
|
||||
ucontext = container_of(kref, struct c4iw_ucontext, kref);
|
||||
rhp = to_c4iw_dev(ucontext->ibucontext.device);
|
||||
|
||||
pr_debug("%s ucontext %p\n", __func__, ucontext);
|
||||
pr_debug("ucontext %p\n", ucontext);
|
||||
list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
|
||||
kfree(mm);
|
||||
c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
|
||||
@ -113,7 +113,7 @@ static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
|
||||
{
|
||||
struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
|
||||
|
||||
pr_debug("%s context %p\n", __func__, context);
|
||||
pr_debug("context %p\n", context);
|
||||
c4iw_put_ucontext(ucontext);
|
||||
return 0;
|
||||
}
|
||||
@ -127,7 +127,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
|
||||
int ret = 0;
|
||||
struct c4iw_mm_entry *mm = NULL;
|
||||
|
||||
pr_debug("%s ibdev %p\n", __func__, ibdev);
|
||||
pr_debug("ibdev %p\n", ibdev);
|
||||
context = kzalloc(sizeof(*context), GFP_KERNEL);
|
||||
if (!context) {
|
||||
ret = -ENOMEM;
|
||||
@ -185,7 +185,7 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
|
||||
struct c4iw_ucontext *ucontext;
|
||||
u64 addr;
|
||||
|
||||
pr_debug("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
|
||||
pr_debug("pgoff 0x%lx key 0x%x len %d\n", vma->vm_pgoff,
|
||||
key, len);
|
||||
|
||||
if (vma->vm_start & (PAGE_SIZE-1))
|
||||
@ -251,7 +251,7 @@ static int c4iw_deallocate_pd(struct ib_pd *pd)
|
||||
|
||||
php = to_c4iw_pd(pd);
|
||||
rhp = php->rhp;
|
||||
pr_debug("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
|
||||
pr_debug("ibpd %p pdid 0x%x\n", pd, php->pdid);
|
||||
c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
|
||||
mutex_lock(&rhp->rdev.stats.lock);
|
||||
rhp->rdev.stats.pd.cur--;
|
||||
@ -268,7 +268,7 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
|
||||
u32 pdid;
|
||||
struct c4iw_dev *rhp;
|
||||
|
||||
pr_debug("%s ibdev %p\n", __func__, ibdev);
|
||||
pr_debug("ibdev %p\n", ibdev);
|
||||
rhp = (struct c4iw_dev *) ibdev;
|
||||
pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
|
||||
if (!pdid)
|
||||
@ -291,14 +291,14 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
|
||||
if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
|
||||
rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
|
||||
mutex_unlock(&rhp->rdev.stats.lock);
|
||||
pr_debug("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
|
||||
pr_debug("pdid 0x%0x ptr 0x%p\n", pdid, php);
|
||||
return &php->ibpd;
|
||||
}
|
||||
|
||||
static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
|
||||
u16 *pkey)
|
||||
{
|
||||
pr_debug("%s ibdev %p\n", __func__, ibdev);
|
||||
pr_debug("ibdev %p\n", ibdev);
|
||||
*pkey = 0;
|
||||
return 0;
|
||||
}
|
||||
@ -308,10 +308,11 @@ static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
|
||||
{
|
||||
struct c4iw_dev *dev;
|
||||
|
||||
pr_debug("%s ibdev %p, port %d, index %d, gid %p\n",
|
||||
__func__, ibdev, port, index, gid);
|
||||
pr_debug("ibdev %p, port %d, index %d, gid %p\n",
|
||||
ibdev, port, index, gid);
|
||||
if (!port)
|
||||
return -EINVAL;
|
||||
dev = to_c4iw_dev(ibdev);
|
||||
BUG_ON(port == 0);
|
||||
memset(&(gid->raw[0]), 0, sizeof(gid->raw));
|
||||
memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);
|
||||
return 0;
|
||||
@ -323,7 +324,7 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
|
||||
|
||||
struct c4iw_dev *dev;
|
||||
|
||||
pr_debug("%s ibdev %p\n", __func__, ibdev);
|
||||
pr_debug("ibdev %p\n", ibdev);
|
||||
|
||||
if (uhw->inlen || uhw->outlen)
|
||||
return -EINVAL;
|
||||
@ -364,7 +365,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
|
||||
struct net_device *netdev;
|
||||
struct in_device *inetdev;
|
||||
|
||||
pr_debug("%s ibdev %p\n", __func__, ibdev);
|
||||
pr_debug("ibdev %p\n", ibdev);
|
||||
|
||||
dev = to_c4iw_dev(ibdev);
|
||||
netdev = dev->rdev.lldi.ports[port-1];
|
||||
@ -406,7 +407,7 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
|
||||
{
|
||||
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
|
||||
ibdev.dev);
|
||||
pr_debug("%s dev 0x%p\n", __func__, dev);
|
||||
pr_debug("dev 0x%p\n", dev);
|
||||
return sprintf(buf, "%d\n",
|
||||
CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
|
||||
}
|
||||
@ -419,7 +420,7 @@ static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
|
||||
struct ethtool_drvinfo info;
|
||||
struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];
|
||||
|
||||
pr_debug("%s dev 0x%p\n", __func__, dev);
|
||||
pr_debug("dev 0x%p\n", dev);
|
||||
lldev->ethtool_ops->get_drvinfo(lldev, &info);
|
||||
return sprintf(buf, "%s\n", info.driver);
|
||||
}
|
||||
@ -429,7 +430,7 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
|
||||
{
|
||||
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
|
||||
ibdev.dev);
|
||||
pr_debug("%s dev 0x%p\n", __func__, dev);
|
||||
pr_debug("dev 0x%p\n", dev);
|
||||
return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
|
||||
c4iw_dev->rdev.lldi.pdev->device);
|
||||
}
|
||||
@ -521,7 +522,7 @@ static void get_dev_fw_str(struct ib_device *dev, char *str)
|
||||
{
|
||||
struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
|
||||
ibdev);
|
||||
pr_debug("%s dev 0x%p\n", __func__, dev);
|
||||
pr_debug("dev 0x%p\n", dev);
|
||||
|
||||
snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u.%u",
|
||||
FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
|
||||
@ -530,13 +531,14 @@ static void get_dev_fw_str(struct ib_device *dev, char *str)
|
||||
FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
|
||||
}
|
||||
|
||||
int c4iw_register_device(struct c4iw_dev *dev)
|
||||
void c4iw_register_device(struct work_struct *work)
|
||||
{
|
||||
int ret;
|
||||
int i;
|
||||
struct uld_ctx *ctx = container_of(work, struct uld_ctx, reg_work);
|
||||
struct c4iw_dev *dev = ctx->dev;
|
||||
|
||||
pr_debug("%s c4iw_dev %p\n", __func__, dev);
|
||||
BUG_ON(!dev->rdev.lldi.ports[0]);
|
||||
pr_debug("c4iw_dev %p\n", dev);
|
||||
strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
|
||||
memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
|
||||
memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
|
||||
@ -609,8 +611,10 @@ int c4iw_register_device(struct c4iw_dev *dev)
|
||||
dev->ibdev.get_dev_fw_str = get_dev_fw_str;
|
||||
|
||||
dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
|
||||
if (!dev->ibdev.iwcm)
|
||||
return -ENOMEM;
|
||||
if (!dev->ibdev.iwcm) {
|
||||
ret = -ENOMEM;
|
||||
goto err_dealloc_ctx;
|
||||
}
|
||||
|
||||
dev->ibdev.iwcm->connect = c4iw_connect;
|
||||
dev->ibdev.iwcm->accept = c4iw_accept_cr;
|
||||
@ -625,27 +629,31 @@ int c4iw_register_device(struct c4iw_dev *dev)
|
||||
|
||||
ret = ib_register_device(&dev->ibdev, NULL);
|
||||
if (ret)
|
||||
goto bail1;
|
||||
goto err_kfree_iwcm;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i) {
|
||||
ret = device_create_file(&dev->ibdev.dev,
|
||||
c4iw_class_attributes[i]);
|
||||
if (ret)
|
||||
goto bail2;
|
||||
goto err_unregister_device;
|
||||
}
|
||||
return 0;
|
||||
bail2:
|
||||
return;
|
||||
err_unregister_device:
|
||||
ib_unregister_device(&dev->ibdev);
|
||||
bail1:
|
||||
err_kfree_iwcm:
|
||||
kfree(dev->ibdev.iwcm);
|
||||
return ret;
|
||||
err_dealloc_ctx:
|
||||
pr_err("%s - Failed registering iwarp device: %d\n",
|
||||
pci_name(ctx->lldi.pdev), ret);
|
||||
c4iw_dealloc(ctx);
|
||||
return;
|
||||
}
|
||||
|
||||
void c4iw_unregister_device(struct c4iw_dev *dev)
|
||||
{
|
||||
int i;
|
||||
|
||||
pr_debug("%s c4iw_dev %p\n", __func__, dev);
|
||||
pr_debug("c4iw_dev %p\n", dev);
|
||||
for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
|
||||
device_remove_file(&dev->ibdev.dev,
|
||||
c4iw_class_attributes[i]);
|
||||
|
@ -194,13 +194,13 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
|
||||
|
||||
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
|
||||
struct t4_cq *rcq, struct t4_cq *scq,
|
||||
struct c4iw_dev_ucontext *uctx)
|
||||
struct c4iw_dev_ucontext *uctx,
|
||||
struct c4iw_wr_wait *wr_waitp)
|
||||
{
|
||||
int user = (uctx != &rdev->uctx);
|
||||
struct fw_ri_res_wr *res_wr;
|
||||
struct fw_ri_res *res;
|
||||
int wr_len;
|
||||
struct c4iw_wr_wait wr_wait;
|
||||
struct sk_buff *skb;
|
||||
int ret = 0;
|
||||
int eqsize;
|
||||
@ -254,8 +254,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
|
||||
ret = -ENOMEM;
|
||||
goto free_sq;
|
||||
}
|
||||
pr_debug("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
|
||||
__func__, wq->sq.queue,
|
||||
pr_debug("sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
|
||||
wq->sq.queue,
|
||||
(unsigned long long)virt_to_phys(wq->sq.queue),
|
||||
wq->rq.queue,
|
||||
(unsigned long long)virt_to_phys(wq->rq.queue));
|
||||
@ -299,7 +299,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
|
||||
FW_RI_RES_WR_NRES_V(2) |
|
||||
FW_WR_COMPL_F);
|
||||
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
|
||||
res_wr->cookie = (uintptr_t)&wr_wait;
|
||||
res_wr->cookie = (uintptr_t)wr_waitp;
|
||||
res = res_wr->res;
|
||||
res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
|
||||
res->u.sqrq.op = FW_RI_RES_OP_WRITE;
|
||||
@ -352,17 +352,13 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
|
||||
res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
|
||||
res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
|
||||
|
||||
c4iw_init_wr_wait(&wr_wait);
|
||||
|
||||
ret = c4iw_ofld_send(rdev, skb);
|
||||
if (ret)
|
||||
goto free_dma;
|
||||
ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
|
||||
c4iw_init_wr_wait(wr_waitp);
|
||||
ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->sq.qid, __func__);
|
||||
if (ret)
|
||||
goto free_dma;
|
||||
|
||||
pr_debug("%s sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
|
||||
__func__, wq->sq.qid, wq->rq.qid, wq->db,
|
||||
pr_debug("sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
|
||||
wq->sq.qid, wq->rq.qid, wq->db,
|
||||
wq->sq.bar2_va, wq->rq.bar2_va);
|
||||
|
||||
return 0;
|
||||
@ -693,7 +689,6 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
|
||||
if (++p == (__be64 *)&sq->queue[sq->size])
|
||||
p = (__be64 *)sq->queue;
|
||||
}
|
||||
BUG_ON(rem < 0);
|
||||
while (rem) {
|
||||
*p = 0;
|
||||
rem -= sizeof(*p);
|
||||
@ -724,12 +719,13 @@ static void free_qp_work(struct work_struct *work)
|
||||
ucontext = qhp->ucontext;
|
||||
rhp = qhp->rhp;
|
||||
|
||||
pr_debug("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
|
||||
pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
|
||||
destroy_qp(&rhp->rdev, &qhp->wq,
|
||||
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
|
||||
|
||||
if (ucontext)
|
||||
c4iw_put_ucontext(ucontext);
|
||||
c4iw_put_wr_wait(qhp->wr_waitp);
|
||||
kfree(qhp);
|
||||
}
|
||||
|
||||
@ -738,19 +734,19 @@ static void queue_qp_free(struct kref *kref)
|
||||
struct c4iw_qp *qhp;
|
||||
|
||||
qhp = container_of(kref, struct c4iw_qp, kref);
|
||||
pr_debug("%s qhp %p\n", __func__, qhp);
|
||||
pr_debug("qhp %p\n", qhp);
|
||||
queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
|
||||
}
|
||||
|
||||
void c4iw_qp_add_ref(struct ib_qp *qp)
|
||||
{
|
||||
pr_debug("%s ib_qp %p\n", __func__, qp);
|
||||
pr_debug("ib_qp %p\n", qp);
|
||||
kref_get(&to_c4iw_qp(qp)->kref);
|
||||
}
|
||||
|
||||
void c4iw_qp_rem_ref(struct ib_qp *qp)
|
||||
{
|
||||
pr_debug("%s ib_qp %p\n", __func__, qp);
|
||||
pr_debug("ib_qp %p\n", qp);
|
||||
kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
|
||||
}
|
||||
|
||||
@ -817,10 +813,12 @@ static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
|
||||
t4_swcq_produce(cq);
|
||||
spin_unlock_irqrestore(&schp->lock, flag);
|
||||
|
||||
spin_lock_irqsave(&schp->comp_handler_lock, flag);
|
||||
(*schp->ibcq.comp_handler)(&schp->ibcq,
|
||||
schp->ibcq.cq_context);
|
||||
spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
|
||||
if (t4_clear_cq_armed(&schp->cq)) {
|
||||
spin_lock_irqsave(&schp->comp_handler_lock, flag);
|
||||
(*schp->ibcq.comp_handler)(&schp->ibcq,
|
||||
schp->ibcq.cq_context);
|
||||
spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
|
||||
}
|
||||
}
|
||||
|
||||
static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
|
||||
@ -846,10 +844,12 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
|
||||
t4_swcq_produce(cq);
|
||||
spin_unlock_irqrestore(&rchp->lock, flag);
|
||||
|
||||
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
|
||||
(*rchp->ibcq.comp_handler)(&rchp->ibcq,
|
||||
rchp->ibcq.cq_context);
|
||||
spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
|
||||
if (t4_clear_cq_armed(&rchp->cq)) {
|
||||
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
|
||||
(*rchp->ibcq.comp_handler)(&rchp->ibcq,
|
||||
rchp->ibcq.cq_context);
|
||||
spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
|
||||
}
|
||||
}
|
||||
|
||||
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
||||
@ -958,8 +958,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
||||
c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
|
||||
break;
|
||||
default:
|
||||
pr_debug("%s post of type=%d TBD!\n", __func__,
|
||||
wr->opcode);
|
||||
pr_warn("%s post of type=%d TBD!\n", __func__,
|
||||
wr->opcode);
|
||||
err = -EINVAL;
|
||||
}
|
||||
if (err) {
|
||||
@ -980,8 +980,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
|
||||
|
||||
init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
|
||||
|
||||
pr_debug("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
|
||||
__func__,
|
||||
pr_debug("cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
|
||||
(unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
|
||||
swsqe->opcode, swsqe->read_len);
|
||||
wr = wr->next;
|
||||
@ -1057,8 +1056,7 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
|
||||
wqe->recv.r2[1] = 0;
|
||||
wqe->recv.r2[2] = 0;
|
||||
wqe->recv.len16 = len16;
|
||||
pr_debug("%s cookie 0x%llx pidx %u\n",
|
||||
__func__,
|
||||
pr_debug("cookie 0x%llx pidx %u\n",
|
||||
(unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
|
||||
t4_rq_produce(&qhp->wq, len16);
|
||||
idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
|
||||
@ -1218,7 +1216,7 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
|
||||
struct sk_buff *skb;
|
||||
struct terminate_message *term;
|
||||
|
||||
pr_debug("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
|
||||
pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid,
|
||||
qhp->ep->hwtid);
|
||||
|
||||
skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
|
||||
@ -1255,33 +1253,36 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
|
||||
int rq_flushed, sq_flushed;
|
||||
unsigned long flag;
|
||||
|
||||
pr_debug("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
|
||||
pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
|
||||
|
||||
/* locking hierarchy: cq lock first, then qp lock. */
|
||||
/* locking hierarchy: cqs lock first, then qp lock. */
|
||||
spin_lock_irqsave(&rchp->lock, flag);
|
||||
if (schp != rchp)
|
||||
spin_lock(&schp->lock);
|
||||
spin_lock(&qhp->lock);
|
||||
|
||||
if (qhp->wq.flushed) {
|
||||
spin_unlock(&qhp->lock);
|
||||
if (schp != rchp)
|
||||
spin_unlock(&schp->lock);
|
||||
spin_unlock_irqrestore(&rchp->lock, flag);
|
||||
return;
|
||||
}
|
||||
qhp->wq.flushed = 1;
|
||||
t4_set_wq_in_error(&qhp->wq);
|
||||
|
||||
c4iw_flush_hw_cq(rchp);
|
||||
c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
|
||||
rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
|
||||
spin_unlock(&qhp->lock);
|
||||
spin_unlock_irqrestore(&rchp->lock, flag);
|
||||
|
||||
/* locking hierarchy: cq lock first, then qp lock. */
|
||||
spin_lock_irqsave(&schp->lock, flag);
|
||||
spin_lock(&qhp->lock);
|
||||
if (schp != rchp)
|
||||
c4iw_flush_hw_cq(schp);
|
||||
sq_flushed = c4iw_flush_sq(qhp);
|
||||
|
||||
spin_unlock(&qhp->lock);
|
||||
spin_unlock_irqrestore(&schp->lock, flag);
|
||||
if (schp != rchp)
|
||||
spin_unlock(&schp->lock);
|
||||
spin_unlock_irqrestore(&rchp->lock, flag);
|
||||
|
||||
if (schp == rchp) {
|
||||
if (t4_clear_cq_armed(&rchp->cq) &&
|
||||
@ -1315,8 +1316,8 @@ static void flush_qp(struct c4iw_qp *qhp)
|
||||
rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
|
||||
schp = to_c4iw_cq(qhp->ibqp.send_cq);
|
||||
|
||||
t4_set_wq_in_error(&qhp->wq);
|
||||
if (qhp->ibqp.uobject) {
|
||||
t4_set_wq_in_error(&qhp->wq);
|
||||
t4_set_cq_in_error(&rchp->cq);
|
||||
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
|
||||
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
|
||||
@ -1340,8 +1341,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
|
||||
int ret;
|
||||
struct sk_buff *skb;
|
||||
|
||||
pr_debug("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
|
||||
ep->hwtid);
|
||||
pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid);
|
||||
|
||||
skb = skb_dequeue(&ep->com.ep_skb_list);
|
||||
if (WARN_ON(!skb))
|
||||
@ -1357,23 +1357,20 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
|
||||
wqe->flowid_len16 = cpu_to_be32(
|
||||
FW_WR_FLOWID_V(ep->hwtid) |
|
||||
FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
|
||||
wqe->cookie = (uintptr_t)&ep->com.wr_wait;
|
||||
wqe->cookie = (uintptr_t)ep->com.wr_waitp;
|
||||
|
||||
wqe->u.fini.type = FW_RI_TYPE_FINI;
|
||||
ret = c4iw_ofld_send(&rhp->rdev, skb);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
|
||||
qhp->wq.sq.qid, __func__);
|
||||
out:
|
||||
pr_debug("%s ret %d\n", __func__, ret);
|
||||
ret = c4iw_ref_send_wait(&rhp->rdev, skb, ep->com.wr_waitp,
|
||||
qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
|
||||
|
||||
pr_debug("ret %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
|
||||
{
|
||||
pr_debug("%s p2p_type = %d\n", __func__, p2p_type);
|
||||
pr_debug("p2p_type = %d\n", p2p_type);
|
||||
memset(&init->u, 0, sizeof init->u);
|
||||
switch (p2p_type) {
|
||||
case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
|
||||
@ -1402,7 +1399,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
|
||||
int ret;
|
||||
struct sk_buff *skb;
|
||||
|
||||
pr_debug("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp,
|
||||
pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp,
|
||||
qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
|
||||
|
||||
skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
|
||||
@ -1427,7 +1424,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
|
||||
FW_WR_FLOWID_V(qhp->ep->hwtid) |
|
||||
FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
|
||||
|
||||
wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait;
|
||||
wqe->cookie = (uintptr_t)qhp->ep->com.wr_waitp;
|
||||
|
||||
wqe->u.init.type = FW_RI_TYPE_INIT;
|
||||
wqe->u.init.mpareqbit_p2ptype =
|
||||
@ -1464,18 +1461,14 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
|
||||
if (qhp->attr.mpa_attr.initiator)
|
||||
build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
|
||||
|
||||
ret = c4iw_ofld_send(&rhp->rdev, skb);
|
||||
if (ret)
|
||||
goto err1;
|
||||
|
||||
ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
|
||||
qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
|
||||
ret = c4iw_ref_send_wait(&rhp->rdev, skb, qhp->ep->com.wr_waitp,
|
||||
qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
|
||||
if (!ret)
|
||||
goto out;
|
||||
err1:
|
||||
|
||||
free_ird(rhp, qhp->attr.max_ird);
|
||||
out:
|
||||
pr_debug("%s ret %d\n", __func__, ret);
|
||||
pr_debug("ret %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1492,8 +1485,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
|
||||
int free = 0;
|
||||
struct c4iw_ep *ep = NULL;
|
||||
|
||||
pr_debug("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
|
||||
__func__,
|
||||
pr_debug("qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
|
||||
qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
|
||||
(mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
|
||||
|
||||
@ -1582,7 +1574,6 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
|
||||
case C4IW_QP_STATE_RTS:
|
||||
switch (attrs->next_state) {
|
||||
case C4IW_QP_STATE_CLOSING:
|
||||
BUG_ON(kref_read(&qhp->ep->com.kref) < 2);
|
||||
t4_set_wq_in_error(&qhp->wq);
|
||||
set_state(qhp, C4IW_QP_STATE_CLOSING);
|
||||
ep = qhp->ep;
|
||||
@ -1680,7 +1671,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
|
||||
}
|
||||
goto out;
|
||||
err:
|
||||
pr_debug("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
|
||||
pr_debug("disassociating ep %p qpid 0x%x\n", qhp->ep,
|
||||
qhp->wq.sq.qid);
|
||||
|
||||
/* disassociate the LLP connection */
|
||||
@ -1691,7 +1682,6 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
|
||||
set_state(qhp, C4IW_QP_STATE_ERROR);
|
||||
free = 1;
|
||||
abort = 1;
|
||||
BUG_ON(!ep);
|
||||
flush_qp(qhp);
|
||||
wake_up(&qhp->wait);
|
||||
out:
|
||||
@ -1717,7 +1707,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
|
||||
*/
|
||||
if (free)
|
||||
c4iw_put_ep(&ep->com);
|
||||
pr_debug("%s exit state %d\n", __func__, qhp->attr.state);
|
||||
pr_debug("exit state %d\n", qhp->attr.state);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1747,7 +1737,7 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
|
||||
|
||||
c4iw_qp_rem_ref(ib_qp);
|
||||
|
||||
pr_debug("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
|
||||
pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1766,7 +1756,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
|
||||
struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
|
||||
struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
|
||||
|
||||
pr_debug("%s ib_pd %p\n", __func__, pd);
|
||||
pr_debug("ib_pd %p\n", pd);
|
||||
|
||||
if (attrs->qp_type != IB_QPT_RC)
|
||||
return ERR_PTR(-EINVAL);
|
||||
@ -1798,6 +1788,13 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
|
||||
qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
|
||||
if (!qhp)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
qhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
|
||||
if (!qhp->wr_waitp) {
|
||||
ret = -ENOMEM;
|
||||
goto err_free_qhp;
|
||||
}
|
||||
|
||||
qhp->wq.sq.size = sqsize;
|
||||
qhp->wq.sq.memsize =
|
||||
(sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
|
||||
@ -1814,9 +1811,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
|
||||
}
|
||||
|
||||
ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
|
||||
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
|
||||
ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
|
||||
qhp->wr_waitp);
|
||||
if (ret)
|
||||
goto err1;
|
||||
goto err_free_wr_wait;
|
||||
|
||||
attrs->cap.max_recv_wr = rqsize - 1;
|
||||
attrs->cap.max_send_wr = sqsize - 1;
|
||||
@ -1847,35 +1845,35 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
|
||||
|
||||
ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
|
||||
if (ret)
|
||||
goto err2;
|
||||
goto err_destroy_qp;
|
||||
|
||||
if (udata) {
|
||||
if (udata && ucontext) {
|
||||
sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL);
|
||||
if (!sq_key_mm) {
|
||||
ret = -ENOMEM;
|
||||
goto err3;
|
||||
goto err_remove_handle;
|
||||
}
|
||||
rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
|
||||
if (!rq_key_mm) {
|
||||
ret = -ENOMEM;
|
||||
goto err4;
|
||||
goto err_free_sq_key;
|
||||
}
|
||||
sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL);
|
||||
if (!sq_db_key_mm) {
|
||||
ret = -ENOMEM;
|
||||
goto err5;
|
||||
goto err_free_rq_key;
|
||||
}
|
||||
rq_db_key_mm = kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
|
||||
if (!rq_db_key_mm) {
|
||||
ret = -ENOMEM;
|
||||
goto err6;
|
||||
goto err_free_sq_db_key;
|
||||
}
|
||||
if (t4_sq_onchip(&qhp->wq.sq)) {
|
||||
ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm),
|
||||
GFP_KERNEL);
|
||||
if (!ma_sync_key_mm) {
|
||||
ret = -ENOMEM;
|
||||
goto err7;
|
||||
goto err_free_rq_db_key;
|
||||
}
|
||||
uresp.flags = C4IW_QPF_ONCHIP;
|
||||
} else
|
||||
@ -1905,7 +1903,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
|
||||
spin_unlock(&ucontext->mmap_lock);
|
||||
ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
|
||||
if (ret)
|
||||
goto err8;
|
||||
goto err_free_ma_sync_key;
|
||||
sq_key_mm->key = uresp.sq_key;
|
||||
sq_key_mm->addr = qhp->wq.sq.phys_addr;
|
||||
sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
|
||||
@ -1935,30 +1933,30 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
|
||||
qhp->ucontext = ucontext;
|
||||
}
|
||||
qhp->ibqp.qp_num = qhp->wq.sq.qid;
|
||||
init_timer(&(qhp->timer));
|
||||
INIT_LIST_HEAD(&qhp->db_fc_entry);
|
||||
pr_debug("%s sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
|
||||
__func__,
|
||||
pr_debug("sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
|
||||
qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
|
||||
attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
|
||||
qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
|
||||
return &qhp->ibqp;
|
||||
err8:
|
||||
err_free_ma_sync_key:
|
||||
kfree(ma_sync_key_mm);
|
||||
err7:
|
||||
err_free_rq_db_key:
|
||||
kfree(rq_db_key_mm);
|
||||
err6:
|
||||
err_free_sq_db_key:
|
||||
kfree(sq_db_key_mm);
|
||||
err5:
|
||||
err_free_rq_key:
|
||||
kfree(rq_key_mm);
|
||||
err4:
|
||||
err_free_sq_key:
|
||||
kfree(sq_key_mm);
|
||||
err3:
|
||||
err_remove_handle:
|
||||
remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid);
|
||||
err2:
|
||||
err_destroy_qp:
|
||||
destroy_qp(&rhp->rdev, &qhp->wq,
|
||||
ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
|
||||
err1:
|
||||
err_free_wr_wait:
|
||||
c4iw_put_wr_wait(qhp->wr_waitp);
|
||||
err_free_qhp:
|
||||
kfree(qhp);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
@ -1971,7 +1969,7 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
||||
enum c4iw_qp_attr_mask mask = 0;
|
||||
struct c4iw_qp_attributes attrs;
|
||||
|
||||
pr_debug("%s ib_qp %p\n", __func__, ibqp);
|
||||
pr_debug("ib_qp %p\n", ibqp);
|
||||
|
||||
/* iwarp does not support the RTR state */
|
||||
if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
|
||||
@ -2017,7 +2015,7 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
||||
|
||||
struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
|
||||
{
|
||||
pr_debug("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
|
||||
pr_debug("ib_dev %p qpn 0x%x\n", dev, qpn);
|
||||
return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
|
||||
}
|
||||
|
||||
|
@ -90,7 +90,7 @@ u32 c4iw_get_resource(struct c4iw_id_table *id_table)
|
||||
|
||||
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
|
||||
{
|
||||
pr_debug("%s entry 0x%x\n", __func__, entry);
|
||||
pr_debug("entry 0x%x\n", entry);
|
||||
c4iw_id_free(id_table, entry);
|
||||
}
|
||||
|
||||
@ -141,7 +141,7 @@ u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
|
||||
}
|
||||
out:
|
||||
mutex_unlock(&uctx->lock);
|
||||
pr_debug("%s qid 0x%x\n", __func__, qid);
|
||||
pr_debug("qid 0x%x\n", qid);
|
||||
mutex_lock(&rdev->stats.lock);
|
||||
if (rdev->stats.qid.cur > rdev->stats.qid.max)
|
||||
rdev->stats.qid.max = rdev->stats.qid.cur;
|
||||
@ -157,7 +157,7 @@ void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
|
||||
entry = kmalloc(sizeof *entry, GFP_KERNEL);
|
||||
if (!entry)
|
||||
return;
|
||||
pr_debug("%s qid 0x%x\n", __func__, qid);
|
||||
pr_debug("qid 0x%x\n", qid);
|
||||
entry->qid = qid;
|
||||
mutex_lock(&uctx->lock);
|
||||
list_add_tail(&entry->entry, &uctx->cqids);
|
||||
@ -215,7 +215,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
|
||||
}
|
||||
out:
|
||||
mutex_unlock(&uctx->lock);
|
||||
pr_debug("%s qid 0x%x\n", __func__, qid);
|
||||
pr_debug("qid 0x%x\n", qid);
|
||||
mutex_lock(&rdev->stats.lock);
|
||||
if (rdev->stats.qid.cur > rdev->stats.qid.max)
|
||||
rdev->stats.qid.max = rdev->stats.qid.cur;
|
||||
@ -231,7 +231,7 @@ void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
|
||||
entry = kmalloc(sizeof *entry, GFP_KERNEL);
|
||||
if (!entry)
|
||||
return;
|
||||
pr_debug("%s qid 0x%x\n", __func__, qid);
|
||||
pr_debug("qid 0x%x\n", qid);
|
||||
entry->qid = qid;
|
||||
mutex_lock(&uctx->lock);
|
||||
list_add_tail(&entry->entry, &uctx->qpids);
|
||||
@ -254,7 +254,7 @@ void c4iw_destroy_resource(struct c4iw_resource *rscp)
|
||||
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
|
||||
{
|
||||
unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
|
||||
pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
|
||||
pr_debug("addr 0x%x size %d\n", (u32)addr, size);
|
||||
mutex_lock(&rdev->stats.lock);
|
||||
if (addr) {
|
||||
rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
|
||||
@ -268,7 +268,7 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
|
||||
|
||||
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
|
||||
{
|
||||
pr_debug("%s addr 0x%x size %d\n", __func__, addr, size);
|
||||
pr_debug("addr 0x%x size %d\n", addr, size);
|
||||
mutex_lock(&rdev->stats.lock);
|
||||
rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
|
||||
mutex_unlock(&rdev->stats.lock);
|
||||
@ -290,8 +290,8 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
|
||||
while (pbl_start < pbl_top) {
|
||||
pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
|
||||
if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
|
||||
pr_debug("%s failed to add PBL chunk (%x/%x)\n",
|
||||
__func__, pbl_start, pbl_chunk);
|
||||
pr_debug("failed to add PBL chunk (%x/%x)\n",
|
||||
pbl_start, pbl_chunk);
|
||||
if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
|
||||
pr_warn("Failed to add all PBL chunks (%x/%x)\n",
|
||||
pbl_start, pbl_top - pbl_start);
|
||||
@ -299,8 +299,8 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
|
||||
}
|
||||
pbl_chunk >>= 1;
|
||||
} else {
|
||||
pr_debug("%s added PBL chunk (%x/%x)\n",
|
||||
__func__, pbl_start, pbl_chunk);
|
||||
pr_debug("added PBL chunk (%x/%x)\n",
|
||||
pbl_start, pbl_chunk);
|
||||
pbl_start += pbl_chunk;
|
||||
}
|
||||
}
|
||||
@ -322,7 +322,7 @@ void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
|
||||
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
|
||||
{
|
||||
unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
|
||||
pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
|
||||
pr_debug("addr 0x%x size %d\n", (u32)addr, size << 6);
|
||||
if (!addr)
|
||||
pr_warn_ratelimited("%s: Out of RQT memory\n",
|
||||
pci_name(rdev->lldi.pdev));
|
||||
@ -339,7 +339,7 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
|
||||
|
||||
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
|
||||
{
|
||||
pr_debug("%s addr 0x%x size %d\n", __func__, addr, size << 6);
|
||||
pr_debug("addr 0x%x size %d\n", addr, size << 6);
|
||||
mutex_lock(&rdev->stats.lock);
|
||||
rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
|
||||
mutex_unlock(&rdev->stats.lock);
|
||||
@ -361,8 +361,8 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
|
||||
while (rqt_start < rqt_top) {
|
||||
rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
|
||||
if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
|
||||
pr_debug("%s failed to add RQT chunk (%x/%x)\n",
|
||||
__func__, rqt_start, rqt_chunk);
|
||||
pr_debug("failed to add RQT chunk (%x/%x)\n",
|
||||
rqt_start, rqt_chunk);
|
||||
if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
|
||||
pr_warn("Failed to add all RQT chunks (%x/%x)\n",
|
||||
rqt_start, rqt_top - rqt_start);
|
||||
@ -370,8 +370,8 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
|
||||
}
|
||||
rqt_chunk >>= 1;
|
||||
} else {
|
||||
pr_debug("%s added RQT chunk (%x/%x)\n",
|
||||
__func__, rqt_start, rqt_chunk);
|
||||
pr_debug("added RQT chunk (%x/%x)\n",
|
||||
rqt_start, rqt_chunk);
|
||||
rqt_start += rqt_chunk;
|
||||
}
|
||||
}
|
||||
@ -391,7 +391,7 @@ void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
|
||||
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
|
||||
{
|
||||
unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);
|
||||
pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
|
||||
pr_debug("addr 0x%x size %d\n", (u32)addr, size);
|
||||
if (addr) {
|
||||
mutex_lock(&rdev->stats.lock);
|
||||
rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT);
|
||||
@ -404,7 +404,7 @@ u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
|
||||
|
||||
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
|
||||
{
|
||||
pr_debug("%s addr 0x%x size %d\n", __func__, addr, size);
|
||||
pr_debug("addr 0x%x size %d\n", addr, size);
|
||||
mutex_lock(&rdev->stats.lock);
|
||||
rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT);
|
||||
mutex_unlock(&rdev->stats.lock);
|
||||
@ -426,8 +426,8 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
|
||||
while (start < top) {
|
||||
chunk = min(top - start + 1, chunk);
|
||||
if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
|
||||
pr_debug("%s failed to add OCQP chunk (%x/%x)\n",
|
||||
__func__, start, chunk);
|
||||
pr_debug("failed to add OCQP chunk (%x/%x)\n",
|
||||
start, chunk);
|
||||
if (chunk <= 1024 << MIN_OCQP_SHIFT) {
|
||||
pr_warn("Failed to add all OCQP chunks (%x/%x)\n",
|
||||
start, top - start);
|
||||
@ -435,8 +435,8 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
|
||||
}
|
||||
chunk >>= 1;
|
||||
} else {
|
||||
pr_debug("%s added OCQP chunk (%x/%x)\n",
|
||||
__func__, start, chunk);
|
||||
pr_debug("added OCQP chunk (%x/%x)\n",
|
||||
start, chunk);
|
||||
start += chunk;
|
||||
}
|
||||
}
|
||||
|
@ -171,7 +171,7 @@ struct t4_cqe {
|
||||
__be32 msn;
|
||||
} rcqe;
|
||||
struct {
|
||||
u32 stag;
|
||||
__be32 stag;
|
||||
u16 nada2;
|
||||
u16 cidx;
|
||||
} scqe;
|
||||
@@ -425,7 +425,6 @@ static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)

 static inline void t4_sq_consume(struct t4_wq *wq)
 {
-	BUG_ON(wq->sq.in_use < 1);
 	if (wq->sq.cidx == wq->sq.flush_cidx)
 		wq->sq.flush_cidx = -1;
 	wq->sq.in_use--;
@ -466,14 +465,12 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe)
|
||||
wmb();
|
||||
if (wq->sq.bar2_va) {
|
||||
if (inc == 1 && wq->sq.bar2_qid == 0 && wqe) {
|
||||
pr_debug("%s: WC wq->sq.pidx = %d\n",
|
||||
__func__, wq->sq.pidx);
|
||||
pr_debug("WC wq->sq.pidx = %d\n", wq->sq.pidx);
|
||||
pio_copy((u64 __iomem *)
|
||||
(wq->sq.bar2_va + SGE_UDB_WCDOORBELL),
|
||||
(u64 *)wqe);
|
||||
} else {
|
||||
pr_debug("%s: DB wq->sq.pidx = %d\n",
|
||||
__func__, wq->sq.pidx);
|
||||
pr_debug("DB wq->sq.pidx = %d\n", wq->sq.pidx);
|
||||
writel(PIDX_T5_V(inc) | QID_V(wq->sq.bar2_qid),
|
||||
wq->sq.bar2_va + SGE_UDB_KDOORBELL);
|
||||
}
|
||||
@ -493,14 +490,12 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc,
|
||||
wmb();
|
||||
if (wq->rq.bar2_va) {
|
||||
if (inc == 1 && wq->rq.bar2_qid == 0 && wqe) {
|
||||
pr_debug("%s: WC wq->rq.pidx = %d\n",
|
||||
__func__, wq->rq.pidx);
|
||||
pr_debug("WC wq->rq.pidx = %d\n", wq->rq.pidx);
|
||||
pio_copy((u64 __iomem *)
|
||||
(wq->rq.bar2_va + SGE_UDB_WCDOORBELL),
|
||||
(void *)wqe);
|
||||
} else {
|
||||
pr_debug("%s: DB wq->rq.pidx = %d\n",
|
||||
__func__, wq->rq.pidx);
|
||||
pr_debug("DB wq->rq.pidx = %d\n", wq->rq.pidx);
|
||||
writel(PIDX_T5_V(inc) | QID_V(wq->rq.bar2_qid),
|
||||
wq->rq.bar2_va + SGE_UDB_KDOORBELL);
|
||||
}
|
||||
@@ -601,10 +596,11 @@ static inline void t4_swcq_produce(struct t4_cq *cq)
 {
 	cq->sw_in_use++;
 	if (cq->sw_in_use == cq->size) {
-		pr_debug("%s cxgb4 sw cq overflow cqid %u\n",
-			 __func__, cq->cqid);
+		pr_warn("%s cxgb4 sw cq overflow cqid %u\n",
+			__func__, cq->cqid);
 		cq->error = 1;
-		BUG_ON(1);
+		cq->sw_in_use--;
+		return;
 	}
 	if (++cq->sw_pidx == cq->size)
 		cq->sw_pidx = 0;
@@ -612,7 +608,6 @@ static inline void t4_swcq_produce(struct t4_cq *cq)

 static inline void t4_swcq_consume(struct t4_cq *cq)
 {
-	BUG_ON(cq->sw_in_use < 1);
 	cq->sw_in_use--;
 	if (++cq->sw_cidx == cq->size)
 		cq->sw_cidx = 0;
@@ -658,7 +653,6 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 		ret = -EOVERFLOW;
 		cq->error = 1;
 		pr_err("cq overflow cqid %u\n", cq->cqid);
-		BUG_ON(1);
 	} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {

 		/* Ensure CQE is flushed to memory */
@@ -673,10 +667,9 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
 {
 	if (cq->sw_in_use == cq->size) {
-		pr_debug("%s cxgb4 sw cq overflow cqid %u\n",
-			 __func__, cq->cqid);
+		pr_warn("%s cxgb4 sw cq overflow cqid %u\n",
+			__func__, cq->cqid);
 		cq->error = 1;
-		BUG_ON(1);
 		return NULL;
 	}
 	if (cq->sw_in_use)
@@ -675,8 +675,8 @@ struct fw_ri_fr_nsmr_tpte_wr {
 	__u16  wrid;
 	__u8   r1[3];
 	__u8   len16;
-	__u32  r2;
-	__u32  stag;
+	__be32 r2;
+	__be32 stag;
 	struct fw_ri_tpte tpte;
 	__u64  pbl[2];
 };
@@ -218,9 +218,9 @@ static inline void aspm_ctx_disable(struct hfi1_ctxtdata *rcd)
 }

 /* Timer function for re-enabling ASPM in the absence of interrupt activity */
-static inline void aspm_ctx_timer_function(unsigned long data)
+static inline void aspm_ctx_timer_function(struct timer_list *t)
 {
-	struct hfi1_ctxtdata *rcd = (struct hfi1_ctxtdata *)data;
+	struct hfi1_ctxtdata *rcd = from_timer(rcd, t, aspm_timer);
 	unsigned long flags;

 	spin_lock_irqsave(&rcd->aspm_lock, flags);
@@ -281,8 +281,7 @@ static inline void aspm_enable_all(struct hfi1_devdata *dd)
 static inline void aspm_ctx_init(struct hfi1_ctxtdata *rcd)
 {
 	spin_lock_init(&rcd->aspm_lock);
-	setup_timer(&rcd->aspm_timer, aspm_ctx_timer_function,
-		    (unsigned long)rcd);
+	timer_setup(&rcd->aspm_timer, aspm_ctx_timer_function, 0);
 	rcd->aspm_intr_supported = rcd->dd->aspm_supported &&
 		aspm_mode == ASPM_MODE_DYNAMIC &&
 		rcd->ctxt < rcd->dd->first_dyn_alloc_ctxt;
|
||||
u8 *flag_bits, u16 *link_widths);
|
||||
static void read_remote_device_id(struct hfi1_devdata *dd, u16 *device_id,
|
||||
u8 *device_rev);
|
||||
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed);
|
||||
static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx);
|
||||
static int read_tx_settings(struct hfi1_devdata *dd, u8 *enable_lane_tx,
|
||||
u8 *tx_polarity_inversion,
|
||||
@ -5538,9 +5537,9 @@ static void handle_cce_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
|
||||
* associated with them.
|
||||
*/
|
||||
#define RCVERR_CHECK_TIME 10
|
||||
static void update_rcverr_timer(unsigned long opaque)
|
||||
static void update_rcverr_timer(struct timer_list *t)
|
||||
{
|
||||
struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
|
||||
struct hfi1_devdata *dd = from_timer(dd, t, rcverr_timer);
|
||||
struct hfi1_pportdata *ppd = dd->pport;
|
||||
u32 cur_ovfl_cnt = read_dev_cntr(dd, C_RCV_OVF, CNTR_INVALID_VL);
|
||||
|
||||
@ -5559,7 +5558,7 @@ static void update_rcverr_timer(unsigned long opaque)
|
||||
|
||||
static int init_rcverr(struct hfi1_devdata *dd)
|
||||
{
|
||||
setup_timer(&dd->rcverr_timer, update_rcverr_timer, (unsigned long)dd);
|
||||
timer_setup(&dd->rcverr_timer, update_rcverr_timer, 0);
|
||||
/* Assume the hardware counter has been reset */
|
||||
dd->rcv_ovfl_cnt = 0;
|
||||
return mod_timer(&dd->rcverr_timer, jiffies + HZ * RCVERR_CHECK_TIME);
|
||||
@ -5567,9 +5566,8 @@ static int init_rcverr(struct hfi1_devdata *dd)
|
||||
|
||||
static void free_rcverr(struct hfi1_devdata *dd)
|
||||
{
|
||||
if (dd->rcverr_timer.data)
|
||||
if (dd->rcverr_timer.function)
|
||||
del_timer_sync(&dd->rcverr_timer);
|
||||
dd->rcverr_timer.data = 0;
|
||||
}
|
||||
|
||||
static void handle_rxe_err(struct hfi1_devdata *dd, u32 unused, u64 reg)
|
||||
@ -6520,12 +6518,11 @@ static void _dc_start(struct hfi1_devdata *dd)
|
||||
if (!dd->dc_shutdown)
|
||||
return;
|
||||
|
||||
/* Take the 8051 out of reset */
|
||||
write_csr(dd, DC_DC8051_CFG_RST, 0ull);
|
||||
/* Wait until 8051 is ready */
|
||||
if (wait_fm_ready(dd, TIMEOUT_8051_START))
|
||||
dd_dev_err(dd, "%s: timeout starting 8051 firmware\n",
|
||||
__func__);
|
||||
/*
|
||||
* Take the 8051 out of reset, wait until 8051 is ready, and set host
|
||||
* version bit.
|
||||
*/
|
||||
release_and_wait_ready_8051_firmware(dd);
|
||||
|
||||
/* Take away reset for LCB and RX FPE (set in lcb_shutdown). */
|
||||
write_csr(dd, DCC_CFG_RESET, 0x10);
|
||||
@ -6819,7 +6816,8 @@ static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
|
||||
rcd = hfi1_rcd_get_by_index(dd, i);
|
||||
|
||||
/* Ensure all non-user contexts(including vnic) are enabled */
|
||||
if (!rcd || !rcd->sc || (rcd->sc->type == SC_USER)) {
|
||||
if (!rcd ||
|
||||
(i >= dd->first_dyn_alloc_ctxt && !rcd->is_vnic)) {
|
||||
hfi1_rcd_put(rcd);
|
||||
continue;
|
||||
}
|
||||
@ -7199,27 +7197,6 @@ static int lcb_to_port_ltp(int lcb_crc)
|
||||
return port_ltp;
|
||||
}
|
||||
|
||||
/*
|
||||
* Our neighbor has indicated that we are allowed to act as a fabric
|
||||
* manager, so place the full management partition key in the second
|
||||
* (0-based) pkey array position (see OPAv1, section 20.2.2.6.8). Note
|
||||
* that we should already have the limited management partition key in
|
||||
* array element 1, and also that the port is not yet up when
|
||||
* add_full_mgmt_pkey() is invoked.
|
||||
*/
|
||||
static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
|
||||
{
|
||||
struct hfi1_devdata *dd = ppd->dd;
|
||||
|
||||
/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
|
||||
if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
|
||||
dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
|
||||
__func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
|
||||
ppd->pkeys[2] = FULL_MGMT_P_KEY;
|
||||
(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
|
||||
hfi1_event_pkey_change(ppd->dd, ppd->port);
|
||||
}
|
||||
|
||||
static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
|
||||
{
|
||||
if (ppd->pkeys[2] != 0) {
|
||||
@ -7416,11 +7393,7 @@ void handle_verify_cap(struct work_struct *work)
|
||||
&partner_supported_crc);
|
||||
read_vc_remote_link_width(dd, &remote_tx_rate, &link_widths);
|
||||
read_remote_device_id(dd, &device_id, &device_rev);
|
||||
/*
|
||||
* And the 'MgmtAllowed' information, which is exchanged during
|
||||
* LNI, is also be available at this point.
|
||||
*/
|
||||
read_mgmt_allowed(dd, &ppd->mgmt_allowed);
|
||||
|
||||
/* print the active widths */
|
||||
get_link_widths(dd, &active_tx, &active_rx);
|
||||
dd_dev_info(dd,
|
||||
@ -7548,9 +7521,6 @@ void handle_verify_cap(struct work_struct *work)
|
||||
write_csr(dd, DC_LCB_ERR_EN, 0); /* mask LCB errors */
|
||||
set_8051_lcb_access(dd);
|
||||
|
||||
if (ppd->mgmt_allowed)
|
||||
add_full_mgmt_pkey(ppd);
|
||||
|
||||
/* tell the 8051 to go to LinkUp */
|
||||
set_link_state(ppd, HLS_GOING_UP);
|
||||
}
|
||||
@ -8124,8 +8094,7 @@ static void is_rcv_avail_int(struct hfi1_devdata *dd, unsigned int source)
|
||||
rcd = hfi1_rcd_get_by_index(dd, source);
|
||||
if (rcd) {
|
||||
/* Check for non-user contexts, including vnic */
|
||||
if ((source < dd->first_dyn_alloc_ctxt) ||
|
||||
(rcd->sc && (rcd->sc->type == SC_KERNEL)))
|
||||
if (source < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
|
||||
rcd->do_interrupt(rcd, 0);
|
||||
else
|
||||
handle_user_interrupt(rcd);
|
||||
@ -8155,8 +8124,8 @@ static void is_rcv_urgent_int(struct hfi1_devdata *dd, unsigned int source)
|
||||
rcd = hfi1_rcd_get_by_index(dd, source);
|
||||
if (rcd) {
|
||||
/* only pay attention to user urgent interrupts */
|
||||
if ((source >= dd->first_dyn_alloc_ctxt) &&
|
||||
(!rcd->sc || (rcd->sc->type == SC_USER)))
|
||||
if (source >= dd->first_dyn_alloc_ctxt &&
|
||||
!rcd->is_vnic)
|
||||
handle_user_interrupt(rcd);
|
||||
|
||||
hfi1_rcd_put(rcd);
|
||||
@ -8595,30 +8564,23 @@ int write_lcb_csr(struct hfi1_devdata *dd, u32 addr, u64 data)
|
||||
}
|
||||
|
||||
/*
|
||||
* If the 8051 is in reset mode (dd->dc_shutdown == 1), this function
|
||||
* will still continue executing.
|
||||
*
|
||||
* Returns:
|
||||
* < 0 = Linux error, not able to get access
|
||||
* > 0 = 8051 command RETURN_CODE
|
||||
*/
|
||||
static int do_8051_command(
|
||||
struct hfi1_devdata *dd,
|
||||
u32 type,
|
||||
u64 in_data,
|
||||
u64 *out_data)
|
||||
static int _do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
|
||||
u64 *out_data)
|
||||
{
|
||||
u64 reg, completed;
|
||||
int return_code;
|
||||
unsigned long timeout;
|
||||
|
||||
lockdep_assert_held(&dd->dc8051_lock);
|
||||
hfi1_cdbg(DC8051, "type %d, data 0x%012llx", type, in_data);
|
||||
|
||||
mutex_lock(&dd->dc8051_lock);
|
||||
|
||||
/* We can't send any commands to the 8051 if it's in reset */
|
||||
if (dd->dc_shutdown) {
|
||||
return_code = -ENODEV;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/*
|
||||
* If an 8051 host command timed out previously, then the 8051 is
|
||||
* stuck.
|
||||
@ -8718,6 +8680,29 @@ static int do_8051_command(
|
||||
*/
|
||||
write_csr(dd, DC_DC8051_CFG_HOST_CMD_0, 0);
|
||||
|
||||
fail:
|
||||
return return_code;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns:
|
||||
* < 0 = Linux error, not able to get access
|
||||
* > 0 = 8051 command RETURN_CODE
|
||||
*/
|
||||
static int do_8051_command(struct hfi1_devdata *dd, u32 type, u64 in_data,
|
||||
u64 *out_data)
|
||||
{
|
||||
int return_code;
|
||||
|
||||
mutex_lock(&dd->dc8051_lock);
|
||||
/* We can't send any commands to the 8051 if it's in reset */
|
||||
if (dd->dc_shutdown) {
|
||||
return_code = -ENODEV;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
return_code = _do_8051_command(dd, type, in_data, out_data);
|
||||
|
||||
fail:
|
||||
mutex_unlock(&dd->dc8051_lock);
|
||||
return return_code;
|
||||
@ -8728,16 +8713,17 @@ static int set_physical_link_state(struct hfi1_devdata *dd, u64 state)
|
||||
return do_8051_command(dd, HCMD_CHANGE_PHY_STATE, state, NULL);
|
||||
}
|
||||
|
||||
int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
|
||||
u8 lane_id, u32 config_data)
|
||||
static int _load_8051_config(struct hfi1_devdata *dd, u8 field_id,
|
||||
u8 lane_id, u32 config_data)
|
||||
{
|
||||
u64 data;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&dd->dc8051_lock);
|
||||
data = (u64)field_id << LOAD_DATA_FIELD_ID_SHIFT
|
||||
| (u64)lane_id << LOAD_DATA_LANE_ID_SHIFT
|
||||
| (u64)config_data << LOAD_DATA_DATA_SHIFT;
|
||||
ret = do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
|
||||
ret = _do_8051_command(dd, HCMD_LOAD_CONFIG_DATA, data, NULL);
|
||||
if (ret != HCMD_SUCCESS) {
|
||||
dd_dev_err(dd,
|
||||
"load 8051 config: field id %d, lane %d, err %d\n",
|
||||
@ -8746,6 +8732,18 @@ int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
|
||||
return ret;
|
||||
}
|
||||
|
||||
int load_8051_config(struct hfi1_devdata *dd, u8 field_id,
|
||||
u8 lane_id, u32 config_data)
|
||||
{
|
||||
int return_code;
|
||||
|
||||
mutex_lock(&dd->dc8051_lock);
|
||||
return_code = _load_8051_config(dd, field_id, lane_id, config_data);
|
||||
mutex_unlock(&dd->dc8051_lock);
|
||||
|
||||
return return_code;
|
||||
}
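
The 8051 hunks above use a common locked/unlocked split: _do_8051_command() and _load_8051_config() assume the dc8051 mutex is already held and assert it with lockdep_assert_held(), while thin public wrappers take and drop the lock; callers that already hold it (such as write_host_interface_version(), further down) use the underscore variants directly so a multi-step sequence stays atomic. A generic, hedged sketch of that shape (names are illustrative, not from the driver):

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/lockdep.h>

static DEFINE_MUTEX(hw_lock);

/* caller must hold hw_lock; lockdep verifies it on debug kernels */
static int __hw_command(u32 type, u64 in, u64 *out)
{
	lockdep_assert_held(&hw_lock);
	/* ... talk to the device ... */
	return 0;
}

/* public entry point: acquires the lock, then uses the unlocked helper */
static int hw_command(u32 type, u64 in, u64 *out)
{
	int ret;

	mutex_lock(&hw_lock);
	ret = __hw_command(type, in, out);
	mutex_unlock(&hw_lock);
	return ret;
}

/* a composite operation stays atomic by reusing the unlocked helper */
static int hw_read_modify_write(u32 type, u64 set_bits)
{
	u64 val;
	int ret;

	mutex_lock(&hw_lock);
	ret = __hw_command(type, 0, &val);
	if (!ret)
		ret = __hw_command(type, val | set_bits, NULL);
	mutex_unlock(&hw_lock);
	return ret;
}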
|
||||
|
||||
/*
|
||||
* Read the 8051 firmware "registers". Use the RAM directly. Always
|
||||
* set the result, even on error.
|
||||
@ -8861,13 +8859,14 @@ int write_host_interface_version(struct hfi1_devdata *dd, u8 version)
|
||||
u32 frame;
|
||||
u32 mask;
|
||||
|
||||
lockdep_assert_held(&dd->dc8051_lock);
|
||||
mask = (HOST_INTERFACE_VERSION_MASK << HOST_INTERFACE_VERSION_SHIFT);
|
||||
read_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG, &frame);
|
||||
/* Clear, then set field */
|
||||
frame &= ~mask;
|
||||
frame |= ((u32)version << HOST_INTERFACE_VERSION_SHIFT);
|
||||
return load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
|
||||
frame);
|
||||
return _load_8051_config(dd, RESERVED_REGISTERS, GENERAL_CONFIG,
|
||||
frame);
|
||||
}
|
||||
|
||||
void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
|
||||
@ -8932,14 +8931,6 @@ static void read_local_lni(struct hfi1_devdata *dd, u8 *enable_lane_rx)
|
||||
*enable_lane_rx = (frame >> ENABLE_LANE_RX_SHIFT) & ENABLE_LANE_RX_MASK;
|
||||
}
|
||||
|
||||
static void read_mgmt_allowed(struct hfi1_devdata *dd, u8 *mgmt_allowed)
|
||||
{
|
||||
u32 frame;
|
||||
|
||||
read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
|
||||
*mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT) & MGMT_ALLOWED_MASK;
|
||||
}
|
||||
|
||||
static void read_last_local_state(struct hfi1_devdata *dd, u32 *lls)
|
||||
{
|
||||
read_8051_config(dd, LAST_LOCAL_STATE_COMPLETE, GENERAL_CONFIG, lls);
|
||||
@ -9160,25 +9151,6 @@ static int do_quick_linkup(struct hfi1_devdata *dd)
|
||||
return 0; /* success */
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the SerDes to internal loopback mode.
|
||||
* Returns 0 on success, -errno on error.
|
||||
*/
|
||||
static int set_serdes_loopback_mode(struct hfi1_devdata *dd)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = set_physical_link_state(dd, PLS_INTERNAL_SERDES_LOOPBACK);
|
||||
if (ret == HCMD_SUCCESS)
|
||||
return 0;
|
||||
dd_dev_err(dd,
|
||||
"Set physical link state to SerDes Loopback failed with return %d\n",
|
||||
ret);
|
||||
if (ret >= 0)
|
||||
ret = -EINVAL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Do all special steps to set up loopback.
|
||||
*/
|
||||
@ -9204,13 +9176,11 @@ static int init_loopback(struct hfi1_devdata *dd)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* handle serdes loopback */
|
||||
if (loopback == LOOPBACK_SERDES) {
|
||||
/* internal serdes loopack needs quick linkup on RTL */
|
||||
if (dd->icode == ICODE_RTL_SILICON)
|
||||
quick_linkup = 1;
|
||||
return set_serdes_loopback_mode(dd);
|
||||
}
|
||||
/*
|
||||
* SerDes loopback init sequence is handled in set_local_link_attributes
|
||||
*/
|
||||
if (loopback == LOOPBACK_SERDES)
|
||||
return 0;
|
||||
|
||||
/* LCB loopback - handled at poll time */
|
||||
if (loopback == LOOPBACK_LCB) {
|
||||
@ -9269,7 +9239,7 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd)
|
||||
u8 tx_polarity_inversion;
|
||||
u8 rx_polarity_inversion;
|
||||
int ret;
|
||||
|
||||
u32 misc_bits = 0;
|
||||
/* reset our fabric serdes to clear any lingering problems */
|
||||
fabric_serdes_reset(dd);
|
||||
|
||||
@ -9315,7 +9285,14 @@ static int set_local_link_attributes(struct hfi1_pportdata *ppd)
|
||||
if (ret != HCMD_SUCCESS)
|
||||
goto set_local_link_attributes_fail;
|
||||
|
||||
ret = write_vc_local_link_width(dd, 0, 0,
|
||||
/*
|
||||
* SerDes loopback init sequence requires
|
||||
* setting bit 0 of MISC_CONFIG_BITS
|
||||
*/
|
||||
if (loopback == LOOPBACK_SERDES)
|
||||
misc_bits |= 1 << LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT;
|
||||
|
||||
ret = write_vc_local_link_width(dd, misc_bits, 0,
|
||||
opa_to_vc_link_widths(
|
||||
ppd->link_width_enabled));
|
||||
if (ret != HCMD_SUCCESS)
|
||||
@ -9809,9 +9786,9 @@ void hfi1_quiet_serdes(struct hfi1_pportdata *ppd)
|
||||
cancel_delayed_work_sync(&ppd->start_link_work);
|
||||
|
||||
ppd->offline_disabled_reason =
|
||||
HFI1_ODR_MASK(OPA_LINKDOWN_REASON_SMA_DISABLED);
|
||||
set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SMA_DISABLED, 0,
|
||||
OPA_LINKDOWN_REASON_SMA_DISABLED);
|
||||
HFI1_ODR_MASK(OPA_LINKDOWN_REASON_REBOOT);
|
||||
set_link_down_reason(ppd, OPA_LINKDOWN_REASON_REBOOT, 0,
|
||||
OPA_LINKDOWN_REASON_REBOOT);
|
||||
set_link_state(ppd, HLS_DN_OFFLINE);
|
||||
|
||||
/* disable the port */
|
||||
@ -9952,7 +9929,7 @@ int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
|
||||
goto unimplemented;
|
||||
|
||||
case HFI1_IB_CFG_OP_VLS:
|
||||
val = ppd->vls_operational;
|
||||
val = ppd->actual_vls_operational;
|
||||
break;
|
||||
case HFI1_IB_CFG_VL_HIGH_CAP: /* VL arb high priority table size */
|
||||
val = VL_ARB_HIGH_PRIO_TABLE_SIZE;
|
||||
@ -9967,7 +9944,7 @@ int hfi1_get_ib_cfg(struct hfi1_pportdata *ppd, int which)
|
||||
val = ppd->phy_error_threshold;
|
||||
break;
|
||||
case HFI1_IB_CFG_LINKDEFAULT: /* IB link default (sleep/poll) */
|
||||
val = dd->link_default;
|
||||
val = HLS_DEFAULT;
|
||||
break;
|
||||
|
||||
case HFI1_IB_CFG_HRTBT: /* Heartbeat off/enable/auto */
|
||||
@ -10170,6 +10147,10 @@ static const char * const state_complete_reasons[] = {
|
||||
[0x33] =
|
||||
"Link partner completed the VerifyCap state, but the passing lanes do not meet the local link width policy",
|
||||
[0x34] = tx_out_of_policy,
|
||||
[0x35] = "Negotiated link width is mutually exclusive",
|
||||
[0x36] =
|
||||
"Timed out before receiving verifycap frames in VerifyCap.Exchange",
|
||||
[0x37] = "Unable to resolve secure data exchange",
|
||||
};
|
||||
|
||||
static const char *state_complete_reason_code_string(struct hfi1_pportdata *ppd,
|
||||
@ -10298,9 +10279,6 @@ static void force_logical_link_state_down(struct hfi1_pportdata *ppd)
|
||||
write_csr(dd, DC_LCB_CFG_ALLOW_LINK_UP, 0);
|
||||
write_csr(dd, DC_LCB_CFG_IGNORE_LOST_RCLK, 0);
|
||||
|
||||
/* adjust ppd->statusp, if needed */
|
||||
update_statusp(ppd, IB_PORT_DOWN);
|
||||
|
||||
dd_dev_info(ppd->dd, "logical state forced to LINK_DOWN\n");
|
||||
}
|
||||
|
||||
@ -10382,6 +10360,7 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
|
||||
force_logical_link_state_down(ppd);
|
||||
|
||||
ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
|
||||
update_statusp(ppd, IB_PORT_DOWN);
|
||||
|
||||
/*
|
||||
* The LNI has a mandatory wait time after the physical state
|
||||
@ -10569,7 +10548,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
|
||||
|
||||
orig_new_state = state;
|
||||
if (state == HLS_DN_DOWNDEF)
|
||||
state = dd->link_default;
|
||||
state = HLS_DEFAULT;
|
||||
|
||||
/* interpret poll -> poll as a link bounce */
|
||||
poll_bounce = ppd->host_link_state == HLS_DN_POLL &&
|
||||
@ -10643,6 +10622,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
|
||||
|
||||
handle_linkup_change(dd, 1);
|
||||
ppd->host_link_state = HLS_UP_INIT;
|
||||
update_statusp(ppd, IB_PORT_INIT);
|
||||
break;
|
||||
case HLS_UP_ARMED:
|
||||
if (ppd->host_link_state != HLS_UP_INIT)
|
||||
@ -10664,6 +10644,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
|
||||
break;
|
||||
}
|
||||
ppd->host_link_state = HLS_UP_ARMED;
|
||||
update_statusp(ppd, IB_PORT_ARMED);
|
||||
/*
|
||||
* The simulator does not currently implement SMA messages,
|
||||
* so neighbor_normal is not set. Set it here when we first
|
||||
@ -10686,6 +10667,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
|
||||
/* tell all engines to go running */
|
||||
sdma_all_running(dd);
|
||||
ppd->host_link_state = HLS_UP_ACTIVE;
|
||||
update_statusp(ppd, IB_PORT_ACTIVE);
|
||||
|
||||
/* Signal the IB layer that the port has went active */
|
||||
event.device = &dd->verbs_dev.rdi.ibdev;
|
||||
@ -12089,9 +12071,8 @@ static void free_cntrs(struct hfi1_devdata *dd)
|
||||
struct hfi1_pportdata *ppd;
|
||||
int i;
|
||||
|
||||
if (dd->synth_stats_timer.data)
|
||||
if (dd->synth_stats_timer.function)
|
||||
del_timer_sync(&dd->synth_stats_timer);
|
||||
dd->synth_stats_timer.data = 0;
|
||||
ppd = (struct hfi1_pportdata *)(dd + 1);
|
||||
for (i = 0; i < dd->num_pports; i++, ppd++) {
|
||||
kfree(ppd->cntrs);
|
||||
@ -12367,9 +12348,9 @@ static void do_update_synth_timer(struct work_struct *work)
|
||||
}
|
||||
}
|
||||
|
||||
static void update_synth_timer(unsigned long opaque)
|
||||
static void update_synth_timer(struct timer_list *t)
|
||||
{
|
||||
struct hfi1_devdata *dd = (struct hfi1_devdata *)opaque;
|
||||
struct hfi1_devdata *dd = from_timer(dd, t, synth_stats_timer);
|
||||
|
||||
queue_work(dd->update_cntr_wq, &dd->update_cntr_work);
|
||||
mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME);
|
||||
@ -12387,8 +12368,7 @@ static int init_cntrs(struct hfi1_devdata *dd)
|
||||
const int bit_type_32_sz = strlen(bit_type_32);
|
||||
|
||||
/* set up the stats timer; the add_timer is done at the end */
|
||||
setup_timer(&dd->synth_stats_timer, update_synth_timer,
|
||||
(unsigned long)dd);
|
||||
timer_setup(&dd->synth_stats_timer, update_synth_timer, 0);
|
||||
|
||||
/***********************/
|
||||
/* per device counters */
|
||||
@ -12701,6 +12681,17 @@ const char *opa_pstate_name(u32 pstate)
|
||||
return "unknown";
|
||||
}
|
||||
|
||||
/**
|
||||
* update_statusp - Update userspace status flag
|
||||
* @ppd: Port data structure
|
||||
* @state: port state information
|
||||
*
|
||||
* Actual port status is determined by the host_link_state value
|
||||
* in the ppd.
|
||||
*
|
||||
* host_link_state MUST be updated before updating the user space
|
||||
* statusp.
|
||||
*/
|
||||
static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
|
||||
{
|
||||
/*
|
||||
@ -12726,9 +12717,11 @@ static void update_statusp(struct hfi1_pportdata *ppd, u32 state)
|
||||
break;
|
||||
}
|
||||
}
|
||||
dd_dev_info(ppd->dd, "logical state changed to %s (0x%x)\n",
|
||||
opa_lstate_name(state), state);
|
||||
}
|
||||
|
||||
/*
|
||||
/**
|
||||
* wait_logical_linkstate - wait for an IB link state change to occur
|
||||
* @ppd: port device
|
||||
* @state: the state to wait for
|
||||
@ -12759,11 +12752,6 @@ static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
|
||||
msleep(20);
|
||||
}
|
||||
|
||||
update_statusp(ppd, state);
|
||||
dd_dev_info(ppd->dd,
|
||||
"logical state changed to %s (0x%x)\n",
|
||||
opa_lstate_name(state),
|
||||
state);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -12910,6 +12898,32 @@ int hfi1_tempsense_rd(struct hfi1_devdata *dd, struct hfi1_temp *temp)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* get_int_mask - get 64 bit int mask
|
||||
* @dd - the devdata
|
||||
* @i - the csr (relative to CCE_INT_MASK)
|
||||
*
|
||||
* Returns the mask with the urgent interrupt mask
|
||||
* bit clear for kernel receive contexts.
|
||||
*/
|
||||
static u64 get_int_mask(struct hfi1_devdata *dd, u32 i)
|
||||
{
|
||||
u64 mask = U64_MAX; /* default to no change */
|
||||
|
||||
if (i >= (IS_RCVURGENT_START / 64) && i < (IS_RCVURGENT_END / 64)) {
|
||||
int j = (i - (IS_RCVURGENT_START / 64)) * 64;
|
||||
int k = !j ? IS_RCVURGENT_START % 64 : 0;
|
||||
|
||||
if (j)
|
||||
j -= IS_RCVURGENT_START % 64;
|
||||
/* j = 0..dd->first_dyn_alloc_ctxt - 1,k = 0..63 */
|
||||
for (; j < dd->first_dyn_alloc_ctxt && k < 64; j++, k++)
|
||||
/* convert to bit in mask and clear */
|
||||
mask &= ~BIT_ULL(k);
|
||||
}
|
||||
return mask;
|
||||
}
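
The new get_int_mask() above maps a CCE_INT_MASK word index onto the IS_RCVURGENT bit range and clears one bit per kernel receive context, so urgent interrupts remain enabled only for user contexts. The arithmetic is just "which 64-bit word, which bit inside it"; a self-contained sketch of that idea with invented ranges:

#include <linux/bits.h>
#include <linux/types.h>

/*
 * Build one 64-bit mask word for a bit array spread across several words,
 * clearing every bit whose absolute source number falls in [start, end).
 */
static u64 build_mask_word(u32 word_idx, u32 start, u32 end)
{
	u64 mask = ~(u64)0;			/* default: every source enabled */
	u32 bit;

	for (bit = 0; bit < 64; bit++) {
		u32 abs = word_idx * 64 + bit;	/* absolute source number */

		if (abs >= start && abs < end)
			mask &= ~BIT_ULL(bit);	/* mask this source off */
	}
	return mask;
}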
|
||||
|
||||
/* ========================================================================= */
|
||||
|
||||
/*
|
||||
@ -12923,9 +12937,12 @@ void set_intr_state(struct hfi1_devdata *dd, u32 enable)
|
||||
* In HFI, the mask needs to be 1 to allow interrupts.
|
||||
*/
|
||||
if (enable) {
|
||||
/* enable all interrupts */
|
||||
for (i = 0; i < CCE_NUM_INT_CSRS; i++)
|
||||
write_csr(dd, CCE_INT_MASK + (8 * i), ~(u64)0);
|
||||
/* enable all interrupts but urgent on kernel contexts */
|
||||
for (i = 0; i < CCE_NUM_INT_CSRS; i++) {
|
||||
u64 mask = get_int_mask(dd, i);
|
||||
|
||||
write_csr(dd, CCE_INT_MASK + (8 * i), mask);
|
||||
}
|
||||
|
||||
init_qsfp_int(dd);
|
||||
} else {
|
||||
@ -12980,7 +12997,7 @@ static void clean_up_interrupts(struct hfi1_devdata *dd)
|
||||
if (!me->arg) /* => no irq, no affinity */
|
||||
continue;
|
||||
hfi1_put_irq_affinity(dd, me);
|
||||
free_irq(me->irq, me->arg);
|
||||
pci_free_irq(dd->pcidev, i, me->arg);
|
||||
}
|
||||
|
||||
/* clean structures */
|
||||
@ -12990,7 +13007,7 @@ static void clean_up_interrupts(struct hfi1_devdata *dd)
|
||||
} else {
|
||||
/* INTx */
|
||||
if (dd->requested_intx_irq) {
|
||||
free_irq(dd->pcidev->irq, dd);
|
||||
pci_free_irq(dd->pcidev, 0, dd);
|
||||
dd->requested_intx_irq = 0;
|
||||
}
|
||||
disable_intx(dd->pcidev);
|
||||
@ -13049,10 +13066,8 @@ static int request_intx_irq(struct hfi1_devdata *dd)
|
||||
{
|
||||
int ret;
|
||||
|
||||
snprintf(dd->intx_name, sizeof(dd->intx_name), DRIVER_NAME "_%d",
|
||||
dd->unit);
|
||||
ret = request_irq(dd->pcidev->irq, general_interrupt,
|
||||
IRQF_SHARED, dd->intx_name, dd);
|
||||
ret = pci_request_irq(dd->pcidev, 0, general_interrupt, NULL, dd,
|
||||
DRIVER_NAME "_%d", dd->unit);
|
||||
if (ret)
|
||||
dd_dev_err(dd, "unable to request INTx interrupt, err %d\n",
|
||||
ret);
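
The interrupt hunks here and below drop open-coded request_irq()/free_irq() plus a hand-built name buffer in favor of pci_request_irq()/pci_free_irq(), which look up the vector and format the irq name themselves (pci_request_irq() is a managed request under the hood, so error paths have less to undo). A minimal sketch of the API for a single vector; the handler and names are placeholders:

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t demo_handler(int irq, void *dev_id)
{
	/* acknowledge and handle the interrupt source */
	return IRQ_HANDLED;
}

static int demo_setup_irq(struct pci_dev *pdev, void *drvdata, int unit)
{
	int ret;

	/* vector 0, no threaded handler, name built from the format string */
	ret = pci_request_irq(pdev, 0, demo_handler, NULL, drvdata,
			      "demo_%d", unit);
	if (ret)
		dev_err(&pdev->dev, "unable to request irq: %d\n", ret);
	return ret;
}

static void demo_teardown_irq(struct pci_dev *pdev, void *drvdata)
{
	pci_free_irq(pdev, 0, drvdata);	/* must match the vector and dev_id */
}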
|
||||
@ -13074,7 +13089,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
|
||||
first_sdma = last_general;
|
||||
last_sdma = first_sdma + dd->num_sdma;
|
||||
first_rx = last_sdma;
|
||||
last_rx = first_rx + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
|
||||
last_rx = first_rx + dd->n_krcv_queues + dd->num_vnic_contexts;
|
||||
|
||||
/* VNIC MSIx interrupts get mapped when VNIC contexts are created */
|
||||
dd->first_dyn_msix_idx = first_rx + dd->n_krcv_queues;
|
||||
@ -13095,13 +13110,14 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
|
||||
int idx;
|
||||
struct hfi1_ctxtdata *rcd = NULL;
|
||||
struct sdma_engine *sde = NULL;
|
||||
char name[MAX_NAME_SIZE];
|
||||
|
||||
/* obtain the arguments to request_irq */
|
||||
/* obtain the arguments to pci_request_irq */
|
||||
if (first_general <= i && i < last_general) {
|
||||
idx = i - first_general;
|
||||
handler = general_interrupt;
|
||||
arg = dd;
|
||||
snprintf(me->name, sizeof(me->name),
|
||||
snprintf(name, sizeof(name),
|
||||
DRIVER_NAME "_%d", dd->unit);
|
||||
err_info = "general";
|
||||
me->type = IRQ_GENERAL;
|
||||
@ -13110,14 +13126,14 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
|
||||
sde = &dd->per_sdma[idx];
|
||||
handler = sdma_interrupt;
|
||||
arg = sde;
|
||||
snprintf(me->name, sizeof(me->name),
|
||||
snprintf(name, sizeof(name),
|
||||
DRIVER_NAME "_%d sdma%d", dd->unit, idx);
|
||||
err_info = "sdma";
|
||||
remap_sdma_interrupts(dd, idx, i);
|
||||
me->type = IRQ_SDMA;
|
||||
} else if (first_rx <= i && i < last_rx) {
|
||||
idx = i - first_rx;
|
||||
rcd = hfi1_rcd_get_by_index(dd, idx);
|
||||
rcd = hfi1_rcd_get_by_index_safe(dd, idx);
|
||||
if (rcd) {
|
||||
/*
|
||||
* Set the interrupt register and mask for this
|
||||
@ -13129,7 +13145,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
|
||||
handler = receive_context_interrupt;
|
||||
thread = receive_context_thread;
|
||||
arg = rcd;
|
||||
snprintf(me->name, sizeof(me->name),
|
||||
snprintf(name, sizeof(name),
|
||||
DRIVER_NAME "_%d kctxt%d",
|
||||
dd->unit, idx);
|
||||
err_info = "receive context";
|
||||
@ -13150,18 +13166,10 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
|
||||
if (!arg)
|
||||
continue;
|
||||
/* make sure the name is terminated */
|
||||
me->name[sizeof(me->name) - 1] = 0;
|
||||
name[sizeof(name) - 1] = 0;
|
||||
me->irq = pci_irq_vector(dd->pcidev, i);
|
||||
/*
|
||||
* On err return me->irq. Don't need to clear this
|
||||
* because 'arg' has not been set, and cleanup will
|
||||
* do the right thing.
|
||||
*/
|
||||
if (me->irq < 0)
|
||||
return me->irq;
|
||||
|
||||
ret = request_threaded_irq(me->irq, handler, thread, 0,
|
||||
me->name, arg);
|
||||
ret = pci_request_irq(dd->pcidev, i, handler, thread, arg,
|
||||
name);
|
||||
if (ret) {
|
||||
dd_dev_err(dd,
|
||||
"unable to allocate %s interrupt, irq %d, index %d, err %d\n",
|
||||
@ -13169,7 +13177,7 @@ static int request_msix_irqs(struct hfi1_devdata *dd)
|
||||
return ret;
|
||||
}
|
||||
/*
|
||||
* assign arg after request_irq call, so it will be
|
||||
* assign arg after pci_request_irq call, so it will be
|
||||
* cleaned up
|
||||
*/
|
||||
me->arg = arg;
|
||||
@ -13187,7 +13195,7 @@ void hfi1_vnic_synchronize_irq(struct hfi1_devdata *dd)
|
||||
int i;
|
||||
|
||||
if (!dd->num_msix_entries) {
|
||||
synchronize_irq(dd->pcidev->irq);
|
||||
synchronize_irq(pci_irq_vector(dd->pcidev, 0));
|
||||
return;
|
||||
}
|
||||
|
||||
@ -13208,7 +13216,7 @@ void hfi1_reset_vnic_msix_info(struct hfi1_ctxtdata *rcd)
|
||||
return;
|
||||
|
||||
hfi1_put_irq_affinity(dd, me);
|
||||
free_irq(me->irq, me->arg);
|
||||
pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
|
||||
|
||||
me->arg = NULL;
|
||||
}
|
||||
@ -13231,28 +13239,21 @@ void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
|
||||
rcd->ireg = (IS_RCVAVAIL_START + idx) / 64;
|
||||
rcd->imask = ((u64)1) <<
|
||||
((IS_RCVAVAIL_START + idx) % 64);
|
||||
|
||||
snprintf(me->name, sizeof(me->name),
|
||||
DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
|
||||
me->name[sizeof(me->name) - 1] = 0;
|
||||
me->type = IRQ_RCVCTXT;
|
||||
me->irq = pci_irq_vector(dd->pcidev, rcd->msix_intr);
|
||||
if (me->irq < 0) {
|
||||
dd_dev_err(dd, "vnic irq vector request (idx %d) fail %d\n",
|
||||
idx, me->irq);
|
||||
return;
|
||||
}
|
||||
remap_intr(dd, IS_RCVAVAIL_START + idx, rcd->msix_intr);
|
||||
|
||||
ret = request_threaded_irq(me->irq, receive_context_interrupt,
|
||||
receive_context_thread, 0, me->name, arg);
|
||||
ret = pci_request_irq(dd->pcidev, rcd->msix_intr,
|
||||
receive_context_interrupt,
|
||||
receive_context_thread, arg,
|
||||
DRIVER_NAME "_%d kctxt%d", dd->unit, idx);
|
||||
if (ret) {
|
||||
dd_dev_err(dd, "vnic irq request (irq %d, idx %d) fail %d\n",
|
||||
me->irq, idx, ret);
|
||||
return;
|
||||
}
|
||||
/*
|
||||
* assign arg after request_irq call, so it will be
|
||||
* assign arg after pci_request_irq call, so it will be
|
||||
* cleaned up
|
||||
*/
|
||||
me->arg = arg;
|
||||
@ -13261,7 +13262,7 @@ void hfi1_set_vnic_msix_info(struct hfi1_ctxtdata *rcd)
|
||||
if (ret) {
|
||||
dd_dev_err(dd,
|
||||
"unable to pin IRQ %d\n", ret);
|
||||
free_irq(me->irq, me->arg);
|
||||
pci_free_irq(dd->pcidev, rcd->msix_intr, me->arg);
|
||||
}
|
||||
}
|
||||
|
||||
@ -13294,8 +13295,9 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
|
||||
* slow source, SDMACleanupDone)
|
||||
* N interrupts - one per used SDMA engine
|
||||
* M interrupt - one per kernel receive context
|
||||
* V interrupt - one for each VNIC context
|
||||
*/
|
||||
total = 1 + dd->num_sdma + dd->n_krcv_queues + HFI1_NUM_VNIC_CTXT;
|
||||
total = 1 + dd->num_sdma + dd->n_krcv_queues + dd->num_vnic_contexts;
|
||||
|
||||
/* ask for MSI-X interrupts */
|
||||
request = request_msix(dd, total);
|
||||
@ -13356,15 +13358,18 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
|
||||
* in array of contexts
|
||||
* freectxts - number of free user contexts
|
||||
* num_send_contexts - number of PIO send contexts being used
|
||||
* num_vnic_contexts - number of contexts reserved for VNIC
|
||||
*/
|
||||
static int set_up_context_variables(struct hfi1_devdata *dd)
|
||||
{
|
||||
unsigned long num_kernel_contexts;
|
||||
u16 num_vnic_contexts = HFI1_NUM_VNIC_CTXT;
|
||||
int total_contexts;
|
||||
int ret;
|
||||
unsigned ngroups;
|
||||
int qos_rmt_count;
|
||||
int user_rmt_reduced;
|
||||
u32 n_usr_ctxts;
|
||||
|
||||
/*
|
||||
* Kernel receive contexts:
|
||||
@ -13393,59 +13398,63 @@ static int set_up_context_variables(struct hfi1_devdata *dd)
|
||||
num_kernel_contexts);
|
||||
num_kernel_contexts = dd->chip_send_contexts - num_vls - 1;
|
||||
}
|
||||
|
||||
/* Accommodate VNIC contexts if possible */
|
||||
if ((num_kernel_contexts + num_vnic_contexts) > dd->chip_rcv_contexts) {
|
||||
dd_dev_err(dd, "No receive contexts available for VNIC\n");
|
||||
num_vnic_contexts = 0;
|
||||
}
|
||||
total_contexts = num_kernel_contexts + num_vnic_contexts;
|
||||
|
||||
/*
|
||||
* User contexts:
|
||||
* - default to 1 user context per real (non-HT) CPU core if
|
||||
* num_user_contexts is negative
|
||||
*/
|
||||
if (num_user_contexts < 0)
|
||||
num_user_contexts =
|
||||
cpumask_weight(&node_affinity.real_cpu_mask);
|
||||
|
||||
total_contexts = num_kernel_contexts + num_user_contexts;
|
||||
|
||||
n_usr_ctxts = cpumask_weight(&node_affinity.real_cpu_mask);
|
||||
else
|
||||
n_usr_ctxts = num_user_contexts;
|
||||
/*
|
||||
* Adjust the counts given a global max.
|
||||
*/
|
||||
if (total_contexts > dd->chip_rcv_contexts) {
|
||||
if (total_contexts + n_usr_ctxts > dd->chip_rcv_contexts) {
|
||||
dd_dev_err(dd,
|
||||
"Reducing # user receive contexts to: %d, from %d\n",
|
||||
(int)(dd->chip_rcv_contexts - num_kernel_contexts),
|
||||
(int)num_user_contexts);
|
||||
num_user_contexts = dd->chip_rcv_contexts - num_kernel_contexts;
|
||||
"Reducing # user receive contexts to: %d, from %u\n",
|
||||
(int)(dd->chip_rcv_contexts - total_contexts),
|
||||
n_usr_ctxts);
|
||||
/* recalculate */
|
||||
total_contexts = num_kernel_contexts + num_user_contexts;
|
||||
n_usr_ctxts = dd->chip_rcv_contexts - total_contexts;
|
||||
}
|
||||
|
||||
/* each user context requires an entry in the RMT */
|
||||
qos_rmt_count = qos_rmt_entries(dd, NULL, NULL);
|
||||
if (qos_rmt_count + num_user_contexts > NUM_MAP_ENTRIES) {
|
||||
if (qos_rmt_count + n_usr_ctxts > NUM_MAP_ENTRIES) {
|
||||
user_rmt_reduced = NUM_MAP_ENTRIES - qos_rmt_count;
|
||||
dd_dev_err(dd,
|
||||
"RMT size is reducing the number of user receive contexts from %d to %d\n",
|
||||
(int)num_user_contexts,
|
||||
"RMT size is reducing the number of user receive contexts from %u to %d\n",
|
||||
n_usr_ctxts,
|
||||
user_rmt_reduced);
|
||||
/* recalculate */
|
||||
num_user_contexts = user_rmt_reduced;
|
||||
total_contexts = num_kernel_contexts + num_user_contexts;
|
||||
n_usr_ctxts = user_rmt_reduced;
|
||||
}
|
||||
|
||||
/* Accommodate VNIC contexts */
|
||||
if ((total_contexts + HFI1_NUM_VNIC_CTXT) <= dd->chip_rcv_contexts)
|
||||
total_contexts += HFI1_NUM_VNIC_CTXT;
|
||||
total_contexts += n_usr_ctxts;
|
||||
|
||||
/* the first N are kernel contexts, the rest are user/vnic contexts */
|
||||
dd->num_rcv_contexts = total_contexts;
|
||||
dd->n_krcv_queues = num_kernel_contexts;
|
||||
dd->first_dyn_alloc_ctxt = num_kernel_contexts;
|
||||
dd->num_user_contexts = num_user_contexts;
|
||||
dd->freectxts = num_user_contexts;
|
||||
dd->num_vnic_contexts = num_vnic_contexts;
|
||||
dd->num_user_contexts = n_usr_ctxts;
|
||||
dd->freectxts = n_usr_ctxts;
|
||||
dd_dev_info(dd,
|
||||
"rcv contexts: chip %d, used %d (kernel %d, user %d)\n",
|
||||
"rcv contexts: chip %d, used %d (kernel %d, vnic %u, user %u)\n",
|
||||
(int)dd->chip_rcv_contexts,
|
||||
(int)dd->num_rcv_contexts,
|
||||
(int)dd->n_krcv_queues,
|
||||
(int)dd->num_rcv_contexts - dd->n_krcv_queues);
|
||||
dd->num_vnic_contexts,
|
||||
dd->num_user_contexts);
|
||||
|
||||
/*
|
||||
* Receive array allocation:
|
||||
@ -14962,8 +14971,6 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
|
||||
init_vl_arb_caches(ppd);
|
||||
}
|
||||
|
||||
dd->link_default = HLS_DN_POLL;
|
||||
|
||||
/*
|
||||
* Do remaining PCIe setup and save PCIe values in dd.
|
||||
* Any error printing is already done by the init code.
|
||||
|
@@ -560,7 +560,7 @@ enum {
 /* timeouts */
 #define LINK_RESTART_DELAY 1000		/* link restart delay, in ms */
 #define TIMEOUT_8051_START 5000		/* 8051 start timeout, in ms */
-#define DC8051_COMMAND_TIMEOUT 20000	/* DC8051 command timeout, in ms */
+#define DC8051_COMMAND_TIMEOUT 1000	/* DC8051 command timeout, in ms */
 #define FREEZE_STATUS_TIMEOUT 20	/* wait for freeze indicators, in ms */
 #define VL_STATUS_CLEAR_TIMEOUT 5000	/* per-VL status clear, in ms */
 #define CCE_STATUS_TIMEOUT 10		/* time to clear CCE Status, in ms */
@@ -583,6 +583,9 @@ enum {
 #define LOOPBACK_LCB		2
 #define LOOPBACK_CABLE		3	/* external cable */

+/* set up serdes bit in MISC_CONFIG_BITS */
+#define LOOPBACK_SERDES_CONFIG_BIT_MASK_SHIFT 0
+
 /* read and write hardware registers */
 u64 read_csr(const struct hfi1_devdata *dd, u32 offset);
 void write_csr(const struct hfi1_devdata *dd, u32 offset, u64 value);
@@ -710,6 +713,7 @@ void read_misc_status(struct hfi1_devdata *dd, u8 *ver_major, u8 *ver_minor,
 		      u8 *ver_patch);
 int write_host_interface_version(struct hfi1_devdata *dd, u8 version);
 void read_guid(struct hfi1_devdata *dd);
+int release_and_wait_ready_8051_firmware(struct hfi1_devdata *dd);
 int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout);
 void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
 			  u8 neigh_reason, u8 rem_reason);
@@ -328,6 +328,7 @@ struct diag_pkt {
 #define SC15_PACKET 0xF
 #define SIZE_OF_CRC 1
 #define SIZE_OF_LT 1
+#define MAX_16B_PADDING 12 /* CRC = 4, LT = 1, Pad = 0 to 7 bytes */

 #define LIM_MGMT_P_KEY 0x7FFF
 #define FULL_MGMT_P_KEY 0xFFFF
@ -165,6 +165,17 @@ static void _opcode_stats_seq_stop(struct seq_file *s, void *v)
|
||||
{
|
||||
}
|
||||
|
||||
static int opcode_stats_show(struct seq_file *s, u8 i, u64 packets, u64 bytes)
|
||||
{
|
||||
if (!packets && !bytes)
|
||||
return SEQ_SKIP;
|
||||
seq_printf(s, "%02x %llu/%llu\n", i,
|
||||
(unsigned long long)packets,
|
||||
(unsigned long long)bytes);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int _opcode_stats_seq_show(struct seq_file *s, void *v)
|
||||
{
|
||||
loff_t *spos = v;
|
||||
@ -182,19 +193,49 @@ static int _opcode_stats_seq_show(struct seq_file *s, void *v)
|
||||
}
|
||||
hfi1_rcd_put(rcd);
|
||||
}
|
||||
if (!n_packets && !n_bytes)
|
||||
return SEQ_SKIP;
|
||||
seq_printf(s, "%02llx %llu/%llu\n", i,
|
||||
(unsigned long long)n_packets,
|
||||
(unsigned long long)n_bytes);
|
||||
|
||||
return 0;
|
||||
return opcode_stats_show(s, i, n_packets, n_bytes);
|
||||
}
|
||||
|
||||
DEBUGFS_SEQ_FILE_OPS(opcode_stats);
|
||||
DEBUGFS_SEQ_FILE_OPEN(opcode_stats)
|
||||
DEBUGFS_FILE_OPS(opcode_stats);
|
||||
|
||||
static void *_tx_opcode_stats_seq_start(struct seq_file *s, loff_t *pos)
|
||||
{
|
||||
return _opcode_stats_seq_start(s, pos);
|
||||
}
|
||||
|
||||
static void *_tx_opcode_stats_seq_next(struct seq_file *s, void *v, loff_t *pos)
|
||||
{
|
||||
return _opcode_stats_seq_next(s, v, pos);
|
||||
}
|
||||
|
||||
static void _tx_opcode_stats_seq_stop(struct seq_file *s, void *v)
|
||||
{
|
||||
}
|
||||
|
||||
static int _tx_opcode_stats_seq_show(struct seq_file *s, void *v)
|
||||
{
|
||||
loff_t *spos = v;
|
||||
loff_t i = *spos;
|
||||
int j;
|
||||
u64 n_packets = 0, n_bytes = 0;
|
||||
struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
|
||||
struct hfi1_devdata *dd = dd_from_dev(ibd);
|
||||
|
||||
for_each_possible_cpu(j) {
|
||||
struct hfi1_opcode_stats_perctx *s =
|
||||
per_cpu_ptr(dd->tx_opstats, j);
|
||||
n_packets += s->stats[i].n_packets;
|
||||
n_bytes += s->stats[i].n_bytes;
|
||||
}
|
||||
return opcode_stats_show(s, i, n_packets, n_bytes);
|
||||
}
|
||||
|
||||
DEBUGFS_SEQ_FILE_OPS(tx_opcode_stats);
|
||||
DEBUGFS_SEQ_FILE_OPEN(tx_opcode_stats)
|
||||
DEBUGFS_FILE_OPS(tx_opcode_stats);
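
The new tx_opcode_stats counters are kept per CPU so the transmit path never takes a lock; _tx_opcode_stats_seq_show() above sums the per-CPU buckets at read time and reuses the opcode_stats_show() formatter. A generic sketch of that read-mostly pattern with invented names:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/types.h>

struct demo_opstats {
	u64 n_packets;
	u64 n_bytes;
};

static DEFINE_PER_CPU(struct demo_opstats, demo_stats);

/* hot path: touch only this CPU's bucket, no locking */
static void demo_account(u32 bytes)
{
	this_cpu_inc(demo_stats.n_packets);
	this_cpu_add(demo_stats.n_bytes, bytes);
}

/* slow path (debugfs read): sum over all possible CPUs */
static void demo_read(u64 *packets, u64 *bytes)
{
	int cpu;

	*packets = 0;
	*bytes = 0;
	for_each_possible_cpu(cpu) {
		struct demo_opstats *s = per_cpu_ptr(&demo_stats, cpu);

		*packets += s->n_packets;
		*bytes += s->n_bytes;
	}
}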
|
||||
|
||||
static void *_ctx_stats_seq_start(struct seq_file *s, loff_t *pos)
|
||||
{
|
||||
struct hfi1_ibdev *ibd = (struct hfi1_ibdev *)s->private;
|
||||
@ -243,7 +284,7 @@ static int _ctx_stats_seq_show(struct seq_file *s, void *v)
|
||||
spos = v;
|
||||
i = *spos;
|
||||
|
||||
rcd = hfi1_rcd_get_by_index(dd, i);
|
||||
rcd = hfi1_rcd_get_by_index_safe(dd, i);
|
||||
if (!rcd)
|
||||
return SEQ_SKIP;
|
||||
|
||||
@ -402,7 +443,7 @@ static int _rcds_seq_show(struct seq_file *s, void *v)
|
||||
loff_t *spos = v;
|
||||
loff_t i = *spos;
|
||||
|
||||
rcd = hfi1_rcd_get_by_index(dd, i);
|
||||
rcd = hfi1_rcd_get_by_index_safe(dd, i);
|
||||
if (rcd)
|
||||
seqfile_dump_rcd(s, rcd);
|
||||
hfi1_rcd_put(rcd);
|
||||
@ -1363,6 +1404,7 @@ void hfi1_dbg_ibdev_init(struct hfi1_ibdev *ibd)
|
||||
return;
|
||||
}
|
||||
DEBUGFS_SEQ_FILE_CREATE(opcode_stats, ibd->hfi1_ibdev_dbg, ibd);
|
||||
DEBUGFS_SEQ_FILE_CREATE(tx_opcode_stats, ibd->hfi1_ibdev_dbg, ibd);
|
||||
DEBUGFS_SEQ_FILE_CREATE(ctx_stats, ibd->hfi1_ibdev_dbg, ibd);
|
||||
DEBUGFS_SEQ_FILE_CREATE(qp_stats, ibd->hfi1_ibdev_dbg, ibd);
|
||||
DEBUGFS_SEQ_FILE_CREATE(sdes, ibd->hfi1_ibdev_dbg, ibd);
|
||||
|
@ -433,6 +433,12 @@ static inline void init_packet(struct hfi1_ctxtdata *rcd,
|
||||
packet->numpkt = 0;
|
||||
}
|
||||
|
||||
/* We support only two types - 9B and 16B for now */
|
||||
static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = {
|
||||
[HFI1_PKT_TYPE_9B] = &return_cnp,
|
||||
[HFI1_PKT_TYPE_16B] = &return_cnp_16B
|
||||
};
|
||||
|
||||
void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
|
||||
bool do_cnp)
|
||||
{
|
||||
@ -866,7 +872,7 @@ static inline void set_nodma_rtail(struct hfi1_devdata *dd, u16 ctxt)
|
||||
* interrupt handler for all statically allocated kernel contexts.
|
||||
*/
|
||||
if (ctxt >= dd->first_dyn_alloc_ctxt) {
|
||||
rcd = hfi1_rcd_get_by_index(dd, ctxt);
|
||||
rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
|
||||
if (rcd) {
|
||||
rcd->do_interrupt =
|
||||
&handle_receive_interrupt_nodma_rtail;
|
||||
@ -895,7 +901,7 @@ static inline void set_dma_rtail(struct hfi1_devdata *dd, u16 ctxt)
|
||||
* interrupt handler for all statically allocated kernel contexts.
|
||||
*/
|
||||
if (ctxt >= dd->first_dyn_alloc_ctxt) {
|
||||
rcd = hfi1_rcd_get_by_index(dd, ctxt);
|
||||
rcd = hfi1_rcd_get_by_index_safe(dd, ctxt);
|
||||
if (rcd) {
|
||||
rcd->do_interrupt =
|
||||
&handle_receive_interrupt_dma_rtail;
|
||||
@ -923,10 +929,9 @@ void set_all_slowpath(struct hfi1_devdata *dd)
|
||||
rcd = hfi1_rcd_get_by_index(dd, i);
|
||||
if (!rcd)
|
||||
continue;
|
||||
if ((i < dd->first_dyn_alloc_ctxt) ||
|
||||
(rcd->sc && (rcd->sc->type == SC_KERNEL))) {
|
||||
if (i < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
|
||||
rcd->do_interrupt = &handle_receive_interrupt;
|
||||
}
|
||||
|
||||
hfi1_rcd_put(rcd);
|
||||
}
|
||||
}
|
||||
@ -1252,9 +1257,9 @@ void shutdown_led_override(struct hfi1_pportdata *ppd)
|
||||
write_csr(dd, DCC_CFG_LED_CNTRL, 0);
|
||||
}
|
||||
|
||||
static void run_led_override(unsigned long opaque)
|
||||
static void run_led_override(struct timer_list *t)
|
||||
{
|
||||
struct hfi1_pportdata *ppd = (struct hfi1_pportdata *)opaque;
|
||||
struct hfi1_pportdata *ppd = from_timer(ppd, t, led_override_timer);
|
||||
struct hfi1_devdata *dd = ppd->dd;
|
||||
unsigned long timeout;
|
||||
int phase_idx;
|
||||
@ -1298,8 +1303,7 @@ void hfi1_start_led_override(struct hfi1_pportdata *ppd, unsigned int timeon,
|
||||
* timeout so the handler will be called soon to look at our request.
|
||||
*/
|
||||
if (!timer_pending(&ppd->led_override_timer)) {
|
||||
setup_timer(&ppd->led_override_timer, run_led_override,
|
||||
(unsigned long)ppd);
|
||||
timer_setup(&ppd->led_override_timer, run_led_override, 0);
|
||||
ppd->led_override_timer.expires = jiffies + 1;
|
||||
add_timer(&ppd->led_override_timer);
|
||||
atomic_set(&ppd->led_override_timer_active, 1);
|
||||
|
@ -78,16 +78,20 @@ static unsigned int hfi1_poll(struct file *fp, struct poll_table_struct *pt);
|
||||
static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma);
|
||||
|
||||
static u64 kvirt_to_phys(void *addr);
|
||||
static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo);
|
||||
static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len);
|
||||
static void init_subctxts(struct hfi1_ctxtdata *uctxt,
|
||||
const struct hfi1_user_info *uinfo);
|
||||
static int init_user_ctxt(struct hfi1_filedata *fd,
|
||||
struct hfi1_ctxtdata *uctxt);
|
||||
static void user_init(struct hfi1_ctxtdata *uctxt);
|
||||
static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase,
|
||||
__u32 len);
|
||||
static int get_base_info(struct hfi1_filedata *fd, void __user *ubase,
|
||||
__u32 len);
|
||||
static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len);
|
||||
static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len);
|
||||
static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
|
||||
u32 len);
|
||||
static int user_exp_rcv_clear(struct hfi1_filedata *fd, unsigned long arg,
|
||||
u32 len);
|
||||
static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg,
|
||||
u32 len);
|
||||
static int setup_base_ctxt(struct hfi1_filedata *fd,
|
||||
struct hfi1_ctxtdata *uctxt);
|
||||
static int setup_subctxt(struct hfi1_ctxtdata *uctxt);
|
||||
@ -101,10 +105,11 @@ static void deallocate_ctxt(struct hfi1_ctxtdata *uctxt);
|
||||
static unsigned int poll_urgent(struct file *fp, struct poll_table_struct *pt);
|
||||
static unsigned int poll_next(struct file *fp, struct poll_table_struct *pt);
|
||||
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
|
||||
unsigned long events);
|
||||
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey);
|
||||
unsigned long arg);
|
||||
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg);
|
||||
static int ctxt_reset(struct hfi1_ctxtdata *uctxt);
|
||||
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
|
||||
int start_stop);
|
||||
unsigned long arg);
|
||||
static int vma_fault(struct vm_fault *vmf);
|
||||
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
|
||||
unsigned long arg);
|
||||
@ -221,13 +226,8 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
|
||||
{
|
||||
struct hfi1_filedata *fd = fp->private_data;
|
||||
struct hfi1_ctxtdata *uctxt = fd->uctxt;
|
||||
struct hfi1_user_info uinfo;
|
||||
struct hfi1_tid_info tinfo;
|
||||
int ret = 0;
|
||||
unsigned long addr;
|
||||
int uval = 0;
|
||||
unsigned long ul_uval = 0;
|
||||
u16 uval16 = 0;
|
||||
|
||||
hfi1_cdbg(IOCTL, "IOCTL recv: 0x%x", cmd);
|
||||
if (cmd != HFI1_IOCTL_ASSIGN_CTXT &&
|
||||
@ -237,171 +237,55 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
|
||||
|
||||
switch (cmd) {
|
||||
case HFI1_IOCTL_ASSIGN_CTXT:
|
||||
if (uctxt)
|
||||
return -EINVAL;
|
||||
|
||||
if (copy_from_user(&uinfo,
|
||||
(struct hfi1_user_info __user *)arg,
|
||||
sizeof(uinfo)))
|
||||
return -EFAULT;
|
||||
|
||||
ret = assign_ctxt(fd, &uinfo);
|
||||
ret = assign_ctxt(fd, arg, _IOC_SIZE(cmd));
|
||||
break;
|
||||
|
||||
case HFI1_IOCTL_CTXT_INFO:
|
||||
ret = get_ctxt_info(fd, (void __user *)(unsigned long)arg,
|
||||
sizeof(struct hfi1_ctxt_info));
|
||||
ret = get_ctxt_info(fd, arg, _IOC_SIZE(cmd));
|
||||
break;
|
||||
|
||||
case HFI1_IOCTL_USER_INFO:
|
||||
ret = get_base_info(fd, (void __user *)(unsigned long)arg,
|
||||
sizeof(struct hfi1_base_info));
|
||||
ret = get_base_info(fd, arg, _IOC_SIZE(cmd));
|
||||
break;
|
||||
|
||||
case HFI1_IOCTL_CREDIT_UPD:
|
||||
if (uctxt)
|
||||
sc_return_credits(uctxt->sc);
|
||||
break;
|
||||
|
||||
case HFI1_IOCTL_TID_UPDATE:
|
||||
if (copy_from_user(&tinfo,
|
||||
(struct hfi11_tid_info __user *)arg,
|
||||
sizeof(tinfo)))
|
||||
return -EFAULT;
|
||||
|
||||
ret = hfi1_user_exp_rcv_setup(fd, &tinfo);
|
||||
if (!ret) {
|
||||
/*
|
||||
* Copy the number of tidlist entries we used
|
||||
* and the length of the buffer we registered.
|
||||
*/
|
||||
addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
|
||||
if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
|
||||
sizeof(tinfo.tidcnt)))
|
||||
return -EFAULT;
|
||||
|
||||
addr = arg + offsetof(struct hfi1_tid_info, length);
|
||||
if (copy_to_user((void __user *)addr, &tinfo.length,
|
||||
sizeof(tinfo.length)))
|
||||
ret = -EFAULT;
|
||||
}
|
||||
ret = user_exp_rcv_setup(fd, arg, _IOC_SIZE(cmd));
|
||||
break;
|
||||
|
||||
case HFI1_IOCTL_TID_FREE:
|
||||
if (copy_from_user(&tinfo,
|
||||
(struct hfi11_tid_info __user *)arg,
|
||||
sizeof(tinfo)))
|
||||
return -EFAULT;
|
||||
|
||||
ret = hfi1_user_exp_rcv_clear(fd, &tinfo);
|
||||
if (ret)
|
||||
break;
|
||||
addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
|
||||
if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
|
||||
sizeof(tinfo.tidcnt)))
|
||||
ret = -EFAULT;
|
||||
ret = user_exp_rcv_clear(fd, arg, _IOC_SIZE(cmd));
|
||||
break;
|
||||
|
||||
case HFI1_IOCTL_TID_INVAL_READ:
|
||||
if (copy_from_user(&tinfo,
|
||||
(struct hfi11_tid_info __user *)arg,
|
||||
sizeof(tinfo)))
|
||||
return -EFAULT;
|
||||
|
||||
ret = hfi1_user_exp_rcv_invalid(fd, &tinfo);
|
||||
if (ret)
|
||||
break;
|
||||
addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
|
||||
if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
|
||||
sizeof(tinfo.tidcnt)))
|
||||
ret = -EFAULT;
|
||||
ret = user_exp_rcv_invalid(fd, arg, _IOC_SIZE(cmd));
|
||||
break;
|
||||
|
||||
case HFI1_IOCTL_RECV_CTRL:
|
||||
ret = get_user(uval, (int __user *)arg);
|
||||
if (ret != 0)
|
||||
return -EFAULT;
|
||||
ret = manage_rcvq(uctxt, fd->subctxt, uval);
|
||||
ret = manage_rcvq(uctxt, fd->subctxt, arg);
|
||||
break;
|
||||
|
||||
case HFI1_IOCTL_POLL_TYPE:
|
||||
ret = get_user(uval, (int __user *)arg);
|
||||
if (ret != 0)
|
||||
if (get_user(uval, (int __user *)arg))
|
||||
return -EFAULT;
|
||||
uctxt->poll_type = (typeof(uctxt->poll_type))uval;
|
||||
break;
|
||||
|
||||
case HFI1_IOCTL_ACK_EVENT:
|
||||
ret = get_user(ul_uval, (unsigned long __user *)arg);
|
||||
if (ret != 0)
|
||||
return -EFAULT;
|
||||
ret = user_event_ack(uctxt, fd->subctxt, ul_uval);
|
||||
ret = user_event_ack(uctxt, fd->subctxt, arg);
|
||||
break;
|
||||
|
||||
case HFI1_IOCTL_SET_PKEY:
|
||||
ret = get_user(uval16, (u16 __user *)arg);
|
||||
if (ret != 0)
|
||||
return -EFAULT;
|
||||
if (HFI1_CAP_IS_USET(PKEY_CHECK))
|
||||
ret = set_ctxt_pkey(uctxt, fd->subctxt, uval16);
|
||||
else
|
||||
return -EPERM;
|
||||
ret = set_ctxt_pkey(uctxt, arg);
|
||||
break;
|
||||
|
||||
case HFI1_IOCTL_CTXT_RESET: {
|
||||
struct send_context *sc;
|
||||
struct hfi1_devdata *dd;
|
||||
|
||||
if (!uctxt || !uctxt->dd || !uctxt->sc)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* There is no protection here. User level has to
|
||||
* guarantee that no one will be writing to the send
|
||||
* context while it is being re-initialized.
|
||||
* If user level breaks that guarantee, it will break
|
||||
* it's own context and no one else's.
|
||||
*/
|
||||
dd = uctxt->dd;
|
||||
sc = uctxt->sc;
|
||||
/*
|
||||
* Wait until the interrupt handler has marked the
|
||||
* context as halted or frozen. Report error if we time
|
||||
* out.
|
||||
*/
|
||||
wait_event_interruptible_timeout(
|
||||
sc->halt_wait, (sc->flags & SCF_HALTED),
|
||||
msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
|
||||
if (!(sc->flags & SCF_HALTED))
|
||||
return -ENOLCK;
|
||||
|
||||
/*
|
||||
* If the send context was halted due to a Freeze,
|
||||
* wait until the device has been "unfrozen" before
|
||||
* resetting the context.
|
||||
*/
|
||||
if (sc->flags & SCF_FROZEN) {
|
||||
wait_event_interruptible_timeout(
|
||||
dd->event_queue,
|
||||
!(READ_ONCE(dd->flags) & HFI1_FROZEN),
|
||||
msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
|
||||
if (dd->flags & HFI1_FROZEN)
|
||||
return -ENOLCK;
|
||||
|
||||
if (dd->flags & HFI1_FORCED_FREEZE)
|
||||
/*
|
||||
* Don't allow context reset if we are into
|
||||
* forced freeze
|
||||
*/
|
||||
return -ENODEV;
|
||||
|
||||
sc_disable(sc);
|
||||
ret = sc_enable(sc);
|
||||
hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt);
|
||||
} else {
|
||||
ret = sc_restart(sc);
|
||||
}
|
||||
if (!ret)
|
||||
sc_return_credits(sc);
|
||||
case HFI1_IOCTL_CTXT_RESET:
|
||||
ret = ctxt_reset(uctxt);
|
||||
break;
|
||||
}
|
||||
|
||||
case HFI1_IOCTL_GET_VERS:
|
||||
uval = HFI1_USER_SWVERSION;
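
The rewritten hfi1_file_ioctl() above moves argument handling into per-command helpers: each helper receives the raw user pointer plus _IOC_SIZE(cmd), checks that size against the structure it expects, and does its own copy_from_user()/copy_to_user(), which removes the long inline blocks from the switch. A hedged sketch of that dispatch shape with a fictional ioctl and payload:

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>

struct demo_req {
	__u32 flags;
	__u32 count;
};

#define DEMO_IOCTL_SETUP _IOWR('D', 1, struct demo_req)

/* helper owns validation and copying; len comes from _IOC_SIZE(cmd) */
static long demo_setup(struct file *fp, unsigned long arg, u32 len)
{
	struct demo_req req;

	if (len != sizeof(req))
		return -EINVAL;
	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	req.count = 0;		/* ... do the work, fill in results ... */

	if (copy_to_user((void __user *)arg, &req, sizeof(req)))
		return -EFAULT;
	return 0;
}

static long demo_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case DEMO_IOCTL_SETUP:
		return demo_setup(fp, arg, _IOC_SIZE(cmd));
	default:
		return -EINVAL;
	}
}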
|
||||
@ -595,9 +479,8 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
|
||||
* Use the page where this context's flags are. User level
|
||||
* knows where it's own bitmap is within the page.
|
||||
*/
|
||||
memaddr = (unsigned long)(dd->events +
|
||||
((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
|
||||
HFI1_MAX_SHARED_CTXTS)) & PAGE_MASK;
|
||||
memaddr = (unsigned long)
|
||||
(dd->events + uctxt_offset(uctxt)) & PAGE_MASK;
|
||||
memlen = PAGE_SIZE;
|
||||
/*
|
||||
* v3.7 removes VM_RESERVED but the effect is kept by
|
||||
@ -779,8 +662,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
|
||||
* Clear any left over, unhandled events so the next process that
|
||||
* gets this context doesn't get confused.
|
||||
*/
|
||||
ev = dd->events + ((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
|
||||
HFI1_MAX_SHARED_CTXTS) + fdata->subctxt;
|
||||
ev = dd->events + uctxt_offset(uctxt) + fdata->subctxt;
|
||||
*ev = 0;
|
||||
|
||||
spin_lock_irqsave(&dd->uctxt_lock, flags);
|
||||
@ -891,21 +773,29 @@ static int complete_subctxt(struct hfi1_filedata *fd)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo)
static int assign_ctxt(struct hfi1_filedata *fd, unsigned long arg, u32 len)
{
int ret;
unsigned int swmajor, swminor;
unsigned int swmajor;
struct hfi1_ctxtdata *uctxt = NULL;
struct hfi1_user_info uinfo;

swmajor = uinfo->userversion >> 16;
if (fd->uctxt)
return -EINVAL;

if (sizeof(uinfo) != len)
return -EINVAL;

if (copy_from_user(&uinfo, (void __user *)arg, sizeof(uinfo)))
return -EFAULT;

swmajor = uinfo.userversion >> 16;
if (swmajor != HFI1_USER_SWMAJOR)
return -ENODEV;

if (uinfo->subctxt_cnt > HFI1_MAX_SHARED_CTXTS)
if (uinfo.subctxt_cnt > HFI1_MAX_SHARED_CTXTS)
return -EINVAL;

swminor = uinfo->userversion & 0xffff;

/*
* Acquire the mutex to protect against multiple creations of what
* could be a shared base context.
@@ -915,14 +805,14 @@ static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo)
* Get a sub context if available (fd->uctxt will be set).
* ret < 0 error, 0 no context, 1 sub-context found
*/
ret = find_sub_ctxt(fd, uinfo);
ret = find_sub_ctxt(fd, &uinfo);

/*
* Allocate a base context if context sharing is not required or a
* sub context wasn't found.
*/
if (!ret)
ret = allocate_ctxt(fd, fd->dd, uinfo, &uctxt);
ret = allocate_ctxt(fd, fd->dd, &uinfo, &uctxt);

mutex_unlock(&hfi1_mutex);

@@ -1230,12 +1120,13 @@ static void user_init(struct hfi1_ctxtdata *uctxt)
hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
}

static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase,
__u32 len)
static int get_ctxt_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
{
struct hfi1_ctxt_info cinfo;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
int ret = 0;

if (sizeof(cinfo) != len)
return -EINVAL;

memset(&cinfo, 0, sizeof(cinfo));
cinfo.runtime_flags = (((uctxt->flags >> HFI1_CAP_MISC_SHIFT) &
@@ -1265,10 +1156,10 @@ static int get_ctxt_info(struct hfi1_filedata *fd, void __user *ubase,
cinfo.rcvegr_size = uctxt->egrbufs.rcvtid_size;

trace_hfi1_ctxt_info(uctxt->dd, uctxt->ctxt, fd->subctxt, cinfo);
if (copy_to_user(ubase, &cinfo, sizeof(cinfo)))
ret = -EFAULT;
if (copy_to_user((void __user *)arg, &cinfo, len))
return -EFAULT;

return ret;
return 0;
}

static int init_user_ctxt(struct hfi1_filedata *fd,
@@ -1344,18 +1235,18 @@ static int setup_base_ctxt(struct hfi1_filedata *fd,
return ret;
}

static int get_base_info(struct hfi1_filedata *fd, void __user *ubase,
__u32 len)
static int get_base_info(struct hfi1_filedata *fd, unsigned long arg, u32 len)
{
struct hfi1_base_info binfo;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd = uctxt->dd;
ssize_t sz;
unsigned offset;
int ret = 0;

trace_hfi1_uctxtdata(uctxt->dd, uctxt, fd->subctxt);

if (sizeof(binfo) != len)
return -EINVAL;

memset(&binfo, 0, sizeof(binfo));
binfo.hw_version = dd->revision;
binfo.sw_version = HFI1_KERN_SWVERSION;
@@ -1385,39 +1276,152 @@ static int get_base_info(struct hfi1_filedata *fd, void __user *ubase,
fd->subctxt,
uctxt->egrbufs.rcvtids[0].dma);
binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
fd->subctxt, 0);
fd->subctxt, 0);
/*
* user regs are at
* (RXE_PER_CONTEXT_USER + (ctxt * RXE_PER_CONTEXT_SIZE))
*/
binfo.user_regbase = HFI1_MMAP_TOKEN(UREGS, uctxt->ctxt,
fd->subctxt, 0);
offset = offset_in_page((((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
HFI1_MAX_SHARED_CTXTS) + fd->subctxt) *
sizeof(*dd->events));
fd->subctxt, 0);
offset = offset_in_page((uctxt_offset(uctxt) + fd->subctxt) *
sizeof(*dd->events));
binfo.events_bufbase = HFI1_MMAP_TOKEN(EVENTS, uctxt->ctxt,
fd->subctxt,
offset);
fd->subctxt,
offset);
binfo.status_bufbase = HFI1_MMAP_TOKEN(STATUS, uctxt->ctxt,
fd->subctxt,
dd->status);
fd->subctxt,
dd->status);
if (HFI1_CAP_IS_USET(DMA_RTAIL))
binfo.rcvhdrtail_base = HFI1_MMAP_TOKEN(RTAIL, uctxt->ctxt,
fd->subctxt, 0);
fd->subctxt, 0);
if (uctxt->subctxt_cnt) {
binfo.subctxt_uregbase = HFI1_MMAP_TOKEN(SUBCTXT_UREGS,
uctxt->ctxt,
fd->subctxt, 0);
uctxt->ctxt,
fd->subctxt, 0);
binfo.subctxt_rcvhdrbuf = HFI1_MMAP_TOKEN(SUBCTXT_RCV_HDRQ,
uctxt->ctxt,
fd->subctxt, 0);
uctxt->ctxt,
fd->subctxt, 0);
binfo.subctxt_rcvegrbuf = HFI1_MMAP_TOKEN(SUBCTXT_EGRBUF,
uctxt->ctxt,
fd->subctxt, 0);
uctxt->ctxt,
fd->subctxt, 0);
}
sz = (len < sizeof(binfo)) ? len : sizeof(binfo);
if (copy_to_user(ubase, &binfo, sz))

if (copy_to_user((void __user *)arg, &binfo, len))
return -EFAULT;

return 0;
}

/**
* user_exp_rcv_setup - Set up the given tid rcv list
* @fd: file data of the current driver instance
* @arg: ioctl argument for user space information
* @len: length of data structure associated with ioctl command
*
* Wrapper to validate ioctl information before doing _rcv_setup.
*
*/
static int user_exp_rcv_setup(struct hfi1_filedata *fd, unsigned long arg,
u32 len)
{
int ret;
unsigned long addr;
struct hfi1_tid_info tinfo;

if (sizeof(tinfo) != len)
return -EINVAL;

if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
return -EFAULT;

ret = hfi1_user_exp_rcv_setup(fd, &tinfo);
if (!ret) {
/*
* Copy the number of tidlist entries we used
* and the length of the buffer we registered.
*/
addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
sizeof(tinfo.tidcnt)))
return -EFAULT;

addr = arg + offsetof(struct hfi1_tid_info, length);
if (copy_to_user((void __user *)addr, &tinfo.length,
sizeof(tinfo.length)))
ret = -EFAULT;
}

return ret;
}

/**
* user_exp_rcv_clear - Clear the given tid rcv list
* @fd: file data of the current driver instance
* @arg: ioctl argument for user space information
* @len: length of data structure associated with ioctl command
*
* The hfi1_user_exp_rcv_clear() can be called from the error path. Because
* of this, we need to use this wrapper to copy the user space information
* before doing the clear.
*/
static int user_exp_rcv_clear(struct hfi1_filedata *fd, unsigned long arg,
u32 len)
{
int ret;
unsigned long addr;
struct hfi1_tid_info tinfo;

if (sizeof(tinfo) != len)
return -EINVAL;

if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
return -EFAULT;

ret = hfi1_user_exp_rcv_clear(fd, &tinfo);
if (!ret) {
addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
sizeof(tinfo.tidcnt)))
return -EFAULT;
}

return ret;
}

/**
* user_exp_rcv_invalid - Invalidate the given tid rcv list
* @fd: file data of the current driver instance
* @arg: ioctl argument for user space information
* @len: length of data structure associated with ioctl command
*
* Wrapper to validate ioctl information before doing _rcv_invalid.
*
*/
static int user_exp_rcv_invalid(struct hfi1_filedata *fd, unsigned long arg,
u32 len)
{
int ret;
unsigned long addr;
struct hfi1_tid_info tinfo;

if (sizeof(tinfo) != len)
return -EINVAL;

if (!fd->invalid_tids)
return -EINVAL;

if (copy_from_user(&tinfo, (void __user *)arg, (sizeof(tinfo))))
return -EFAULT;

ret = hfi1_user_exp_rcv_invalid(fd, &tinfo);
if (ret)
return ret;

addr = arg + offsetof(struct hfi1_tid_info, tidcnt);
if (copy_to_user((void __user *)addr, &tinfo.tidcnt,
sizeof(tinfo.tidcnt)))
ret = -EFAULT;

return ret;
}

@ -1485,14 +1489,13 @@ int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
|
||||
ctxt++) {
|
||||
uctxt = hfi1_rcd_get_by_index(dd, ctxt);
|
||||
if (uctxt) {
|
||||
unsigned long *evs = dd->events +
|
||||
(uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
|
||||
HFI1_MAX_SHARED_CTXTS;
|
||||
unsigned long *evs;
|
||||
int i;
|
||||
/*
|
||||
* subctxt_cnt is 0 if not shared, so do base
|
||||
* separately, first, then remaining subctxt, if any
|
||||
*/
|
||||
evs = dd->events + uctxt_offset(uctxt);
|
||||
set_bit(evtbit, evs);
|
||||
for (i = 1; i < uctxt->subctxt_cnt; i++)
|
||||
set_bit(evtbit, evs + i);
|
||||
@ -1514,13 +1517,18 @@ int hfi1_set_uevent_bits(struct hfi1_pportdata *ppd, const int evtbit)
|
||||
* re-init the software copy of the head register
|
||||
*/
|
||||
static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
|
||||
int start_stop)
|
||||
unsigned long arg)
|
||||
{
|
||||
struct hfi1_devdata *dd = uctxt->dd;
|
||||
unsigned int rcvctrl_op;
|
||||
int start_stop;
|
||||
|
||||
if (subctxt)
|
||||
goto bail;
|
||||
return 0;
|
||||
|
||||
if (get_user(start_stop, (int __user *)arg))
|
||||
return -EFAULT;
|
||||
|
||||
/* atomically clear receive enable ctxt. */
|
||||
if (start_stop) {
|
||||
/*
|
||||
@ -1539,7 +1547,7 @@ static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
|
||||
}
|
||||
hfi1_rcvctrl(dd, rcvctrl_op, uctxt);
|
||||
/* always; new head should be equal to new tail; see above */
|
||||
bail:
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1549,17 +1557,20 @@ static int manage_rcvq(struct hfi1_ctxtdata *uctxt, u16 subctxt,
|
||||
* set, if desired, and checks again in future.
|
||||
*/
|
||||
static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
|
||||
unsigned long events)
|
||||
unsigned long arg)
|
||||
{
|
||||
int i;
|
||||
struct hfi1_devdata *dd = uctxt->dd;
|
||||
unsigned long *evs;
|
||||
unsigned long events;
|
||||
|
||||
if (!dd->events)
|
||||
return 0;
|
||||
|
||||
evs = dd->events + ((uctxt->ctxt - dd->first_dyn_alloc_ctxt) *
|
||||
HFI1_MAX_SHARED_CTXTS) + subctxt;
|
||||
if (get_user(events, (unsigned long __user *)arg))
|
||||
return -EFAULT;
|
||||
|
||||
evs = dd->events + uctxt_offset(uctxt) + subctxt;
|
||||
|
||||
for (i = 0; i <= _HFI1_MAX_EVENT_BIT; i++) {
|
||||
if (!test_bit(i, &events))
|
||||
@ -1569,26 +1580,89 @@ static int user_event_ack(struct hfi1_ctxtdata *uctxt, u16 subctxt,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, u16 subctxt, u16 pkey)
|
||||
static int set_ctxt_pkey(struct hfi1_ctxtdata *uctxt, unsigned long arg)
|
||||
{
|
||||
int ret = -ENOENT, i, intable = 0;
|
||||
int i;
|
||||
struct hfi1_pportdata *ppd = uctxt->ppd;
|
||||
struct hfi1_devdata *dd = uctxt->dd;
|
||||
u16 pkey;
|
||||
|
||||
if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY) {
|
||||
ret = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
if (!HFI1_CAP_IS_USET(PKEY_CHECK))
|
||||
return -EPERM;
|
||||
|
||||
if (get_user(pkey, (u16 __user *)arg))
|
||||
return -EFAULT;
|
||||
|
||||
if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
|
||||
return -EINVAL;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++)
|
||||
if (pkey == ppd->pkeys[i]) {
|
||||
intable = 1;
|
||||
break;
|
||||
}
|
||||
if (pkey == ppd->pkeys[i])
|
||||
return hfi1_set_ctxt_pkey(dd, uctxt, pkey);
|
||||
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
/**
* ctxt_reset - Reset the user context
* @uctxt: valid user context
*/
static int ctxt_reset(struct hfi1_ctxtdata *uctxt)
{
struct send_context *sc;
struct hfi1_devdata *dd;
int ret = 0;

if (!uctxt || !uctxt->dd || !uctxt->sc)
return -EINVAL;

/*
* There is no protection here. User level has to guarantee that
* no one will be writing to the send context while it is being
* re-initialized. If user level breaks that guarantee, it will
* break its own context and no one else's.
*/
dd = uctxt->dd;
sc = uctxt->sc;

/*
* Wait until the interrupt handler has marked the context as
* halted or frozen. Report error if we time out.
*/
wait_event_interruptible_timeout(
sc->halt_wait, (sc->flags & SCF_HALTED),
msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
if (!(sc->flags & SCF_HALTED))
return -ENOLCK;

/*
* If the send context was halted due to a Freeze, wait until the
* device has been "unfrozen" before resetting the context.
*/
if (sc->flags & SCF_FROZEN) {
wait_event_interruptible_timeout(
dd->event_queue,
!(READ_ONCE(dd->flags) & HFI1_FROZEN),
msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT));
if (dd->flags & HFI1_FROZEN)
return -ENOLCK;

if (dd->flags & HFI1_FORCED_FREEZE)
/*
* Don't allow context reset if we are into
* forced freeze
*/
return -ENODEV;

sc_disable(sc);
ret = sc_enable(sc);
hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_ENB, uctxt);
} else {
ret = sc_restart(sc);
}
if (!ret)
sc_return_credits(sc);

if (intable)
ret = hfi1_set_ctxt_pkey(dd, uctxt, pkey);
done:
return ret;
}
|
@ -70,6 +70,11 @@
|
||||
#define ALT_FW_PCIE_NAME "hfi1_pcie_d.fw"
|
||||
#define HOST_INTERFACE_VERSION 1
|
||||
|
||||
MODULE_FIRMWARE(DEFAULT_FW_8051_NAME_ASIC);
|
||||
MODULE_FIRMWARE(DEFAULT_FW_FABRIC_NAME);
|
||||
MODULE_FIRMWARE(DEFAULT_FW_SBUS_NAME);
|
||||
MODULE_FIRMWARE(DEFAULT_FW_PCIE_NAME);
|
||||
|
||||
static uint fw_8051_load = 1;
|
||||
static uint fw_fabric_serdes_load = 1;
|
||||
static uint fw_pcie_serdes_load = 1;
|
||||
@ -113,6 +118,12 @@ struct css_header {
|
||||
#define MU_SIZE 8
|
||||
#define EXPONENT_SIZE 4
|
||||
|
||||
/* size of platform configuration partition */
|
||||
#define MAX_PLATFORM_CONFIG_FILE_SIZE 4096
|
||||
|
||||
/* size of file of platform configuration encoded in format version 4 */
|
||||
#define PLATFORM_CONFIG_FORMAT_4_FILE_SIZE 528
|
||||
|
||||
/* the file itself */
|
||||
struct firmware_file {
|
||||
struct css_header css_header;
|
||||
@ -964,6 +975,46 @@ int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Clear all reset bits, releasing the 8051.
|
||||
* Wait for firmware to be ready to accept host requests.
|
||||
* Then, set host version bit.
|
||||
*
|
||||
* This function executes even if the 8051 is in reset mode when
|
||||
* dd->dc_shutdown == 1.
|
||||
*
|
||||
* Expects dd->dc8051_lock to be held.
|
||||
*/
|
||||
int release_and_wait_ready_8051_firmware(struct hfi1_devdata *dd)
|
||||
{
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&dd->dc8051_lock);
|
||||
/* clear all reset bits, releasing the 8051 */
|
||||
write_csr(dd, DC_DC8051_CFG_RST, 0ull);
|
||||
|
||||
/*
|
||||
* Wait for firmware to be ready to accept host
|
||||
* requests.
|
||||
*/
|
||||
ret = wait_fm_ready(dd, TIMEOUT_8051_START);
|
||||
if (ret) {
|
||||
dd_dev_err(dd, "8051 start timeout, current FW state 0x%x\n",
|
||||
get_firmware_state(dd));
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
|
||||
if (ret != HCMD_SUCCESS) {
|
||||
dd_dev_err(dd,
|
||||
"Failed to set host interface version, return 0x%x\n",
|
||||
ret);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Load the 8051 firmware.
|
||||
*/
|
||||
@ -1029,31 +1080,22 @@ static int load_8051_firmware(struct hfi1_devdata *dd,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* clear all reset bits, releasing the 8051 */
|
||||
write_csr(dd, DC_DC8051_CFG_RST, 0ull);
|
||||
|
||||
/*
|
||||
* Clear all reset bits, releasing the 8051.
|
||||
* DC reset step 5. Wait for firmware to be ready to accept host
|
||||
* requests.
|
||||
* Then, set host version bit.
|
||||
*/
|
||||
ret = wait_fm_ready(dd, TIMEOUT_8051_START);
|
||||
if (ret) { /* timed out */
|
||||
dd_dev_err(dd, "8051 start timeout, current state 0x%x\n",
|
||||
get_firmware_state(dd));
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
mutex_lock(&dd->dc8051_lock);
|
||||
ret = release_and_wait_ready_8051_firmware(dd);
|
||||
mutex_unlock(&dd->dc8051_lock);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
read_misc_status(dd, &ver_major, &ver_minor, &ver_patch);
|
||||
dd_dev_info(dd, "8051 firmware version %d.%d.%d\n",
|
||||
(int)ver_major, (int)ver_minor, (int)ver_patch);
|
||||
dd->dc8051_ver = dc8051_ver(ver_major, ver_minor, ver_patch);
|
||||
ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
|
||||
if (ret != HCMD_SUCCESS) {
|
||||
dd_dev_err(dd,
|
||||
"Failed to set host interface version, return 0x%x\n",
|
||||
ret);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1387,7 +1429,14 @@ int acquire_hw_mutex(struct hfi1_devdata *dd)
|
||||
unsigned long timeout;
|
||||
int try = 0;
|
||||
u8 mask = 1 << dd->hfi1_id;
|
||||
u8 user;
|
||||
u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
|
||||
|
||||
if (user == mask) {
|
||||
dd_dev_info(dd,
|
||||
"Hardware mutex already acquired, mutex mask %u\n",
|
||||
(u32)mask);
|
||||
return 0;
|
||||
}
|
||||
|
||||
retry:
|
||||
timeout = msecs_to_jiffies(HM_TIMEOUT) + jiffies;
|
||||
@ -1418,7 +1467,15 @@ int acquire_hw_mutex(struct hfi1_devdata *dd)
|
||||
|
||||
void release_hw_mutex(struct hfi1_devdata *dd)
|
||||
{
|
||||
write_csr(dd, ASIC_CFG_MUTEX, 0);
|
||||
u8 mask = 1 << dd->hfi1_id;
|
||||
u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
|
||||
|
||||
if (user != mask)
|
||||
dd_dev_warn(dd,
|
||||
"Unable to release hardware mutex, mutex mask %u, my mask %u\n",
|
||||
(u32)user, (u32)mask);
|
||||
else
|
||||
write_csr(dd, ASIC_CFG_MUTEX, 0);
|
||||
}
|
||||
|
||||
/* return the given resource bit(s) as a mask for the given HFI */
|
||||
@ -1733,7 +1790,7 @@ static int check_meta_version(struct hfi1_devdata *dd, u32 *system_table)
|
||||
ver_start /= 8;
|
||||
meta_ver = *((u8 *)system_table + ver_start) & ((1 << ver_len) - 1);
|
||||
|
||||
if (meta_ver < 5) {
|
||||
if (meta_ver < 4) {
|
||||
dd_dev_info(
|
||||
dd, "%s:Please update platform config\n", __func__);
|
||||
return -EINVAL;
|
||||
@ -1774,7 +1831,20 @@ int parse_platform_config(struct hfi1_devdata *dd)
|
||||
|
||||
/* Field is file size in DWORDs */
|
||||
file_length = (*ptr) * 4;
|
||||
ptr++;
|
||||
|
||||
/*
|
||||
* Length can't be larger than partition size. Assume platform
|
||||
* config format version 4 is being used. Interpret the file size
|
||||
* field as header instead by not moving the pointer.
|
||||
*/
|
||||
if (file_length > MAX_PLATFORM_CONFIG_FILE_SIZE) {
|
||||
dd_dev_info(dd,
|
||||
"%s:File length out of bounds, using alternative format\n",
|
||||
__func__);
|
||||
file_length = PLATFORM_CONFIG_FORMAT_4_FILE_SIZE;
|
||||
} else {
|
||||
ptr++;
|
||||
}
|
||||
|
||||
if (file_length > dd->platform_config.size) {
|
||||
dd_dev_info(dd, "%s:File claims to be larger than read size\n",
|
||||
@ -1789,7 +1859,8 @@ int parse_platform_config(struct hfi1_devdata *dd)
|
||||
|
||||
/*
|
||||
* In both cases where we proceed, using the self-reported file length
|
||||
* is the safer option
|
||||
* is the safer option. In case of old format a predefined value is
|
||||
* being used.
|
||||
*/
|
||||
while (ptr < (u32 *)(dd->platform_config.data + file_length)) {
|
||||
header1 = *ptr;
|
||||
|
@ -95,6 +95,9 @@
|
||||
#define DROP_PACKET_OFF 0
|
||||
#define DROP_PACKET_ON 1
|
||||
|
||||
#define NEIGHBOR_TYPE_HFI 0
|
||||
#define NEIGHBOR_TYPE_SWITCH 1
|
||||
|
||||
extern unsigned long hfi1_cap_mask;
|
||||
#define HFI1_CAP_KGET_MASK(mask, cap) ((mask) & HFI1_CAP_##cap)
|
||||
#define HFI1_CAP_UGET_MASK(mask, cap) \
|
||||
@ -164,9 +167,7 @@ extern const struct pci_error_handlers hfi1_pci_err_handler;
|
||||
* Below contains all data related to a single context (formerly called port).
|
||||
*/
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
struct hfi1_opcode_stats_perctx;
|
||||
#endif
|
||||
|
||||
struct ctxt_eager_bufs {
|
||||
ssize_t size; /* total size of eager buffers */
|
||||
@ -283,7 +284,7 @@ struct hfi1_ctxtdata {
|
||||
u64 imask; /* clear interrupt mask */
|
||||
int ireg; /* clear interrupt register */
|
||||
unsigned numa_id; /* numa node of this context */
|
||||
/* verbs stats per CTX */
|
||||
/* verbs rx_stats per rcd */
|
||||
struct hfi1_opcode_stats_perctx *opstats;
|
||||
|
||||
/* Is ASPM interrupt supported for this context */
|
||||
@ -390,6 +391,7 @@ struct hfi1_packet {
|
||||
/*
|
||||
* OPA 16B L2/L4 Encodings
|
||||
*/
|
||||
#define OPA_16B_L4_9B 0x00
|
||||
#define OPA_16B_L2_TYPE 0x02
|
||||
#define OPA_16B_L4_IB_LOCAL 0x09
|
||||
#define OPA_16B_L4_IB_GLOBAL 0x0A
|
||||
@ -535,6 +537,8 @@ struct rvt_sge_state;
|
||||
#define HLS_UP (HLS_UP_INIT | HLS_UP_ARMED | HLS_UP_ACTIVE)
|
||||
#define HLS_DOWN ~(HLS_UP)
|
||||
|
||||
#define HLS_DEFAULT HLS_DN_POLL
|
||||
|
||||
/* use this MTU size if none other is given */
|
||||
#define HFI1_DEFAULT_ACTIVE_MTU 10240
|
||||
/* use this MTU size as the default maximum */
|
||||
@ -616,7 +620,6 @@ struct hfi1_msix_entry {
|
||||
enum irq_type type;
|
||||
int irq;
|
||||
void *arg;
|
||||
char name[MAX_NAME_SIZE];
|
||||
cpumask_t mask;
|
||||
struct irq_affinity_notify notify;
|
||||
};
|
||||
@ -1047,6 +1050,8 @@ struct hfi1_devdata {
|
||||
u64 z_send_schedule;
|
||||
|
||||
u64 __percpu *send_schedule;
|
||||
/* number of reserved contexts for VNIC usage */
|
||||
u16 num_vnic_contexts;
|
||||
/* number of receive contexts in use by the driver */
|
||||
u32 num_rcv_contexts;
|
||||
/* number of pio send contexts in use by the driver */
|
||||
@ -1109,8 +1114,7 @@ struct hfi1_devdata {
|
||||
u16 rcvegrbufsize_shift;
|
||||
/* both sides of the PCIe link are gen3 capable */
|
||||
u8 link_gen3_capable;
|
||||
/* default link down value (poll/sleep) */
|
||||
u8 link_default;
|
||||
u8 dc_shutdown;
|
||||
/* localbus width (1, 2,4,8,16,32) from config space */
|
||||
u32 lbus_width;
|
||||
/* localbus speed in MHz */
|
||||
@ -1183,7 +1187,6 @@ struct hfi1_devdata {
|
||||
|
||||
/* INTx information */
|
||||
u32 requested_intx_irq; /* did we request one? */
|
||||
char intx_name[MAX_NAME_SIZE]; /* INTx name */
|
||||
|
||||
/* general interrupt: mask of handled interrupts */
|
||||
u64 gi_mask[CCE_NUM_INT_CSRS];
|
||||
@ -1274,6 +1277,8 @@ struct hfi1_devdata {
|
||||
/* receive context data */
|
||||
struct hfi1_ctxtdata **rcd;
|
||||
u64 __percpu *int_counter;
|
||||
/* verbs tx opcode stats */
|
||||
struct hfi1_opcode_stats_perctx __percpu *tx_opstats;
|
||||
/* device (not port) flags, basically device capabilities */
|
||||
u16 flags;
|
||||
/* Number of physical ports available */
|
||||
@ -1295,7 +1300,6 @@ struct hfi1_devdata {
|
||||
u8 oui1;
|
||||
u8 oui2;
|
||||
u8 oui3;
|
||||
u8 dc_shutdown;
|
||||
|
||||
/* Timer and counter used to detect RcvBufOvflCnt changes */
|
||||
struct timer_list rcverr_timer;
|
||||
@ -1373,8 +1377,12 @@ struct hfi1_filedata {
|
||||
extern struct list_head hfi1_dev_list;
|
||||
extern spinlock_t hfi1_devs_lock;
|
||||
struct hfi1_devdata *hfi1_lookup(int unit);
|
||||
extern u32 hfi1_cpulist_count;
|
||||
extern unsigned long *hfi1_cpulist;
|
||||
|
||||
static inline unsigned long uctxt_offset(struct hfi1_ctxtdata *uctxt)
|
||||
{
|
||||
return (uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
|
||||
HFI1_MAX_SHARED_CTXTS;
|
||||
}
|
||||
|
||||
int hfi1_init(struct hfi1_devdata *dd, int reinit);
|
||||
int hfi1_count_active_units(void);
|
||||
@ -1396,6 +1404,8 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd,
|
||||
void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd);
|
||||
int hfi1_rcd_put(struct hfi1_ctxtdata *rcd);
|
||||
void hfi1_rcd_get(struct hfi1_ctxtdata *rcd);
|
||||
struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
|
||||
u16 ctxt);
|
||||
struct hfi1_ctxtdata *hfi1_rcd_get_by_index(struct hfi1_devdata *dd, u16 ctxt);
|
||||
int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread);
|
||||
int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *rcd, int thread);
|
||||
@ -1531,11 +1541,6 @@ typedef void (*hfi1_handle_cnp)(struct hfi1_ibport *ibp, struct rvt_qp *qp,
|
||||
u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
|
||||
u8 sc5, const struct ib_grh *old_grh);
|
||||
|
||||
/* We support only two types - 9B and 16B for now */
|
||||
static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = {
|
||||
[HFI1_PKT_TYPE_9B] = &return_cnp,
|
||||
[HFI1_PKT_TYPE_16B] = &return_cnp_16B
|
||||
};
|
||||
#define PKEY_CHECK_INVALID -1
|
||||
int egress_pkey_check(struct hfi1_pportdata *ppd, u32 slid, u16 pkey,
|
||||
u8 sc5, int8_t s_pkey_index);
|
||||
|
@ -123,8 +123,6 @@ MODULE_PARM_DESC(user_credit_return_threshold, "Credit return threshold for user
|
||||
static inline u64 encode_rcv_header_entry_size(u16 size);
|
||||
|
||||
static struct idr hfi1_unit_table;
|
||||
u32 hfi1_cpulist_count;
|
||||
unsigned long *hfi1_cpulist;
|
||||
|
||||
static int hfi1_create_kctxt(struct hfi1_devdata *dd,
|
||||
struct hfi1_pportdata *ppd)
|
||||
@ -285,6 +283,27 @@ static int allocate_rcd_index(struct hfi1_devdata *dd,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* hfi1_rcd_get_by_index_safe - validate the ctxt index before accessing the
|
||||
* array
|
||||
* @dd: pointer to a valid devdata structure
|
||||
* @ctxt: the index of a possible rcd
|
||||
*
|
||||
* This is a wrapper for hfi1_rcd_get_by_index() to validate that the given
|
||||
* ctxt index is valid.
|
||||
*
|
||||
* The caller is responsible for making the _put().
|
||||
*
|
||||
*/
|
||||
struct hfi1_ctxtdata *hfi1_rcd_get_by_index_safe(struct hfi1_devdata *dd,
|
||||
u16 ctxt)
|
||||
{
|
||||
if (ctxt < dd->num_rcv_contexts)
|
||||
return hfi1_rcd_get_by_index(dd, ctxt);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* hfi1_rcd_get_by_index
|
||||
* @dd: pointer to a valid devdata structure
|
||||
@ -1006,7 +1025,7 @@ static void stop_timers(struct hfi1_devdata *dd)
|
||||
|
||||
for (pidx = 0; pidx < dd->num_pports; ++pidx) {
|
||||
ppd = dd->pport + pidx;
|
||||
if (ppd->led_override_timer.data) {
|
||||
if (ppd->led_override_timer.function) {
|
||||
del_timer_sync(&ppd->led_override_timer);
|
||||
atomic_set(&ppd->led_override_timer_active, 0);
|
||||
}
|
||||
@ -1198,6 +1217,7 @@ static void __hfi1_free_devdata(struct kobject *kobj)
|
||||
free_percpu(dd->int_counter);
|
||||
free_percpu(dd->rcv_limit);
|
||||
free_percpu(dd->send_schedule);
|
||||
free_percpu(dd->tx_opstats);
|
||||
rvt_dealloc_device(&dd->verbs_dev.rdi);
|
||||
}
|
||||
|
||||
@ -1272,39 +1292,27 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
|
||||
dd->int_counter = alloc_percpu(u64);
|
||||
if (!dd->int_counter) {
|
||||
ret = -ENOMEM;
|
||||
hfi1_early_err(&pdev->dev,
|
||||
"Could not allocate per-cpu int_counter\n");
|
||||
goto bail;
|
||||
}
|
||||
|
||||
dd->rcv_limit = alloc_percpu(u64);
|
||||
if (!dd->rcv_limit) {
|
||||
ret = -ENOMEM;
|
||||
hfi1_early_err(&pdev->dev,
|
||||
"Could not allocate per-cpu rcv_limit\n");
|
||||
goto bail;
|
||||
}
|
||||
|
||||
dd->send_schedule = alloc_percpu(u64);
|
||||
if (!dd->send_schedule) {
|
||||
ret = -ENOMEM;
|
||||
hfi1_early_err(&pdev->dev,
|
||||
"Could not allocate per-cpu int_counter\n");
|
||||
goto bail;
|
||||
}
|
||||
|
||||
if (!hfi1_cpulist_count) {
|
||||
u32 count = num_online_cpus();
|
||||
|
||||
hfi1_cpulist = kcalloc(BITS_TO_LONGS(count), sizeof(long),
|
||||
GFP_KERNEL);
|
||||
if (hfi1_cpulist)
|
||||
hfi1_cpulist_count = count;
|
||||
else
|
||||
hfi1_early_err(
|
||||
&pdev->dev,
|
||||
"Could not alloc cpulist info, cpu affinity might be wrong\n");
|
||||
dd->tx_opstats = alloc_percpu(struct hfi1_opcode_stats_perctx);
|
||||
if (!dd->tx_opstats) {
|
||||
ret = -ENOMEM;
|
||||
goto bail;
|
||||
}
|
||||
|
||||
kobject_init(&dd->kobj, &hfi1_devdata_type);
|
||||
return dd;
|
||||
|
||||
@ -1477,8 +1485,6 @@ static void __exit hfi1_mod_cleanup(void)
|
||||
node_affinity_destroy();
|
||||
hfi1_wss_exit();
|
||||
hfi1_dbg_exit();
|
||||
hfi1_cpulist_count = 0;
|
||||
kfree(hfi1_cpulist);
|
||||
|
||||
idr_destroy(&hfi1_unit_table);
|
||||
dispose_firmware(); /* asymmetric with obtain_firmware() */
|
||||
@ -1801,8 +1807,7 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
|
||||
amt = PAGE_ALIGN(rcd->rcvhdrq_cnt * rcd->rcvhdrqentsize *
|
||||
sizeof(u32));
|
||||
|
||||
if ((rcd->ctxt < dd->first_dyn_alloc_ctxt) ||
|
||||
(rcd->sc && (rcd->sc->type == SC_KERNEL)))
|
||||
if (rcd->ctxt < dd->first_dyn_alloc_ctxt || rcd->is_vnic)
|
||||
gfp_flags = GFP_KERNEL;
|
||||
else
|
||||
gfp_flags = GFP_USER;
|
||||
|
@ -53,6 +53,42 @@
|
||||
#include "common.h"
|
||||
#include "sdma.h"
|
||||
|
||||
#define LINK_UP_DELAY 500 /* in microseconds */
|
||||
|
||||
static void set_mgmt_allowed(struct hfi1_pportdata *ppd)
|
||||
{
|
||||
u32 frame;
|
||||
struct hfi1_devdata *dd = ppd->dd;
|
||||
|
||||
if (ppd->neighbor_type == NEIGHBOR_TYPE_HFI) {
|
||||
ppd->mgmt_allowed = 1;
|
||||
} else {
|
||||
read_8051_config(dd, REMOTE_LNI_INFO, GENERAL_CONFIG, &frame);
|
||||
ppd->mgmt_allowed = (frame >> MGMT_ALLOWED_SHIFT)
|
||||
& MGMT_ALLOWED_MASK;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Our neighbor has indicated that we are allowed to act as a fabric
|
||||
* manager, so place the full management partition key in the second
|
||||
* (0-based) pkey array position. Note that we should already have
|
||||
* the limited management partition key in array element 1, and also
|
||||
* that the port is not yet up when add_full_mgmt_pkey() is invoked.
|
||||
*/
|
||||
static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
|
||||
{
|
||||
struct hfi1_devdata *dd = ppd->dd;
|
||||
|
||||
/* Sanity check - ppd->pkeys[2] should be 0, or already initialized */
|
||||
if (!((ppd->pkeys[2] == 0) || (ppd->pkeys[2] == FULL_MGMT_P_KEY)))
|
||||
dd_dev_warn(dd, "%s pkey[2] already set to 0x%x, resetting it to 0x%x\n",
|
||||
__func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
|
||||
ppd->pkeys[2] = FULL_MGMT_P_KEY;
|
||||
(void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
|
||||
hfi1_event_pkey_change(ppd->dd, ppd->port);
|
||||
}
|
||||
|
||||
/**
|
||||
* format_hwmsg - format a single hwerror message
|
||||
* @msg message buffer
|
||||
@ -102,9 +138,16 @@ static void signal_ib_event(struct hfi1_pportdata *ppd, enum ib_event_type ev)
|
||||
ib_dispatch_event(&event);
|
||||
}
|
||||
|
||||
/*
|
||||
/**
|
||||
* handle_linkup_change - finish linkup/down state changes
|
||||
* @dd: valid device
|
||||
* @linkup: link state information
|
||||
*
|
||||
* Handle a linkup or link down notification.
|
||||
* The HW needs time to finish its link up state change. Give it that chance.
|
||||
*
|
||||
* This is called outside an interrupt.
|
||||
*
|
||||
*/
|
||||
void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup)
|
||||
{
|
||||
@ -151,6 +194,18 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup)
|
||||
ppd->neighbor_guid, ppd->neighbor_type,
|
||||
ppd->neighbor_port_number);
|
||||
|
||||
/* HW needs LINK_UP_DELAY to settle, give it that chance */
|
||||
udelay(LINK_UP_DELAY);
|
||||
|
||||
/*
|
||||
* 'MgmtAllowed' information, which is exchanged during
|
||||
* LNI, is available at this point.
|
||||
*/
|
||||
set_mgmt_allowed(ppd);
|
||||
|
||||
if (ppd->mgmt_allowed)
|
||||
add_full_mgmt_pkey(ppd);
|
||||
|
||||
/* physical link went up */
|
||||
ppd->linkup = 1;
|
||||
ppd->offline_disabled_reason =
|
||||
|
@ -98,6 +98,16 @@ static inline void clear_opa_smp_data(struct opa_smp *smp)
|
||||
memset(data, 0, size);
|
||||
}
|
||||
|
||||
static u16 hfi1_lookup_pkey_value(struct hfi1_ibport *ibp, int pkey_idx)
|
||||
{
|
||||
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
|
||||
|
||||
if (pkey_idx < ARRAY_SIZE(ppd->pkeys))
|
||||
return ppd->pkeys[pkey_idx];
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port)
|
||||
{
|
||||
struct ib_event event;
|
||||
@ -399,9 +409,9 @@ static void send_trap(struct hfi1_ibport *ibp, struct trap_node *trap)
|
||||
ib_free_send_mad(send_buf);
|
||||
}
|
||||
|
||||
void hfi1_handle_trap_timer(unsigned long data)
|
||||
void hfi1_handle_trap_timer(struct timer_list *t)
|
||||
{
|
||||
struct hfi1_ibport *ibp = (struct hfi1_ibport *)data;
|
||||
struct hfi1_ibport *ibp = from_timer(ibp, t, rvp.trap_timer);
|
||||
struct trap_node *trap = NULL;
|
||||
unsigned long flags;
|
||||
int i;
|
||||
@ -711,6 +721,7 @@ static int check_mkey(struct hfi1_ibport *ibp, struct ib_mad_hdr *mad,
|
||||
/* Bad mkey not a violation below level 2 */
|
||||
if (ibp->rvp.mkeyprot < 2)
|
||||
break;
|
||||
/* fall through */
|
||||
case IB_MGMT_METHOD_SET:
|
||||
case IB_MGMT_METHOD_TRAP_REPRESS:
|
||||
if (ibp->rvp.mkey_violations != 0xFFFF)
|
||||
@ -1227,8 +1238,7 @@ static int port_states_transition_allowed(struct hfi1_pportdata *ppd,
|
||||
}
|
||||
|
||||
static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
|
||||
u32 logical_state, u32 phys_state,
|
||||
int suppress_idle_sma)
|
||||
u32 logical_state, u32 phys_state)
|
||||
{
|
||||
struct hfi1_devdata *dd = ppd->dd;
|
||||
u32 link_state;
|
||||
@ -1309,7 +1319,7 @@ static int set_port_states(struct hfi1_pportdata *ppd, struct opa_smp *smp,
|
||||
break;
|
||||
case IB_PORT_ARMED:
|
||||
ret = set_link_state(ppd, HLS_UP_ARMED);
|
||||
if ((ret == 0) && (suppress_idle_sma == 0))
|
||||
if (!ret)
|
||||
send_idle_sma(dd, SMA_IDLE_ARM);
|
||||
break;
|
||||
case IB_PORT_ACTIVE:
|
||||
@ -1603,8 +1613,10 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
|
||||
if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
|
||||
ppd->is_sm_config_started = 1;
|
||||
} else if (ls_new == IB_PORT_ARMED) {
|
||||
if (ppd->is_sm_config_started == 0)
|
||||
if (ppd->is_sm_config_started == 0) {
|
||||
invalid = 1;
|
||||
smp->status |= IB_SMP_INVALID_FIELD;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -1621,9 +1633,11 @@ static int __subn_set_opa_portinfo(struct opa_smp *smp, u32 am, u8 *data,
|
||||
* is down or is being set to down.
|
||||
*/
|
||||
|
||||
ret = set_port_states(ppd, smp, ls_new, ps_new, invalid);
|
||||
if (ret)
|
||||
return ret;
|
||||
if (!invalid) {
|
||||
ret = set_port_states(ppd, smp, ls_new, ps_new);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = __subn_get_opa_portinfo(smp, am, data, ibdev, port, resp_len,
|
||||
max_len);
|
||||
@ -2100,17 +2114,18 @@ static int __subn_set_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
|
||||
if (ls_new == ls_old || (ls_new == IB_PORT_ARMED))
|
||||
ppd->is_sm_config_started = 1;
|
||||
} else if (ls_new == IB_PORT_ARMED) {
|
||||
if (ppd->is_sm_config_started == 0)
|
||||
if (ppd->is_sm_config_started == 0) {
|
||||
invalid = 1;
|
||||
smp->status |= IB_SMP_INVALID_FIELD;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ret = set_port_states(ppd, smp, ls_new, ps_new, invalid);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (invalid)
|
||||
smp->status |= IB_SMP_INVALID_FIELD;
|
||||
if (!invalid) {
|
||||
ret = set_port_states(ppd, smp, ls_new, ps_new);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return __subn_get_opa_psi(smp, am, data, ibdev, port, resp_len,
|
||||
max_len);
|
||||
@ -2888,7 +2903,6 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
|
||||
struct _vls_dctrs *vlinfo;
|
||||
size_t response_data_size;
|
||||
u32 num_ports;
|
||||
u8 num_pslm;
|
||||
u8 lq, num_vls;
|
||||
u8 res_lli, res_ler;
|
||||
u64 port_mask;
|
||||
@ -2898,7 +2912,6 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
|
||||
int vfi;
|
||||
|
||||
num_ports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
|
||||
num_pslm = hweight64(be64_to_cpu(req->port_select_mask[3]));
|
||||
num_vls = hweight32(be32_to_cpu(req->vl_select_mask));
|
||||
vl_select_mask = be32_to_cpu(req->vl_select_mask);
|
||||
res_lli = (u8)(be32_to_cpu(req->resolution) & MSK_LLI) >> MSK_LLI_SFT;
|
||||
@ -3688,7 +3701,11 @@ static void apply_cc_state(struct hfi1_pportdata *ppd)
|
||||
|
||||
*new_cc_state = *old_cc_state;
|
||||
|
||||
new_cc_state->cct.ccti_limit = ppd->total_cct_entry - 1;
|
||||
if (ppd->total_cct_entry)
|
||||
new_cc_state->cct.ccti_limit = ppd->total_cct_entry - 1;
|
||||
else
|
||||
new_cc_state->cct.ccti_limit = 0;
|
||||
|
||||
memcpy(new_cc_state->cct.entries, ppd->ccti_entries,
|
||||
ppd->total_cct_entry * sizeof(struct ib_cc_table_entry));
|
||||
|
||||
@ -3751,7 +3768,7 @@ static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
|
||||
struct hfi1_ibport *ibp = to_iport(ibdev, port);
|
||||
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
|
||||
struct opa_hfi1_cong_log *cong_log = (struct opa_hfi1_cong_log *)data;
|
||||
s64 ts;
|
||||
u64 ts;
|
||||
int i;
|
||||
|
||||
if (am || smp_length_check(sizeof(*cong_log), max_len)) {
|
||||
@ -3769,7 +3786,7 @@ static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
|
||||
ppd->threshold_cong_event_map,
|
||||
sizeof(cong_log->threshold_cong_event_map));
|
||||
/* keep timestamp in units of 1.024 usec */
|
||||
ts = ktime_to_ns(ktime_get()) / 1024;
|
||||
ts = ktime_get_ns() / 1024;
|
||||
cong_log->current_time_stamp = cpu_to_be32(ts);
|
||||
for (i = 0; i < OPA_CONG_LOG_ELEMS; i++) {
|
||||
struct opa_hfi1_cong_log_event_internal *cce =
|
||||
@ -3781,7 +3798,7 @@ static int __subn_get_opa_hfi1_cong_log(struct opa_smp *smp, u32 am,
|
||||
* required to wrap the counter are supposed to
|
||||
* be zeroed (CA10-49 IBTA, release 1.2.1, V1).
|
||||
*/
|
||||
if ((u64)(ts - cce->timestamp) > (2 * UINT_MAX))
|
||||
if ((ts - cce->timestamp) / 2 > U32_MAX)
|
||||
continue;
|
||||
memcpy(cong_log->events[i].local_qp_cn_entry, &cce->lqpn, 3);
|
||||
memcpy(cong_log->events[i].remote_qp_number_cn_entry,
|
||||
@ -4260,6 +4277,18 @@ void clear_linkup_counters(struct hfi1_devdata *dd)
|
||||
dd->err_info_xmit_constraint.status &= ~OPA_EI_STATUS_SMASK;
|
||||
}
|
||||
|
||||
static int is_full_mgmt_pkey_in_table(struct hfi1_ibport *ibp)
|
||||
{
|
||||
unsigned int i;
|
||||
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(ppd->pkeys); ++i)
|
||||
if (ppd->pkeys[i] == FULL_MGMT_P_KEY)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* is_local_mad() returns 1 if 'mad' is sent from, and destined to the
|
||||
* local node, 0 otherwise.
|
||||
@ -4293,7 +4322,6 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp,
|
||||
const struct ib_wc *in_wc)
|
||||
{
|
||||
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
|
||||
u16 slid = ib_lid_cpu16(in_wc->slid);
|
||||
u16 pkey;
|
||||
|
||||
if (in_wc->pkey_index >= ARRAY_SIZE(ppd->pkeys))
|
||||
@ -4320,10 +4348,71 @@ static int opa_local_smp_check(struct hfi1_ibport *ibp,
|
||||
*/
|
||||
if (pkey == LIM_MGMT_P_KEY || pkey == FULL_MGMT_P_KEY)
|
||||
return 0;
|
||||
ingress_pkey_table_fail(ppd, pkey, slid);
|
||||
/*
|
||||
* On OPA devices it is okay to lose the upper 16 bits of LID as this
|
||||
* information is obtained elsewhere. Mask off the upper 16 bits.
|
||||
*/
|
||||
ingress_pkey_table_fail(ppd, pkey, ib_lid_cpu16(0xFFFF & in_wc->slid));
|
||||
return 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* hfi1_pkey_validation_pma - It validates PKEYs for incoming PMA MAD packets.
|
||||
* @ibp: IB port data
|
||||
* @in_mad: MAD packet with header and data
|
||||
* @in_wc: Work completion data such as source LID, port number, etc.
|
||||
*
|
||||
* These are all the possible logic rules for validating a pkey:
|
||||
*
|
||||
* a) If pkey neither FULL_MGMT_P_KEY nor LIM_MGMT_P_KEY,
|
||||
* and NOT self-originated packet:
|
||||
* Drop MAD packet as it should always be part of the
|
||||
* management partition unless it's a self-originated packet.
|
||||
*
|
||||
* b) If pkey_index -> FULL_MGMT_P_KEY, and LIM_MGMT_P_KEY in pkey table:
|
||||
* The packet is coming from a management node and the receiving node
|
||||
* is also a management node, so it is safe for the packet to go through.
|
||||
*
|
||||
* c) If pkey_index -> FULL_MGMT_P_KEY, and LIM_MGMT_P_KEY is NOT in pkey table:
|
||||
* Drop the packet as LIM_MGMT_P_KEY should always be in the pkey table.
|
||||
* It could be an FM misconfiguration.
|
||||
*
|
||||
* d) If pkey_index -> LIM_MGMT_P_KEY and FULL_MGMT_P_KEY is NOT in pkey table:
|
||||
* It is safe for the packet to go through since a non-management node is
|
||||
* talking to another non-management node.
|
||||
*
|
||||
* e) If pkey_index -> LIM_MGMT_P_KEY and FULL_MGMT_P_KEY in pkey table:
|
||||
* Drop the packet because a non-management node is talking to a
|
||||
* management node, and it could be an attack.
|
||||
*
|
||||
* For the implementation, these rules can be simplified to only checking
|
||||
* for (a) and (e). There's no need to check for rule (b) as
|
||||
* the packet doesn't need to be dropped. Rule (c) is not possible in
|
||||
* the driver as LIM_MGMT_P_KEY is always in the pkey table.
|
||||
*
|
||||
* Return:
|
||||
* 0 - pkey is okay, -EINVAL it's a bad pkey
|
||||
*/
|
||||
static int hfi1_pkey_validation_pma(struct hfi1_ibport *ibp,
|
||||
const struct opa_mad *in_mad,
|
||||
const struct ib_wc *in_wc)
|
||||
{
|
||||
u16 pkey_value = hfi1_lookup_pkey_value(ibp, in_wc->pkey_index);
|
||||
|
||||
/* Rule (a) from above */
|
||||
if (!is_local_mad(ibp, in_mad, in_wc) &&
|
||||
pkey_value != LIM_MGMT_P_KEY &&
|
||||
pkey_value != FULL_MGMT_P_KEY)
|
||||
return -EINVAL;
|
||||
|
||||
/* Rule (e) from above */
|
||||
if (pkey_value == LIM_MGMT_P_KEY &&
|
||||
is_full_mgmt_pkey_in_table(ibp))
|
||||
return -EINVAL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int process_subn_opa(struct ib_device *ibdev, int mad_flags,
|
||||
u8 port, const struct opa_mad *in_mad,
|
||||
struct opa_mad *out_mad,
|
||||
@ -4663,8 +4752,11 @@ static int hfi1_process_opa_mad(struct ib_device *ibdev, int mad_flags,
|
||||
out_mad, &resp_len);
|
||||
goto bail;
|
||||
case IB_MGMT_CLASS_PERF_MGMT:
|
||||
ret = process_perf_opa(ibdev, port, in_mad, out_mad,
|
||||
&resp_len);
|
||||
ret = hfi1_pkey_validation_pma(ibp, in_mad, in_wc);
|
||||
if (ret)
|
||||
return IB_MAD_RESULT_FAILURE;
|
||||
|
||||
ret = process_perf_opa(ibdev, port, in_mad, out_mad, &resp_len);
|
||||
goto bail;
|
||||
|
||||
default:
|
||||
|
@ -239,7 +239,7 @@ struct opa_hfi1_cong_log_event_internal {
|
||||
u8 sl;
|
||||
u8 svc_type;
|
||||
u32 rlid;
|
||||
s64 timestamp; /* wider than 32 bits to detect 32 bit rollover */
|
||||
u64 timestamp; /* wider than 32 bits to detect 32 bit rollover */
|
||||
};
|
||||
|
||||
struct opa_hfi1_cong_log_event {
|
||||
@ -428,6 +428,6 @@ struct sc2vlnt {
|
||||
COUNTER_MASK(1, 4))
|
||||
|
||||
void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port);
|
||||
void hfi1_handle_trap_timer(unsigned long data);
|
||||
void hfi1_handle_trap_timer(struct timer_list *t);
|
||||
|
||||
#endif /* _HFI1_MAD_H */
|
||||
|
@ -67,12 +67,9 @@ struct mmu_rb_handler {
|
||||
|
||||
static unsigned long mmu_node_start(struct mmu_rb_node *);
|
||||
static unsigned long mmu_node_last(struct mmu_rb_node *);
|
||||
static inline void mmu_notifier_range_start(struct mmu_notifier *,
|
||||
struct mm_struct *,
|
||||
unsigned long, unsigned long);
|
||||
static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
|
||||
struct mm_struct *,
|
||||
unsigned long, unsigned long);
|
||||
static void mmu_notifier_range_start(struct mmu_notifier *,
|
||||
struct mm_struct *,
|
||||
unsigned long, unsigned long);
|
||||
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
|
||||
unsigned long, unsigned long);
|
||||
static void do_remove(struct mmu_rb_handler *handler,
|
||||
@ -286,17 +283,10 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
|
||||
handler->ops->remove(handler->ops_arg, node);
|
||||
}
|
||||
|
||||
static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
|
||||
struct mm_struct *mm,
|
||||
unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
mmu_notifier_mem_invalidate(mn, mm, start, end);
|
||||
}
|
||||
|
||||
static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
|
||||
struct mm_struct *mm,
|
||||
unsigned long start, unsigned long end)
|
||||
static void mmu_notifier_range_start(struct mmu_notifier *mn,
|
||||
struct mm_struct *mm,
|
||||
unsigned long start,
|
||||
unsigned long end)
|
||||
{
|
||||
struct mmu_rb_handler *handler =
|
||||
container_of(mn, struct mmu_rb_handler, mn);
|
||||
|
@ -703,7 +703,6 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
|
||||
{
|
||||
struct send_context_info *sci;
|
||||
struct send_context *sc = NULL;
|
||||
int req_type = type;
|
||||
dma_addr_t dma;
|
||||
unsigned long flags;
|
||||
u64 reg;
|
||||
@ -730,13 +729,6 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* VNIC contexts are dynamically allocated.
|
||||
* Hence, pick a user context for VNIC.
|
||||
*/
|
||||
if (type == SC_VNIC)
|
||||
type = SC_USER;
|
||||
|
||||
spin_lock_irqsave(&dd->sc_lock, flags);
|
||||
ret = sc_hw_alloc(dd, type, &sw_index, &hw_context);
|
||||
if (ret) {
|
||||
@ -746,15 +738,6 @@ struct send_context *sc_alloc(struct hfi1_devdata *dd, int type,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* VNIC contexts are used by kernel driver.
|
||||
* Hence, mark them as kernel contexts.
|
||||
*/
|
||||
if (req_type == SC_VNIC) {
|
||||
dd->send_contexts[sw_index].type = SC_KERNEL;
|
||||
type = SC_KERNEL;
|
||||
}
|
||||
|
||||
sci = &dd->send_contexts[sw_index];
|
||||
sci->sc = sc;
|
||||
|
||||
|
@ -54,12 +54,6 @@
|
||||
#define SC_USER 3 /* must be the last one: it may take all left */
|
||||
#define SC_MAX 4 /* count of send context types */
|
||||
|
||||
/*
|
||||
* SC_VNIC types are allocated (dynamically) from the user context pool,
|
||||
* (SC_USER) and used by kernel driver as kernel contexts (SC_KERNEL).
|
||||
*/
|
||||
#define SC_VNIC SC_MAX
|
||||
|
||||
/* invalid send context index */
|
||||
#define INVALID_SCI 0xff
|
||||
|
||||
|
@ -276,7 +276,6 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
|
||||
if (IS_ERR(ps->s_txreq))
|
||||
goto bail_no_tx;
|
||||
|
||||
ps->s_txreq->phdr.hdr.hdr_type = priv->hdr_type;
|
||||
if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
|
||||
/* header size in 32-bit words LRH+BTH = (8+12)/4. */
|
||||
hwords = 5;
|
||||
@ -1966,7 +1965,7 @@ static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
|
||||
cc_event->svc_type = svc_type;
|
||||
cc_event->rlid = rlid;
|
||||
/* keep timestamp in units of 1.024 usec */
|
||||
cc_event->timestamp = ktime_to_ns(ktime_get()) / 1024;
|
||||
cc_event->timestamp = ktime_get_ns() / 1024;
|
||||
|
||||
spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
|
||||
}
|
||||
@ -2175,7 +2174,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
|
||||
goto no_immediate_data;
|
||||
if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
|
||||
goto send_last_inv;
|
||||
/* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */
|
||||
/* FALLTHROUGH -- for SEND_ONLY_WITH_IMMEDIATE */
|
||||
case OP(SEND_LAST_WITH_IMMEDIATE):
|
||||
send_last_imm:
|
||||
wc.ex.imm_data = ohdr->u.imm_data;
|
||||
@ -2220,7 +2219,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
|
||||
wc.opcode = IB_WC_RECV;
|
||||
wc.qp = &qp->ibqp;
|
||||
wc.src_qp = qp->remote_qpn;
|
||||
wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
|
||||
wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
|
||||
/*
|
||||
* It seems that IB mandates the presence of an SL in a
|
||||
* work completion only for the UD transport (see section
|
||||
|
@ -560,7 +560,7 @@ static void ruc_loopback(struct rvt_qp *sqp)
|
||||
wc.byte_len = wqe->length;
|
||||
wc.qp = &qp->ibqp;
|
||||
wc.src_qp = qp->remote_qpn;
|
||||
wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
|
||||
wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
|
||||
wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
|
||||
wc.port_num = 1;
|
||||
/* Signal completion event if the solicited bit is set. */
|
||||
@ -825,11 +825,9 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
|
||||
{
|
||||
struct hfi1_qp_priv *priv = qp->priv;
|
||||
struct hfi1_ibport *ibp = ps->ibp;
|
||||
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
|
||||
u32 bth1 = 0;
|
||||
u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
|
||||
u16 lrh0 = HFI1_LRH_BTH;
|
||||
u16 slid;
|
||||
u8 extra_bytes = -ps->s_txreq->s_cur_size & 3;
|
||||
u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
|
||||
extra_bytes) >> 2);
|
||||
@ -866,13 +864,6 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
|
||||
bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
|
||||
}
|
||||
hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
|
||||
|
||||
if (!ppd->lid)
|
||||
slid = be16_to_cpu(IB_LID_PERMISSIVE);
|
||||
else
|
||||
slid = ppd->lid |
|
||||
(rdma_ah_get_path_bits(&qp->remote_ah_attr) &
|
||||
((1 << ppd->lmc) - 1));
|
||||
hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
|
||||
lrh0,
|
||||
qp->s_hdrwords + nwords,
|
||||
|
@ -491,10 +491,10 @@ static void sdma_err_progress_check_schedule(struct sdma_engine *sde)
|
||||
}
|
||||
}
|
||||
|
||||
static void sdma_err_progress_check(unsigned long data)
|
||||
static void sdma_err_progress_check(struct timer_list *t)
|
||||
{
|
||||
unsigned index;
|
||||
struct sdma_engine *sde = (struct sdma_engine *)data;
|
||||
struct sdma_engine *sde = from_timer(sde, t, err_progress_check_timer);
|
||||
|
||||
dd_dev_err(sde->dd, "SDE progress check event\n");
|
||||
for (index = 0; index < sde->dd->num_sdma; index++) {
|
||||
@ -1392,6 +1392,13 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
|
||||
return ret;
|
||||
|
||||
idle_cnt = ns_to_cclock(dd, idle_cnt);
|
||||
if (idle_cnt)
|
||||
dd->default_desc1 =
|
||||
SDMA_DESC1_HEAD_TO_HOST_FLAG;
|
||||
else
|
||||
dd->default_desc1 =
|
||||
SDMA_DESC1_INT_REQ_FLAG;
|
||||
|
||||
if (!sdma_desct_intr)
|
||||
sdma_desct_intr = SDMA_DESC_INTR;
|
||||
|
||||
@ -1436,13 +1443,6 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
|
||||
sde->tail_csr =
|
||||
get_kctxt_csr_addr(dd, this_idx, SD(TAIL));
|
||||
|
||||
if (idle_cnt)
|
||||
dd->default_desc1 =
|
||||
SDMA_DESC1_HEAD_TO_HOST_FLAG;
|
||||
else
|
||||
dd->default_desc1 =
|
||||
SDMA_DESC1_INT_REQ_FLAG;
|
||||
|
||||
tasklet_init(&sde->sdma_hw_clean_up_task, sdma_hw_clean_up_task,
|
||||
(unsigned long)sde);
|
||||
|
||||
@ -1453,8 +1453,8 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
|
||||
|
||||
sde->progress_check_head = 0;
|
||||
|
||||
setup_timer(&sde->err_progress_check_timer,
|
||||
sdma_err_progress_check, (unsigned long)sde);
|
||||
timer_setup(&sde->err_progress_check_timer,
|
||||
sdma_err_progress_check, 0);
|
||||
|
||||
sde->descq = dma_zalloc_coherent(
|
||||
&dd->pcidev->dev,
|
||||
@ -1465,13 +1465,8 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
|
||||
if (!sde->descq)
|
||||
goto bail;
|
||||
sde->tx_ring =
|
||||
kcalloc(descq_cnt, sizeof(struct sdma_txreq *),
|
||||
GFP_KERNEL);
|
||||
if (!sde->tx_ring)
|
||||
sde->tx_ring =
|
||||
vzalloc(
|
||||
sizeof(struct sdma_txreq *) *
|
||||
descq_cnt);
|
||||
kvzalloc_node(sizeof(struct sdma_txreq *) * descq_cnt,
|
||||
GFP_KERNEL, dd->node);
|
||||
if (!sde->tx_ring)
|
||||
goto bail;
|
||||
}
|
||||
@ -2144,7 +2139,6 @@ void sdma_dumpstate(struct sdma_engine *sde)
|
||||
|
||||
static void dump_sdma_state(struct sdma_engine *sde)
|
||||
{
|
||||
struct hw_sdma_desc *descq;
|
||||
struct hw_sdma_desc *descqp;
|
||||
u64 desc[2];
|
||||
u64 addr;
|
||||
@ -2155,7 +2149,6 @@ static void dump_sdma_state(struct sdma_engine *sde)
|
||||
head = sde->descq_head & sde->sdma_mask;
|
||||
tail = sde->descq_tail & sde->sdma_mask;
|
||||
cnt = sdma_descq_freecnt(sde);
|
||||
descq = sde->descq;
|
||||
|
||||
dd_dev_err(sde->dd,
|
||||
"SDMA (%u) descq_head: %u descq_tail: %u freecnt: %u FLE %d\n",
|
||||
@ -2593,7 +2586,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
|
||||
* 7220, e.g.
|
||||
*/
|
||||
ss->go_s99_running = 1;
|
||||
/* fall through and start dma engine */
|
||||
/* fall through -- and start dma engine */
|
||||
case sdma_event_e10_go_hw_start:
|
||||
/* This reference means the state machine is started */
|
||||
sdma_get(&sde->state);
|
||||
@ -3016,6 +3009,7 @@ static void __sdma_process_event(struct sdma_engine *sde,
|
||||
case sdma_event_e60_hw_halted:
|
||||
need_progress = 1;
|
||||
sdma_err_progress_check_schedule(sde);
|
||||
/* fall through */
|
||||
case sdma_event_e90_sw_halted:
|
||||
/*
|
||||
* SW initiated halt does not perform engines
|
||||
|
@ -543,7 +543,7 @@ static ssize_t show_nctxts(struct device *device,
|
||||
* give a more accurate picture of total contexts available.
|
||||
*/
|
||||
return scnprintf(buf, PAGE_SIZE, "%u\n",
|
||||
min(dd->num_rcv_contexts - dd->first_dyn_alloc_ctxt,
|
||||
min(dd->num_user_contexts,
|
||||
(u32)dd->sc_sizes[SC_USER].count));
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright(c) 2015, 2016 Intel Corporation.
|
||||
* Copyright(c) 2015 - 2017 Intel Corporation.
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
* redistributing this file, you may do so under either license.
|
||||
@ -91,12 +91,17 @@ u8 hfi1_trace_opa_hdr_len(struct hfi1_opa_header *opa_hdr)
|
||||
return __get_16b_hdr_len(&opa_hdr->opah);
|
||||
}
|
||||
|
||||
const char *hfi1_trace_get_packet_str(struct hfi1_packet *packet)
|
||||
const char *hfi1_trace_get_packet_l4_str(u8 l4)
|
||||
{
|
||||
if (packet->etype != RHF_RCV_TYPE_BYPASS)
|
||||
return "IB";
|
||||
if (l4)
|
||||
return "16B";
|
||||
else
|
||||
return "9B";
|
||||
}
|
||||
|
||||
switch (hfi1_16B_get_l2(packet->hdr)) {
|
||||
const char *hfi1_trace_get_packet_l2_str(u8 l2)
|
||||
{
|
||||
switch (l2) {
|
||||
case 0:
|
||||
return "0";
|
||||
case 1:
|
||||
@ -109,14 +114,6 @@ const char *hfi1_trace_get_packet_str(struct hfi1_packet *packet)
|
||||
return "";
|
||||
}
|
||||
|
||||
const char *hfi1_trace_get_packet_type_str(u8 l4)
|
||||
{
|
||||
if (l4)
|
||||
return "16B";
|
||||
else
|
||||
return "9B";
|
||||
}
|
||||
|
||||
#define IMM_PRN "imm:%d"
|
||||
#define RETH_PRN "reth vaddr:0x%.16llx rkey:0x%.8x dlen:0x%.8x"
|
||||
#define AETH_PRN "aeth syn:0x%.2x %s msn:0x%.8x"
|
||||
@ -154,7 +151,7 @@ void hfi1_trace_parse_9b_bth(struct ib_other_headers *ohdr,
|
||||
*opcode = ib_bth_get_opcode(ohdr);
|
||||
*tver = ib_bth_get_tver(ohdr);
|
||||
*pkey = ib_bth_get_pkey(ohdr);
|
||||
*psn = ib_bth_get_psn(ohdr);
|
||||
*psn = mask_psn(ib_bth_get_psn(ohdr));
|
||||
*qpn = ib_bth_get_qpn(ohdr);
|
||||
}
|
||||
|
||||
@ -169,7 +166,7 @@ void hfi1_trace_parse_16b_bth(struct ib_other_headers *ohdr,
|
||||
*pad = ib_bth_get_pad(ohdr);
|
||||
*se = ib_bth_get_se(ohdr);
|
||||
*tver = ib_bth_get_tver(ohdr);
|
||||
*psn = ib_bth_get_psn(ohdr);
|
||||
*psn = mask_psn(ib_bth_get_psn(ohdr));
|
||||
*qpn = ib_bth_get_qpn(ohdr);
|
||||
}
|
||||
|
||||
|
@ -44,6 +44,16 @@
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
|
||||
#define show_packettype(etype) \
|
||||
__print_symbolic(etype, \
|
||||
packettype_name(EXPECTED), \
|
||||
packettype_name(EAGER), \
|
||||
packettype_name(IB), \
|
||||
packettype_name(ERROR), \
|
||||
packettype_name(BYPASS))
|
||||
|
||||
#include "trace_dbg.h"
|
||||
#include "trace_misc.h"
|
||||
#include "trace_ctxts.h"
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright(c) 2015, 2016 Intel Corporation.
|
||||
* Copyright(c) 2015 - 2017 Intel Corporation.
|
||||
*
|
||||
* This file is provided under a dual BSD/GPLv2 license. When using or
|
||||
* redistributing this file, you may do so under either license.
|
||||
@ -99,8 +99,7 @@ u8 ibhdr_exhdr_len(struct ib_header *hdr);
|
||||
const char *parse_everbs_hdrs(struct trace_seq *p, u8 opcode, void *ehdrs);
|
||||
u8 hfi1_trace_opa_hdr_len(struct hfi1_opa_header *opah);
|
||||
u8 hfi1_trace_packet_hdr_len(struct hfi1_packet *packet);
|
||||
const char *hfi1_trace_get_packet_type_str(u8 l4);
|
||||
const char *hfi1_trace_get_packet_str(struct hfi1_packet *packet);
|
||||
const char *hfi1_trace_get_packet_l4_str(u8 l4);
|
||||
void hfi1_trace_parse_9b_bth(struct ib_other_headers *ohdr,
|
||||
u8 *ack, u8 *becn, u8 *fecn, u8 *mig,
|
||||
u8 *se, u8 *pad, u8 *opcode, u8 *tver,
|
||||
@ -129,6 +128,8 @@ const char *hfi1_trace_fmt_bth(struct trace_seq *p, bool bypass,
|
||||
u8 se, u8 pad, u8 opcode, const char *opname,
|
||||
u8 tver, u16 pkey, u32 psn, u32 qpn);
|
||||
|
||||
const char *hfi1_trace_get_packet_l2_str(u8 l2);
|
||||
|
||||
#define __parse_ib_ehdrs(op, ehdrs) parse_everbs_hdrs(p, op, ehdrs)
|
||||
|
||||
#define lrh_name(lrh) { HFI1_##lrh, #lrh }
|
||||
@ -136,8 +137,6 @@ const char *hfi1_trace_fmt_bth(struct trace_seq *p, bool bypass,
|
||||
__print_symbolic(lrh, \
|
||||
lrh_name(LRH_BTH), \
|
||||
lrh_name(LRH_GRH))
|
||||
#define PKT_ENTRY(pkt) __string(ptype, hfi1_trace_get_packet_str(packet))
|
||||
#define PKT_ASSIGN(pkt) __assign_str(ptype, hfi1_trace_get_packet_str(packet))
|
||||
|
||||
DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
|
||||
TP_PROTO(struct hfi1_devdata *dd,
|
||||
@ -146,12 +145,12 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
|
||||
TP_ARGS(dd, packet, sc5),
|
||||
TP_STRUCT__entry(
|
||||
DD_DEV_ENTRY(dd)
|
||||
PKT_ENTRY(packet)
|
||||
__field(bool, bypass)
|
||||
__field(u8, etype)
|
||||
__field(u8, ack)
|
||||
__field(u8, age)
|
||||
__field(u8, becn)
|
||||
__field(u8, fecn)
|
||||
__field(u8, l2)
|
||||
__field(u8, l4)
|
||||
__field(u8, lnh)
|
||||
__field(u8, lver)
|
||||
@ -176,10 +175,10 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
|
||||
),
|
||||
TP_fast_assign(
|
||||
DD_DEV_ASSIGN(dd);
|
||||
PKT_ASSIGN(packet);
|
||||
|
||||
if (packet->etype == RHF_RCV_TYPE_BYPASS) {
|
||||
__entry->bypass = true;
|
||||
__entry->etype = packet->etype;
|
||||
__entry->l2 = hfi1_16B_get_l2(packet->hdr);
|
||||
if (__entry->etype == RHF_RCV_TYPE_BYPASS) {
|
||||
hfi1_trace_parse_16b_hdr(packet->hdr,
|
||||
&__entry->age,
|
||||
&__entry->becn,
|
||||
@ -203,7 +202,6 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
|
||||
&__entry->psn,
|
||||
&__entry->qpn);
|
||||
} else {
|
||||
__entry->bypass = false;
|
||||
hfi1_trace_parse_9b_hdr(packet->hdr, sc5,
|
||||
&__entry->lnh,
|
||||
&__entry->lver,
|
||||
@ -233,9 +231,13 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
|
||||
),
|
||||
TP_printk("[%s] (%s) %s %s hlen:%d %s",
|
||||
__get_str(dev),
|
||||
__get_str(ptype),
|
||||
__entry->etype != RHF_RCV_TYPE_BYPASS ?
|
||||
show_packettype(__entry->etype) :
|
||||
hfi1_trace_get_packet_l2_str(
|
||||
__entry->l2),
|
||||
hfi1_trace_fmt_lrh(p,
|
||||
__entry->bypass,
|
||||
__entry->etype ==
|
||||
RHF_RCV_TYPE_BYPASS,
|
||||
__entry->age,
|
||||
__entry->becn,
|
||||
__entry->fecn,
|
||||
@ -252,7 +254,8 @@ DECLARE_EVENT_CLASS(hfi1_input_ibhdr_template,
|
||||
__entry->dlid,
|
||||
__entry->slid),
|
||||
hfi1_trace_fmt_bth(p,
|
||||
__entry->bypass,
|
||||
__entry->etype ==
|
||||
RHF_RCV_TYPE_BYPASS,
|
||||
__entry->ack,
|
||||
__entry->becn,
|
||||
__entry->fecn,
|
||||
@ -284,7 +287,7 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
|
||||
TP_ARGS(dd, opah, sc5),
|
||||
TP_STRUCT__entry(
|
||||
DD_DEV_ENTRY(dd)
|
||||
__field(bool, bypass)
|
||||
__field(u8, hdr_type)
|
||||
__field(u8, ack)
|
||||
__field(u8, age)
|
||||
__field(u8, becn)
|
||||
@ -316,8 +319,8 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
|
||||
|
||||
DD_DEV_ASSIGN(dd);
|
||||
|
||||
if (opah->hdr_type) {
|
||||
__entry->bypass = true;
|
||||
__entry->hdr_type = opah->hdr_type;
|
||||
if (__entry->hdr_type) {
|
||||
hfi1_trace_parse_16b_hdr(&opah->opah,
|
||||
&__entry->age,
|
||||
&__entry->becn,
|
||||
@ -331,7 +334,7 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
|
||||
&__entry->dlid,
|
||||
&__entry->slid);
|
||||
|
||||
if (entry->l4 == OPA_16B_L4_IB_LOCAL)
|
||||
if (__entry->l4 == OPA_16B_L4_IB_LOCAL)
|
||||
ohdr = &opah->opah.u.oth;
|
||||
else
|
||||
ohdr = &opah->opah.u.l.oth;
|
||||
@ -345,7 +348,7 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
|
||||
&__entry->psn,
|
||||
&__entry->qpn);
|
||||
} else {
|
||||
__entry->bypass = false;
|
||||
__entry->l4 = OPA_16B_L4_9B;
|
||||
hfi1_trace_parse_9b_hdr(&opah->ibh, sc5,
|
||||
&__entry->lnh,
|
||||
&__entry->lver,
|
||||
@ -354,7 +357,7 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
|
||||
&__entry->len,
|
||||
&__entry->dlid,
|
||||
&__entry->slid);
|
||||
if (entry->lnh == HFI1_LRH_BTH)
|
||||
if (__entry->lnh == HFI1_LRH_BTH)
|
||||
ohdr = &opah->ibh.u.oth;
|
||||
else
|
||||
ohdr = &opah->ibh.u.l.oth;
|
||||
@ -378,9 +381,9 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
|
||||
),
|
||||
TP_printk("[%s] (%s) %s %s hlen:%d %s",
|
||||
__get_str(dev),
|
||||
hfi1_trace_get_packet_type_str(__entry->l4),
|
||||
hfi1_trace_get_packet_l4_str(__entry->l4),
|
||||
hfi1_trace_fmt_lrh(p,
|
||||
__entry->bypass,
|
||||
!!__entry->hdr_type,
|
||||
__entry->age,
|
||||
__entry->becn,
|
||||
__entry->fecn,
|
||||
@ -397,7 +400,7 @@ DECLARE_EVENT_CLASS(hfi1_output_ibhdr_template,
|
||||
__entry->dlid,
|
||||
__entry->slid),
|
||||
hfi1_trace_fmt_bth(p,
|
||||
__entry->bypass,
|
||||
!!__entry->hdr_type,
|
||||
__entry->ack,
|
||||
__entry->becn,
|
||||
__entry->fecn,
@@ -1,5 +1,5 @@
/*
* Copyright(c) 2015, 2016 Intel Corporation.
* Copyright(c) 2015 - 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -62,15 +62,6 @@ __print_symbolic(type, \
#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_rx

#define packettype_name(etype) { RHF_RCV_TYPE_##etype, #etype }
#define show_packettype(etype) \
__print_symbolic(etype, \
packettype_name(EXPECTED), \
packettype_name(EAGER), \
packettype_name(IB), \
packettype_name(ERROR), \
packettype_name(BYPASS))

TRACE_EVENT(hfi1_rcvhdr,
TP_PROTO(struct hfi1_devdata *dd,
u32 ctxt,

@@ -93,7 +93,6 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
goto done_free_tx;
}

ps->s_txreq->phdr.hdr.hdr_type = priv->hdr_type;
if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
/* header size in 32-bit words LRH+BTH = (8+12)/4. */
hwords = 5;
@@ -463,7 +462,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet)
wc.status = IB_WC_SUCCESS;
wc.qp = &qp->ibqp;
wc.src_qp = qp->remote_qpn;
wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr);
wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
/*
* It seems that IB mandates the presence of an SL in a
* work completion only for the UD transport (see section

@ -265,8 +265,8 @@ static void ud_loopback(struct rvt_qp *sqp, struct rvt_swqe *swqe)
|
||||
} else {
|
||||
wc.pkey_index = 0;
|
||||
}
|
||||
wc.slid = ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
|
||||
((1 << ppd->lmc) - 1));
|
||||
wc.slid = (ppd->lid | (rdma_ah_get_path_bits(ah_attr) &
|
||||
((1 << ppd->lmc) - 1))) & U16_MAX;
|
||||
/* Check for loopback when the port lid is not set */
|
||||
if (wc.slid == 0 && sqp->ibqp.qp_type == IB_QPT_GSI)
|
||||
wc.slid = be16_to_cpu(IB_LID_PERMISSIVE);
|
||||
@ -854,7 +854,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
|
||||
int mgmt_pkey_idx = -1;
|
||||
struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
|
||||
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
|
||||
struct ib_header *hdr = packet->hdr;
|
||||
void *data = packet->payload;
|
||||
u32 tlen = packet->tlen;
|
||||
struct rvt_qp *qp = packet->qp;
|
||||
@ -880,7 +879,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
|
||||
dlid_is_permissive = (dlid == permissive_lid);
|
||||
slid_is_permissive = (slid == permissive_lid);
|
||||
} else {
|
||||
hdr = packet->hdr;
|
||||
pkey = ib_bth_get_pkey(ohdr);
|
||||
dlid_is_permissive = (dlid == be16_to_cpu(IB_LID_PERMISSIVE));
|
||||
slid_is_permissive = (slid == be16_to_cpu(IB_LID_PERMISSIVE));
|
||||
@ -1039,7 +1037,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
|
||||
}
|
||||
if (slid_is_permissive)
|
||||
slid = be32_to_cpu(OPA_LID_PERMISSIVE);
|
||||
wc.slid = slid;
|
||||
wc.slid = slid & U16_MAX;
|
||||
wc.sl = sl_from_sc;
|
||||
|
||||
/*
@@ -542,14 +542,10 @@ int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
{
struct hfi1_ctxtdata *uctxt = fd->uctxt;
unsigned long *ev = uctxt->dd->events +
(((uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
HFI1_MAX_SHARED_CTXTS) + fd->subctxt);
(uctxt_offset(uctxt) + fd->subctxt);
u32 *array;
int ret = 0;

if (!fd->invalid_tids)
return -EINVAL;

/*
* copy_to_user() can sleep, which will leave the invalid_lock
* locked and cause the MMU notifier to be blocked on the lock
@@ -942,8 +938,7 @@ static int tid_rb_invalidate(void *arg, struct mmu_rb_node *mnode)
* process in question.
*/
ev = uctxt->dd->events +
(((uctxt->ctxt - uctxt->dd->first_dyn_alloc_ctxt) *
HFI1_MAX_SHARED_CTXTS) + fdata->subctxt);
(uctxt_offset(uctxt) + fdata->subctxt);
set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
}
fdata->invalid_tid_idx++;

@ -956,10 +956,8 @@ static int pin_sdma_pages(struct user_sdma_request *req,
|
||||
struct hfi1_user_sdma_pkt_q *pq = req->pq;
|
||||
|
||||
pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
|
||||
if (!pages) {
|
||||
SDMA_DBG(req, "Failed page array alloc");
|
||||
if (!pages)
|
||||
return -ENOMEM;
|
||||
}
|
||||
memcpy(pages, node->pages, node->npages * sizeof(*pages));
|
||||
|
||||
npages -= node->npages;
|
||||
@ -1254,20 +1252,25 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
|
||||
struct user_sdma_txreq *tx, u32 datalen)
|
||||
{
|
||||
u32 ahg[AHG_KDETH_ARRAY_SIZE];
|
||||
int diff = 0;
|
||||
int idx = 0;
|
||||
u8 omfactor; /* KDETH.OM */
|
||||
struct hfi1_user_sdma_pkt_q *pq = req->pq;
|
||||
struct hfi1_pkt_header *hdr = &req->hdr;
|
||||
u16 pbclen = le16_to_cpu(hdr->pbc[0]);
|
||||
u32 val32, tidval = 0, lrhlen = get_lrh_len(*hdr, pad_len(datalen));
|
||||
size_t array_size = ARRAY_SIZE(ahg);
|
||||
|
||||
if (PBC2LRH(pbclen) != lrhlen) {
|
||||
/* PBC.PbcLengthDWs */
|
||||
AHG_HEADER_SET(ahg, diff, 0, 0, 12,
|
||||
cpu_to_le16(LRH2PBC(lrhlen)));
|
||||
idx = ahg_header_set(ahg, idx, array_size, 0, 0, 12,
|
||||
(__force u16)cpu_to_le16(LRH2PBC(lrhlen)));
|
||||
if (idx < 0)
|
||||
return idx;
|
||||
/* LRH.PktLen (we need the full 16 bits due to byte swap) */
|
||||
AHG_HEADER_SET(ahg, diff, 3, 0, 16,
|
||||
cpu_to_be16(lrhlen >> 2));
|
||||
idx = ahg_header_set(ahg, idx, array_size, 3, 0, 16,
|
||||
(__force u16)cpu_to_be16(lrhlen >> 2));
|
||||
if (idx < 0)
|
||||
return idx;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1278,12 +1281,23 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
|
||||
(HFI1_CAP_IS_KSET(EXTENDED_PSN) ? 0x7fffffff : 0xffffff);
|
||||
if (unlikely(tx->flags & TXREQ_FLAGS_REQ_ACK))
|
||||
val32 |= 1UL << 31;
|
||||
AHG_HEADER_SET(ahg, diff, 6, 0, 16, cpu_to_be16(val32 >> 16));
|
||||
AHG_HEADER_SET(ahg, diff, 6, 16, 16, cpu_to_be16(val32 & 0xffff));
|
||||
idx = ahg_header_set(ahg, idx, array_size, 6, 0, 16,
|
||||
(__force u16)cpu_to_be16(val32 >> 16));
|
||||
if (idx < 0)
|
||||
return idx;
|
||||
idx = ahg_header_set(ahg, idx, array_size, 6, 16, 16,
|
||||
(__force u16)cpu_to_be16(val32 & 0xffff));
|
||||
if (idx < 0)
|
||||
return idx;
|
||||
/* KDETH.Offset */
|
||||
AHG_HEADER_SET(ahg, diff, 15, 0, 16,
|
||||
cpu_to_le16(req->koffset & 0xffff));
|
||||
AHG_HEADER_SET(ahg, diff, 15, 16, 16, cpu_to_le16(req->koffset >> 16));
|
||||
idx = ahg_header_set(ahg, idx, array_size, 15, 0, 16,
|
||||
(__force u16)cpu_to_le16(req->koffset & 0xffff));
|
||||
if (idx < 0)
|
||||
return idx;
|
||||
idx = ahg_header_set(ahg, idx, array_size, 15, 16, 16,
|
||||
(__force u16)cpu_to_le16(req->koffset >> 16));
|
||||
if (idx < 0)
|
||||
return idx;
|
||||
if (req_opcode(req->info.ctrl) == EXPECTED) {
|
||||
__le16 val;
|
||||
|
||||
@ -1310,10 +1324,13 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
|
||||
KDETH_OM_MAX_SIZE) ? KDETH_OM_LARGE_SHIFT :
|
||||
KDETH_OM_SMALL_SHIFT;
|
||||
/* KDETH.OM and KDETH.OFFSET (TID) */
|
||||
AHG_HEADER_SET(ahg, diff, 7, 0, 16,
|
||||
((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 |
|
||||
idx = ahg_header_set(
|
||||
ahg, idx, array_size, 7, 0, 16,
|
||||
((!!(omfactor - KDETH_OM_SMALL_SHIFT)) << 15 |
|
||||
((req->tidoffset >> omfactor)
|
||||
& 0x7fff)));
|
||||
& 0x7fff)));
|
||||
if (idx < 0)
|
||||
return idx;
|
||||
/* KDETH.TIDCtrl, KDETH.TID, KDETH.Intr, KDETH.SH */
|
||||
val = cpu_to_le16(((EXP_TID_GET(tidval, CTRL) & 0x3) << 10) |
|
||||
(EXP_TID_GET(tidval, IDX) & 0x3ff));
|
||||
@ -1330,21 +1347,22 @@ static int set_txreq_header_ahg(struct user_sdma_request *req,
|
||||
AHG_KDETH_INTR_SHIFT));
|
||||
}
|
||||
|
||||
AHG_HEADER_SET(ahg, diff, 7, 16, 14, val);
|
||||
idx = ahg_header_set(ahg, idx, array_size,
|
||||
7, 16, 14, (__force u16)val);
|
||||
if (idx < 0)
|
||||
return idx;
|
||||
}
|
||||
if (diff < 0)
|
||||
return diff;
|
||||
|
||||
trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
|
||||
req->info.comp_idx, req->sde->this_idx,
|
||||
req->ahg_idx, ahg, diff, tidval);
|
||||
req->ahg_idx, ahg, idx, tidval);
|
||||
sdma_txinit_ahg(&tx->txreq,
|
||||
SDMA_TXREQ_F_USE_AHG,
|
||||
datalen, req->ahg_idx, diff,
|
||||
datalen, req->ahg_idx, idx,
|
||||
ahg, sizeof(req->hdr),
|
||||
user_sdma_txreq_cb);
|
||||
|
||||
return diff;
|
||||
return idx;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1410,6 +1428,8 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
|
||||
|
||||
static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!list_empty(&req->txps)) {
|
||||
struct sdma_txreq *t, *p;
|
||||
|
||||
@ -1421,22 +1441,20 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
|
||||
kmem_cache_free(req->pq->txreq_cache, tx);
|
||||
}
|
||||
}
|
||||
if (req->data_iovs) {
|
||||
struct sdma_mmu_node *node;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < req->data_iovs; i++) {
|
||||
node = req->iovs[i].node;
|
||||
if (!node)
|
||||
continue;
|
||||
for (i = 0; i < req->data_iovs; i++) {
|
||||
struct sdma_mmu_node *node = req->iovs[i].node;
|
||||
|
||||
if (unpin)
|
||||
hfi1_mmu_rb_remove(req->pq->handler,
|
||||
&node->rb);
|
||||
else
|
||||
atomic_dec(&node->refcount);
|
||||
}
|
||||
if (!node)
|
||||
continue;
|
||||
|
||||
if (unpin)
|
||||
hfi1_mmu_rb_remove(req->pq->handler,
|
||||
&node->rb);
|
||||
else
|
||||
atomic_dec(&node->refcount);
|
||||
}
|
||||
|
||||
kfree(req->tids);
|
||||
clear_bit(req->info.comp_idx, req->pq->req_in_use);
|
||||
}
@@ -80,15 +80,26 @@
#define PBC2LRH(x) ((((x) & 0xfff) << 2) - 4)
#define LRH2PBC(x) ((((x) >> 2) + 1) & 0xfff)

#define AHG_HEADER_SET(arr, idx, dw, bit, width, value) \
do { \
if ((idx) < ARRAY_SIZE((arr))) \
(arr)[(idx++)] = sdma_build_ahg_descriptor( \
(__force u16)(value), (dw), (bit), \
(width)); \
else \
return -ERANGE; \
} while (0)
/**
* Build an SDMA AHG header update descriptor and save it to an array.
* @arr - Array to save the descriptor to.
* @idx - Index of the array at which the descriptor will be saved.
* @array_size - Size of the array arr.
* @dw - Update index into the header in DWs.
* @bit - Start bit.
* @width - Field width.
* @value - 16 bits of immediate data to write into the field.
* Returns -ERANGE if idx is invalid. If successful, returns the next index
* (idx + 1) of the array to be used for the next descriptor.
*/
static inline int ahg_header_set(u32 *arr, int idx, size_t array_size,
u8 dw, u8 bit, u8 width, u16 value)
{
if ((size_t)idx >= array_size)
return -ERANGE;
arr[idx++] = sdma_build_ahg_descriptor(value, dw, bit, width);
return idx;
}

/* Tx request flag bits */
#define TXREQ_FLAGS_REQ_ACK BIT(0) /* Set the ACK bit in the header */

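For context only (not part of the patch): a minimal sketch of how a caller might chain the new ahg_header_set() helper defined above, propagating -ERANGE explicitly instead of relying on the old macro's hidden return. The example_build_ahg() wrapper is hypothetical; the dw/bit/width triples (0,0,12) and (3,0,16) mirror the PBC.PbcLengthDWs and LRH.PktLen updates seen later in set_txreq_header_ahg().

/* Illustrative caller, assuming the ahg_header_set() defined above. */
static int example_build_ahg(u32 *ahg, size_t array_size, u16 pbc_dws, u16 pkt_len)
{
	int idx = 0;

	/* PBC.PbcLengthDWs: dw 0, bit 0, width 12 */
	idx = ahg_header_set(ahg, idx, array_size, 0, 0, 12, pbc_dws);
	if (idx < 0)
		return idx;
	/* LRH.PktLen: dw 3, bit 0, width 16 */
	idx = ahg_header_set(ahg, idx, array_size, 3, 0, 16, pkt_len);
	if (idx < 0)
		return idx;

	return idx; /* number of descriptors written so far */
}
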
@ -146,6 +146,9 @@ static int pio_wait(struct rvt_qp *qp,
|
||||
/* Length of buffer to create verbs txreq cache name */
|
||||
#define TXREQ_NAME_LEN 24
|
||||
|
||||
/* 16B trailing buffer */
|
||||
static const u8 trail_buf[MAX_16B_PADDING];
|
||||
|
||||
static uint wss_threshold;
|
||||
module_param(wss_threshold, uint, S_IRUGO);
|
||||
MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
@@ -667,9 +670,9 @@ void hfi1_16B_rcv(struct hfi1_packet *packet)
* This is called from a timer to check for QPs
* which need kernel memory in order to send a packet.
*/
static void mem_timer(unsigned long data)
static void mem_timer(struct timer_list *t)
{
struct hfi1_ibdev *dev = (struct hfi1_ibdev *)data;
struct hfi1_ibdev *dev = from_timer(dev, t, mem_timer);
struct list_head *list = &dev->memwait;
struct rvt_qp *qp = NULL;
struct iowait *wait;
@@ -793,6 +796,27 @@ static noinline int build_verbs_ulp_payload(
return ret;
}

/**
* update_tx_opstats - record stats by opcode
* @qp: the qp
* @ps: transmit packet state
* @plen: the plen in dwords
*
* This is a routine to record the tx opstats after a
* packet has been presented to the egress mechanism.
*/
static void update_tx_opstats(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
u32 plen)
{
#ifdef CONFIG_DEBUG_FS
struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
struct hfi1_opcode_stats_perctx *s = get_cpu_ptr(dd->tx_opstats);

inc_opstats(plen * 4, &s->stats[ps->opcode]);
put_cpu_ptr(s);
#endif
}

/*
|
||||
* Build the number of DMA descriptors needed to send length bytes of data.
|
||||
*
|
||||
@ -812,9 +836,7 @@ static int build_verbs_tx_desc(
|
||||
int ret = 0;
|
||||
struct hfi1_sdma_header *phdr = &tx->phdr;
|
||||
u16 hdrbytes = tx->hdr_dwords << 2;
|
||||
u32 *hdr;
|
||||
u8 extra_bytes = 0;
|
||||
static char trail_buf[12]; /* CRC = 4, LT = 1, Pad = 0 to 7 bytes */
|
||||
|
||||
if (tx->phdr.hdr.hdr_type) {
|
||||
/*
|
||||
@ -823,9 +845,6 @@ static int build_verbs_tx_desc(
|
||||
*/
|
||||
extra_bytes = hfi1_get_16b_padding(hdrbytes - 8, length) +
|
||||
(SIZE_OF_CRC << 2) + SIZE_OF_LT;
|
||||
hdr = (u32 *)&phdr->hdr.opah;
|
||||
} else {
|
||||
hdr = (u32 *)&phdr->hdr.ibh;
|
||||
}
|
||||
if (!ahg_info->ahgcount) {
|
||||
ret = sdma_txinit_ahg(
|
||||
@ -869,9 +888,9 @@ static int build_verbs_tx_desc(
|
||||
}
|
||||
|
||||
/* add icrc, lt byte, and padding to flit */
|
||||
if (extra_bytes != 0)
|
||||
if (extra_bytes)
|
||||
ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
|
||||
trail_buf, extra_bytes);
|
||||
(void *)trail_buf, extra_bytes);
|
||||
|
||||
bail_txadd:
|
||||
return ret;
|
||||
@ -891,14 +910,12 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
|
||||
u8 sc5 = priv->s_sc;
|
||||
int ret;
|
||||
u32 dwords;
|
||||
bool bypass = false;
|
||||
|
||||
if (ps->s_txreq->phdr.hdr.hdr_type) {
|
||||
u8 extra_bytes = hfi1_get_16b_padding((hdrwords << 2), len);
|
||||
|
||||
dwords = (len + extra_bytes + (SIZE_OF_CRC << 2) +
|
||||
SIZE_OF_LT) >> 2;
|
||||
bypass = true;
|
||||
} else {
|
||||
dwords = (len + 3) >> 2;
|
||||
}
|
||||
@ -938,6 +955,8 @@ int hfi1_verbs_send_dma(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
|
||||
goto bail_ecomm;
|
||||
return ret;
|
||||
}
|
||||
|
||||
update_tx_opstats(qp, ps, plen);
|
||||
trace_sdma_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
|
||||
&ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));
|
||||
return ret;
|
||||
@ -1033,8 +1052,6 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
|
||||
int wc_status = IB_WC_SUCCESS;
|
||||
int ret = 0;
|
||||
pio_release_cb cb = NULL;
|
||||
u32 lrh0_16b;
|
||||
bool bypass = false;
|
||||
u8 extra_bytes = 0;
|
||||
|
||||
if (ps->s_txreq->phdr.hdr.hdr_type) {
|
||||
@ -1043,8 +1060,6 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
|
||||
extra_bytes = pad_size + (SIZE_OF_CRC << 2) + SIZE_OF_LT;
|
||||
dwords = (len + extra_bytes) >> 2;
|
||||
hdr = (u32 *)&ps->s_txreq->phdr.hdr.opah;
|
||||
lrh0_16b = ps->s_txreq->phdr.hdr.opah.lrh[0];
|
||||
bypass = true;
|
||||
} else {
|
||||
dwords = (len + 3) >> 2;
|
||||
hdr = (u32 *)&ps->s_txreq->phdr.hdr.ibh;
|
||||
@ -1128,21 +1143,14 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
|
||||
len -= slen;
|
||||
}
|
||||
}
|
||||
/*
|
||||
* Bypass packet will need to copy additional
|
||||
* bytes to accommodate for CRC and LT bytes
|
||||
*/
|
||||
if (extra_bytes) {
|
||||
u8 *empty_buf;
|
||||
/* add icrc, lt byte, and padding to flit */
|
||||
if (extra_bytes)
|
||||
seg_pio_copy_mid(pbuf, trail_buf, extra_bytes);
|
||||
|
||||
empty_buf = kcalloc(extra_bytes, sizeof(u8),
|
||||
GFP_KERNEL);
|
||||
seg_pio_copy_mid(pbuf, empty_buf, extra_bytes);
|
||||
kfree(empty_buf);
|
||||
}
|
||||
seg_pio_copy_end(pbuf);
|
||||
}
|
||||
|
||||
update_tx_opstats(qp, ps, plen);
|
||||
trace_pio_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
|
||||
&ps->s_txreq->phdr.hdr, ib_is_sc5(sc5));
@@ -1636,8 +1644,7 @@ static void init_ibport(struct hfi1_pportdata *ppd)

for (i = 0; i < RVT_MAX_TRAP_LISTS ; i++)
INIT_LIST_HEAD(&ibp->rvp.trap_lists[i].list);
setup_timer(&ibp->rvp.trap_timer, hfi1_handle_trap_timer,
(unsigned long)ibp);
timer_setup(&ibp->rvp.trap_timer, hfi1_handle_trap_timer, 0);

spin_lock_init(&ibp->rvp.lock);
/* Set the prefix to the default value (see ch. 4.1.1) */
@@ -1844,7 +1851,7 @@ int hfi1_register_ib_device(struct hfi1_devdata *dd)

/* Only need to initialize non-zero fields. */

setup_timer(&dev->mem_timer, mem_timer, (unsigned long)dev);
timer_setup(&dev->mem_timer, mem_timer, 0);

seqlock_init(&dev->iowait_lock);
seqlock_init(&dev->txwait_lock);

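As background (not from the patch): the setup_timer() to timer_setup() conversions above follow the generic 4.15 timer pattern, sketched below with a hypothetical my_dev structure; only the timer API calls themselves are real, everything else is illustrative.

/* Illustrative pattern: embed a timer_list in the owning struct,
 * initialize it with timer_setup(), and recover the owner in the
 * callback with from_timer() instead of casting an unsigned long. */
struct my_dev {
	struct timer_list watchdog;
};

static void my_watchdog_fn(struct timer_list *t)
{
	struct my_dev *mdev = from_timer(mdev, t, watchdog);

	/* use mdev, then optionally re-arm the timer */
	mod_timer(&mdev->watchdog, jiffies + HZ);
}

static void my_dev_init(struct my_dev *mdev)
{
	timer_setup(&mdev->watchdog, my_watchdog_fn, 0);
	mod_timer(&mdev->watchdog, jiffies + HZ);
}
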
@@ -92,6 +92,8 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
tx->psc = priv->s_sendcontext;
/* so that we can test if the sdma descriptors are there */
tx->txreq.num_desc = 0;
/* Set the header type */
tx->phdr.hdr.hdr_type = priv->hdr_type;
return tx;
}

@ -67,8 +67,6 @@ static int setup_vnic_ctxt(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt)
|
||||
unsigned int rcvctrl_ops = 0;
|
||||
int ret;
|
||||
|
||||
hfi1_init_ctxt(uctxt->sc);
|
||||
|
||||
uctxt->do_interrupt = &handle_receive_interrupt;
|
||||
|
||||
/* Now allocate the RcvHdr queue and eager buffers. */
|
||||
@ -96,8 +94,6 @@ static int setup_vnic_ctxt(struct hfi1_devdata *dd, struct hfi1_ctxtdata *uctxt)
|
||||
rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;
|
||||
|
||||
hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
|
||||
|
||||
uctxt->is_vnic = true;
|
||||
done:
|
||||
return ret;
|
||||
}
|
||||
@ -122,20 +118,7 @@ static int allocate_vnic_ctxt(struct hfi1_devdata *dd,
|
||||
HFI1_CAP_KGET(NODROP_EGR_FULL) |
|
||||
HFI1_CAP_KGET(DMA_RTAIL);
|
||||
uctxt->seq_cnt = 1;
|
||||
|
||||
/* Allocate and enable a PIO send context */
|
||||
uctxt->sc = sc_alloc(dd, SC_VNIC, uctxt->rcvhdrqentsize,
|
||||
uctxt->numa_id);
|
||||
|
||||
ret = uctxt->sc ? 0 : -ENOMEM;
|
||||
if (ret)
|
||||
goto bail;
|
||||
|
||||
dd_dev_dbg(dd, "allocated vnic send context %u(%u)\n",
|
||||
uctxt->sc->sw_index, uctxt->sc->hw_context);
|
||||
ret = sc_enable(uctxt->sc);
|
||||
if (ret)
|
||||
goto bail;
|
||||
uctxt->is_vnic = true;
|
||||
|
||||
if (dd->num_msix_entries)
|
||||
hfi1_set_vnic_msix_info(uctxt);
|
||||
@ -144,11 +127,7 @@ static int allocate_vnic_ctxt(struct hfi1_devdata *dd,
|
||||
dd_dev_dbg(dd, "created vnic context %d\n", uctxt->ctxt);
|
||||
*vnic_ctxt = uctxt;
|
||||
|
||||
return ret;
|
||||
bail:
|
||||
hfi1_free_ctxt(uctxt);
|
||||
dd_dev_dbg(dd, "vnic allocation failed. rc %d\n", ret);
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
|
||||
@ -170,18 +149,6 @@ static void deallocate_vnic_ctxt(struct hfi1_devdata *dd,
|
||||
HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
|
||||
HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
|
||||
HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);
|
||||
/*
|
||||
* VNIC contexts are allocated from user context pool.
|
||||
* Release them back to user context pool.
|
||||
*
|
||||
* Reset context integrity checks to default.
|
||||
* (writes to CSRs probably belong in chip.c)
|
||||
*/
|
||||
write_kctxt_csr(dd, uctxt->sc->hw_context, SEND_CTXT_CHECK_ENABLE,
|
||||
hfi1_pkt_default_send_ctxt_mask(dd, SC_USER));
|
||||
sc_disable(uctxt->sc);
|
||||
|
||||
dd->send_contexts[uctxt->sc->sw_index].type = SC_USER;
|
||||
|
||||
uctxt->event_flags = 0;
|
||||
|
||||
@ -840,6 +807,9 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
|
||||
struct rdma_netdev *rn;
|
||||
int i, size, rc;
|
||||
|
||||
if (!dd->num_vnic_contexts)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
if (!port_num || (port_num > dd->num_pports))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
@ -848,7 +818,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
|
||||
|
||||
size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo);
|
||||
netdev = alloc_netdev_mqs(size, name, name_assign_type, setup,
|
||||
dd->chip_sdma_engines, HFI1_NUM_VNIC_CTXT);
|
||||
dd->chip_sdma_engines, dd->num_vnic_contexts);
|
||||
if (!netdev)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
@ -856,7 +826,7 @@ struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
|
||||
vinfo = opa_vnic_dev_priv(netdev);
|
||||
vinfo->dd = dd;
|
||||
vinfo->num_tx_q = dd->chip_sdma_engines;
|
||||
vinfo->num_rx_q = HFI1_NUM_VNIC_CTXT;
|
||||
vinfo->num_rx_q = dd->num_vnic_contexts;
|
||||
vinfo->netdev = netdev;
|
||||
rn->free_rdma_netdev = hfi1_vnic_free_rn;
|
||||
rn->set_id = hfi1_vnic_set_vesw_id;
@@ -1,10 +1,31 @@
config INFINIBAND_HNS
tristate "HNS RoCE Driver"
depends on NET_VENDOR_HISILICON
depends on (ARM64 || (COMPILE_TEST && 64BIT)) && HNS && HNS_DSAF && HNS_ENET
depends on ARM64 || (COMPILE_TEST && 64BIT)
---help---
This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
is used in Hisilicon Hi1610 and more further ICT SoC.
is used in Hisilicon Hip06 and more further ICT SoC based on
platform device.

To compile this driver as a module, choose M here: the module
will be called hns-roce.

config INFINIBAND_HNS_HIP06
tristate "Hisilicon Hip06 Family RoCE support"
depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
---help---
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
Hip07 SoC. These RoCE engines are platform devices.

To compile this driver as a module, choose M here: the module
will be called hns-roce-hw-v1.

config INFINIBAND_HNS_HIP08
tristate "Hisilicon Hip08 Family RoCE support"
depends on INFINIBAND_HNS && PCI && HNS3
---help---
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
The RoCE engine is a PCI device.

To compile this driver as a module, choose M here: the module
will be called hns-roce-hw-v2.

@@ -2,7 +2,13 @@
# Makefile for the Hisilicon RoCE drivers.
#

ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3

obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o
hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_eq.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_hw_v1.o
hns_roce_cq.o hns_roce_alloc.o
obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
hns-roce-hw-v1-objs := hns_roce_hw_v1.o
obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
hns-roce-hw-v2-objs := hns_roce_hw_v2.o

@ -44,11 +44,10 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
|
||||
struct ib_udata *udata)
|
||||
{
|
||||
struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
struct device *dev = hr_dev->dev;
|
||||
struct ib_gid_attr gid_attr;
|
||||
struct hns_roce_ah *ah;
|
||||
u16 vlan_tag = 0xffff;
|
||||
struct in6_addr in6;
|
||||
const struct ib_global_route *grh = rdma_ah_read_grh(ah_attr);
|
||||
union ib_gid sgid;
|
||||
int ret;
|
||||
@ -58,18 +57,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
/* Get mac address */
|
||||
memcpy(&in6, grh->dgid.raw, sizeof(grh->dgid.raw));
|
||||
if (rdma_is_multicast_addr(&in6)) {
|
||||
rdma_get_mcast_mac(&in6, ah->av.mac);
|
||||
} else {
|
||||
u8 *dmac = rdma_ah_retrieve_dmac(ah_attr);
|
||||
|
||||
if (!dmac) {
|
||||
kfree(ah);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
memcpy(ah->av.mac, dmac, ETH_ALEN);
|
||||
}
|
||||
memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
|
||||
|
||||
/* Get source gid */
|
||||
ret = ib_get_cached_gid(ibpd->device, rdma_ah_get_port_num(ah_attr),
|
||||
|
@ -67,6 +67,7 @@ void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
|
||||
{
|
||||
hns_roce_bitmap_free_range(bitmap, obj, 1, rr);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hns_roce_bitmap_free);
|
||||
|
||||
int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
|
||||
int align, unsigned long *obj)
|
||||
@ -160,39 +161,47 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
|
||||
struct hns_roce_buf *buf)
|
||||
{
|
||||
int i;
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
struct device *dev = hr_dev->dev;
|
||||
u32 bits_per_long = BITS_PER_LONG;
|
||||
|
||||
if (buf->nbufs == 1) {
|
||||
dma_free_coherent(dev, size, buf->direct.buf, buf->direct.map);
|
||||
} else {
|
||||
if (bits_per_long == 64)
|
||||
if (bits_per_long == 64 && buf->page_shift == PAGE_SHIFT)
|
||||
vunmap(buf->direct.buf);
|
||||
|
||||
for (i = 0; i < buf->nbufs; ++i)
|
||||
if (buf->page_list[i].buf)
|
||||
dma_free_coherent(&hr_dev->pdev->dev, PAGE_SIZE,
|
||||
dma_free_coherent(dev, 1 << buf->page_shift,
|
||||
buf->page_list[i].buf,
|
||||
buf->page_list[i].map);
|
||||
kfree(buf->page_list);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hns_roce_buf_free);
|
||||
|
||||
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
|
||||
struct hns_roce_buf *buf)
|
||||
struct hns_roce_buf *buf, u32 page_shift)
|
||||
{
|
||||
int i = 0;
|
||||
dma_addr_t t;
|
||||
struct page **pages;
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
struct device *dev = hr_dev->dev;
|
||||
u32 bits_per_long = BITS_PER_LONG;
|
||||
u32 page_size = 1 << page_shift;
|
||||
u32 order;
|
||||
|
||||
/* SQ/RQ buf lease than one page, SQ + RQ = 8K */
|
||||
if (size <= max_direct) {
|
||||
buf->nbufs = 1;
|
||||
/* Npages calculated by page_size */
|
||||
buf->npages = 1 << get_order(size);
|
||||
buf->page_shift = PAGE_SHIFT;
|
||||
order = get_order(size);
|
||||
if (order <= page_shift - PAGE_SHIFT)
|
||||
order = 0;
|
||||
else
|
||||
order -= page_shift - PAGE_SHIFT;
|
||||
buf->npages = 1 << order;
|
||||
buf->page_shift = page_shift;
|
||||
/* MTT PA must be recorded in 4k alignment, t is 4k aligned */
|
||||
buf->direct.buf = dma_alloc_coherent(dev, size, &t, GFP_KERNEL);
|
||||
if (!buf->direct.buf)
|
||||
@ -207,9 +216,9 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
|
||||
|
||||
memset(buf->direct.buf, 0, size);
|
||||
} else {
|
||||
buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
|
||||
buf->nbufs = (size + page_size - 1) / page_size;
|
||||
buf->npages = buf->nbufs;
|
||||
buf->page_shift = PAGE_SHIFT;
|
||||
buf->page_shift = page_shift;
|
||||
buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
|
||||
GFP_KERNEL);
|
||||
|
||||
@ -218,16 +227,16 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
|
||||
|
||||
for (i = 0; i < buf->nbufs; ++i) {
|
||||
buf->page_list[i].buf = dma_alloc_coherent(dev,
|
||||
PAGE_SIZE, &t,
|
||||
page_size, &t,
|
||||
GFP_KERNEL);
|
||||
|
||||
if (!buf->page_list[i].buf)
|
||||
goto err_free;
|
||||
|
||||
buf->page_list[i].map = t;
|
||||
memset(buf->page_list[i].buf, 0, PAGE_SIZE);
|
||||
memset(buf->page_list[i].buf, 0, page_size);
|
||||
}
|
||||
if (bits_per_long == 64) {
|
||||
if (bits_per_long == 64 && page_shift == PAGE_SHIFT) {
|
||||
pages = kmalloc_array(buf->nbufs, sizeof(*pages),
|
||||
GFP_KERNEL);
|
||||
if (!pages)
|
||||
@ -241,6 +250,8 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
|
||||
kfree(pages);
|
||||
if (!buf->direct.buf)
|
||||
goto err_free;
|
||||
} else {
|
||||
buf->direct.buf = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -38,69 +38,7 @@
|
||||
|
||||
#define CMD_POLL_TOKEN 0xffff
|
||||
#define CMD_MAX_NUM 32
|
||||
#define STATUS_MASK 0xff
|
||||
#define CMD_TOKEN_MASK 0x1f
|
||||
#define GO_BIT_TIMEOUT_MSECS 10000
|
||||
|
||||
enum {
|
||||
HCR_TOKEN_OFFSET = 0x14,
|
||||
HCR_STATUS_OFFSET = 0x18,
|
||||
HCR_GO_BIT = 15,
|
||||
};
|
||||
|
||||
static int cmd_pending(struct hns_roce_dev *hr_dev)
|
||||
{
|
||||
u32 status = readl(hr_dev->cmd.hcr + HCR_TOKEN_OFFSET);
|
||||
|
||||
return (!!(status & (1 << HCR_GO_BIT)));
|
||||
}
|
||||
|
||||
/* this function should be serialized with "hcr_mutex" */
|
||||
static int __hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev,
|
||||
u64 in_param, u64 out_param,
|
||||
u32 in_modifier, u8 op_modifier, u16 op,
|
||||
u16 token, int event)
|
||||
{
|
||||
struct hns_roce_cmdq *cmd = &hr_dev->cmd;
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
u32 __iomem *hcr = (u32 *)cmd->hcr;
|
||||
int ret = -EAGAIN;
|
||||
unsigned long end;
|
||||
u32 val = 0;
|
||||
|
||||
end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
|
||||
while (cmd_pending(hr_dev)) {
|
||||
if (time_after(jiffies, end)) {
|
||||
dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
|
||||
(int)end);
|
||||
goto out;
|
||||
}
|
||||
cond_resched();
|
||||
}
|
||||
|
||||
roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
|
||||
op);
|
||||
roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
|
||||
ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
|
||||
roce_set_bit(val, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
|
||||
roce_set_bit(val, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
|
||||
roce_set_field(val, ROCEE_MB6_ROCEE_MB_TOKEN_M,
|
||||
ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
|
||||
|
||||
__raw_writeq(cpu_to_le64(in_param), hcr + 0);
|
||||
__raw_writeq(cpu_to_le64(out_param), hcr + 2);
|
||||
__raw_writel(cpu_to_le32(in_modifier), hcr + 4);
|
||||
/* Memory barrier */
|
||||
wmb();
|
||||
|
||||
__raw_writel(cpu_to_le32(val), hcr + 5);
|
||||
|
||||
mmiowb();
|
||||
ret = 0;
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
|
||||
u64 out_param, u32 in_modifier,
|
||||
@ -108,12 +46,11 @@ static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
|
||||
int event)
|
||||
{
|
||||
struct hns_roce_cmdq *cmd = &hr_dev->cmd;
|
||||
int ret = -EAGAIN;
|
||||
int ret;
|
||||
|
||||
mutex_lock(&cmd->hcr_mutex);
|
||||
ret = __hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
|
||||
in_modifier, op_modifier, op, token,
|
||||
event);
|
||||
ret = hr_dev->hw->post_mbox(hr_dev, in_param, out_param, in_modifier,
|
||||
op_modifier, op, token, event);
|
||||
mutex_unlock(&cmd->hcr_mutex);
|
||||
|
||||
return ret;
|
||||
@ -125,10 +62,7 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
|
||||
u8 op_modifier, u16 op,
|
||||
unsigned long timeout)
|
||||
{
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
u8 __iomem *hcr = hr_dev->cmd.hcr;
|
||||
unsigned long end = 0;
|
||||
u32 status = 0;
|
||||
struct device *dev = hr_dev->dev;
|
||||
int ret;
|
||||
|
||||
ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
|
||||
@ -136,29 +70,10 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
|
||||
CMD_POLL_TOKEN, 0);
|
||||
if (ret) {
|
||||
dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed\n");
|
||||
goto out;
|
||||
return ret;
|
||||
}
|
||||
|
||||
end = msecs_to_jiffies(timeout) + jiffies;
|
||||
while (cmd_pending(hr_dev) && time_before(jiffies, end))
|
||||
cond_resched();
|
||||
|
||||
if (cmd_pending(hr_dev)) {
|
||||
dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
|
||||
ret = -ETIMEDOUT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
status = le32_to_cpu((__force __be32)
|
||||
__raw_readl(hcr + HCR_STATUS_OFFSET));
|
||||
if ((status & STATUS_MASK) != 0x1) {
|
||||
dev_err(dev, "mailbox status 0x%x!\n", status);
|
||||
ret = -EBUSY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
out:
|
||||
return ret;
|
||||
return hr_dev->hw->chk_mbox(hr_dev, timeout);
|
||||
}
|
||||
|
||||
static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
|
||||
@ -196,9 +111,9 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
|
||||
unsigned long timeout)
|
||||
{
|
||||
struct hns_roce_cmdq *cmd = &hr_dev->cmd;
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
struct hns_roce_cmd_context *context;
|
||||
int ret = 0;
|
||||
struct device *dev = hr_dev->dev;
|
||||
int ret;
|
||||
|
||||
spin_lock(&cmd->context_lock);
|
||||
WARN_ON(cmd->free_head < 0);
|
||||
@ -269,17 +184,17 @@ int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
|
||||
in_modifier, op_modifier, op,
|
||||
timeout);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hns_roce_cmd_mbox);
|
||||
|
||||
int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
|
||||
{
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
struct device *dev = hr_dev->dev;
|
||||
|
||||
mutex_init(&hr_dev->cmd.hcr_mutex);
|
||||
sema_init(&hr_dev->cmd.poll_sem, 1);
|
||||
hr_dev->cmd.use_events = 0;
|
||||
hr_dev->cmd.toggle = 1;
|
||||
hr_dev->cmd.max_cmds = CMD_MAX_NUM;
|
||||
hr_dev->cmd.hcr = hr_dev->reg_base + ROCEE_MB1_REG;
|
||||
hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev,
|
||||
HNS_ROCE_MAILBOX_SIZE,
|
||||
HNS_ROCE_MAILBOX_SIZE, 0);
|
||||
@ -356,6 +271,7 @@ struct hns_roce_cmd_mailbox
|
||||
|
||||
return mailbox;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hns_roce_alloc_cmd_mailbox);
|
||||
|
||||
void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
|
||||
struct hns_roce_cmd_mailbox *mailbox)
|
||||
@ -366,3 +282,4 @@ void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
|
||||
dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma);
|
||||
kfree(mailbox);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hns_roce_free_cmd_mailbox);
|
||||
|
@ -36,6 +36,60 @@
|
||||
#define HNS_ROCE_MAILBOX_SIZE 4096
|
||||
#define HNS_ROCE_CMD_TIMEOUT_MSECS 10000
|
||||
|
||||
enum {
|
||||
/* QPC BT commands */
|
||||
HNS_ROCE_CMD_WRITE_QPC_BT0 = 0x0,
|
||||
HNS_ROCE_CMD_WRITE_QPC_BT1 = 0x1,
|
||||
HNS_ROCE_CMD_WRITE_QPC_BT2 = 0x2,
|
||||
HNS_ROCE_CMD_READ_QPC_BT0 = 0x4,
|
||||
HNS_ROCE_CMD_READ_QPC_BT1 = 0x5,
|
||||
HNS_ROCE_CMD_READ_QPC_BT2 = 0x6,
|
||||
HNS_ROCE_CMD_DESTROY_QPC_BT0 = 0x8,
|
||||
HNS_ROCE_CMD_DESTROY_QPC_BT1 = 0x9,
|
||||
HNS_ROCE_CMD_DESTROY_QPC_BT2 = 0xa,
|
||||
|
||||
/* QPC operation */
|
||||
HNS_ROCE_CMD_MODIFY_QPC = 0x41,
|
||||
HNS_ROCE_CMD_QUERY_QPC = 0x42,
|
||||
|
||||
HNS_ROCE_CMD_MODIFY_CQC = 0x52,
|
||||
/* CQC BT commands */
|
||||
HNS_ROCE_CMD_WRITE_CQC_BT0 = 0x10,
|
||||
HNS_ROCE_CMD_WRITE_CQC_BT1 = 0x11,
|
||||
HNS_ROCE_CMD_WRITE_CQC_BT2 = 0x12,
|
||||
HNS_ROCE_CMD_READ_CQC_BT0 = 0x14,
|
||||
HNS_ROCE_CMD_READ_CQC_BT1 = 0x15,
|
||||
HNS_ROCE_CMD_READ_CQC_BT2 = 0x1b,
|
||||
HNS_ROCE_CMD_DESTROY_CQC_BT0 = 0x18,
|
||||
HNS_ROCE_CMD_DESTROY_CQC_BT1 = 0x19,
|
||||
HNS_ROCE_CMD_DESTROY_CQC_BT2 = 0x1a,
|
||||
|
||||
/* MPT BT commands */
|
||||
HNS_ROCE_CMD_WRITE_MPT_BT0 = 0x20,
|
||||
HNS_ROCE_CMD_WRITE_MPT_BT1 = 0x21,
|
||||
HNS_ROCE_CMD_WRITE_MPT_BT2 = 0x22,
|
||||
HNS_ROCE_CMD_READ_MPT_BT0 = 0x24,
|
||||
HNS_ROCE_CMD_READ_MPT_BT1 = 0x25,
|
||||
HNS_ROCE_CMD_READ_MPT_BT2 = 0x26,
|
||||
HNS_ROCE_CMD_DESTROY_MPT_BT0 = 0x28,
|
||||
HNS_ROCE_CMD_DESTROY_MPT_BT1 = 0x29,
|
||||
HNS_ROCE_CMD_DESTROY_MPT_BT2 = 0x2a,
|
||||
|
||||
/* MPT commands */
|
||||
HNS_ROCE_CMD_QUERY_MPT = 0x62,
|
||||
|
||||
/* SRQC BT commands */
|
||||
HNS_ROCE_CMD_WRITE_SRQC_BT0 = 0x30,
|
||||
HNS_ROCE_CMD_WRITE_SRQC_BT1 = 0x31,
|
||||
HNS_ROCE_CMD_WRITE_SRQC_BT2 = 0x32,
|
||||
HNS_ROCE_CMD_READ_SRQC_BT0 = 0x34,
|
||||
HNS_ROCE_CMD_READ_SRQC_BT1 = 0x35,
|
||||
HNS_ROCE_CMD_READ_SRQC_BT2 = 0x36,
|
||||
HNS_ROCE_CMD_DESTROY_SRQC_BT0 = 0x38,
|
||||
HNS_ROCE_CMD_DESTROY_SRQC_BT1 = 0x39,
|
||||
HNS_ROCE_CMD_DESTROY_SRQC_BT2 = 0x3a,
|
||||
};
|
||||
|
||||
enum {
|
||||
/* TPT commands */
|
||||
HNS_ROCE_CMD_SW2HW_MPT = 0xd,
|
||||
|
@ -341,6 +341,7 @@
|
||||
#define ROCEE_BT_CMD_L_REG 0x200
|
||||
|
||||
#define ROCEE_MB1_REG 0x210
|
||||
#define ROCEE_MB6_REG 0x224
|
||||
#define ROCEE_DB_SQ_L_0_REG 0x230
|
||||
#define ROCEE_DB_OTHERS_L_0_REG 0x238
|
||||
#define ROCEE_QP1C_CFG0_0_REG 0x270
|
||||
@ -362,4 +363,26 @@
|
||||
#define ROCEE_ECC_UCERR_ALM0_REG 0xB34
|
||||
#define ROCEE_ECC_CERR_ALM0_REG 0xB40
|
||||
|
||||
/* V2 ROCEE REG */
|
||||
#define ROCEE_TX_CMQ_BASEADDR_L_REG 0x07000
|
||||
#define ROCEE_TX_CMQ_BASEADDR_H_REG 0x07004
|
||||
#define ROCEE_TX_CMQ_DEPTH_REG 0x07008
|
||||
#define ROCEE_TX_CMQ_TAIL_REG 0x07010
|
||||
#define ROCEE_TX_CMQ_HEAD_REG 0x07014
|
||||
|
||||
#define ROCEE_RX_CMQ_BASEADDR_L_REG 0x07018
|
||||
#define ROCEE_RX_CMQ_BASEADDR_H_REG 0x0701c
|
||||
#define ROCEE_RX_CMQ_DEPTH_REG 0x07020
|
||||
#define ROCEE_RX_CMQ_TAIL_REG 0x07024
|
||||
#define ROCEE_RX_CMQ_HEAD_REG 0x07028
|
||||
|
||||
#define ROCEE_VF_SMAC_CFG0_REG 0x12000
|
||||
#define ROCEE_VF_SMAC_CFG1_REG 0x12004
|
||||
|
||||
#define ROCEE_VF_SGID_CFG0_REG 0x10000
|
||||
#define ROCEE_VF_SGID_CFG1_REG 0x10004
|
||||
#define ROCEE_VF_SGID_CFG2_REG 0x10008
|
||||
#define ROCEE_VF_SGID_CFG3_REG 0x1000c
|
||||
#define ROCEE_VF_SGID_CFG4_REG 0x10010
|
||||
|
||||
#endif /* _HNS_ROCE_COMMON_H */
|
||||
|
@ -58,7 +58,7 @@ static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
|
||||
if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
|
||||
event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
|
||||
event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
|
||||
dev_err(&hr_dev->pdev->dev,
|
||||
dev_err(hr_dev->dev,
|
||||
"hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
|
||||
event_type, hr_cq->cqn);
|
||||
return;
|
||||
@ -85,17 +85,23 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
|
||||
struct hns_roce_uar *hr_uar,
|
||||
struct hns_roce_cq *hr_cq, int vector)
|
||||
{
|
||||
struct hns_roce_cmd_mailbox *mailbox = NULL;
|
||||
struct hns_roce_cq_table *cq_table = NULL;
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
struct hns_roce_cmd_mailbox *mailbox;
|
||||
struct hns_roce_hem_table *mtt_table;
|
||||
struct hns_roce_cq_table *cq_table;
|
||||
struct device *dev = hr_dev->dev;
|
||||
dma_addr_t dma_handle;
|
||||
u64 *mtts = NULL;
|
||||
int ret = 0;
|
||||
u64 *mtts;
|
||||
int ret;
|
||||
|
||||
cq_table = &hr_dev->cq_table;
|
||||
|
||||
/* Get the physical address of cq buf */
|
||||
mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
|
||||
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
|
||||
mtt_table = &hr_dev->mr_table.mtt_cqe_table;
|
||||
else
|
||||
mtt_table = &hr_dev->mr_table.mtt_table;
|
||||
|
||||
mtts = hns_roce_table_find(hr_dev, mtt_table,
|
||||
hr_mtt->first_seg, &dma_handle);
|
||||
if (!mtts) {
|
||||
dev_err(dev, "CQ alloc.Failed to find cq buf addr.\n");
|
||||
@ -150,6 +156,7 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
|
||||
}
|
||||
|
||||
hr_cq->cons_index = 0;
|
||||
hr_cq->arm_sn = 1;
|
||||
hr_cq->uar = hr_uar;
|
||||
|
||||
atomic_set(&hr_cq->refcount, 1);
|
||||
@ -182,21 +189,22 @@ static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
|
||||
void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
|
||||
{
|
||||
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
struct device *dev = hr_dev->dev;
|
||||
int ret;
|
||||
|
||||
ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
|
||||
if (ret)
|
||||
dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
|
||||
hr_cq->cqn);
|
||||
if (hr_dev->eq_table.eq) {
|
||||
/* Waiting interrupt process procedure carried out */
|
||||
synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
|
||||
|
||||
/* Waiting interrupt process procedure carried out */
|
||||
synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
|
||||
|
||||
/* wait for all interrupt processed */
|
||||
if (atomic_dec_and_test(&hr_cq->refcount))
|
||||
complete(&hr_cq->free);
|
||||
wait_for_completion(&hr_cq->free);
|
||||
/* wait for all interrupt processed */
|
||||
if (atomic_dec_and_test(&hr_cq->refcount))
|
||||
complete(&hr_cq->free);
|
||||
wait_for_completion(&hr_cq->free);
|
||||
}
|
||||
|
||||
spin_lock_irq(&cq_table->lock);
|
||||
radix_tree_delete(&cq_table->tree, hr_cq->cqn);
|
||||
@ -205,6 +213,7 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
|
||||
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
|
||||
hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hns_roce_free_cq);
|
||||
|
||||
static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
|
||||
struct ib_ucontext *context,
|
||||
@ -212,14 +221,31 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
|
||||
struct ib_umem **umem, u64 buf_addr, int cqe)
|
||||
{
|
||||
int ret;
|
||||
u32 page_shift;
|
||||
u32 npages;
|
||||
|
||||
*umem = ib_umem_get(context, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
|
||||
IB_ACCESS_LOCAL_WRITE, 1);
|
||||
if (IS_ERR(*umem))
|
||||
return PTR_ERR(*umem);
|
||||
|
||||
ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
|
||||
(*umem)->page_shift, &buf->hr_mtt);
|
||||
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
|
||||
buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
|
||||
else
|
||||
buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
|
||||
|
||||
if (hr_dev->caps.cqe_buf_pg_sz) {
|
||||
npages = (ib_umem_page_count(*umem) +
|
||||
(1 << hr_dev->caps.cqe_buf_pg_sz) - 1) /
|
||||
(1 << hr_dev->caps.cqe_buf_pg_sz);
|
||||
page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
|
||||
ret = hns_roce_mtt_init(hr_dev, npages, page_shift,
|
||||
&buf->hr_mtt);
|
||||
} else {
|
||||
ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
|
||||
(*umem)->page_shift,
|
||||
&buf->hr_mtt);
|
||||
}
|
||||
if (ret)
|
||||
goto err_buf;
|
||||
|
||||
@ -241,12 +267,19 @@ static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
|
||||
struct hns_roce_cq_buf *buf, u32 nent)
|
||||
{
|
||||
int ret;
|
||||
u32 page_shift = PAGE_SHIFT + hr_dev->caps.cqe_buf_pg_sz;
|
||||
|
||||
ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz,
|
||||
PAGE_SIZE * 2, &buf->hr_buf);
|
||||
(1 << page_shift) * 2, &buf->hr_buf,
|
||||
page_shift);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
|
||||
buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
|
||||
else
|
||||
buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
|
||||
|
||||
ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
|
||||
buf->hr_buf.page_shift, &buf->hr_mtt);
|
||||
if (ret)
|
||||
@ -281,13 +314,13 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
|
||||
struct ib_udata *udata)
|
||||
{
|
||||
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
struct device *dev = hr_dev->dev;
|
||||
struct hns_roce_ib_create_cq ucmd;
|
||||
struct hns_roce_cq *hr_cq = NULL;
|
||||
struct hns_roce_uar *uar = NULL;
|
||||
int vector = attr->comp_vector;
|
||||
int cq_entries = attr->cqe;
|
||||
int ret = 0;
|
||||
int ret;
|
||||
|
||||
if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
|
||||
dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
|
||||
@ -295,13 +328,12 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
hr_cq = kmalloc(sizeof(*hr_cq), GFP_KERNEL);
|
||||
hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
|
||||
if (!hr_cq)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
/* In v1 engine, parameter verification */
|
||||
if (cq_entries < HNS_ROCE_MIN_CQE_NUM)
|
||||
cq_entries = HNS_ROCE_MIN_CQE_NUM;
|
||||
if (hr_dev->caps.min_cqes)
|
||||
cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
|
||||
|
||||
cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
|
||||
hr_cq->ib_cq.cqe = cq_entries - 1;
|
||||
@ -335,8 +367,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
|
||||
}
|
||||
|
||||
uar = &hr_dev->priv_uar;
|
||||
hr_cq->cq_db_l = hr_dev->reg_base + ROCEE_DB_OTHERS_L_0_REG +
|
||||
0x1000 * uar->index;
|
||||
hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
|
||||
DB_REG_OFFSET * uar->index;
|
||||
}
|
||||
|
||||
/* Allocate cq index, fill cq_context */
|
||||
@ -353,7 +385,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
|
||||
* problems if tptr is set to zero here, so we initialze it in user
|
||||
* space.
|
||||
*/
|
||||
if (!context)
|
||||
if (!context && hr_cq->tptr_addr)
|
||||
*hr_cq->tptr_addr = 0;
|
||||
|
||||
/* Get created cq handler and carry out event */
|
||||
@ -385,6 +417,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
|
||||
kfree(hr_cq);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hns_roce_ib_create_cq);
|
||||
|
||||
int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
|
||||
{
|
||||
@ -410,10 +443,11 @@ int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hns_roce_ib_destroy_cq);
|
||||
|
||||
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
|
||||
{
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
struct device *dev = hr_dev->dev;
|
||||
struct hns_roce_cq *cq;
|
||||
|
||||
cq = radix_tree_lookup(&hr_dev->cq_table.tree,
|
||||
@ -423,13 +457,14 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
|
||||
return;
|
||||
}
|
||||
|
||||
++cq->arm_sn;
|
||||
cq->comp(cq);
|
||||
}
|
||||
|
||||
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
|
||||
{
|
||||
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
struct device *dev = hr_dev->dev;
|
||||
struct hns_roce_cq *cq;
|
||||
|
||||
cq = radix_tree_lookup(&cq_table->tree,
|
||||
|
@ -78,6 +78,8 @@
|
||||
#define HNS_ROCE_MAX_GID_NUM 16
|
||||
#define HNS_ROCE_GID_SIZE 16
|
||||
|
||||
#define HNS_ROCE_HOP_NUM_0 0xff
|
||||
|
||||
#define BITMAP_NO_RR 0
|
||||
#define BITMAP_RR 1
|
||||
|
||||
@ -168,6 +170,16 @@ enum {
|
||||
HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE = 0x07,
|
||||
};
|
||||
|
||||
enum {
|
||||
HNS_ROCE_CAP_FLAG_REREG_MR = BIT(0),
|
||||
HNS_ROCE_CAP_FLAG_ROCE_V1_V2 = BIT(1),
|
||||
};
|
||||
|
||||
enum hns_roce_mtt_type {
|
||||
MTT_TYPE_WQE,
|
||||
MTT_TYPE_CQE,
|
||||
};
|
||||
|
||||
#define HNS_ROCE_CMD_SUCCESS 1
|
||||
|
||||
#define HNS_ROCE_PORT_DOWN 0
|
||||
@ -229,15 +241,21 @@ struct hns_roce_hem_table {
|
||||
unsigned long num_obj;
|
||||
/*Single obj size */
|
||||
unsigned long obj_size;
|
||||
unsigned long table_chunk_size;
|
||||
int lowmem;
|
||||
struct mutex mutex;
|
||||
struct hns_roce_hem **hem;
|
||||
u64 **bt_l1;
|
||||
dma_addr_t *bt_l1_dma_addr;
|
||||
u64 **bt_l0;
|
||||
dma_addr_t *bt_l0_dma_addr;
|
||||
};
|
||||
|
||||
struct hns_roce_mtt {
|
||||
unsigned long first_seg;
|
||||
int order;
|
||||
int page_shift;
|
||||
unsigned long first_seg;
|
||||
int order;
|
||||
int page_shift;
|
||||
enum hns_roce_mtt_type mtt_type;
|
||||
};
|
||||
|
||||
/* Only support 4K page size for mr register */
|
||||
@ -255,6 +273,19 @@ struct hns_roce_mr {
|
||||
int type; /* MR's register type */
|
||||
u64 *pbl_buf;/* MR's PBL space */
|
||||
dma_addr_t pbl_dma_addr; /* MR's PBL space PA */
|
||||
u32 pbl_size;/* PA number in the PBL */
|
||||
u64 pbl_ba;/* page table address */
|
||||
u32 l0_chunk_last_num;/* L0 last number */
|
||||
u32 l1_chunk_last_num;/* L1 last number */
|
||||
u64 **pbl_bt_l2;/* PBL BT L2 */
|
||||
u64 **pbl_bt_l1;/* PBL BT L1 */
|
||||
u64 *pbl_bt_l0;/* PBL BT L0 */
|
||||
dma_addr_t *pbl_l2_dma_addr;/* PBL BT L2 dma addr */
|
||||
dma_addr_t *pbl_l1_dma_addr;/* PBL BT L1 dma addr */
|
||||
dma_addr_t pbl_l0_dma_addr;/* PBL BT L0 dma addr */
|
||||
u32 pbl_ba_pg_sz;/* BT chunk page size */
|
||||
u32 pbl_buf_pg_sz;/* buf chunk page size */
|
||||
u32 pbl_hop_num;/* multi-hop number */
|
||||
};
|
||||
|
||||
struct hns_roce_mr_table {
|
||||
@ -262,6 +293,8 @@ struct hns_roce_mr_table {
|
||||
struct hns_roce_buddy mtt_buddy;
|
||||
struct hns_roce_hem_table mtt_table;
|
||||
struct hns_roce_hem_table mtpt_table;
|
||||
struct hns_roce_buddy mtt_cqe_buddy;
|
||||
struct hns_roce_hem_table mtt_cqe_table;
|
||||
};
|
||||
|
||||
struct hns_roce_wq {
|
||||
@ -277,6 +310,12 @@ struct hns_roce_wq {
|
||||
void __iomem *db_reg_l;
|
||||
};
|
||||
|
||||
struct hns_roce_sge {
|
||||
int sge_cnt; /* SGE num */
|
||||
int offset;
|
||||
int sge_shift;/* SGE size */
|
||||
};
|
||||
|
||||
struct hns_roce_buf_list {
|
||||
void *buf;
|
||||
dma_addr_t map;
|
||||
@ -308,6 +347,7 @@ struct hns_roce_cq {
|
||||
u32 cons_index;
|
||||
void __iomem *cq_db_l;
|
||||
u16 *tptr_addr;
|
||||
int arm_sn;
|
||||
unsigned long cqn;
|
||||
u32 vector;
|
||||
atomic_t refcount;
|
||||
@ -328,6 +368,7 @@ struct hns_roce_qp_table {
|
||||
spinlock_t lock;
|
||||
struct hns_roce_hem_table qp_table;
|
||||
struct hns_roce_hem_table irrl_table;
|
||||
struct hns_roce_hem_table trrl_table;
|
||||
};
|
||||
|
||||
struct hns_roce_cq_table {
|
||||
@ -367,7 +408,6 @@ struct hns_roce_cmd_context {
|
||||
|
||||
struct hns_roce_cmdq {
|
||||
struct dma_pool *pool;
|
||||
u8 __iomem *hcr;
|
||||
struct mutex hcr_mutex;
|
||||
struct semaphore poll_sem;
|
||||
/*
|
||||
@ -429,6 +469,9 @@ struct hns_roce_qp {
|
||||
|
||||
atomic_t refcount;
|
||||
struct completion free;
|
||||
|
||||
struct hns_roce_sge sge;
|
||||
u32 next_sge;
|
||||
};
|
||||
|
||||
struct hns_roce_sqp {
|
||||
@ -439,7 +482,6 @@ struct hns_roce_ib_iboe {
|
||||
spinlock_t lock;
|
||||
struct net_device *netdevs[HNS_ROCE_MAX_PORTS];
|
||||
struct notifier_block nb;
|
||||
struct notifier_block nb_inet;
|
||||
u8 phy_port[HNS_ROCE_MAX_PORTS];
|
||||
};
|
||||
|
||||
@ -477,16 +519,20 @@ struct hns_roce_caps {
|
||||
u32 max_wqes; /* 16k */
|
||||
u32 max_sq_desc_sz; /* 64 */
|
||||
u32 max_rq_desc_sz; /* 64 */
|
||||
u32 max_srq_desc_sz;
|
||||
int max_qp_init_rdma;
|
||||
int max_qp_dest_rdma;
|
||||
int num_cqs;
|
||||
int max_cqes;
|
||||
int min_cqes;
|
||||
u32 min_wqes;
|
||||
int reserved_cqs;
|
||||
int num_aeq_vectors; /* 1 */
|
||||
int num_comp_vectors; /* 32 ceq */
|
||||
int num_other_vectors;
|
||||
int num_mtpts;
|
||||
u32 num_mtt_segs;
|
||||
u32 num_cqe_segs;
|
||||
int reserved_mrws;
|
||||
int reserved_uars;
|
||||
int num_pds;
|
||||
@ -498,29 +544,70 @@ struct hns_roce_caps {
|
||||
int mtpt_entry_sz;
|
||||
int qpc_entry_sz;
|
||||
int irrl_entry_sz;
|
||||
int trrl_entry_sz;
|
||||
int cqc_entry_sz;
|
||||
u32 pbl_ba_pg_sz;
|
||||
u32 pbl_buf_pg_sz;
|
||||
u32 pbl_hop_num;
|
||||
int aeqe_depth;
|
||||
int ceqe_depth[HNS_ROCE_COMP_VEC_NUM];
|
||||
enum ib_mtu max_mtu;
|
||||
u32 qpc_bt_num;
|
||||
u32 srqc_bt_num;
|
||||
u32 cqc_bt_num;
|
||||
u32 mpt_bt_num;
|
||||
u32 qpc_ba_pg_sz;
|
||||
u32 qpc_buf_pg_sz;
|
||||
u32 qpc_hop_num;
|
||||
u32 srqc_ba_pg_sz;
|
||||
u32 srqc_buf_pg_sz;
|
||||
u32 srqc_hop_num;
|
||||
u32 cqc_ba_pg_sz;
|
||||
u32 cqc_buf_pg_sz;
|
||||
u32 cqc_hop_num;
|
||||
u32 mpt_ba_pg_sz;
|
||||
u32 mpt_buf_pg_sz;
|
||||
u32 mpt_hop_num;
|
||||
u32 mtt_ba_pg_sz;
|
||||
u32 mtt_buf_pg_sz;
|
||||
u32 mtt_hop_num;
|
||||
u32 cqe_ba_pg_sz;
|
||||
u32 cqe_buf_pg_sz;
|
||||
u32 cqe_hop_num;
|
||||
u32 chunk_sz; /* chunk size in non multihop mode*/
|
||||
u64 flags;
|
||||
};
|
||||
|
||||
struct hns_roce_hw {
	int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
	void (*hw_profile)(struct hns_roce_dev *hr_dev);
	int (*cmq_init)(struct hns_roce_dev *hr_dev);
	void (*cmq_exit)(struct hns_roce_dev *hr_dev);
	int (*hw_profile)(struct hns_roce_dev *hr_dev);
	int (*hw_init)(struct hns_roce_dev *hr_dev);
	void (*hw_exit)(struct hns_roce_dev *hr_dev);
	void (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
			union ib_gid *gid);
	void (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
	int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
			 u64 out_param, u32 in_modifier, u8 op_modifier, u16 op,
			 u16 token, int event);
	int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout);
	int (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
		       union ib_gid *gid, const struct ib_gid_attr *attr);
	int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
	void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
			enum ib_mtu mtu);
	int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr,
			  unsigned long mtpt_idx);
	int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
				struct hns_roce_mr *mr, int flags, u32 pdn,
				int mr_access_flags, u64 iova, u64 size,
				void *mb_buf);
	void (*write_cqc)(struct hns_roce_dev *hr_dev,
			  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
			  dma_addr_t dma_handle, int nent, u32 vector);
	int (*set_hem)(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, int obj, int step_idx);
	int (*clear_hem)(struct hns_roce_dev *hr_dev,
			 struct hns_roce_hem_table *table, int obj);
			 struct hns_roce_hem_table *table, int obj,
			 int step_idx);
	int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
			int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
	int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
@@ -535,12 +622,14 @@ struct hns_roce_hw {
	int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
	int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
	int (*destroy_cq)(struct ib_cq *ibcq);
	void *priv;
	int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
};

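For orientation, each hardware backend fills this ops table with its own callbacks and the core calls through it. A hypothetical, trimmed initializer is sketched below; the hns_roce_vX_* names are invented for illustration (the real tables are hns_roce_hw_v1/v2 with their own callbacks), and only a subset of the hooks is shown.

/* Hypothetical sketch: how a backend might wire up struct hns_roce_hw.
 * All callback names below are invented for illustration only.
 */
static const struct hns_roce_hw hns_roce_hw_vX = {
	.cmq_init	= hns_roce_vX_cmq_init,
	.cmq_exit	= hns_roce_vX_cmq_exit,
	.hw_profile	= hns_roce_vX_profile,
	.hw_init	= hns_roce_vX_init,
	.hw_exit	= hns_roce_vX_exit,
	.set_gid	= hns_roce_vX_set_gid,
	.set_mac	= hns_roce_vX_set_mac,
	.write_cqc	= hns_roce_vX_write_cqc,
	.set_hem	= hns_roce_vX_set_hem,
	.clear_hem	= hns_roce_vX_clear_hem,
	.modify_qp	= hns_roce_vX_modify_qp,
	.poll_cq	= hns_roce_vX_poll_cq,
	.modify_cq	= hns_roce_vX_modify_cq,
};

Declaring the table const matches the change below that turns the device's hw pointer into a const struct hns_roce_hw *.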
struct hns_roce_dev {
	struct ib_device ib_dev;
	struct platform_device *pdev;
	struct pci_dev *pci_dev;
	struct device *dev;
	struct hns_roce_uar priv_uar;
	const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
	spinlock_t sm_lock;
@@ -569,9 +658,12 @@ struct hns_roce_dev {

	int cmd_mod;
	int loop_idc;
	u32 sdb_offset;
	u32 odb_offset;
	dma_addr_t tptr_dma_addr;	/* only for hw v1 */
	u32 tptr_size;	/* only for hw v1 */
	struct hns_roce_hw *hw;
	const struct hns_roce_hw *hw;
	void *priv;
};

static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
@@ -635,12 +727,14 @@ static inline struct hns_roce_qp
static inline void *hns_roce_buf_offset(struct hns_roce_buf *buf, int offset)
{
	u32 bits_per_long_val = BITS_PER_LONG;
	u32 page_size = 1 << buf->page_shift;

	if (bits_per_long_val == 64 || buf->nbufs == 1)
	if ((bits_per_long_val == 64 && buf->page_shift == PAGE_SHIFT) ||
	    buf->nbufs == 1)
		return (char *)(buf->direct.buf) + offset;
	else
		return (char *)(buf->page_list[offset >> PAGE_SHIFT].buf) +
			(offset & (PAGE_SIZE - 1));
		return (char *)(buf->page_list[offset >> buf->page_shift].buf) +
			(offset & (page_size - 1));
}

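A minimal sketch of the paging math used above, assuming a buffer built from pages of 1 << page_shift bytes (illustrative helper and parameter names, not driver code):

/* Sketch only: split a byte offset into a page index and an
 * intra-page offset for a buffer made of same-sized pages.
 */
static inline void *page_list_offset(void **pages, int offset, u32 page_shift)
{
	u32 page_size = 1 << page_shift;

	return (char *)pages[offset >> page_shift] + (offset & (page_size - 1));
}

The new code uses the buffer's own page_shift for both the index shift and the mask, instead of the system PAGE_SHIFT/PAGE_SIZE, which is what allows non-system-page-sized buffer pages.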
int hns_roce_init_uar_table(struct hns_roce_dev *dev);
|
||||
@ -702,6 +796,9 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc);
|
||||
struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
|
||||
u64 virt_addr, int access_flags,
|
||||
struct ib_udata *udata);
|
||||
int hns_roce_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length,
|
||||
u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
|
||||
struct ib_udata *udata);
|
||||
int hns_roce_dereg_mr(struct ib_mr *ibmr);
|
||||
int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
|
||||
struct hns_roce_cmd_mailbox *mailbox,
|
||||
@ -711,7 +808,7 @@ unsigned long key_to_hw_index(u32 key);
|
||||
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
|
||||
struct hns_roce_buf *buf);
|
||||
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
|
||||
struct hns_roce_buf *buf);
|
||||
struct hns_roce_buf *buf, u32 page_shift);
|
||||
|
||||
int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
|
||||
struct hns_roce_mtt *mtt, struct ib_umem *umem);
|
||||
@ -723,6 +820,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
|
||||
int attr_mask, struct ib_udata *udata);
|
||||
void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
|
||||
void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
|
||||
void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n);
|
||||
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
|
||||
struct ib_cq *ib_cq);
|
||||
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
|
||||
@ -749,7 +847,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
|
||||
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
|
||||
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
|
||||
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
|
||||
|
||||
extern struct hns_roce_hw hns_roce_hw_v1;
|
||||
int hns_roce_init(struct hns_roce_dev *hr_dev);
|
||||
void hns_roce_exit(struct hns_roce_dev *hr_dev);
|
||||
|
||||
#endif /* _HNS_ROCE_DEVICE_H */
|
||||
|
@ -558,7 +558,7 @@ static int hns_roce_create_eq(struct hns_roce_dev *hr_dev,
|
||||
writel(eqshift_val, eqc);
|
||||
|
||||
/* Configure eq extended address 12~44bit */
|
||||
writel((u32)(eq->buf_list[0].map >> 12), (u8 *)eqc + 4);
|
||||
writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);
|
||||
|
||||
/*
|
||||
* Configure eq extended address 45~49 bit.
|
||||
@ -572,13 +572,13 @@ static int hns_roce_create_eq(struct hns_roce_dev *hr_dev,
|
||||
roce_set_field(eqcuridx_val,
|
||||
ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
|
||||
ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
|
||||
writel(eqcuridx_val, (u8 *)eqc + 8);
|
||||
writel(eqcuridx_val, eqc + 8);
|
||||
|
||||
/* Configure eq consumer index */
|
||||
roce_set_field(eqconsindx_val,
|
||||
ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
|
||||
ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
|
||||
writel(eqconsindx_val, (u8 *)eqc + 0xc);
|
||||
writel(eqconsindx_val, eqc + 0xc);
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -36,14 +36,165 @@
|
||||
#include "hns_roce_hem.h"
|
||||
#include "hns_roce_common.h"
|
||||
|
||||
#define HNS_ROCE_HEM_ALLOC_SIZE (1 << 17)
|
||||
#define HNS_ROCE_TABLE_CHUNK_SIZE (1 << 17)
|
||||
|
||||
#define DMA_ADDR_T_SHIFT 12
|
||||
#define BT_BA_SHIFT 32
|
||||
|
||||
struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
					gfp_t gfp_mask)
bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
{
	if ((hr_dev->caps.qpc_hop_num && type == HEM_TYPE_QPC) ||
	    (hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) ||
	    (hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) ||
	    (hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) ||
	    (hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) ||
	    (hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(hns_roce_check_whether_mhop);

|
||||
static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 start_idx,
|
||||
u32 bt_chunk_num)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < bt_chunk_num; i++)
|
||||
if (hem[start_idx + i])
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool hns_roce_check_bt_null(u64 **bt, u64 start_idx, u32 bt_chunk_num)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < bt_chunk_num; i++)
|
||||
if (bt[start_idx + i])
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static int hns_roce_get_bt_num(u32 table_type, u32 hop_num)
|
||||
{
|
||||
if (check_whether_bt_num_3(table_type, hop_num))
|
||||
return 3;
|
||||
else if (check_whether_bt_num_2(table_type, hop_num))
|
||||
return 2;
|
||||
else if (check_whether_bt_num_1(table_type, hop_num))
|
||||
return 1;
|
||||
else
|
||||
return 0;
|
||||
}
|
||||
|
||||
int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
|
||||
struct hns_roce_hem_table *table, unsigned long *obj,
|
||||
struct hns_roce_hem_mhop *mhop)
|
||||
{
|
||||
struct device *dev = hr_dev->dev;
|
||||
u32 chunk_ba_num;
|
||||
u32 table_idx;
|
||||
u32 bt_num;
|
||||
u32 chunk_size;
|
||||
|
||||
switch (table->type) {
|
||||
case HEM_TYPE_QPC:
|
||||
mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
|
||||
+ PAGE_SHIFT);
|
||||
mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
|
||||
+ PAGE_SHIFT);
|
||||
mhop->ba_l0_num = hr_dev->caps.qpc_bt_num;
|
||||
mhop->hop_num = hr_dev->caps.qpc_hop_num;
|
||||
break;
|
||||
case HEM_TYPE_MTPT:
|
||||
mhop->buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
|
||||
+ PAGE_SHIFT);
|
||||
mhop->bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
|
||||
+ PAGE_SHIFT);
|
||||
mhop->ba_l0_num = hr_dev->caps.mpt_bt_num;
|
||||
mhop->hop_num = hr_dev->caps.mpt_hop_num;
|
||||
break;
|
||||
case HEM_TYPE_CQC:
|
||||
mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
|
||||
+ PAGE_SHIFT);
|
||||
mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
|
||||
+ PAGE_SHIFT);
|
||||
mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
|
||||
mhop->hop_num = hr_dev->caps.cqc_hop_num;
|
||||
break;
|
||||
case HEM_TYPE_SRQC:
|
||||
mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
|
||||
+ PAGE_SHIFT);
|
||||
mhop->bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
|
||||
+ PAGE_SHIFT);
|
||||
mhop->ba_l0_num = hr_dev->caps.srqc_bt_num;
|
||||
mhop->hop_num = hr_dev->caps.srqc_hop_num;
|
||||
break;
|
||||
case HEM_TYPE_MTT:
|
||||
mhop->buf_chunk_size = 1 << (hr_dev->caps.mtt_buf_pg_sz
|
||||
+ PAGE_SHIFT);
|
||||
mhop->bt_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
|
||||
+ PAGE_SHIFT);
|
||||
mhop->ba_l0_num = mhop->bt_chunk_size / 8;
|
||||
mhop->hop_num = hr_dev->caps.mtt_hop_num;
|
||||
break;
|
||||
case HEM_TYPE_CQE:
|
||||
mhop->buf_chunk_size = 1 << (hr_dev->caps.cqe_buf_pg_sz
|
||||
+ PAGE_SHIFT);
|
||||
mhop->bt_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
|
||||
+ PAGE_SHIFT);
|
||||
mhop->ba_l0_num = mhop->bt_chunk_size / 8;
|
||||
mhop->hop_num = hr_dev->caps.cqe_hop_num;
|
||||
break;
|
||||
default:
|
||||
dev_err(dev, "Table %d not support multi-hop addressing!\n",
|
||||
table->type);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!obj)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* QPC/MTPT/CQC/SRQC alloc hem for buffer pages.
|
||||
* MTT/CQE alloc hem for bt pages.
|
||||
*/
|
||||
bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
|
||||
chunk_ba_num = mhop->bt_chunk_size / 8;
|
||||
chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :
|
||||
mhop->bt_chunk_size;
|
||||
table_idx = (*obj & (table->num_obj - 1)) /
|
||||
(chunk_size / table->obj_size);
|
||||
switch (bt_num) {
|
||||
case 3:
|
||||
mhop->l2_idx = table_idx & (chunk_ba_num - 1);
|
||||
mhop->l1_idx = table_idx / chunk_ba_num & (chunk_ba_num - 1);
|
||||
mhop->l0_idx = table_idx / chunk_ba_num / chunk_ba_num;
|
||||
break;
|
||||
case 2:
|
||||
mhop->l1_idx = table_idx & (chunk_ba_num - 1);
|
||||
mhop->l0_idx = table_idx / chunk_ba_num;
|
||||
break;
|
||||
case 1:
|
||||
mhop->l0_idx = table_idx;
|
||||
break;
|
||||
default:
|
||||
dev_err(dev, "Table %d not support hop_num = %d!\n",
|
||||
table->type, mhop->hop_num);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (mhop->l0_idx >= mhop->ba_l0_num)
|
||||
mhop->l0_idx %= mhop->ba_l0_num;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hns_roce_calc_hem_mhop);
|
||||
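As a worked illustration of the index split performed in the switch above (a sketch with an invented helper name; chunk_ba_num is a power of two, so the masks below are equivalent to the driver's & (chunk_ba_num - 1) arithmetic):

/* Sketch: split a flat table_idx into L0/L1/L2 indices for a
 * three-level base address table, mirroring the bt_num == 3 case.
 */
static void mhop_split_sketch(u32 table_idx, u32 chunk_ba_num,
			      u32 *l0, u32 *l1, u32 *l2)
{
	*l2 = table_idx & (chunk_ba_num - 1);
	*l1 = (table_idx / chunk_ba_num) & (chunk_ba_num - 1);
	*l0 = table_idx / chunk_ba_num / chunk_ba_num;
}

With chunk_ba_num = 512 and table_idx = 300000, for example, this gives l2 = 480, l1 = 585 & 511 = 73, l0 = 1, i.e. the object lives under L0 slot 1, L1 slot 73, L2 slot 480.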
|
||||
static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
|
||||
int npages,
|
||||
unsigned long hem_alloc_size,
|
||||
gfp_t gfp_mask)
|
||||
{
|
||||
struct hns_roce_hem_chunk *chunk = NULL;
|
||||
struct hns_roce_hem *hem;
|
||||
@ -61,7 +212,7 @@ struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
|
||||
hem->refcount = 0;
|
||||
INIT_LIST_HEAD(&hem->chunk_list);
|
||||
|
||||
order = get_order(HNS_ROCE_HEM_ALLOC_SIZE);
|
||||
order = get_order(hem_alloc_size);
|
||||
|
||||
while (npages > 0) {
|
||||
if (!chunk) {
|
||||
@ -84,7 +235,7 @@ struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev, int npages,
|
||||
* memory, directly return fail.
|
||||
*/
|
||||
mem = &chunk->mem[chunk->npages];
|
||||
buf = dma_alloc_coherent(&hr_dev->pdev->dev, PAGE_SIZE << order,
|
||||
buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order,
|
||||
&sg_dma_address(mem), gfp_mask);
|
||||
if (!buf)
|
||||
goto fail;
|
||||
@ -115,7 +266,7 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
|
||||
|
||||
list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
|
||||
for (i = 0; i < chunk->npages; ++i)
|
||||
dma_free_coherent(&hr_dev->pdev->dev,
|
||||
dma_free_coherent(hr_dev->dev,
|
||||
chunk->mem[i].length,
|
||||
lowmem_page_address(sg_page(&chunk->mem[i])),
|
||||
sg_dma_address(&chunk->mem[i]));
|
||||
@ -128,8 +279,8 @@ void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
|
||||
static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
|
||||
struct hns_roce_hem_table *table, unsigned long obj)
|
||||
{
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
spinlock_t *lock = &hr_dev->bt_cmd_lock;
|
||||
struct device *dev = hr_dev->dev;
|
||||
unsigned long end = 0;
|
||||
unsigned long flags;
|
||||
struct hns_roce_hem_iter iter;
|
||||
@ -142,7 +293,7 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
|
||||
|
||||
/* Find the HEM(Hardware Entry Memory) entry */
|
||||
unsigned long i = (obj & (table->num_obj - 1)) /
|
||||
(HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size);
|
||||
(table->table_chunk_size / table->obj_size);
|
||||
|
||||
switch (table->type) {
|
||||
case HEM_TYPE_QPC:
|
||||
@ -209,14 +360,185 @@ static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
|
||||
struct hns_roce_hem_table *table,
|
||||
unsigned long obj)
|
||||
{
|
||||
struct device *dev = hr_dev->dev;
|
||||
struct hns_roce_hem_mhop mhop;
|
||||
struct hns_roce_hem_iter iter;
|
||||
u32 buf_chunk_size;
|
||||
u32 bt_chunk_size;
|
||||
u32 chunk_ba_num;
|
||||
u32 hop_num;
|
||||
u32 size;
|
||||
u32 bt_num;
|
||||
u64 hem_idx;
|
||||
u64 bt_l1_idx = 0;
|
||||
u64 bt_l0_idx = 0;
|
||||
u64 bt_ba;
|
||||
unsigned long mhop_obj = obj;
|
||||
int bt_l1_allocated = 0;
|
||||
int bt_l0_allocated = 0;
|
||||
int step_idx;
|
||||
int ret;
|
||||
|
||||
ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
buf_chunk_size = mhop.buf_chunk_size;
|
||||
bt_chunk_size = mhop.bt_chunk_size;
|
||||
hop_num = mhop.hop_num;
|
||||
chunk_ba_num = bt_chunk_size / 8;
|
||||
|
||||
bt_num = hns_roce_get_bt_num(table->type, hop_num);
|
||||
switch (bt_num) {
|
||||
case 3:
|
||||
hem_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
|
||||
mhop.l1_idx * chunk_ba_num + mhop.l2_idx;
|
||||
bt_l1_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
|
||||
bt_l0_idx = mhop.l0_idx;
|
||||
break;
|
||||
case 2:
|
||||
hem_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
|
||||
bt_l0_idx = mhop.l0_idx;
|
||||
break;
|
||||
case 1:
|
||||
hem_idx = mhop.l0_idx;
|
||||
break;
|
||||
default:
|
||||
dev_err(dev, "Table %d not support hop_num = %d!\n",
|
||||
table->type, hop_num);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mutex_lock(&table->mutex);
|
||||
|
||||
if (table->hem[hem_idx]) {
|
||||
++table->hem[hem_idx]->refcount;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* alloc L1 BA's chunk */
|
||||
if ((check_whether_bt_num_3(table->type, hop_num) ||
|
||||
check_whether_bt_num_2(table->type, hop_num)) &&
|
||||
!table->bt_l0[bt_l0_idx]) {
|
||||
table->bt_l0[bt_l0_idx] = dma_alloc_coherent(dev, bt_chunk_size,
|
||||
&(table->bt_l0_dma_addr[bt_l0_idx]),
|
||||
GFP_KERNEL);
|
||||
if (!table->bt_l0[bt_l0_idx]) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
bt_l0_allocated = 1;
|
||||
|
||||
/* set base address to hardware */
|
||||
if (table->type < HEM_TYPE_MTT) {
|
||||
step_idx = 0;
|
||||
if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
|
||||
ret = -ENODEV;
|
||||
dev_err(dev, "set HEM base address to HW failed!\n");
|
||||
goto err_dma_alloc_l1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* alloc L2 BA's chunk */
|
||||
if (check_whether_bt_num_3(table->type, hop_num) &&
|
||||
!table->bt_l1[bt_l1_idx]) {
|
||||
table->bt_l1[bt_l1_idx] = dma_alloc_coherent(dev, bt_chunk_size,
|
||||
&(table->bt_l1_dma_addr[bt_l1_idx]),
|
||||
GFP_KERNEL);
|
||||
if (!table->bt_l1[bt_l1_idx]) {
|
||||
ret = -ENOMEM;
|
||||
goto err_dma_alloc_l1;
|
||||
}
|
||||
bt_l1_allocated = 1;
|
||||
*(table->bt_l0[bt_l0_idx] + mhop.l1_idx) =
|
||||
table->bt_l1_dma_addr[bt_l1_idx];
|
||||
|
||||
/* set base address to hardware */
|
||||
step_idx = 1;
|
||||
if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
|
||||
ret = -ENODEV;
|
||||
dev_err(dev, "set HEM base address to HW failed!\n");
|
||||
goto err_alloc_hem_buf;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* alloc buffer space chunk for QPC/MTPT/CQC/SRQC.
|
||||
* alloc bt space chunk for MTT/CQE.
|
||||
*/
|
||||
size = table->type < HEM_TYPE_MTT ? buf_chunk_size : bt_chunk_size;
|
||||
table->hem[hem_idx] = hns_roce_alloc_hem(hr_dev,
|
||||
size >> PAGE_SHIFT,
|
||||
size,
|
||||
(table->lowmem ? GFP_KERNEL :
|
||||
GFP_HIGHUSER) | __GFP_NOWARN);
|
||||
if (!table->hem[hem_idx]) {
|
||||
ret = -ENOMEM;
|
||||
goto err_alloc_hem_buf;
|
||||
}
|
||||
|
||||
hns_roce_hem_first(table->hem[hem_idx], &iter);
|
||||
bt_ba = hns_roce_hem_addr(&iter);
|
||||
|
||||
if (table->type < HEM_TYPE_MTT) {
|
||||
if (hop_num == 2) {
|
||||
*(table->bt_l1[bt_l1_idx] + mhop.l2_idx) = bt_ba;
|
||||
step_idx = 2;
|
||||
} else if (hop_num == 1) {
|
||||
*(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba;
|
||||
step_idx = 1;
|
||||
} else if (hop_num == HNS_ROCE_HOP_NUM_0) {
|
||||
step_idx = 0;
|
||||
}
|
||||
|
||||
/* set HEM base address to hardware */
|
||||
if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
|
||||
ret = -ENODEV;
|
||||
dev_err(dev, "set HEM base address to HW failed!\n");
|
||||
goto err_alloc_hem_buf;
|
||||
}
|
||||
} else if (hop_num == 2) {
|
||||
*(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba;
|
||||
}
|
||||
|
||||
++table->hem[hem_idx]->refcount;
|
||||
goto out;
|
||||
|
||||
err_alloc_hem_buf:
|
||||
if (bt_l1_allocated) {
|
||||
dma_free_coherent(dev, bt_chunk_size, table->bt_l1[bt_l1_idx],
|
||||
table->bt_l1_dma_addr[bt_l1_idx]);
|
||||
table->bt_l1[bt_l1_idx] = NULL;
|
||||
}
|
||||
|
||||
err_dma_alloc_l1:
|
||||
if (bt_l0_allocated) {
|
||||
dma_free_coherent(dev, bt_chunk_size, table->bt_l0[bt_l0_idx],
|
||||
table->bt_l0_dma_addr[bt_l0_idx]);
|
||||
table->bt_l0[bt_l0_idx] = NULL;
|
||||
}
|
||||
|
||||
out:
|
||||
mutex_unlock(&table->mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
|
||||
struct hns_roce_hem_table *table, unsigned long obj)
|
||||
{
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
struct device *dev = hr_dev->dev;
|
||||
int ret = 0;
|
||||
unsigned long i;
|
||||
|
||||
i = (obj & (table->num_obj - 1)) / (HNS_ROCE_TABLE_CHUNK_SIZE /
|
||||
if (hns_roce_check_whether_mhop(hr_dev, table->type))
|
||||
return hns_roce_table_mhop_get(hr_dev, table, obj);
|
||||
|
||||
i = (obj & (table->num_obj - 1)) / (table->table_chunk_size /
|
||||
table->obj_size);
|
||||
|
||||
mutex_lock(&table->mutex);
|
||||
@ -227,7 +549,8 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
|
||||
}
|
||||
|
||||
table->hem[i] = hns_roce_alloc_hem(hr_dev,
|
||||
HNS_ROCE_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
|
||||
table->table_chunk_size >> PAGE_SHIFT,
|
||||
table->table_chunk_size,
|
||||
(table->lowmem ? GFP_KERNEL :
|
||||
GFP_HIGHUSER) | __GFP_NOWARN);
|
||||
if (!table->hem[i]) {
|
||||
@ -237,6 +560,8 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
|
||||
|
||||
/* Set HEM base address(128K/page, pa) to Hardware */
|
||||
if (hns_roce_set_hem(hr_dev, table, obj)) {
|
||||
hns_roce_free_hem(hr_dev, table->hem[i]);
|
||||
table->hem[i] = NULL;
|
||||
ret = -ENODEV;
|
||||
dev_err(dev, "set HEM base address to HW failed.\n");
|
||||
goto out;
|
||||
@ -248,20 +573,139 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
|
||||
struct hns_roce_hem_table *table,
|
||||
unsigned long obj,
|
||||
int check_refcount)
|
||||
{
|
||||
struct device *dev = hr_dev->dev;
|
||||
struct hns_roce_hem_mhop mhop;
|
||||
unsigned long mhop_obj = obj;
|
||||
u32 bt_chunk_size;
|
||||
u32 chunk_ba_num;
|
||||
u32 hop_num;
|
||||
u32 start_idx;
|
||||
u32 bt_num;
|
||||
u64 hem_idx;
|
||||
u64 bt_l1_idx = 0;
|
||||
int ret;
|
||||
|
||||
ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
|
||||
if (ret)
|
||||
return;
|
||||
|
||||
bt_chunk_size = mhop.bt_chunk_size;
|
||||
hop_num = mhop.hop_num;
|
||||
chunk_ba_num = bt_chunk_size / 8;
|
||||
|
||||
bt_num = hns_roce_get_bt_num(table->type, hop_num);
|
||||
switch (bt_num) {
|
||||
case 3:
|
||||
hem_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
|
||||
mhop.l1_idx * chunk_ba_num + mhop.l2_idx;
|
||||
bt_l1_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
|
||||
break;
|
||||
case 2:
|
||||
hem_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
|
||||
break;
|
||||
case 1:
|
||||
hem_idx = mhop.l0_idx;
|
||||
break;
|
||||
default:
|
||||
dev_err(dev, "Table %d not support hop_num = %d!\n",
|
||||
table->type, hop_num);
|
||||
return;
|
||||
}
|
||||
|
||||
mutex_lock(&table->mutex);
|
||||
|
||||
if (check_refcount && (--table->hem[hem_idx]->refcount > 0)) {
|
||||
mutex_unlock(&table->mutex);
|
||||
return;
|
||||
}
|
||||
|
||||
if (table->type < HEM_TYPE_MTT && hop_num == 1) {
|
||||
if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
|
||||
dev_warn(dev, "Clear HEM base address failed.\n");
|
||||
} else if (table->type < HEM_TYPE_MTT && hop_num == 2) {
|
||||
if (hr_dev->hw->clear_hem(hr_dev, table, obj, 2))
|
||||
dev_warn(dev, "Clear HEM base address failed.\n");
|
||||
} else if (table->type < HEM_TYPE_MTT &&
|
||||
hop_num == HNS_ROCE_HOP_NUM_0) {
|
||||
if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
|
||||
dev_warn(dev, "Clear HEM base address failed.\n");
|
||||
}
|
||||
|
||||
/*
|
||||
* free buffer space chunk for QPC/MTPT/CQC/SRQC.
|
||||
* free bt space chunk for MTT/CQE.
|
||||
*/
|
||||
hns_roce_free_hem(hr_dev, table->hem[hem_idx]);
|
||||
table->hem[hem_idx] = NULL;
|
||||
|
||||
if (check_whether_bt_num_2(table->type, hop_num)) {
|
||||
start_idx = mhop.l0_idx * chunk_ba_num;
|
||||
if (hns_roce_check_hem_null(table->hem, start_idx,
|
||||
chunk_ba_num)) {
|
||||
if (table->type < HEM_TYPE_MTT &&
|
||||
hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
|
||||
dev_warn(dev, "Clear HEM base address failed.\n");
|
||||
|
||||
dma_free_coherent(dev, bt_chunk_size,
|
||||
table->bt_l0[mhop.l0_idx],
|
||||
table->bt_l0_dma_addr[mhop.l0_idx]);
|
||||
table->bt_l0[mhop.l0_idx] = NULL;
|
||||
}
|
||||
} else if (check_whether_bt_num_3(table->type, hop_num)) {
|
||||
start_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
|
||||
mhop.l1_idx * chunk_ba_num;
|
||||
if (hns_roce_check_hem_null(table->hem, start_idx,
|
||||
chunk_ba_num)) {
|
||||
if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
|
||||
dev_warn(dev, "Clear HEM base address failed.\n");
|
||||
|
||||
dma_free_coherent(dev, bt_chunk_size,
|
||||
table->bt_l1[bt_l1_idx],
|
||||
table->bt_l1_dma_addr[bt_l1_idx]);
|
||||
table->bt_l1[bt_l1_idx] = NULL;
|
||||
|
||||
start_idx = mhop.l0_idx * chunk_ba_num;
|
||||
if (hns_roce_check_bt_null(table->bt_l1, start_idx,
|
||||
chunk_ba_num)) {
|
||||
if (hr_dev->hw->clear_hem(hr_dev, table, obj,
|
||||
0))
|
||||
dev_warn(dev, "Clear HEM base address failed.\n");
|
||||
|
||||
dma_free_coherent(dev, bt_chunk_size,
|
||||
table->bt_l0[mhop.l0_idx],
|
||||
table->bt_l0_dma_addr[mhop.l0_idx]);
|
||||
table->bt_l0[mhop.l0_idx] = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&table->mutex);
|
||||
}
|
||||
|
||||
void hns_roce_table_put(struct hns_roce_dev *hr_dev,
|
||||
struct hns_roce_hem_table *table, unsigned long obj)
|
||||
{
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
struct device *dev = hr_dev->dev;
|
||||
unsigned long i;
|
||||
|
||||
if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
|
||||
hns_roce_table_mhop_put(hr_dev, table, obj, 1);
|
||||
return;
|
||||
}
|
||||
|
||||
i = (obj & (table->num_obj - 1)) /
|
||||
(HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size);
|
||||
(table->table_chunk_size / table->obj_size);
|
||||
|
||||
mutex_lock(&table->mutex);
|
||||
|
||||
if (--table->hem[i]->refcount == 0) {
|
||||
/* Clear HEM base address */
|
||||
if (hr_dev->hw->clear_hem(hr_dev, table, obj))
|
||||
if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
|
||||
dev_warn(dev, "Clear HEM base address failed.\n");
|
||||
|
||||
hns_roce_free_hem(hr_dev, table->hem[i]);
|
||||
@ -271,23 +715,48 @@ void hns_roce_table_put(struct hns_roce_dev *hr_dev,
|
||||
mutex_unlock(&table->mutex);
|
||||
}
|
||||
|
||||
void *hns_roce_table_find(struct hns_roce_hem_table *table, unsigned long obj,
|
||||
dma_addr_t *dma_handle)
|
||||
void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
|
||||
struct hns_roce_hem_table *table,
|
||||
unsigned long obj, dma_addr_t *dma_handle)
|
||||
{
|
||||
struct hns_roce_hem_chunk *chunk;
|
||||
unsigned long idx;
|
||||
int i;
|
||||
int offset, dma_offset;
|
||||
struct hns_roce_hem_mhop mhop;
|
||||
struct hns_roce_hem *hem;
|
||||
struct page *page = NULL;
|
||||
unsigned long mhop_obj = obj;
|
||||
unsigned long obj_per_chunk;
|
||||
unsigned long idx_offset;
|
||||
int offset, dma_offset;
|
||||
int i, j;
|
||||
u32 hem_idx = 0;
|
||||
|
||||
if (!table->lowmem)
|
||||
return NULL;
|
||||
|
||||
mutex_lock(&table->mutex);
|
||||
idx = (obj & (table->num_obj - 1)) * table->obj_size;
|
||||
hem = table->hem[idx / HNS_ROCE_TABLE_CHUNK_SIZE];
|
||||
dma_offset = offset = idx % HNS_ROCE_TABLE_CHUNK_SIZE;
|
||||
|
||||
if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {
|
||||
obj_per_chunk = table->table_chunk_size / table->obj_size;
|
||||
hem = table->hem[(obj & (table->num_obj - 1)) / obj_per_chunk];
|
||||
idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk;
|
||||
dma_offset = offset = idx_offset * table->obj_size;
|
||||
} else {
|
||||
hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
|
||||
/* mtt mhop */
|
||||
i = mhop.l0_idx;
|
||||
j = mhop.l1_idx;
|
||||
if (mhop.hop_num == 2)
|
||||
hem_idx = i * (mhop.bt_chunk_size / 8) + j;
|
||||
else if (mhop.hop_num == 1 ||
|
||||
mhop.hop_num == HNS_ROCE_HOP_NUM_0)
|
||||
hem_idx = i;
|
||||
|
||||
hem = table->hem[hem_idx];
|
||||
dma_offset = offset = (obj & (table->num_obj - 1)) *
|
||||
table->obj_size % mhop.bt_chunk_size;
|
||||
if (mhop.hop_num == 2)
|
||||
dma_offset = offset = 0;
|
||||
}
|
||||
|
||||
if (!hem)
|
||||
goto out;
|
||||
@ -314,14 +783,21 @@ void *hns_roce_table_find(struct hns_roce_hem_table *table, unsigned long obj,
|
||||
mutex_unlock(&table->mutex);
|
||||
return page ? lowmem_page_address(page) + offset : NULL;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hns_roce_table_find);
|
||||
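The non-multi-hop branch of the lookup above reduces to simple chunk arithmetic; a standalone sketch (invented helper, ignoring the num_obj wrap mask):

/* Sketch of the linear (non-multi-hop) lookup: which HEM chunk holds
 * object 'obj', and at what byte offset inside that chunk.
 */
static void hem_locate_sketch(unsigned long obj, unsigned long obj_size,
			      unsigned long chunk_size,
			      unsigned long *chunk_idx, unsigned long *offset)
{
	unsigned long obj_per_chunk = chunk_size / obj_size;

	*chunk_idx = obj / obj_per_chunk;
	*offset = (obj % obj_per_chunk) * obj_size;
}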
|
||||
int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
|
||||
struct hns_roce_hem_table *table,
|
||||
unsigned long start, unsigned long end)
|
||||
{
|
||||
unsigned long inc = HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size;
|
||||
unsigned long i = 0;
|
||||
int ret = 0;
|
||||
struct hns_roce_hem_mhop mhop;
|
||||
unsigned long inc = table->table_chunk_size / table->obj_size;
|
||||
unsigned long i;
|
||||
int ret;
|
||||
|
||||
if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
|
||||
hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
|
||||
inc = mhop.bt_chunk_size / table->obj_size;
|
||||
}
|
||||
|
||||
/* Allocate MTT entry memory according to chunk(128K) */
|
||||
for (i = start; i <= end; i += inc) {
|
||||
@ -344,10 +820,16 @@ void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
|
||||
struct hns_roce_hem_table *table,
|
||||
unsigned long start, unsigned long end)
|
||||
{
|
||||
struct hns_roce_hem_mhop mhop;
|
||||
unsigned long inc = table->table_chunk_size / table->obj_size;
|
||||
unsigned long i;
|
||||
|
||||
for (i = start; i <= end;
|
||||
i += HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size)
|
||||
if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
|
||||
hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
|
||||
inc = mhop.bt_chunk_size / table->obj_size;
|
||||
}
|
||||
|
||||
for (i = start; i <= end; i += inc)
|
||||
hns_roce_table_put(hr_dev, table, i);
|
||||
}
|
||||
|
||||
@ -356,15 +838,120 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
|
||||
unsigned long obj_size, unsigned long nobj,
|
||||
int use_lowmem)
|
||||
{
|
||||
struct device *dev = hr_dev->dev;
|
||||
unsigned long obj_per_chunk;
|
||||
unsigned long num_hem;
|
||||
|
||||
obj_per_chunk = HNS_ROCE_TABLE_CHUNK_SIZE / obj_size;
|
||||
num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
|
||||
if (!hns_roce_check_whether_mhop(hr_dev, type)) {
|
||||
table->table_chunk_size = hr_dev->caps.chunk_sz;
|
||||
obj_per_chunk = table->table_chunk_size / obj_size;
|
||||
num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
|
||||
|
||||
table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
|
||||
if (!table->hem)
|
||||
return -ENOMEM;
|
||||
table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
|
||||
if (!table->hem)
|
||||
return -ENOMEM;
|
||||
} else {
|
||||
unsigned long buf_chunk_size;
|
||||
unsigned long bt_chunk_size;
|
||||
unsigned long bt_chunk_num;
|
||||
unsigned long num_bt_l0 = 0;
|
||||
u32 hop_num;
|
||||
|
||||
switch (type) {
|
||||
case HEM_TYPE_QPC:
|
||||
buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
|
||||
+ PAGE_SHIFT);
|
||||
bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
|
||||
+ PAGE_SHIFT);
|
||||
num_bt_l0 = hr_dev->caps.qpc_bt_num;
|
||||
hop_num = hr_dev->caps.qpc_hop_num;
|
||||
break;
|
||||
case HEM_TYPE_MTPT:
|
||||
buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
|
||||
+ PAGE_SHIFT);
|
||||
bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
|
||||
+ PAGE_SHIFT);
|
||||
num_bt_l0 = hr_dev->caps.mpt_bt_num;
|
||||
hop_num = hr_dev->caps.mpt_hop_num;
|
||||
break;
|
||||
case HEM_TYPE_CQC:
|
||||
buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
|
||||
+ PAGE_SHIFT);
|
||||
bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
|
||||
+ PAGE_SHIFT);
|
||||
num_bt_l0 = hr_dev->caps.cqc_bt_num;
|
||||
hop_num = hr_dev->caps.cqc_hop_num;
|
||||
break;
|
||||
case HEM_TYPE_SRQC:
|
||||
buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
|
||||
+ PAGE_SHIFT);
|
||||
bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
|
||||
+ PAGE_SHIFT);
|
||||
num_bt_l0 = hr_dev->caps.srqc_bt_num;
|
||||
hop_num = hr_dev->caps.srqc_hop_num;
|
||||
break;
|
||||
case HEM_TYPE_MTT:
|
||||
buf_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
|
||||
+ PAGE_SHIFT);
|
||||
bt_chunk_size = buf_chunk_size;
|
||||
hop_num = hr_dev->caps.mtt_hop_num;
|
||||
break;
|
||||
case HEM_TYPE_CQE:
|
||||
buf_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
|
||||
+ PAGE_SHIFT);
|
||||
bt_chunk_size = buf_chunk_size;
|
||||
hop_num = hr_dev->caps.cqe_hop_num;
|
||||
break;
|
||||
default:
|
||||
dev_err(dev,
|
||||
"Table %d not support to init hem table here!\n",
|
||||
type);
|
||||
return -EINVAL;
|
||||
}
|
||||
obj_per_chunk = buf_chunk_size / obj_size;
|
||||
num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
|
||||
bt_chunk_num = bt_chunk_size / 8;
|
||||
if (table->type >= HEM_TYPE_MTT)
|
||||
num_bt_l0 = bt_chunk_num;
|
||||
|
||||
table->hem = kcalloc(num_hem, sizeof(*table->hem),
|
||||
GFP_KERNEL);
|
||||
if (!table->hem)
|
||||
goto err_kcalloc_hem_buf;
|
||||
|
||||
if (check_whether_bt_num_3(table->type, hop_num)) {
|
||||
unsigned long num_bt_l1;
|
||||
|
||||
num_bt_l1 = (num_hem + bt_chunk_num - 1) /
|
||||
bt_chunk_num;
|
||||
table->bt_l1 = kcalloc(num_bt_l1,
|
||||
sizeof(*table->bt_l1),
|
||||
GFP_KERNEL);
|
||||
if (!table->bt_l1)
|
||||
goto err_kcalloc_bt_l1;
|
||||
|
||||
table->bt_l1_dma_addr = kcalloc(num_bt_l1,
|
||||
sizeof(*table->bt_l1_dma_addr),
|
||||
GFP_KERNEL);
|
||||
|
||||
if (!table->bt_l1_dma_addr)
|
||||
goto err_kcalloc_l1_dma;
|
||||
}
|
||||
|
||||
if (check_whether_bt_num_2(table->type, hop_num) ||
|
||||
check_whether_bt_num_3(table->type, hop_num)) {
|
||||
table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0),
|
||||
GFP_KERNEL);
|
||||
if (!table->bt_l0)
|
||||
goto err_kcalloc_bt_l0;
|
||||
|
||||
table->bt_l0_dma_addr = kcalloc(num_bt_l0,
|
||||
sizeof(*table->bt_l0_dma_addr),
|
||||
GFP_KERNEL);
|
||||
if (!table->bt_l0_dma_addr)
|
||||
goto err_kcalloc_l0_dma;
|
||||
}
|
||||
}
|
||||
|
||||
table->type = type;
|
||||
table->num_hem = num_hem;
|
||||
@ -374,18 +961,72 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
|
||||
mutex_init(&table->mutex);
|
||||
|
||||
return 0;
|
||||
|
||||
err_kcalloc_l0_dma:
|
||||
kfree(table->bt_l0);
|
||||
table->bt_l0 = NULL;
|
||||
|
||||
err_kcalloc_bt_l0:
|
||||
kfree(table->bt_l1_dma_addr);
|
||||
table->bt_l1_dma_addr = NULL;
|
||||
|
||||
err_kcalloc_l1_dma:
|
||||
kfree(table->bt_l1);
|
||||
table->bt_l1 = NULL;
|
||||
|
||||
err_kcalloc_bt_l1:
|
||||
kfree(table->hem);
|
||||
table->hem = NULL;
|
||||
|
||||
err_kcalloc_hem_buf:
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
|
||||
struct hns_roce_hem_table *table)
|
||||
{
|
||||
struct hns_roce_hem_mhop mhop;
|
||||
u32 buf_chunk_size;
|
||||
int i;
|
||||
u64 obj;
|
||||
|
||||
hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
|
||||
buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size :
|
||||
mhop.bt_chunk_size;
|
||||
|
||||
for (i = 0; i < table->num_hem; ++i) {
|
||||
obj = i * buf_chunk_size / table->obj_size;
|
||||
if (table->hem[i])
|
||||
hns_roce_table_mhop_put(hr_dev, table, obj, 0);
|
||||
}
|
||||
|
||||
kfree(table->hem);
|
||||
table->hem = NULL;
|
||||
kfree(table->bt_l1);
|
||||
table->bt_l1 = NULL;
|
||||
kfree(table->bt_l1_dma_addr);
|
||||
table->bt_l1_dma_addr = NULL;
|
||||
kfree(table->bt_l0);
|
||||
table->bt_l0 = NULL;
|
||||
kfree(table->bt_l0_dma_addr);
|
||||
table->bt_l0_dma_addr = NULL;
|
||||
}
|
||||
|
||||
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
|
||||
struct hns_roce_hem_table *table)
|
||||
{
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
struct device *dev = hr_dev->dev;
|
||||
unsigned long i;
|
||||
|
||||
if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
|
||||
hns_roce_cleanup_mhop_hem_table(hr_dev, table);
|
||||
return;
|
||||
}
|
||||
|
||||
for (i = 0; i < table->num_hem; ++i)
|
||||
if (table->hem[i]) {
|
||||
if (hr_dev->hw->clear_hem(hr_dev, table,
|
||||
i * HNS_ROCE_TABLE_CHUNK_SIZE / table->obj_size))
|
||||
i * table->table_chunk_size / table->obj_size, 0))
|
||||
dev_err(dev, "Clear HEM base address failed.\n");
|
||||
|
||||
hns_roce_free_hem(hr_dev, table->hem[i]);
|
||||
@ -398,7 +1039,13 @@ void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
|
||||
{
|
||||
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
|
||||
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
|
||||
if (hr_dev->caps.trrl_entry_sz)
|
||||
hns_roce_cleanup_hem_table(hr_dev,
|
||||
&hr_dev->qp_table.trrl_table);
|
||||
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
|
||||
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
|
||||
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
|
||||
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
|
||||
hns_roce_cleanup_hem_table(hr_dev,
|
||||
&hr_dev->mr_table.mtt_cqe_table);
|
||||
}
|
||||
|
@@ -47,13 +47,27 @@ enum {

	/* UNMAP HEM */
	HEM_TYPE_MTT,
	HEM_TYPE_CQE,
	HEM_TYPE_IRRL,
	HEM_TYPE_TRRL,
};

#define HNS_ROCE_HEM_CHUNK_LEN \
	((256 - sizeof(struct list_head) - 2 * sizeof(int)) / \
	 (sizeof(struct scatterlist)))

#define check_whether_bt_num_3(type, hop_num) \
	(type < HEM_TYPE_MTT && hop_num == 2)

#define check_whether_bt_num_2(type, hop_num) \
	((type < HEM_TYPE_MTT && hop_num == 1) || \
	 (type >= HEM_TYPE_MTT && hop_num == 2))

#define check_whether_bt_num_1(type, hop_num) \
	((type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0) || \
	 (type >= HEM_TYPE_MTT && hop_num == 1) || \
	 (type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0))

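A small sketch of how these macros are consumed: the table type plus the configured hop_num selects how many base-address-table levels sit in front of the buffer pages, mirroring hns_roce_get_bt_num() in hns_roce_hem.c (the helper name here is illustrative only).

/* Illustrative helper: number of BT levels for a table type/hop_num pair. */
static inline int bt_levels_sketch(u32 type, u32 hop_num)
{
	if (check_whether_bt_num_3(type, hop_num))
		return 3;	/* e.g. QPC/MTPT/CQC/SRQC with hop_num == 2 */
	if (check_whether_bt_num_2(type, hop_num))
		return 2;	/* e.g. QPC with hop_num == 1, MTT with hop_num == 2 */
	if (check_whether_bt_num_1(type, hop_num))
		return 1;	/* HNS_ROCE_HOP_NUM_0, or MTT/CQE with hop_num == 1 */
	return 0;	/* not a supported multi-hop configuration */
}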
enum {
|
||||
HNS_ROCE_HEM_PAGE_SHIFT = 12,
|
||||
HNS_ROCE_HEM_PAGE_SIZE = 1 << HNS_ROCE_HEM_PAGE_SHIFT,
|
||||
@ -77,12 +91,23 @@ struct hns_roce_hem_iter {
|
||||
int page_idx;
|
||||
};
|
||||
|
||||
struct hns_roce_hem_mhop {
|
||||
u32 hop_num;
|
||||
u32 buf_chunk_size;
|
||||
u32 bt_chunk_size;
|
||||
u32 ba_l0_num;
|
||||
u32 l0_idx;/* level 0 base address table index */
|
||||
u32 l1_idx;/* level 1 base address table index */
|
||||
u32 l2_idx;/* level 2 base address table index */
|
||||
};
|
||||
|
||||
void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem);
|
||||
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
|
||||
struct hns_roce_hem_table *table, unsigned long obj);
|
||||
void hns_roce_table_put(struct hns_roce_dev *hr_dev,
|
||||
struct hns_roce_hem_table *table, unsigned long obj);
|
||||
void *hns_roce_table_find(struct hns_roce_hem_table *table, unsigned long obj,
|
||||
void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
|
||||
struct hns_roce_hem_table *table, unsigned long obj,
|
||||
dma_addr_t *dma_handle);
|
||||
int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
|
||||
struct hns_roce_hem_table *table,
|
||||
@ -97,6 +122,10 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
|
||||
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
|
||||
struct hns_roce_hem_table *table);
|
||||
void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev);
|
||||
int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
|
||||
struct hns_roce_hem_table *table, unsigned long *obj,
|
||||
struct hns_roce_hem_mhop *mhop);
|
||||
bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type);
|
||||
|
||||
static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
|
||||
struct hns_roce_hem_iter *iter)
|
||||
@ -105,7 +134,7 @@ static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
|
||||
iter->chunk = list_empty(&hem->chunk_list) ? NULL :
|
||||
list_entry(hem->chunk_list.next,
|
||||
struct hns_roce_hem_chunk, list);
|
||||
iter->page_idx = 0;
|
||||
iter->page_idx = 0;
|
||||
}
|
||||
|
||||
static inline int hns_roce_hem_last(struct hns_roce_hem_iter *iter)
|
||||
|
File diff suppressed because it is too large
@ -72,6 +72,8 @@
|
||||
#define HNS_ROCE_V1_CQE_ENTRY_SIZE 32
|
||||
#define HNS_ROCE_V1_PAGE_SIZE_SUPPORT 0xFFFFF000
|
||||
|
||||
#define HNS_ROCE_V1_TABLE_CHUNK_SIZE (1 << 17)
|
||||
|
||||
#define HNS_ROCE_V1_EXT_RAQ_WF 8
|
||||
#define HNS_ROCE_V1_RAQ_ENTRY 64
|
||||
#define HNS_ROCE_V1_RAQ_DEPTH 32768
|
||||
@ -948,6 +950,11 @@ struct hns_roce_qp_context {
|
||||
#define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M \
|
||||
(((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S)
|
||||
|
||||
#define STATUS_MASK 0xff
|
||||
#define GO_BIT_TIMEOUT_MSECS 10000
|
||||
#define HCR_STATUS_OFFSET 0x18
|
||||
#define HCR_GO_BIT 15
|
||||
|
||||
struct hns_roce_rq_db {
|
||||
u32 u32_4;
|
||||
u32 u32_8;
|
||||
|
3296	drivers/infiniband/hw/hns/hns_roce_hw_v2.c (new file; diff suppressed because it is too large)
1177	drivers/infiniband/hw/hns/hns_roce_hw_v2.h (new file; diff suppressed because it is too large)
@ -57,20 +57,21 @@ int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index)
|
||||
{
|
||||
return gid_index * hr_dev->caps.num_ports + port;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hns_get_gid_index);
|
||||
|
||||
static void hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
|
||||
static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u8 port, u8 *addr)
|
||||
{
|
||||
u8 phy_port;
|
||||
u32 i = 0;
|
||||
|
||||
if (!memcmp(hr_dev->dev_addr[port], addr, MAC_ADDR_OCTET_NUM))
|
||||
return;
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < MAC_ADDR_OCTET_NUM; i++)
|
||||
hr_dev->dev_addr[port][i] = addr[i];
|
||||
|
||||
phy_port = hr_dev->iboe.phy_port[port];
|
||||
hr_dev->hw->set_mac(hr_dev, phy_port, addr);
|
||||
return hr_dev->hw->set_mac(hr_dev, phy_port, addr);
|
||||
}
|
||||
|
||||
static int hns_roce_add_gid(struct ib_device *device, u8 port_num,
|
||||
@ -80,17 +81,19 @@ static int hns_roce_add_gid(struct ib_device *device, u8 port_num,
|
||||
struct hns_roce_dev *hr_dev = to_hr_dev(device);
|
||||
u8 port = port_num - 1;
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
if (port >= hr_dev->caps.num_ports)
|
||||
return -EINVAL;
|
||||
|
||||
spin_lock_irqsave(&hr_dev->iboe.lock, flags);
|
||||
|
||||
hr_dev->hw->set_gid(hr_dev, port, index, (union ib_gid *)gid);
|
||||
ret = hr_dev->hw->set_gid(hr_dev, port, index, (union ib_gid *)gid,
|
||||
attr);
|
||||
|
||||
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
|
||||
@ -100,24 +103,26 @@ static int hns_roce_del_gid(struct ib_device *device, u8 port_num,
|
||||
union ib_gid zgid = { {0} };
|
||||
u8 port = port_num - 1;
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
if (port >= hr_dev->caps.num_ports)
|
||||
return -EINVAL;
|
||||
|
||||
spin_lock_irqsave(&hr_dev->iboe.lock, flags);
|
||||
|
||||
hr_dev->hw->set_gid(hr_dev, port, index, &zgid);
|
||||
ret = hr_dev->hw->set_gid(hr_dev, port, index, &zgid, NULL);
|
||||
|
||||
spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
|
||||
unsigned long event)
|
||||
{
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
struct device *dev = hr_dev->dev;
|
||||
struct net_device *netdev;
|
||||
int ret = 0;
|
||||
|
||||
netdev = hr_dev->iboe.netdevs[port];
|
||||
if (!netdev) {
|
||||
@ -130,7 +135,7 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
|
||||
case NETDEV_CHANGE:
|
||||
case NETDEV_REGISTER:
|
||||
case NETDEV_CHANGEADDR:
|
||||
hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
|
||||
ret = hns_roce_set_mac(hr_dev, port, netdev->dev_addr);
|
||||
break;
|
||||
case NETDEV_DOWN:
|
||||
/*
|
||||
@ -142,7 +147,7 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int hns_roce_netdev_event(struct notifier_block *self,
|
||||
@ -171,12 +176,17 @@ static int hns_roce_netdev_event(struct notifier_block *self,
|
||||
|
||||
static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
|
||||
{
|
||||
int ret;
|
||||
u8 i;
|
||||
|
||||
for (i = 0; i < hr_dev->caps.num_ports; i++) {
|
||||
hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
|
||||
hr_dev->caps.max_mtu);
|
||||
hns_roce_set_mac(hr_dev, i, hr_dev->iboe.netdevs[i]->dev_addr);
|
||||
if (hr_dev->hw->set_mtu)
|
||||
hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
|
||||
hr_dev->caps.max_mtu);
|
||||
ret = hns_roce_set_mac(hr_dev, i,
|
||||
hr_dev->iboe.netdevs[i]->dev_addr);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -200,7 +210,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
|
||||
props->max_qp_wr = hr_dev->caps.max_wqes;
|
||||
props->device_cap_flags = IB_DEVICE_PORT_ACTIVE_EVENT |
|
||||
IB_DEVICE_RC_RNR_NAK_GEN;
|
||||
props->max_sge = hr_dev->caps.max_sq_sg;
|
||||
props->max_sge = max(hr_dev->caps.max_sq_sg, hr_dev->caps.max_rq_sg);
|
||||
props->max_sge_rd = 1;
|
||||
props->max_cq = hr_dev->caps.num_cqs;
|
||||
props->max_cqe = hr_dev->caps.max_cqes;
|
||||
@ -238,7 +248,7 @@ static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
|
||||
struct ib_port_attr *props)
|
||||
{
|
||||
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
struct device *dev = hr_dev->dev;
|
||||
struct net_device *net_dev;
|
||||
unsigned long flags;
|
||||
enum ib_mtu mtu;
|
||||
@ -379,7 +389,8 @@ static int hns_roce_mmap(struct ib_ucontext *context,
|
||||
to_hr_ucontext(context)->uar.pfn,
|
||||
PAGE_SIZE, vma->vm_page_prot))
|
||||
return -EAGAIN;
|
||||
} else if (vma->vm_pgoff == 1 && hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
|
||||
} else if (vma->vm_pgoff == 1 && hr_dev->tptr_dma_addr &&
|
||||
hr_dev->tptr_size) {
|
||||
/* vm_pgoff: 1 -- TPTR */
|
||||
if (io_remap_pfn_range(vma, vma->vm_start,
|
||||
hr_dev->tptr_dma_addr >> PAGE_SHIFT,
|
||||
@ -398,8 +409,6 @@ static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
|
||||
struct ib_port_attr attr;
|
||||
int ret;
|
||||
|
||||
immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
|
||||
|
||||
ret = ib_query_port(ib_dev, port_num, &attr);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -408,6 +417,9 @@ static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
|
||||
immutable->gid_tbl_len = attr.gid_tbl_len;
|
||||
|
||||
immutable->max_mad_size = IB_MGMT_MAD_SIZE;
|
||||
immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
|
||||
if (to_hr_dev(ib_dev)->caps.flags & HNS_ROCE_CAP_FLAG_ROCE_V1_V2)
|
||||
immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -416,7 +428,6 @@ static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
|
||||
{
|
||||
struct hns_roce_ib_iboe *iboe = &hr_dev->iboe;
|
||||
|
||||
unregister_inetaddr_notifier(&iboe->nb_inet);
|
||||
unregister_netdevice_notifier(&iboe->nb);
|
||||
ib_unregister_device(&hr_dev->ib_dev);
|
||||
}
|
||||
@ -426,7 +437,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
|
||||
int ret;
|
||||
struct hns_roce_ib_iboe *iboe = NULL;
|
||||
struct ib_device *ib_dev = NULL;
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
struct device *dev = hr_dev->dev;
|
||||
|
||||
iboe = &hr_dev->iboe;
|
||||
spin_lock_init(&iboe->lock);
|
||||
@ -492,6 +503,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
|
||||
|
||||
/* CQ */
|
||||
ib_dev->create_cq = hns_roce_ib_create_cq;
|
||||
ib_dev->modify_cq = hr_dev->hw->modify_cq;
|
||||
ib_dev->destroy_cq = hns_roce_ib_destroy_cq;
|
||||
ib_dev->req_notify_cq = hr_dev->hw->req_notify_cq;
|
||||
ib_dev->poll_cq = hr_dev->hw->poll_cq;
|
||||
@ -500,6 +512,10 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
|
||||
ib_dev->get_dma_mr = hns_roce_get_dma_mr;
|
||||
ib_dev->reg_user_mr = hns_roce_reg_user_mr;
|
||||
ib_dev->dereg_mr = hns_roce_dereg_mr;
|
||||
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR) {
|
||||
ib_dev->rereg_user_mr = hns_roce_rereg_user_mr;
|
||||
ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
|
||||
}
|
||||
|
||||
/* OTHERS */
|
||||
ib_dev->get_port_immutable = hns_roce_port_immutable;
|
||||
@ -531,173 +547,10 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static const struct of_device_id hns_roce_of_match[] = {
|
||||
{ .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, hns_roce_of_match);
|
||||
|
||||
static const struct acpi_device_id hns_roce_acpi_match[] = {
|
||||
{ "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
|
||||
|
||||
static int hns_roce_node_match(struct device *dev, void *fwnode)
|
||||
{
|
||||
return dev->fwnode == fwnode;
|
||||
}
|
||||
|
||||
static struct
|
||||
platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode)
|
||||
{
|
||||
struct device *dev;
|
||||
|
||||
/* get the 'device'corresponding to matching 'fwnode' */
|
||||
dev = bus_find_device(&platform_bus_type, NULL,
|
||||
fwnode, hns_roce_node_match);
|
||||
/* get the platform device */
|
||||
return dev ? to_platform_device(dev) : NULL;
|
||||
}
|
||||
|
||||
static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
|
||||
{
|
||||
int i;
|
||||
int ret;
|
||||
u8 phy_port;
|
||||
int port_cnt = 0;
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
struct device_node *net_node;
|
||||
struct net_device *netdev = NULL;
|
||||
struct platform_device *pdev = NULL;
|
||||
struct resource *res;
|
||||
|
||||
/* check if we are compatible with the underlying SoC */
|
||||
if (dev_of_node(dev)) {
|
||||
const struct of_device_id *of_id;
|
||||
|
||||
of_id = of_match_node(hns_roce_of_match, dev->of_node);
|
||||
if (!of_id) {
|
||||
dev_err(dev, "device is not compatible!\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
hr_dev->hw = (struct hns_roce_hw *)of_id->data;
|
||||
if (!hr_dev->hw) {
|
||||
dev_err(dev, "couldn't get H/W specific DT data!\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
} else if (is_acpi_device_node(dev->fwnode)) {
|
||||
const struct acpi_device_id *acpi_id;
|
||||
|
||||
acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
|
||||
if (!acpi_id) {
|
||||
dev_err(dev, "device is not compatible!\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
hr_dev->hw = (struct hns_roce_hw *) acpi_id->driver_data;
|
||||
if (!hr_dev->hw) {
|
||||
dev_err(dev, "couldn't get H/W specific ACPI data!\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
} else {
|
||||
dev_err(dev, "can't read compatibility data from DT or ACPI\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
/* get the mapped register base address */
|
||||
res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
|
||||
if (!res) {
|
||||
dev_err(dev, "memory resource not found!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
hr_dev->reg_base = devm_ioremap_resource(dev, res);
|
||||
if (IS_ERR(hr_dev->reg_base))
|
||||
return PTR_ERR(hr_dev->reg_base);
|
||||
|
||||
/* read the node_guid of IB device from the DT or ACPI */
|
||||
ret = device_property_read_u8_array(dev, "node-guid",
|
||||
(u8 *)&hr_dev->ib_dev.node_guid,
|
||||
GUID_LEN);
|
||||
if (ret) {
|
||||
dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* get the RoCE associated ethernet ports or netdevices */
|
||||
for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
|
||||
if (dev_of_node(dev)) {
|
||||
net_node = of_parse_phandle(dev->of_node, "eth-handle",
|
||||
i);
|
||||
if (!net_node)
|
||||
continue;
|
||||
pdev = of_find_device_by_node(net_node);
|
||||
} else if (is_acpi_device_node(dev->fwnode)) {
|
||||
struct acpi_reference_args args;
|
||||
struct fwnode_handle *fwnode;
|
||||
|
||||
ret = acpi_node_get_property_reference(dev->fwnode,
|
||||
"eth-handle",
|
||||
i, &args);
|
||||
if (ret)
|
||||
continue;
|
||||
fwnode = acpi_fwnode_handle(args.adev);
|
||||
pdev = hns_roce_find_pdev(fwnode);
|
||||
} else {
|
||||
dev_err(dev, "cannot read data from DT or ACPI\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
if (pdev) {
|
||||
netdev = platform_get_drvdata(pdev);
|
||||
phy_port = (u8)i;
|
||||
if (netdev) {
|
||||
hr_dev->iboe.netdevs[port_cnt] = netdev;
|
||||
hr_dev->iboe.phy_port[port_cnt] = phy_port;
|
||||
} else {
|
||||
dev_err(dev, "no netdev found with pdev %s\n",
|
||||
pdev->name);
|
||||
return -ENODEV;
|
||||
}
|
||||
port_cnt++;
|
||||
}
|
||||
}
|
||||
|
||||
if (port_cnt == 0) {
|
||||
dev_err(dev, "unable to get eth-handle for available ports!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
hr_dev->caps.num_ports = port_cnt;
|
||||
|
||||
/* cmd issue mode: 0 is poll, 1 is event */
|
||||
hr_dev->cmd_mod = 1;
|
||||
hr_dev->loop_idc = 0;
|
||||
|
||||
/* read the interrupt names from the DT or ACPI */
|
||||
ret = device_property_read_string_array(dev, "interrupt-names",
|
||||
hr_dev->irq_names,
|
||||
HNS_ROCE_MAX_IRQ_NUM);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* fetch the interrupt numbers */
|
||||
for (i = 0; i < HNS_ROCE_MAX_IRQ_NUM; i++) {
|
||||
hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
|
||||
if (hr_dev->irq[i] <= 0) {
|
||||
dev_err(dev, "platform get of irq[=%d] failed!\n", i);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
|
||||
{
|
||||
int ret;
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
struct device *dev = hr_dev->dev;
|
||||
|
||||
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtt_table,
|
||||
HEM_TYPE_MTT, hr_dev->caps.mtt_entry_sz,
|
||||
@ -707,6 +560,17 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
|
||||
ret = hns_roce_init_hem_table(hr_dev,
|
||||
&hr_dev->mr_table.mtt_cqe_table,
|
||||
HEM_TYPE_CQE, hr_dev->caps.mtt_entry_sz,
|
||||
hr_dev->caps.num_cqe_segs, 1);
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to init MTT CQE context memory, aborting.\n");
|
||||
goto err_unmap_cqe;
|
||||
}
|
||||
}
|
||||
|
||||
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table,
|
||||
HEM_TYPE_MTPT, hr_dev->caps.mtpt_entry_sz,
|
||||
hr_dev->caps.num_mtpts, 1);
|
||||
@ -733,16 +597,35 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
|
||||
goto err_unmap_qp;
|
||||
}
|
||||
|
||||
if (hr_dev->caps.trrl_entry_sz) {
|
||||
ret = hns_roce_init_hem_table(hr_dev,
|
||||
&hr_dev->qp_table.trrl_table,
|
||||
HEM_TYPE_TRRL,
|
||||
hr_dev->caps.trrl_entry_sz *
|
||||
hr_dev->caps.max_qp_dest_rdma,
|
||||
hr_dev->caps.num_qps, 1);
|
||||
if (ret) {
|
||||
dev_err(dev,
|
||||
"Failed to init trrl_table memory, aborting.\n");
|
||||
goto err_unmap_irrl;
|
||||
}
|
||||
}
|
||||
|
||||
ret = hns_roce_init_hem_table(hr_dev, &hr_dev->cq_table.table,
|
||||
HEM_TYPE_CQC, hr_dev->caps.cqc_entry_sz,
|
||||
hr_dev->caps.num_cqs, 1);
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to init CQ context memory, aborting.\n");
|
||||
goto err_unmap_irrl;
|
||||
goto err_unmap_trrl;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err_unmap_trrl:
|
||||
if (hr_dev->caps.trrl_entry_sz)
|
||||
hns_roce_cleanup_hem_table(hr_dev,
|
||||
&hr_dev->qp_table.trrl_table);
|
||||
|
||||
err_unmap_irrl:
|
||||
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
|
||||
|
||||
@ -754,6 +637,12 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
|
||||
|
||||
err_unmap_mtt:
|
||||
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
|
||||
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
|
||||
hns_roce_cleanup_hem_table(hr_dev,
|
||||
&hr_dev->mr_table.mtt_cqe_table);
|
||||
|
||||
err_unmap_cqe:
|
||||
hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -766,7 +655,7 @@ static int hns_roce_init_hem(struct hns_roce_dev *hr_dev)
|
||||
static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
|
||||
{
|
||||
int ret;
|
||||
struct device *dev = &hr_dev->pdev->dev;
|
||||
struct device *dev = hr_dev->dev;
|
||||
|
||||
spin_lock_init(&hr_dev->sm_lock);
|
||||
spin_lock_init(&hr_dev->bt_cmd_lock);
|
||||
@ -826,56 +715,45 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* hns_roce_probe - RoCE driver entrance
|
||||
* @pdev: pointer to platform device
|
||||
* Return : int
|
||||
*
|
||||
*/
|
||||
static int hns_roce_probe(struct platform_device *pdev)
|
||||
int hns_roce_init(struct hns_roce_dev *hr_dev)
|
||||
{
|
||||
int ret;
|
||||
struct hns_roce_dev *hr_dev;
|
||||
struct device *dev = &pdev->dev;
|
||||
struct device *dev = hr_dev->dev;
|
||||
|
||||
hr_dev = (struct hns_roce_dev *)ib_alloc_device(sizeof(*hr_dev));
|
||||
if (!hr_dev)
|
||||
return -ENOMEM;
|
||||
|
||||
hr_dev->pdev = pdev;
|
||||
platform_set_drvdata(pdev, hr_dev);
|
||||
|
||||
if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
|
||||
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) {
|
||||
dev_err(dev, "Not usable DMA addressing mode\n");
|
||||
ret = -EIO;
|
||||
goto error_failed_get_cfg;
|
||||
if (hr_dev->hw->reset) {
|
||||
ret = hr_dev->hw->reset(hr_dev, true);
|
||||
if (ret) {
|
||||
dev_err(dev, "Reset RoCE engine failed!\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
ret = hns_roce_get_cfg(hr_dev);
|
||||
if (hr_dev->hw->cmq_init) {
|
||||
ret = hr_dev->hw->cmq_init(hr_dev);
|
||||
if (ret) {
|
||||
dev_err(dev, "Init RoCE Command Queue failed!\n");
|
||||
goto error_failed_cmq_init;
|
||||
}
|
||||
}
|
||||
|
||||
ret = hr_dev->hw->hw_profile(hr_dev);
|
||||
if (ret) {
|
||||
dev_err(dev, "Get Configuration failed!\n");
|
||||
goto error_failed_get_cfg;
|
||||
dev_err(dev, "Get RoCE engine profile failed!\n");
|
||||
goto error_failed_cmd_init;
|
||||
}
|
||||
|
||||
ret = hr_dev->hw->reset(hr_dev, true);
|
||||
if (ret) {
|
||||
dev_err(dev, "Reset RoCE engine failed!\n");
|
||||
goto error_failed_get_cfg;
|
||||
}
|
||||
|
||||
hr_dev->hw->hw_profile(hr_dev);
|
||||
|
||||
ret = hns_roce_cmd_init(hr_dev);
|
||||
if (ret) {
|
||||
dev_err(dev, "cmd init failed!\n");
|
||||
goto error_failed_cmd_init;
|
||||
}
|
||||
|
||||
ret = hns_roce_init_eq_table(hr_dev);
|
||||
if (ret) {
|
||||
dev_err(dev, "eq init failed!\n");
|
||||
goto error_failed_eq_table;
|
||||
if (hr_dev->cmd_mod) {
|
||||
ret = hns_roce_init_eq_table(hr_dev);
|
||||
if (ret) {
|
||||
dev_err(dev, "eq init failed!\n");
|
||||
goto error_failed_eq_table;
|
||||
}
|
||||
}
|
||||
|
||||
if (hr_dev->cmd_mod) {
|
||||
@ -898,10 +776,12 @@ static int hns_roce_probe(struct platform_device *pdev)
|
||||
goto error_failed_setup_hca;
|
||||
}
|
||||
|
||||
ret = hr_dev->hw->hw_init(hr_dev);
|
||||
if (ret) {
|
||||
dev_err(dev, "hw_init failed!\n");
|
||||
goto error_failed_engine_init;
|
||||
if (hr_dev->hw->hw_init) {
|
||||
ret = hr_dev->hw->hw_init(hr_dev);
|
||||
if (ret) {
|
||||
dev_err(dev, "hw_init failed!\n");
|
||||
goto error_failed_engine_init;
|
||||
}
|
||||
}
|
||||
|
||||
ret = hns_roce_register_device(hr_dev);
|
||||
@ -911,7 +791,8 @@ static int hns_roce_probe(struct platform_device *pdev)
|
||||
return 0;
|
||||
|
||||
error_failed_register_device:
|
||||
hr_dev->hw->hw_exit(hr_dev);
|
||||
if (hr_dev->hw->hw_exit)
|
||||
hr_dev->hw->hw_exit(hr_dev);
|
||||
|
||||
error_failed_engine_init:
|
||||
hns_roce_cleanup_bitmap(hr_dev);
|
||||
@ -924,58 +805,47 @@ static int hns_roce_probe(struct platform_device *pdev)
|
||||
hns_roce_cmd_use_polling(hr_dev);
|
||||
|
||||
error_failed_use_event:
|
||||
hns_roce_cleanup_eq_table(hr_dev);
|
||||
if (hr_dev->cmd_mod)
|
||||
hns_roce_cleanup_eq_table(hr_dev);
|
||||
|
||||
error_failed_eq_table:
|
||||
hns_roce_cmd_cleanup(hr_dev);
|
||||
|
||||
error_failed_cmd_init:
|
||||
ret = hr_dev->hw->reset(hr_dev, false);
|
||||
if (ret)
|
||||
dev_err(&hr_dev->pdev->dev, "roce_engine reset fail\n");
|
||||
if (hr_dev->hw->cmq_exit)
|
||||
hr_dev->hw->cmq_exit(hr_dev);
|
||||
|
||||
error_failed_get_cfg:
|
||||
ib_dealloc_device(&hr_dev->ib_dev);
|
||||
error_failed_cmq_init:
|
||||
if (hr_dev->hw->reset) {
|
||||
ret = hr_dev->hw->reset(hr_dev, false);
|
||||
if (ret)
|
||||
dev_err(dev, "Dereset RoCE engine failed!\n");
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hns_roce_init);
|
||||
|
||||
/**
|
||||
* hns_roce_remove - remove RoCE device
|
||||
* @pdev: pointer to platform device
|
||||
*/
|
||||
static int hns_roce_remove(struct platform_device *pdev)
|
||||
void hns_roce_exit(struct hns_roce_dev *hr_dev)
|
||||
{
|
||||
struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
|
||||
|
||||
hns_roce_unregister_device(hr_dev);
|
||||
hr_dev->hw->hw_exit(hr_dev);
|
||||
if (hr_dev->hw->hw_exit)
|
||||
hr_dev->hw->hw_exit(hr_dev);
|
||||
hns_roce_cleanup_bitmap(hr_dev);
|
||||
hns_roce_cleanup_hem(hr_dev);
|
||||
|
||||
if (hr_dev->cmd_mod)
|
||||
hns_roce_cmd_use_polling(hr_dev);
|
||||
|
||||
hns_roce_cleanup_eq_table(hr_dev);
|
||||
if (hr_dev->cmd_mod)
|
||||
hns_roce_cleanup_eq_table(hr_dev);
|
||||
hns_roce_cmd_cleanup(hr_dev);
|
||||
hr_dev->hw->reset(hr_dev, false);
|
||||
|
||||
ib_dealloc_device(&hr_dev->ib_dev);
|
||||
|
||||
return 0;
|
||||
if (hr_dev->hw->cmq_exit)
|
||||
hr_dev->hw->cmq_exit(hr_dev);
|
||||
if (hr_dev->hw->reset)
|
||||
hr_dev->hw->reset(hr_dev, false);
|
||||
}
|
||||
|
||||
static struct platform_driver hns_roce_driver = {
|
||||
.probe = hns_roce_probe,
|
||||
.remove = hns_roce_remove,
|
||||
.driver = {
|
||||
.name = DRV_NAME,
|
||||
.of_match_table = hns_roce_of_match,
|
||||
.acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
|
||||
},
|
||||
};
|
||||
|
||||
module_platform_driver(hns_roce_driver);
|
||||
EXPORT_SYMBOL_GPL(hns_roce_exit);
|
||||
|
||||
MODULE_LICENSE("Dual BSD/GPL");
|
||||
MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
|
||||
|
@@ -47,6 +47,7 @@ unsigned long key_to_hw_index(u32 key)
{
return (key << 24) | (key >> 8);
}
EXPORT_SYMBOL_GPL(key_to_hw_index);

static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox,
@@ -65,6 +66,7 @@ int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
HNS_ROCE_CMD_TIMEOUT_MSECS);
}
EXPORT_SYMBOL_GPL(hns_roce_hw2sw_mpt);

static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
unsigned long *seg)
@@ -175,18 +177,28 @@ static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
}

static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
unsigned long *seg)
unsigned long *seg, u32 mtt_type)
{
struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
int ret = 0;
struct hns_roce_hem_table *table;
struct hns_roce_buddy *buddy;
int ret;

ret = hns_roce_buddy_alloc(&mr_table->mtt_buddy, order, seg);
if (mtt_type == MTT_TYPE_WQE) {
buddy = &mr_table->mtt_buddy;
table = &mr_table->mtt_table;
} else {
buddy = &mr_table->mtt_cqe_buddy;
table = &mr_table->mtt_cqe_table;
}

ret = hns_roce_buddy_alloc(buddy, order, seg);
if (ret == -1)
return -1;

if (hns_roce_table_get_range(hr_dev, &mr_table->mtt_table, *seg,
if (hns_roce_table_get_range(hr_dev, table, *seg,
*seg + (1 << order) - 1)) {
hns_roce_buddy_free(&mr_table->mtt_buddy, *seg, order);
hns_roce_buddy_free(buddy, *seg, order);
return -1;
}

@@ -196,7 +208,7 @@ static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
struct hns_roce_mtt *mtt)
{
int ret = 0;
int ret;
int i;

/* Page num is zero, correspond to DMA memory register */
@@ -215,7 +227,8 @@ int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
++mtt->order;

/* Allocate MTT entry */
ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg);
ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg,
mtt->mtt_type);
if (ret == -1)
return -ENOMEM;

@@ -229,18 +242,261 @@ void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
if (mtt->order < 0)
return;

hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
hns_roce_table_put_range(hr_dev, &mr_table->mtt_table, mtt->first_seg,
mtt->first_seg + (1 << mtt->order) - 1);
if (mtt->mtt_type == MTT_TYPE_WQE) {
hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg,
mtt->order);
hns_roce_table_put_range(hr_dev, &mr_table->mtt_table,
mtt->first_seg,
mtt->first_seg + (1 << mtt->order) - 1);
} else {
hns_roce_buddy_free(&mr_table->mtt_cqe_buddy, mtt->first_seg,
mtt->order);
hns_roce_table_put_range(hr_dev, &mr_table->mtt_cqe_table,
mtt->first_seg,
mtt->first_seg + (1 << mtt->order) - 1);
}
}
EXPORT_SYMBOL_GPL(hns_roce_mtt_cleanup);

static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr, int err_loop_index,
int loop_i, int loop_j)
{
struct device *dev = hr_dev->dev;
u32 mhop_num;
u32 pbl_bt_sz;
u64 bt_idx;
int i, j;

pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
mhop_num = hr_dev->caps.pbl_hop_num;

i = loop_i;
if (mhop_num == 3 && err_loop_index == 2) {
for (; i >= 0; i--) {
dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
mr->pbl_l1_dma_addr[i]);

for (j = 0; j < pbl_bt_sz / 8; j++) {
if (i == loop_i && j >= loop_j)
break;

bt_idx = i * pbl_bt_sz / 8 + j;
dma_free_coherent(dev, pbl_bt_sz,
mr->pbl_bt_l2[bt_idx],
mr->pbl_l2_dma_addr[bt_idx]);
}
}
} else if (mhop_num == 3 && err_loop_index == 1) {
for (i -= 1; i >= 0; i--) {
dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
mr->pbl_l1_dma_addr[i]);

for (j = 0; j < pbl_bt_sz / 8; j++) {
bt_idx = i * pbl_bt_sz / 8 + j;
dma_free_coherent(dev, pbl_bt_sz,
mr->pbl_bt_l2[bt_idx],
mr->pbl_l2_dma_addr[bt_idx]);
}
}
} else if (mhop_num == 2 && err_loop_index == 1) {
for (i -= 1; i >= 0; i--)
dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
mr->pbl_l1_dma_addr[i]);
} else {
dev_warn(dev, "not support: mhop_num=%d, err_loop_index=%d.",
mhop_num, err_loop_index);
return;
}

dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0, mr->pbl_l0_dma_addr);
mr->pbl_bt_l0 = NULL;
mr->pbl_l0_dma_addr = 0;
}

/* PBL multi hop addressing */
static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
struct hns_roce_mr *mr)
{
struct device *dev = hr_dev->dev;
int mr_alloc_done = 0;
int npages_allocated;
int i = 0, j = 0;
u32 pbl_bt_sz;
u32 mhop_num;
u64 pbl_last_bt_num;
u64 pbl_bt_cnt = 0;
u64 bt_idx;
u64 size;

mhop_num = hr_dev->caps.pbl_hop_num;
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);

if (mhop_num == HNS_ROCE_HOP_NUM_0)
return 0;

/* hop_num = 1 */
if (mhop_num == 1) {
if (npages > pbl_bt_sz / 8) {
dev_err(dev, "npages %d is larger than buf_pg_sz!",
npages);
return -EINVAL;
}
mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
&(mr->pbl_dma_addr),
GFP_KERNEL);
if (!mr->pbl_buf)
return -ENOMEM;

mr->pbl_size = npages;
mr->pbl_ba = mr->pbl_dma_addr;
mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
return 0;
}

mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
sizeof(*mr->pbl_l1_dma_addr),
GFP_KERNEL);
if (!mr->pbl_l1_dma_addr)
return -ENOMEM;

mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1),
GFP_KERNEL);
if (!mr->pbl_bt_l1)
goto err_kcalloc_bt_l1;

if (mhop_num == 3) {
mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
sizeof(*mr->pbl_l2_dma_addr),
GFP_KERNEL);
if (!mr->pbl_l2_dma_addr)
goto err_kcalloc_l2_dma;

mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
sizeof(*mr->pbl_bt_l2),
GFP_KERNEL);
if (!mr->pbl_bt_l2)
goto err_kcalloc_bt_l2;
}

/* alloc L0 BT */
mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
&(mr->pbl_l0_dma_addr),
GFP_KERNEL);
if (!mr->pbl_bt_l0)
goto err_dma_alloc_l0;

if (mhop_num == 2) {
/* alloc L1 BT */
for (i = 0; i < pbl_bt_sz / 8; i++) {
if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
size = pbl_bt_sz;
} else {
npages_allocated = i * (pbl_bt_sz / 8);
size = (npages - npages_allocated) * 8;
}
mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size,
&(mr->pbl_l1_dma_addr[i]),
GFP_KERNEL);
if (!mr->pbl_bt_l1[i]) {
hns_roce_loop_free(hr_dev, mr, 1, i, 0);
goto err_dma_alloc_l0;
}

*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];

pbl_bt_cnt++;
if (pbl_bt_cnt >= pbl_last_bt_num)
break;
}
} else if (mhop_num == 3) {
/* alloc L1, L2 BT */
for (i = 0; i < pbl_bt_sz / 8; i++) {
mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
&(mr->pbl_l1_dma_addr[i]),
GFP_KERNEL);
if (!mr->pbl_bt_l1[i]) {
hns_roce_loop_free(hr_dev, mr, 1, i, 0);
goto err_dma_alloc_l0;
}

*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];

for (j = 0; j < pbl_bt_sz / 8; j++) {
bt_idx = i * pbl_bt_sz / 8 + j;

if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
size = pbl_bt_sz;
} else {
npages_allocated = bt_idx *
(pbl_bt_sz / 8);
size = (npages - npages_allocated) * 8;
}
mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
dev, size,
&(mr->pbl_l2_dma_addr[bt_idx]),
GFP_KERNEL);
if (!mr->pbl_bt_l2[bt_idx]) {
hns_roce_loop_free(hr_dev, mr, 2, i, j);
goto err_dma_alloc_l0;
}

*(mr->pbl_bt_l1[i] + j) =
mr->pbl_l2_dma_addr[bt_idx];

pbl_bt_cnt++;
if (pbl_bt_cnt >= pbl_last_bt_num) {
mr_alloc_done = 1;
break;
}
}

if (mr_alloc_done)
break;
}
}

mr->l0_chunk_last_num = i + 1;
if (mhop_num == 3)
mr->l1_chunk_last_num = j + 1;

mr->pbl_size = npages;
mr->pbl_ba = mr->pbl_l0_dma_addr;
mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;

return 0;

err_dma_alloc_l0:
kfree(mr->pbl_bt_l2);
mr->pbl_bt_l2 = NULL;

err_kcalloc_bt_l2:
kfree(mr->pbl_l2_dma_addr);
mr->pbl_l2_dma_addr = NULL;

err_kcalloc_l2_dma:
kfree(mr->pbl_bt_l1);
mr->pbl_bt_l1 = NULL;

err_kcalloc_bt_l1:
kfree(mr->pbl_l1_dma_addr);
mr->pbl_l1_dma_addr = NULL;

return -ENOMEM;
}

static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
u64 size, u32 access, int npages,
struct hns_roce_mr *mr)
{
struct device *dev = hr_dev->dev;
unsigned long index = 0;
int ret = 0;
struct device *dev = &hr_dev->pdev->dev;

/* Allocate a key for mr from mr_table */
ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
@@ -258,22 +514,117 @@ static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
mr->type = MR_TYPE_DMA;
mr->pbl_buf = NULL;
mr->pbl_dma_addr = 0;
/* PBL multi-hop addressing parameters */
mr->pbl_bt_l2 = NULL;
mr->pbl_bt_l1 = NULL;
mr->pbl_bt_l0 = NULL;
mr->pbl_l2_dma_addr = NULL;
mr->pbl_l1_dma_addr = NULL;
mr->pbl_l0_dma_addr = 0;
} else {
mr->type = MR_TYPE_MR;
mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
&(mr->pbl_dma_addr),
GFP_KERNEL);
if (!mr->pbl_buf)
return -ENOMEM;
if (!hr_dev->caps.pbl_hop_num) {
mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
&(mr->pbl_dma_addr),
GFP_KERNEL);
if (!mr->pbl_buf)
return -ENOMEM;
} else {
ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
}
}

return 0;
return ret;
}

static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr)
{
struct device *dev = hr_dev->dev;
int npages_allocated;
int npages;
int i, j;
u32 pbl_bt_sz;
u32 mhop_num;
u64 bt_idx;

npages = ib_umem_page_count(mr->umem);
pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
mhop_num = hr_dev->caps.pbl_hop_num;

if (mhop_num == HNS_ROCE_HOP_NUM_0)
return;

/* hop_num = 1 */
if (mhop_num == 1) {
dma_free_coherent(dev, (unsigned int)(npages * 8),
mr->pbl_buf, mr->pbl_dma_addr);
return;
}

dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0,
mr->pbl_l0_dma_addr);

if (mhop_num == 2) {
for (i = 0; i < mr->l0_chunk_last_num; i++) {
if (i == mr->l0_chunk_last_num - 1) {
npages_allocated = i * (pbl_bt_sz / 8);

dma_free_coherent(dev,
(npages - npages_allocated) * 8,
mr->pbl_bt_l1[i],
mr->pbl_l1_dma_addr[i]);

break;
}

dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
mr->pbl_l1_dma_addr[i]);
}
} else if (mhop_num == 3) {
for (i = 0; i < mr->l0_chunk_last_num; i++) {
dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
mr->pbl_l1_dma_addr[i]);

for (j = 0; j < pbl_bt_sz / 8; j++) {
bt_idx = i * (pbl_bt_sz / 8) + j;

if ((i == mr->l0_chunk_last_num - 1)
&& j == mr->l1_chunk_last_num - 1) {
npages_allocated = bt_idx *
(pbl_bt_sz / 8);

dma_free_coherent(dev,
(npages - npages_allocated) * 8,
mr->pbl_bt_l2[bt_idx],
mr->pbl_l2_dma_addr[bt_idx]);

break;
}

dma_free_coherent(dev, pbl_bt_sz,
mr->pbl_bt_l2[bt_idx],
mr->pbl_l2_dma_addr[bt_idx]);
}
}
}

kfree(mr->pbl_bt_l1);
kfree(mr->pbl_l1_dma_addr);
mr->pbl_bt_l1 = NULL;
mr->pbl_l1_dma_addr = NULL;
if (mhop_num == 3) {
kfree(mr->pbl_bt_l2);
kfree(mr->pbl_l2_dma_addr);
mr->pbl_bt_l2 = NULL;
mr->pbl_l2_dma_addr = NULL;
}
}

static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr)
{
struct device *dev = &hr_dev->pdev->dev;
struct device *dev = hr_dev->dev;
int npages = 0;
int ret;

@@ -286,10 +637,18 @@ static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,

if (mr->size != ~0ULL) {
npages = ib_umem_page_count(mr->umem);
dma_free_coherent(dev, (unsigned int)(npages * 8), mr->pbl_buf,
mr->pbl_dma_addr);

if (!hr_dev->caps.pbl_hop_num)
dma_free_coherent(dev, (unsigned int)(npages * 8),
mr->pbl_buf, mr->pbl_dma_addr);
else
hns_roce_mhop_free(hr_dev, mr);
}

if (mr->enabled)
hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
key_to_hw_index(mr->key));

hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
key_to_hw_index(mr->key), BITMAP_NO_RR);
}
@@ -299,7 +658,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
{
int ret;
unsigned long mtpt_idx = key_to_hw_index(mr->key);
struct device *dev = &hr_dev->pdev->dev;
struct device *dev = hr_dev->dev;
struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

@@ -345,28 +704,44 @@ static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
struct hns_roce_mtt *mtt, u32 start_index,
u32 npages, u64 *page_list)
{
u32 i = 0;
__le64 *mtts = NULL;
struct hns_roce_hem_table *table;
dma_addr_t dma_handle;
__le64 *mtts;
u32 s = start_index * sizeof(u64);
u32 bt_page_size;
u32 i;

if (mtt->mtt_type == MTT_TYPE_WQE)
bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
else
bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);

/* All MTTs must fit in the same page */
if (start_index / (PAGE_SIZE / sizeof(u64)) !=
(start_index + npages - 1) / (PAGE_SIZE / sizeof(u64)))
if (start_index / (bt_page_size / sizeof(u64)) !=
(start_index + npages - 1) / (bt_page_size / sizeof(u64)))
return -EINVAL;

if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
return -EINVAL;

mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table,
if (mtt->mtt_type == MTT_TYPE_WQE)
table = &hr_dev->mr_table.mtt_table;
else
table = &hr_dev->mr_table.mtt_cqe_table;

mtts = hns_roce_table_find(hr_dev, table,
mtt->first_seg + s / hr_dev->caps.mtt_entry_sz,
&dma_handle);
if (!mtts)
return -ENOMEM;

/* Save page addr, low 12 bits : 0 */
for (i = 0; i < npages; ++i)
mtts[i] = (cpu_to_le64(page_list[i])) >> PAGE_ADDR_SHIFT;
for (i = 0; i < npages; ++i) {
if (!hr_dev->caps.mtt_hop_num)
mtts[i] = cpu_to_le64(page_list[i] >> PAGE_ADDR_SHIFT);
else
mtts[i] = cpu_to_le64(page_list[i]);
}

return 0;
}
@@ -377,12 +752,18 @@ static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
{
int chunk;
int ret;
u32 bt_page_size;

if (mtt->order < 0)
return -EINVAL;

if (mtt->mtt_type == MTT_TYPE_WQE)
bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
else
bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);

while (npages > 0) {
chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
chunk = min_t(int, bt_page_size / sizeof(u64), npages);

ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index, chunk,
page_list);
@@ -400,9 +781,9 @@ static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
{
u32 i = 0;
int ret = 0;
u64 *page_list = NULL;
u64 *page_list;
int ret;
u32 i;

page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
if (!page_list)
@@ -425,7 +806,7 @@ int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
int ret = 0;
int ret;

ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
hr_dev->caps.num_mtpts,
@@ -439,8 +820,17 @@ int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
if (ret)
goto err_buddy;

if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
ret = hns_roce_buddy_init(&mr_table->mtt_cqe_buddy,
ilog2(hr_dev->caps.num_cqe_segs));
if (ret)
goto err_buddy_cqe;
}
return 0;

err_buddy_cqe:
hns_roce_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
return ret;
@@ -451,13 +841,15 @@ void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
int ret = 0;
struct hns_roce_mr *mr = NULL;
struct hns_roce_mr *mr;
int ret;

mr = kmalloc(sizeof(*mr), GFP_KERNEL);
if (mr == NULL)
@@ -489,25 +881,44 @@ struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_mtt *mtt, struct ib_umem *umem)
{
struct device *dev = hr_dev->dev;
struct scatterlist *sg;
unsigned int order;
int i, k, entry;
int npage = 0;
int ret = 0;
u64 *pages;
u32 n;
int len;
u64 page_addr;
u64 *pages;
u32 bt_page_size;
u32 n;

pages = (u64 *) __get_free_page(GFP_KERNEL);
order = mtt->mtt_type == MTT_TYPE_WQE ? hr_dev->caps.mtt_ba_pg_sz :
hr_dev->caps.cqe_ba_pg_sz;
bt_page_size = 1 << (order + PAGE_SHIFT);

pages = (u64 *) __get_free_pages(GFP_KERNEL, order);
if (!pages)
return -ENOMEM;

i = n = 0;

for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
len = sg_dma_len(sg) >> mtt->page_shift;
len = sg_dma_len(sg) >> PAGE_SHIFT;
for (k = 0; k < len; ++k) {
pages[i++] = sg_dma_address(sg) +
(k << umem->page_shift);
if (i == PAGE_SIZE / sizeof(u64)) {
page_addr =
sg_dma_address(sg) + (k << umem->page_shift);
if (!(npage % (1 << (mtt->page_shift - PAGE_SHIFT)))) {
if (page_addr & ((1 << mtt->page_shift) - 1)) {
dev_err(dev, "page_addr 0x%llx is not page_shift %d alignment!\n",
page_addr, mtt->page_shift);
ret = -EINVAL;
goto out;
}
pages[i++] = page_addr;
}
npage++;
if (i == bt_page_size / sizeof(u64)) {
ret = hns_roce_write_mtt(hr_dev, mtt, n, i,
pages);
if (ret)
@@ -526,16 +937,44 @@ int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
return ret;
}

static int hns_roce_ib_umem_write_mr(struct hns_roce_mr *mr,
static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr,
struct ib_umem *umem)
{
int i = 0;
int entry;
struct scatterlist *sg;
int i = 0, j = 0, k;
int entry;
int len;
u64 page_addr;
u32 pbl_bt_sz;

if (hr_dev->caps.pbl_hop_num == HNS_ROCE_HOP_NUM_0)
return 0;

pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
mr->pbl_buf[i] = ((u64)sg_dma_address(sg)) >> 12;
i++;
len = sg_dma_len(sg) >> PAGE_SHIFT;
for (k = 0; k < len; ++k) {
page_addr = sg_dma_address(sg) +
(k << umem->page_shift);

if (!hr_dev->caps.pbl_hop_num) {
mr->pbl_buf[i++] = page_addr >> 12;
} else if (hr_dev->caps.pbl_hop_num == 1) {
mr->pbl_buf[i++] = page_addr;
} else {
if (hr_dev->caps.pbl_hop_num == 2)
mr->pbl_bt_l1[i][j] = page_addr;
else if (hr_dev->caps.pbl_hop_num == 3)
mr->pbl_bt_l2[i][j] = page_addr;

j++;
if (j >= (pbl_bt_sz / 8)) {
i++;
j = 0;
}
}
}
}

/* Memory barrier */
@@ -549,10 +988,12 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_mr *mr = NULL;
int ret = 0;
int n = 0;
struct device *dev = hr_dev->dev;
struct hns_roce_mr *mr;
int bt_size;
int ret;
int n;
int i;

mr = kmalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
@@ -573,11 +1014,27 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
goto err_umem;
}

if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
dev_err(dev, " MR len %lld err. MR is limited to 4G at most!\n",
length);
ret = -EINVAL;
goto err_umem;
if (!hr_dev->caps.pbl_hop_num) {
if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
dev_err(dev,
" MR len %lld err. MR is limited to 4G at most!\n",
length);
ret = -EINVAL;
goto err_umem;
}
} else {
int pbl_size = 1;

bt_size = (1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT)) / 8;
for (i = 0; i < hr_dev->caps.pbl_hop_num; i++)
pbl_size *= bt_size;
if (n > pbl_size) {
dev_err(dev,
" MR len %lld err. MR page num is limited to %d!\n",
length, pbl_size);
ret = -EINVAL;
goto err_umem;
}
}

ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
@@ -585,7 +1042,7 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (ret)
goto err_umem;

ret = hns_roce_ib_umem_write_mr(mr, mr->umem);
ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
if (ret)
goto err_mr;

@@ -608,6 +1065,129 @@ struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
return ERR_PTR(ret);
}

int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
struct ib_udata *udata)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
struct hns_roce_mr *mr = to_hr_mr(ibmr);
struct hns_roce_cmd_mailbox *mailbox;
struct device *dev = hr_dev->dev;
unsigned long mtpt_idx;
u32 pdn = 0;
int npages;
int ret;

if (!mr->enabled)
return -EINVAL;

mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);

mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, mtpt_idx, 0,
HNS_ROCE_CMD_QUERY_MPT,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret)
goto free_cmd_mbox;

ret = hns_roce_hw2sw_mpt(hr_dev, NULL, mtpt_idx);
if (ret)
dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);

mr->enabled = 0;

if (flags & IB_MR_REREG_PD)
pdn = to_hr_pd(pd)->pdn;

if (flags & IB_MR_REREG_TRANS) {
if (mr->size != ~0ULL) {
npages = ib_umem_page_count(mr->umem);

if (hr_dev->caps.pbl_hop_num)
hns_roce_mhop_free(hr_dev, mr);
else
dma_free_coherent(dev, npages * 8, mr->pbl_buf,
mr->pbl_dma_addr);
}
ib_umem_release(mr->umem);

mr->umem = ib_umem_get(ibmr->uobject->context, start, length,
mr_access_flags, 0);
if (IS_ERR(mr->umem)) {
ret = PTR_ERR(mr->umem);
mr->umem = NULL;
goto free_cmd_mbox;
}
npages = ib_umem_page_count(mr->umem);

if (hr_dev->caps.pbl_hop_num) {
ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
if (ret)
goto release_umem;
} else {
mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
&(mr->pbl_dma_addr),
GFP_KERNEL);
if (!mr->pbl_buf) {
ret = -ENOMEM;
goto release_umem;
}
}
}

ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
mr_access_flags, virt_addr,
length, mailbox->buf);
if (ret) {
if (flags & IB_MR_REREG_TRANS)
goto release_umem;
else
goto free_cmd_mbox;
}

if (flags & IB_MR_REREG_TRANS) {
ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
if (ret) {
if (mr->size != ~0ULL) {
npages = ib_umem_page_count(mr->umem);

if (hr_dev->caps.pbl_hop_num)
hns_roce_mhop_free(hr_dev, mr);
else
dma_free_coherent(dev, npages * 8,
mr->pbl_buf,
mr->pbl_dma_addr);
}

goto release_umem;
}
}

ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx);
if (ret) {
dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
goto release_umem;
}

mr->enabled = 1;
if (flags & IB_MR_REREG_ACCESS)
mr->access = mr_access_flags;

hns_roce_free_cmd_mailbox(hr_dev, mailbox);

return 0;

release_umem:
ib_umem_release(mr->umem);

free_cmd_mbox:
hns_roce_free_cmd_mailbox(hr_dev, mailbox);

return ret;
}

int hns_roce_dereg_mr(struct ib_mr *ibmr)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);