/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR). Also referred to sometimes as FRMR mode.
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mws immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but the LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */
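
/* Illustrative sketch, compiled out: one way an unsignaled FAST_REG WR
 * can be chained ahead of a signaled SEND and posted with a single
 * ib_post_send() call, as described above. The helper name and its
 * parameters are hypothetical; only the verbs calls, opcodes, and
 * flags are real.
 */
#if 0
static int frwr_example_post_chain(struct ib_qp *qp,
				   struct ib_reg_wr *reg_wr,
				   struct ib_send_wr *send_wr)
{
	struct ib_send_wr *bad_wr;

	reg_wr->wr.opcode = IB_WR_REG_MR;
	reg_wr->wr.send_flags = 0;		/* registration unsignaled */
	reg_wr->wr.next = send_wr;		/* executes before the SEND */
	send_wr->send_flags = IB_SEND_SIGNALED;	/* occasional signal */
	return ib_post_send(qp, &reg_wr->wr, &bad_wr);
}
#endif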

/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * four states:
 *
 * INVALID: The MR was not in use before the QP entered ERROR state.
 *
 * VALID: The MR was registered before the QP entered ERROR state.
 *
 * FLUSHED_FR: The MR was being registered when the QP entered ERROR
 * state, and the pending WR was flushed.
 *
 * FLUSHED_LI: The MR was being invalidated when the QP entered ERROR
 * state, and the pending WR was flushed.
 *
 * When frwr_op_map encounters FLUSHED and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mws list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */
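
/* Sketch, compiled out: how a connect worker might quiesce the send
 * queue before reconnecting, per the comment above. ib_drain_qp()
 * posts marker WRs and blocks until their flush completions arrive.
 * The helper itself is hypothetical; "ia" follows this file's naming.
 */
#if 0
static void frwr_example_drain(struct rpcrdma_ia *ia)
{
	if (ia->ri_id && ia->ri_id->qp)
		ib_drain_qp(ia->ri_id->qp);
}
#endif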

#include <linux/sunrpc/rpc_rdma.h>

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

bool
frwr_is_supported(struct rpcrdma_ia *ia)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		goto out_not_supported;
	if (attrs->max_fast_reg_page_list_len == 0)
		goto out_not_supported;
	return true;

out_not_supported:
	pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
		ia->ri_device->name);
	return false;
}

static int
frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
	unsigned int depth = ia->ri_max_frmr_depth;
	struct rpcrdma_frmr *f = &r->frmr;
	int rc;

	f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
	if (IS_ERR(f->fr_mr))
		goto out_mr_err;

	r->mw_sg = kcalloc(depth, sizeof(*r->mw_sg), GFP_KERNEL);
	if (!r->mw_sg)
		goto out_list_err;

	sg_init_table(r->mw_sg, depth);
	init_completion(&f->fr_linv_done);
	return 0;

out_mr_err:
	rc = PTR_ERR(f->fr_mr);
	dprintk("RPC: %s: ib_alloc_mr status %i\n",
		__func__, rc);
	return rc;

out_list_err:
	rc = -ENOMEM;
	dprintk("RPC: %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(f->fr_mr);
	return rc;
}

static void
frwr_op_release_mr(struct rpcrdma_mw *r)
{
	int rc;

	/* Ensure MW is not on any rl_registered list */
	if (!list_empty(&r->mw_list))
		list_del(&r->mw_list);

	rc = ib_dereg_mr(r->frmr.fr_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
		       r, rc);
	kfree(r->mw_sg);
	kfree(r);
}

static int
__frwr_reset_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *r)
{
	struct rpcrdma_frmr *f = &r->frmr;
	int rc;

	rc = ib_dereg_mr(f->fr_mr);
	if (rc) {
		pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
			rc, r);
		return rc;
	}

	f->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype,
			       ia->ri_max_frmr_depth);
	if (IS_ERR(f->fr_mr)) {
		pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
			PTR_ERR(f->fr_mr), r);
		return PTR_ERR(f->fr_mr);
	}

	dprintk("RPC: %s: recovered FRMR %p\n", __func__, f);
	f->fr_state = FRMR_IS_INVALID;
	return 0;
}

/* Reset of a single FRMR. Generate a fresh rkey by replacing the MR.
 */
static void
frwr_op_recover_mr(struct rpcrdma_mw *mw)
{
	enum rpcrdma_frmr_state state = mw->frmr.fr_state;
	struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int rc;

	rc = __frwr_reset_mr(ia, mw);
	if (state != FRMR_FLUSHED_LI)
		ib_dma_unmap_sg(ia->ri_device,
				mw->mw_sg, mw->mw_nents, mw->mw_dir);
	if (rc)
		goto out_release;

	rpcrdma_put_mw(r_xprt, mw);
	r_xprt->rx_stats.mrs_recovered++;
	return;

out_release:
	pr_err("rpcrdma: FRMR reset failed %d, %p release\n", rc, mw);
	r_xprt->rx_stats.mrs_orphaned++;

	spin_lock(&r_xprt->rx_buf.rb_mwlock);
	list_del(&mw->mw_all);
	spin_unlock(&r_xprt->rx_buf.rb_mwlock);

	frwr_op_release_mr(mw);
}

static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	     struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;
	int depth, delta;

	ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;

	ia->ri_max_frmr_depth =
			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
			      attrs->max_fast_reg_page_list_len);
	dprintk("RPC: %s: device's max FR page list len = %u\n",
		__func__, ia->ri_max_frmr_depth);

	/* Add room for frmr register and invalidate WRs.
	 * 1. FRMR reg WR for head
	 * 2. FRMR invalidate WR for head
	 * 3. N FRMR reg WRs for pagelist
	 * 4. N FRMR invalidate WRs for pagelist
	 * 5. FRMR reg WR for tail
	 * 6. FRMR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRMR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS (a worked example follows the loop).
	 */
	if (ia->ri_max_frmr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frmr_depth;
		do {
			depth += 2; /* FRMR reg + invalidate */
			delta -= ia->ri_max_frmr_depth;
		} while (delta > 0);
	}
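
	/* Worked example with hypothetical numbers: if the device caps
	 * its FR depth at 16 and RPCRDMA_MAX_DATA_SEGS is 64, then
	 * delta = 64 - 16 = 48 and the loop runs three times
	 * (48 -> 32 -> 16 -> 0), so depth = 7 + 3 * 2 = 13 WRs per RPC.
	 */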

	ep->rep_attr.cap.max_send_wr *= depth;
	if (ep->rep_attr.cap.max_send_wr > attrs->max_qp_wr) {
		cdata->max_requests = attrs->max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}

	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
				ia->ri_max_frmr_depth);
	return 0;
}

/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frmr_depth);
}
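
/* For example (hypothetical values): if RPCRDMA_MAX_HDR_SEGS were 8
 * and the device's FR depth 128, the product 1024 would exceed
 * RPCRDMA_MAX_DATA_SEGS, so the min_t() above caps the result at
 * RPCRDMA_MAX_DATA_SEGS pages.
 */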

static void
__frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
		       wr, ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}

/**
 * frwr_wc_fastreg - Invoked by RDMA provider for each polled FastReg WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		cqe = wc->wr_cqe;
		frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
		frmr->fr_state = FRMR_FLUSHED_FR;
		__frwr_sendcompletion_flush(wc, "fastreg");
	}
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for each polled LocalInv WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		cqe = wc->wr_cqe;
		frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
		frmr->fr_state = FRMR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for each polled LocalInv WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct rpcrdma_frmr *frmr;
	struct ib_cqe *cqe;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	cqe = wc->wr_cqe;
	frmr = container_of(cqe, struct rpcrdma_frmr, fr_cqe);
	if (wc->status != IB_WC_SUCCESS) {
		frmr->fr_state = FRMR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
	complete(&frmr->fr_linv_done);
}

/* Post a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static int
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing, struct rpcrdma_mw **out)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
	struct rpcrdma_mw *mw;
	struct rpcrdma_frmr *frmr;
	struct ib_mr *mr;
	struct ib_reg_wr *reg_wr;
	struct ib_send_wr *bad_wr;
	int rc, i, n, dma_nents;
	u8 key;

	/* Acquire an MR that is ready for a fresh registration. MWs
	 * found in any other state need recovery and are deferred to
	 * the recovery workqueue.
	 */
	mw = NULL;
	do {
		if (mw)
			rpcrdma_defer_mr_recovery(mw);
		mw = rpcrdma_get_mw(r_xprt);
		if (!mw)
			return -ENOBUFS;
	} while (mw->frmr.fr_state != FRMR_IS_INVALID);
	frmr = &mw->frmr;
	frmr->fr_state = FRMR_IS_VALID;
	mr = frmr->fr_mr;
	reg_wr = &frmr->fr_regwr;

	if (nsegs > ia->ri_max_frmr_depth)
		nsegs = ia->ri_max_frmr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mw->mw_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (holes_ok)
			continue;
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mw->mw_nents = i;
	mw->mw_dir = rpcrdma_data_dir(writing);
	if (i == 0)
		goto out_dmamap_err;

	dma_nents = ib_dma_map_sg(ia->ri_device,
				  mw->mw_sg, mw->mw_nents, mw->mw_dir);
	if (!dma_nents)
		goto out_dmamap_err;

	n = ib_map_mr_sg(mr, mw->mw_sg, mw->mw_nents, NULL, PAGE_SIZE);
	if (unlikely(n != mw->mw_nents))
		goto out_mapmr_err;

	dprintk("RPC: %s: Using frmr %p to map %u segments (%llu bytes)\n",
		__func__, frmr, mw->mw_nents, (unsigned long long)mr->length);

	/* Bump the key portion of the rkey so a stale rkey from a
	 * previous registration of this MR cannot be used by the peer.
	 */
	key = (u8)(mr->rkey & 0x000000FF);
	ib_update_fast_reg_key(mr, ++key);

	reg_wr->wr.next = NULL;
	reg_wr->wr.opcode = IB_WR_REG_MR;
	frmr->fr_cqe.done = frwr_wc_fastreg;
	reg_wr->wr.wr_cqe = &frmr->fr_cqe;
	reg_wr->wr.num_sge = 0;
	reg_wr->wr.send_flags = 0;
	reg_wr->mr = mr;
	reg_wr->key = mr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	rpcrdma_set_signaled(&r_xprt->rx_ep, &reg_wr->wr);
	rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
	if (rc)
		goto out_senderr;

	mw->mw_handle = mr->rkey;
	mw->mw_length = mr->length;
	mw->mw_offset = mr->iova;

	*out = mw;
	return mw->mw_nents;

out_dmamap_err:
	pr_err("rpcrdma: failed to dma map sg %p sg_nents %u\n",
	       mw->mw_sg, mw->mw_nents);
	rpcrdma_defer_mr_recovery(mw);
	return -EIO;

out_mapmr_err:
	pr_err("rpcrdma: failed to map mr %p (%u/%u)\n",
	       frmr->fr_mr, n, mw->mw_nents);
	rpcrdma_defer_mr_recovery(mw);
	return -EIO;

out_senderr:
	pr_err("rpcrdma: FRMR registration ib_post_send returned %i\n", rc);
	rpcrdma_defer_mr_recovery(mw);
	return -ENOTCONN;
}
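
/* Hypothetical caller sketch, compiled out: registering a segment
 * array through the ops table and parking the returned MW on the
 * request's rl_registered list. The helper name and error handling
 * are illustrative only.
 */
#if 0
static int frwr_example_register(struct rpcrdma_xprt *r_xprt,
				 struct rpcrdma_req *req,
				 struct rpcrdma_mr_seg *seg,
				 int nsegs, bool writing)
{
	struct rpcrdma_mw *mw;
	int n;

	n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, writing, &mw);
	if (n < 0)
		return n;
	list_add(&mw->mw_list, &req->rl_registered);
	return n;
}
#endif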

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that @mws is not empty before the call. This
 * function empties the list.
 */
static void
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
{
	struct ib_send_wr *first, **prev, *last, *bad_wr;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_frmr *f;
	struct rpcrdma_mw *mw;
	int count, rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	f = NULL;
	count = 0;
	prev = &first;
	list_for_each_entry(mw, mws, mw_list) {
		mw->frmr.fr_state = FRMR_IS_INVALID;

		if (mw->mw_flags & RPCRDMA_MW_F_RI)
			continue;

		f = &mw->frmr;
		dprintk("RPC: %s: invalidating frmr %p\n",
			__func__, f);

		f->fr_cqe.done = frwr_wc_localinv;
		last = &f->fr_invwr;
		memset(last, 0, sizeof(*last));
		last->wr_cqe = &f->fr_cqe;
		last->opcode = IB_WR_LOCAL_INV;
		last->ex.invalidate_rkey = mw->mw_handle;
		count++;

		*prev = last;
		prev = &last->next;
	}
	if (!f)
		goto unmap;
2015-12-17 05:22:47 +07:00
|
|
|
|
|
|
|
/* Strong send queue ordering guarantees that when the
|
|
|
|
* last WR in the chain completes, all WRs in the chain
|
|
|
|
* are complete.
|
|
|
|
*/
|
2016-11-29 22:52:57 +07:00
|
|
|
last->send_flags = IB_SEND_SIGNALED;
|
2016-03-04 23:28:53 +07:00
|
|
|
f->fr_cqe.done = frwr_wc_localinv_wake;
|
|
|
|
reinit_completion(&f->fr_linv_done);
|
2016-11-29 22:52:16 +07:00
|
|
|
|
|
|
|
/* Initialize CQ count, since there is always a signaled
|
|
|
|
* WR being posted here. The new cqcount depends on how
|
|
|
|
* many SQEs are about to be consumed.
|
|
|
|
*/
|
|
|
|
rpcrdma_init_cqcount(&r_xprt->rx_ep, count);
|
2015-12-17 05:22:47 +07:00
|
|
|
|
|
|
|
/* Transport disconnect drains the receive CQ before it
|
|
|
|
* replaces the QP. The RPC reply handler won't call us
|
|
|
|
* unless ri_id->qp is a valid pointer.
|
|
|
|
*/
|
2016-09-15 21:57:16 +07:00
|
|
|
r_xprt->rx_stats.local_inv_needed++;
|
2016-11-29 22:52:57 +07:00
|
|
|
rc = ib_post_send(ia->ri_id->qp, first, &bad_wr);
|
2016-05-03 01:42:12 +07:00
|
|
|
if (rc)
|
|
|
|
goto reset_mrs;
|
2015-12-17 05:22:47 +07:00
|
|
|
|
|
|
|
wait_for_completion(&f->fr_linv_done);
|
|
|
|
|

	/* ORDER: Now DMA unmap all of the MRs, and return
	 * them to the free MW list.
	 */
unmap:
	while (!list_empty(mws)) {
		mw = rpcrdma_pop_mw(mws);
		dprintk("RPC: %s: DMA unmapping frmr %p\n",
			__func__, &mw->frmr);
		ib_dma_unmap_sg(ia->ri_device,
				mw->mw_sg, mw->mw_nents, mw->mw_dir);
		rpcrdma_put_mw(r_xprt, mw);
	}
	return;

reset_mrs:
	pr_err("rpcrdma: FRMR invalidate ib_post_send returned %i\n", rc);
	rdma_disconnect(ia->ri_id);

	/* Find and reset the MRs in the LOCAL_INV WRs that did not
	 * get posted. This is synchronous, and slow.
	 */
	list_for_each_entry(mw, mws, mw_list) {
		f = &mw->frmr;
		if (mw->mw_handle == bad_wr->ex.invalidate_rkey) {
			__frwr_reset_mr(ia, mw);
			bad_wr = bad_wr->next;
		}
	}
	goto unmap;
}
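
/* Caller's-eye view (hypothetical sketch): the reply handler calls
 *
 *	r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, &req->rl_registered);
 *
 * before waking the RPC, since this function sleeps until all
 * invalidations complete.
 */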

/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 */
static void
frwr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		   bool sync)
{
	struct rpcrdma_mw *mw;

	while (!list_empty(&req->rl_registered)) {
		mw = rpcrdma_pop_mw(&req->rl_registered);
		if (sync)
			frwr_op_recover_mr(mw);
		else
			rpcrdma_defer_mr_recovery(mw);
	}
}

const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
	.ro_map			= frwr_op_map,
	.ro_unmap_sync		= frwr_op_unmap_sync,
	.ro_unmap_safe		= frwr_op_unmap_safe,
	.ro_recover_mr		= frwr_op_recover_mr,
	.ro_open		= frwr_op_open,
	.ro_maxpages		= frwr_op_maxpages,
	.ro_init_mr		= frwr_op_init_mr,
	.ro_release_mr		= frwr_op_release_mr,
	.ro_displayname		= "frwr",
	.ro_send_w_inv_ok	= RPCRDMA_CMP_F_SND_W_INV_OK,
};