xprtrdma: Update rkeys after transport reconnect
Various reports of:

    rpcrdma_qp_async_error_upcall: QP error 3 on device mlx4_0
        ep ffff8800bfd3e848

Ensure that rkeys in already-marshalled RPC/RDMA headers are
refreshed after the QP has been replaced by a reconnect.

BugLink: https://bugzilla.linux-nfs.org/show_bug.cgi?id=249
Suggested-by: Selvin Xavier <Selvin.Xavier@Emulex.Com>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
Tested-by: Shirley Ma <shirley.ma@oracle.com>
Tested-by: Devesh Sharma <devesh.sharma@emulex.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 43e9598817
commit 6ab59945f2
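In FRMR mode, each transport reconnect replaces the QP and re-registers the request's memory regions, so the rkeys baked into an RPC/RDMA header marshalled before the reconnect no longer match what the new registrations expect. Below is a condensed sketch of the patched send path, not the literal kernel code: every identifier comes from the diff that follows, and error unwinding plus the actual post/send steps are omitted.

    /* Condensed sketch of xprt_rdma_send_request() after this patch.
     * All identifiers are taken from the diff below; this omits the
     * rest of the function and is illustrative only.
     */
    int rc = 0;

    if (req->rl_niovs == 0)
            /* First transmission: build the whole RPC/RDMA header,
             * recording the chosen chunk types in rl_rtype/rl_wtype. */
            rc = rpcrdma_marshal_req(rqst);
    else if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR)
            /* Retransmission after a reconnect: the FRMRs were
             * re-registered with new rkeys, so rewrite only the chunk
             * lists in the already-built header. */
            rc = rpcrdma_marshal_chunks(rqst, 0);
    if (rc < 0)
            goto failed_marshal;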
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -53,14 +53,6 @@
 # define RPCDBG_FACILITY	RPCDBG_TRANS
 #endif
 
-enum rpcrdma_chunktype {
-	rpcrdma_noch = 0,
-	rpcrdma_readch,
-	rpcrdma_areadch,
-	rpcrdma_writech,
-	rpcrdma_replych
-};
-
 #ifdef RPC_DEBUG
 static const char transfertypes[][12] = {
 	"pure inline",	/* no chunks */
@@ -285,6 +277,28 @@ rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
 	return n;
 }
 
+/*
+ * Marshal chunks. This routine returns the header length
+ * consumed by marshaling.
+ *
+ * Returns positive RPC/RDMA header size, or negative errno.
+ */
+
+ssize_t
+rpcrdma_marshal_chunks(struct rpc_rqst *rqst, ssize_t result)
+{
+	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
+	struct rpcrdma_msg *headerp = (struct rpcrdma_msg *)req->rl_base;
+
+	if (req->rl_rtype != rpcrdma_noch)
+		result = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
+					       headerp, req->rl_rtype);
+	else if (req->rl_wtype != rpcrdma_noch)
+		result = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
+					       headerp, req->rl_wtype);
+	return result;
+}
+
 /*
  * Copy write data inline.
  * This function is used for "small" requests. Data which is passed
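The chunk-list marshaling that used to live inline in rpcrdma_marshal_req() is now the standalone rpcrdma_marshal_chunks() above. Because it reads the chunk types from req->rl_rtype and req->rl_wtype rather than from locals, it can be re-invoked on a retransmit to regenerate the read/write chunk lists in place; the result parameter carries the caller's running header length through unchanged when no chunks are needed.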
@@ -377,7 +391,6 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	char *base;
 	size_t rpclen, padlen;
 	ssize_t hdrlen;
-	enum rpcrdma_chunktype rtype, wtype;
 	struct rpcrdma_msg *headerp;
 
 	/*
@@ -415,13 +428,13 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	 * into pages; otherwise use reply chunks.
 	 */
 	if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
-		wtype = rpcrdma_noch;
+		req->rl_wtype = rpcrdma_noch;
 	else if (rqst->rq_rcv_buf.page_len == 0)
-		wtype = rpcrdma_replych;
+		req->rl_wtype = rpcrdma_replych;
 	else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
-		wtype = rpcrdma_writech;
+		req->rl_wtype = rpcrdma_writech;
 	else
-		wtype = rpcrdma_replych;
+		req->rl_wtype = rpcrdma_replych;
 
 	/*
 	 * Chunks needed for arguments?
@@ -438,16 +451,16 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	 * TBD check NFSv4 setacl
 	 */
 	if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
-		rtype = rpcrdma_noch;
+		req->rl_rtype = rpcrdma_noch;
 	else if (rqst->rq_snd_buf.page_len == 0)
-		rtype = rpcrdma_areadch;
+		req->rl_rtype = rpcrdma_areadch;
 	else
-		rtype = rpcrdma_readch;
+		req->rl_rtype = rpcrdma_readch;
 
 	/* The following simplification is not true forever */
-	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
-		wtype = rpcrdma_noch;
-	if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) {
+	if (req->rl_rtype != rpcrdma_noch && req->rl_wtype == rpcrdma_replych)
+		req->rl_wtype = rpcrdma_noch;
+	if (req->rl_rtype != rpcrdma_noch && req->rl_wtype != rpcrdma_noch) {
 		dprintk("RPC:       %s: cannot marshal multiple chunk lists\n",
 			__func__);
 		return -EIO;
@@ -461,7 +474,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	 * When padding is in use and applies to the transfer, insert
 	 * it and change the message type.
 	 */
-	if (rtype == rpcrdma_noch) {
+	if (req->rl_rtype == rpcrdma_noch) {
 
 		padlen = rpcrdma_inline_pullup(rqst,
 						RPCRDMA_INLINE_PAD_VALUE(rqst));
@@ -476,7 +489,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 			headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
 			headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
 			hdrlen += 2 * sizeof(u32);	/* extra words in padhdr */
-			if (wtype != rpcrdma_noch) {
+			if (req->rl_wtype != rpcrdma_noch) {
 				dprintk("RPC:       %s: invalid chunk list\n",
 					__func__);
 				return -EIO;
@@ -497,30 +510,18 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 		 * on receive. Therefore, we request a reply chunk
 		 * for non-writes wherever feasible and efficient.
 		 */
-		if (wtype == rpcrdma_noch)
-			wtype = rpcrdma_replych;
+		if (req->rl_wtype == rpcrdma_noch)
+			req->rl_wtype = rpcrdma_replych;
 	}
 
-	/*
-	 * Marshal chunks. This routine will return the header length
-	 * consumed by marshaling.
-	 */
-	if (rtype != rpcrdma_noch) {
-		hdrlen = rpcrdma_create_chunks(rqst,
-					&rqst->rq_snd_buf, headerp, rtype);
-		wtype = rtype;	/* simplify dprintk */
-
-	} else if (wtype != rpcrdma_noch) {
-		hdrlen = rpcrdma_create_chunks(rqst,
-					&rqst->rq_rcv_buf, headerp, wtype);
-	}
+	hdrlen = rpcrdma_marshal_chunks(rqst, hdrlen);
 	if (hdrlen < 0)
 		return hdrlen;
 
 	dprintk("RPC:       %s: %s: hdrlen %zd rpclen %zd padlen %zd"
 		" headerp 0x%p base 0x%p lkey 0x%x\n",
-		__func__, transfertypes[wtype], hdrlen, rpclen, padlen,
+		__func__, transfertypes[req->rl_wtype], hdrlen, rpclen, padlen,
 		headerp, base, req->rl_iov.lkey);
 
 	/*
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -597,13 +597,14 @@ xprt_rdma_send_request(struct rpc_task *task)
 	struct rpc_xprt *xprt = rqst->rq_xprt;
 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
 	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
-	int rc;
+	int rc = 0;
 
-	if (req->rl_niovs == 0) {
+	if (req->rl_niovs == 0)
 		rc = rpcrdma_marshal_req(rqst);
-		if (rc < 0)
-			goto failed_marshal;
-	}
+	else if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR)
+		rc = rpcrdma_marshal_chunks(rqst, 0);
+	if (rc < 0)
+		goto failed_marshal;
 
 	if (req->rl_reply == NULL) 		/* e.g. reconnection */
 		rpcrdma_recv_buffer_get(req);
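With rc initialized to zero, a retransmitted request under any other memory registration strategy, whose rkeys are expected to remain valid across a reconnect, falls straight through to the send path; only the FRMR case pays for a re-marshal of the chunk lists.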
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -99,6 +99,14 @@ struct rpcrdma_ep {
 #define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
 #define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)
 
+enum rpcrdma_chunktype {
+	rpcrdma_noch = 0,
+	rpcrdma_readch,
+	rpcrdma_areadch,
+	rpcrdma_writech,
+	rpcrdma_replych
+};
+
 /*
  * struct rpcrdma_rep -- this structure encapsulates state required to recv
  * and complete a reply, asychronously. It needs several pieces of
@@ -192,6 +200,7 @@ struct rpcrdma_req {
 	unsigned int	rl_niovs;	/* 0, 2 or 4 */
 	unsigned int	rl_nchunks;	/* non-zero if chunks */
 	unsigned int	rl_connect_cookie;	/* retry detection */
+	enum rpcrdma_chunktype	rl_rtype, rl_wtype;
 	struct rpcrdma_buffer *rl_buffer; /* home base for this structure */
 	struct rpcrdma_rep	*rl_reply;/* holder for reply buffer */
 	struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];/* chunk segments */
@@ -347,6 +356,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *);
 /*
  * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
  */
+ssize_t rpcrdma_marshal_chunks(struct rpc_rqst *, ssize_t);
 int rpcrdma_marshal_req(struct rpc_rqst *);
 size_t rpcrdma_max_payload(struct rpcrdma_xprt *);
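The enum rpcrdma_chunktype definition moves out of rpc_rdma.c into this header so that struct rpcrdma_req can record the chunk types chosen at marshal time (rl_rtype, rl_wtype); that saved state is what allows rpcrdma_marshal_chunks(), whose prototype is exported here alongside rpcrdma_marshal_req(), to be replayed from the send path after a reconnect.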