Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
net/xprtrdma: Simplify ib_post_(send|recv|srq_recv)() calls
Instead of declaring and passing a dummy 'bad_wr' pointer, pass NULL as third argument to ib_post_(send|recv|srq_recv)().

Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Acked-by: Anna Schumaker <Anna.Schumaker@netapp.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 2e3bbe46b4
commit ed288d74a9
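The change is the same mechanical substitution at every post site. As a minimal before/after sketch (the qp and wr variables here are hypothetical, not taken from this patch):

        struct ib_send_wr *bad_wr;      /* before: dummy output pointer, never read */
        int ret;

        ret = ib_post_send(qp, &wr, &bad_wr);   /* before */
        ret = ib_post_send(qp, &wr, NULL);      /* after: the dummy and its declaration go away */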
@@ -279,9 +279,7 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 static int
 fmr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
 {
-        struct ib_send_wr *bad_wr;
-
-        return ib_post_send(ia->ri_id->qp, &req->rl_sendctx->sc_wr, &bad_wr);
+        return ib_post_send(ia->ri_id->qp, &req->rl_sendctx->sc_wr, NULL);
 }
 
 /* Invalidate all memory regions that were registered for "req".
@@ -464,7 +464,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
 static int
 frwr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
 {
-        struct ib_send_wr *post_wr, *bad_wr;
+        struct ib_send_wr *post_wr;
         struct rpcrdma_mr *mr;
 
         post_wr = &req->rl_sendctx->sc_wr;
@@ -486,7 +486,7 @@ frwr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
         /* If ib_post_send fails, the next ->send_request for
          * @req will queue these MWs for recovery.
          */
-        return ib_post_send(ia->ri_id->qp, post_wr, &bad_wr);
+        return ib_post_send(ia->ri_id->qp, post_wr, NULL);
 }
 
 /* Handle a remotely invalidated mr on the @mrs list
@@ -229,11 +229,10 @@ void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
 static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
                                 struct svc_rdma_recv_ctxt *ctxt)
 {
-        struct ib_recv_wr *bad_recv_wr;
         int ret;
 
         svc_xprt_get(&rdma->sc_xprt);
-        ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, &bad_recv_wr);
+        ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
         trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret);
         if (ret)
                 goto err_post;
@@ -329,7 +329,7 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
         do {
                 if (atomic_sub_return(cc->cc_sqecount,
                                       &rdma->sc_sq_avail) > 0) {
-                        ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
+                        ret = ib_post_send(rdma->sc_qp, first_wr, NULL);
                         trace_svcrdma_post_rw(&cc->cc_cqe,
                                               cc->cc_sqecount, ret);
                         if (ret)
@@ -291,7 +291,6 @@ static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
  */
 int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
 {
-        struct ib_send_wr *bad_wr;
         int ret;
 
         might_sleep();
@@ -311,7 +310,7 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
         }
 
         svc_xprt_get(&rdma->sc_xprt);
-        ret = ib_post_send(rdma->sc_qp, wr, &bad_wr);
+        ret = ib_post_send(rdma->sc_qp, wr, NULL);
         trace_svcrdma_post_send(wr, ret);
         if (ret) {
                 set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
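Passing NULL is safe because an earlier patch in the same series taught the ib_post_(send|recv|srq_recv)() inline wrappers to substitute a local dummy pointer when the caller passes NULL. A rough sketch of that mechanism, assuming the pre-ib_device_ops wrapper style of this era (the exact code in include/rdma/ib_verbs.h may differ):

static inline int ib_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr,
                               struct ib_send_wr **bad_send_wr)
{
        struct ib_send_wr *dummy;

        /* Hand the driver a throwaway slot when the caller passed NULL. */
        return qp->device->post_send(qp, send_wr, bad_send_wr ? : &dummy);
}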