author		Chuck Lever <chuck.lever@oracle.com>	2017-10-20 10:48:28 -0400
committer	Anna Schumaker <Anna.Schumaker@Netapp.com>	2017-11-17 13:47:57 -0500
commit		0ba6f37012db2f88f881cd818aec6e1886f61abb (patch)
tree		e1e66327ea39b3d85cd3e4728901f50b97c8a893 /net/sunrpc
parent		531cca0c9b17c185377fd081b43ffca953cfecad (diff)
xprtrdma: Refactor rpcrdma_deferred_completion
Invoke a common routine for releasing hardware resources (for example,
invalidating MRs). This needs to be done whether an RPC Reply has
arrived or the RPC was terminated early.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Diffstat (limited to 'net/sunrpc')
 net/sunrpc/xprtrdma/rpc_rdma.c  | 26
 net/sunrpc/xprtrdma/transport.c |  6
 net/sunrpc/xprtrdma/xprt_rdma.h |  3
3 files changed, 22 insertions(+), 13 deletions(-)
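
In outline, the change funnels both cleanup paths through a single helper and uses a flag bit to record which path still owes the release. Below is a compilable userspace sketch of that shape, with simplified stand-in types and names; only rpcrdma_release_rqst() and RPCRDMA_REQ_F_PENDING correspond to identifiers the patch itself introduces.

    #include <stdbool.h>
    #include <stdio.h>

    struct req {
            bool registered;    /* stands in for a non-empty rl_registered list */
            bool pending;       /* stands in for RPCRDMA_REQ_F_PENDING */
    };

    /* Common routine (analogue of rpcrdma_release_rqst): invalidate and
     * unmap hardware resources, whether the Reply arrived or the RPC
     * was terminated early. The guard makes a second call harmless.
     */
    static void release_rqst(struct req *req)
    {
            if (req->registered) {
                    req->registered = false;   /* analogue of ro_unmap_sync() */
                    puts("MRs invalidated and unmapped");
            }
    }

    /* Reply path: clear "pending" once a Reply takes over cleanup, then
     * release resources before completing the RPC.
     */
    static void deferred_completion(struct req *req)
    {
            req->pending = false;
            release_rqst(req);
            /* ... wake the waiting application ... */
    }

    /* Early-termination path: release only if no Reply got there first. */
    static void rpc_free(struct req *req)
    {
            if (req->pending)
                    release_rqst(req);
    }

    int main(void)
    {
            struct req r = { .registered = true, .pending = true };

            deferred_completion(&r);   /* normal completion releases */
            rpc_free(&r);              /* nothing left to release here */
            return 0;
    }

As in the kernel code, the helper checks whether anything is still registered before unmapping, so the two callers never have to coordinate beyond the pending flag.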
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 9951c81b82ed..853dede38900 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -1293,6 +1293,20 @@ out_badheader:
 	goto out;
 }
 
+void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
+{
+	/* Invalidate and unmap the data payloads before waking
+	 * the waiting application. This guarantees the memory
+	 * regions are properly fenced from the server before the
+	 * application accesses the data. It also ensures proper
+	 * send flow control: waking the next RPC waits until this
+	 * RPC has relinquished all its Send Queue entries.
+	 */
+	if (!list_empty(&req->rl_registered))
+		r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt,
+						    &req->rl_registered);
+}
+
 /* Reply handling runs in the poll worker thread. Anything that
  * might wait is deferred to a separate workqueue.
  */
@@ -1301,18 +1315,9 @@ void rpcrdma_deferred_completion(struct work_struct *work)
 	struct rpcrdma_rep *rep =
 			container_of(work, struct rpcrdma_rep, rr_work);
 	struct rpcrdma_req *req = rpcr_to_rdmar(rep->rr_rqst);
-	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
 
-	/* Invalidate and unmap the data payloads before waking
-	 * the waiting application. This guarantees the memory
-	 * regions are properly fenced from the server before the
-	 * application accesses the data. It also ensures proper
-	 * send flow control: waking the next RPC waits until this
-	 * RPC has relinquished all its Send Queue entries.
-	 */
 	rpcrdma_mark_remote_invalidation(&req->rl_registered, rep);
-	r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, &req->rl_registered);
-
+	rpcrdma_release_rqst(rep->rr_rxprt, req);
 	rpcrdma_complete_rqst(rep);
 }
 
@@ -1374,6 +1379,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
 	req = rpcr_to_rdmar(rqst);
 	req->rl_reply = rep;
 	rep->rr_rqst = rqst;
+	clear_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);
 
 	dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n",
 		__func__, rep, req, be32_to_cpu(rep->rr_xid));
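
The comment carried into rpcrdma_release_rqst() states the ordering requirement: unmap and fence the memory regions before rpcrdma_complete_rqst() wakes the application. A minimal userspace analogue of that release-before-wake discipline, using POSIX threads (these names are illustrative, not kernel APIs):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
    static bool reply_ready;

    /* Completion side: release resources BEFORE signalling the waiter. */
    void complete_rpc(void (*release_resources)(void))
    {
            release_resources();        /* analogue of ro_unmap_sync() */

            pthread_mutex_lock(&lock);
            reply_ready = true;         /* only now may the waiter run */
            pthread_cond_signal(&done);
            pthread_mutex_unlock(&lock);
    }

    /* Application side: safe to touch the payload once woken. */
    void wait_for_rpc(void)
    {
            pthread_mutex_lock(&lock);
            while (!reply_ready)
                    pthread_cond_wait(&done, &lock);
            pthread_mutex_unlock(&lock);
    }

If the wake-up came first, the application could read payload buffers that the server's memory registrations still cover, which is exactly what the fencing comment warns against.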
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index acdb2e9c72c8..35aefe201848 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -678,15 +678,14 @@ xprt_rdma_free(struct rpc_task *task)
 	struct rpc_rqst *rqst = task->tk_rqstp;
 	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
-	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 
 	if (test_bit(RPCRDMA_REQ_F_BACKCHANNEL, &req->rl_flags))
 		return;
 
 	dprintk("RPC: %s: called on 0x%p\n", __func__, req->rl_reply);
 
-	if (!list_empty(&req->rl_registered))
-		ia->ri_ops->ro_unmap_sync(r_xprt, &req->rl_registered);
+	if (test_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags))
+		rpcrdma_release_rqst(r_xprt, req);
 	rpcrdma_buffer_put(req);
 }
 
@@ -742,6 +741,7 @@ xprt_rdma_send_request(struct rpc_task *task)
 		goto drop_connection;
 	req->rl_connect_cookie = xprt->connect_cookie;
 
+	set_bit(RPCRDMA_REQ_F_PENDING, &req->rl_flags);
 	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
 		goto drop_connection;
 
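
Taken together, the transport.c hunks define a simple ownership protocol for the new bit: xprt_rdma_send_request() sets RPCRDMA_REQ_F_PENDING before posting the Send, rpcrdma_reply_handler() clears it once a Reply takes over cleanup, and xprt_rdma_free() calls rpcrdma_release_rqst() only while the bit is still set. A hedged C11-atomics analogue of that lifecycle (userspace stand-ins, not the kernel's set_bit()/test_bit()/clear_bit()):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define REQ_F_PENDING   (1u << 0)   /* analogue of RPCRDMA_REQ_F_PENDING */

    struct rdma_req {
            atomic_uint flags;          /* analogue of rl_flags */
    };

    /* send_request(): mark the RPC pending before posting the Send WR. */
    static void send_request(struct rdma_req *req)
    {
            atomic_fetch_or(&req->flags, REQ_F_PENDING);
            /* ... post the Send work request ... */
    }

    /* reply_handler(): a Reply arrived; completion now owns the release. */
    static void reply_handler(struct rdma_req *req)
    {
            atomic_fetch_and(&req->flags, ~REQ_F_PENDING);
            /* ... queue deferred completion, which unmaps the MRs ... */
    }

    /* rpc_free(): on early termination, release only if still pending. */
    static bool must_release(struct rdma_req *req)
    {
            return atomic_load(&req->flags) & REQ_F_PENDING;
    }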
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 417532069842..c260475baa36 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -386,6 +386,7 @@ struct rpcrdma_req {
 /* rl_flags */
 enum {
 	RPCRDMA_REQ_F_BACKCHANNEL = 0,
+	RPCRDMA_REQ_F_PENDING,
 };
 
 static inline void
@@ -655,6 +656,8 @@ int rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst);
 void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *);
 void rpcrdma_complete_rqst(struct rpcrdma_rep *rep);
 void rpcrdma_reply_handler(struct rpcrdma_rep *rep);
+void rpcrdma_release_rqst(struct rpcrdma_xprt *r_xprt,
+			  struct rpcrdma_req *req);
 void rpcrdma_deferred_completion(struct work_struct *work);
 
 static inline void rpcrdma_set_xdrlen(struct xdr_buf *xdr, size_t len)