author     Chuck Lever <chuck.lever@oracle.com>        2016-09-15 10:56:43 -0400
committer  Anna Schumaker <Anna.Schumaker@Netapp.com>  2016-09-19 13:08:38 -0400
commit     90aab6029606152d3d7ea91b41064580f77d7d19
tree       a227e7a1576ed9691bda90df8d507c4d881c222c
parent     b157380af1941a43f3cfa244db1018f717031a42
xprtrdma: Move send_wr to struct rpcrdma_req
Clean up: Most of the fields in each send_wr do not vary. There is
no need to initialize them before each ib_post_send(). This removes
a large-ish data structure from the stack.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
 net/sunrpc/xprtrdma/backchannel.c |  3 ++-
 net/sunrpc/xprtrdma/rpc_rdma.c    |  5 +++--
 net/sunrpc/xprtrdma/verbs.c       | 36 +++++++++++++++---------------------
 net/sunrpc/xprtrdma/xprt_rdma.h   |  4 ++--
 4 files changed, 24 insertions(+), 24 deletions(-)
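
The pattern the patch applies is a general one: hoist invariant initialization out of the hot path by embedding the work request in the long-lived request structure, so each post only writes the fields that actually vary. Here is a minimal, self-contained user-space sketch of the idea; the types and helpers below (struct send_wr, struct req, req_init, req_post) are simplified stand-ins for illustration, not the kernel's or the RDMA core's definitions.

#include <stddef.h>
#include <stdio.h>

#define MAX_IOVS 4

struct sge {
	unsigned long	addr;
	unsigned int	length;
};

struct send_wr {
	struct send_wr	*next;
	struct sge	*sg_list;
	int		num_sge;
	int		opcode;
	int		send_flags;
};

struct req {
	struct send_wr	send_wr;	/* embedded; lives as long as req */
	struct sge	send_iov[MAX_IOVS];
};

enum { WR_SEND = 0, SEND_SIGNALED = 1 };

/* One-time setup, analogous to rpcrdma_create_req() after this patch:
 * the fields that never change are written exactly once. */
static void req_init(struct req *req)
{
	req->send_wr.next = NULL;
	req->send_wr.sg_list = req->send_iov;
	req->send_wr.opcode = WR_SEND;
}

/* Per-send path: only the varying fields are touched, and no
 * work request has to be built on the stack. */
static void req_post(struct req *req, unsigned int len, int signal)
{
	req->send_iov[0].addr = 0;	/* stand-in for a mapped buffer */
	req->send_iov[0].length = len;
	req->send_wr.num_sge = 1;
	req->send_wr.send_flags = signal ? SEND_SIGNALED : 0;
	printf("posting %d s/g entries\n", req->send_wr.num_sge);
}

int main(void)
{
	struct req req;

	req_init(&req);			/* once, at creation time */
	req_post(&req, 128, 1);		/* hot path stays cheap */
	return 0;
}

The trade-off is the same as in the patch: each long-lived rpcrdma_req grows by one embedded ib_send_wr, and in exchange the send path drops both the per-call field initialization and the large-ish on-stack structure.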
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 887ef44c1351..61a58f59133f 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -241,7 +241,8 @@ int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
 	req->rl_send_iov[1].length = rpclen;
 	req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);
 
-	req->rl_niovs = 2;
+	req->rl_send_wr.num_sge = 2;
+
 	return 0;
 
 out_map:
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 6187cee87fa9..c2906e314287 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -687,7 +687,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	req->rl_send_iov[0].length = hdrlen;
 	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);
 
-	req->rl_niovs = 1;
+	req->rl_send_wr.num_sge = 1;
 	if (rtype == rpcrdma_areadch)
 		return 0;
 
@@ -697,7 +697,8 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	req->rl_send_iov[1].length = rpclen;
 	req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);
 
-	req->rl_niovs = 2;
+	req->rl_send_wr.num_sge = 2;
+
 	return 0;
 
 out_overflow:
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 7b189fe680bb..79a6346b96c2 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -849,6 +849,10 @@ rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
 	req->rl_cqe.done = rpcrdma_wc_send;
 	req->rl_buffer = &r_xprt->rx_buf;
 	INIT_LIST_HEAD(&req->rl_registered);
+	req->rl_send_wr.next = NULL;
+	req->rl_send_wr.wr_cqe = &req->rl_cqe;
+	req->rl_send_wr.sg_list = req->rl_send_iov;
+	req->rl_send_wr.opcode = IB_WR_SEND;
 	return req;
 }
 
@@ -1128,7 +1132,7 @@ rpcrdma_buffer_put(struct rpcrdma_req *req)
 	struct rpcrdma_buffer *buffers = req->rl_buffer;
 	struct rpcrdma_rep *rep = req->rl_reply;
 
-	req->rl_niovs = 0;
+	req->rl_send_wr.num_sge = 0;
 	req->rl_reply = NULL;
 
 	spin_lock(&buffers->rb_lock);
@@ -1259,38 +1263,32 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
 		struct rpcrdma_req *req)
 {
 	struct ib_device *device = ia->ri_device;
-	struct ib_send_wr send_wr, *send_wr_fail;
-	struct rpcrdma_rep *rep = req->rl_reply;
-	struct ib_sge *iov = req->rl_send_iov;
+	struct ib_send_wr *send_wr = &req->rl_send_wr;
+	struct ib_send_wr *send_wr_fail;
+	struct ib_sge *sge = req->rl_send_iov;
 	int i, rc;
 
-	if (rep) {
-		rc = rpcrdma_ep_post_recv(ia, rep);
+	if (req->rl_reply) {
+		rc = rpcrdma_ep_post_recv(ia, req->rl_reply);
 		if (rc)
 			return rc;
 		req->rl_reply = NULL;
 	}
 
-	send_wr.next = NULL;
-	send_wr.wr_cqe = &req->rl_cqe;
-	send_wr.sg_list = iov;
-	send_wr.num_sge = req->rl_niovs;
-	send_wr.opcode = IB_WR_SEND;
-
-	for (i = 0; i < send_wr.num_sge; i++)
-		ib_dma_sync_single_for_device(device, iov[i].addr,
-					      iov[i].length, DMA_TO_DEVICE);
+	for (i = 0; i < send_wr->num_sge; i++)
+		ib_dma_sync_single_for_device(device, sge[i].addr,
+					      sge[i].length, DMA_TO_DEVICE);
 	dprintk("RPC:       %s: posting %d s/g entries\n",
-		__func__, send_wr.num_sge);
+		__func__, send_wr->num_sge);
 
 	if (DECR_CQCOUNT(ep) > 0)
-		send_wr.send_flags = 0;
+		send_wr->send_flags = 0;
 	else { /* Provider must take a send completion every now and then */
 		INIT_CQCOUNT(ep);
-		send_wr.send_flags = IB_SEND_SIGNALED;
+		send_wr->send_flags = IB_SEND_SIGNALED;
 	}
 
-	rc = ib_post_send(ia->ri_id->qp, &send_wr, &send_wr_fail);
+	rc = ib_post_send(ia->ri_id->qp, send_wr, &send_wr_fail);
 	if (rc)
 		goto out_postsend_err;
 	return 0;
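
One piece of pre-existing behavior carried through the rpcrdma_ep_post() hunk above is completion throttling: most sends are posted unsignaled, and IB_SEND_SIGNALED is requested only periodically so the provider can retire send-queue entries. A rough user-space sketch of that countdown, assuming DECR_CQCOUNT() decrements a per-endpoint counter and INIT_CQCOUNT() resets it (the names ep, cq_init, cq_count, and must_signal are hypothetical stand-ins):

#include <stdio.h>

struct ep {
	int	cq_init;	/* signal one send in every cq_init posts */
	int	cq_count;	/* countdown to the next signaled send */
};

/* Returns nonzero when this post must request a completion,
 * mirroring the DECR_CQCOUNT()/INIT_CQCOUNT() logic above. */
static int must_signal(struct ep *ep)
{
	if (--ep->cq_count > 0)
		return 0;
	ep->cq_count = ep->cq_init;
	return 1;
}

int main(void)
{
	struct ep ep = { .cq_init = 4, .cq_count = 4 };
	int i;

	for (i = 0; i < 10; i++)
		printf("send %d: %s\n", i,
		       must_signal(&ep) ? "SIGNALED" : "unsignaled");
	return 0;
}

With cq_init set to 4, every fourth post requests a completion; xprtrdma derives its real interval from the send queue depth rather than a fixed constant.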
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index decd13417ac2..3c5a89a4ff4f 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -284,10 +284,10 @@ struct rpcrdma_mr_seg {		/* chunk descriptors */
 struct rpcrdma_buffer;
 struct rpcrdma_req {
 	struct list_head rl_free;
-	unsigned int rl_niovs;
 	unsigned int rl_connect_cookie;
 	struct rpcrdma_buffer *rl_buffer;
-	struct rpcrdma_rep *rl_reply;/* holder for reply buffer */
+	struct rpcrdma_rep *rl_reply;
+	struct ib_send_wr rl_send_wr;
 	struct ib_sge rl_send_iov[RPCRDMA_MAX_IOVS];
 	struct rpcrdma_regbuf *rl_rdmabuf;	/* xprt header */
 	struct rpcrdma_regbuf *rl_sendbuf;	/* rq_snd_buf */