about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorChuck Lever <chuck.lever@oracle.com>2017-04-09 13:05:36 -0400
committerJ. Bruce Fields <bfields@redhat.com>2017-04-25 17:25:54 -0400
commit17f5f7f506aaca985b95df7ef7fc2ff49c36a8e9 (patch)
treee23f5692874b2d541dfc49c9bb0feb569f00f9cb
parent99bbf6ecc694dfe0b026e15359c5aa2a60b97a93 (diff)
svcrdma: Move send_wr to svc_rdma_op_ctxt
Clean up: Move the ib_send_wr off the stack, and move common code to post a Send Work Request into a helper.

This is a refactoring change only.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
-rw-r--r--include/linux/sunrpc/svc_rdma.h4
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_backchannel.c11
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c64
3 files changed, 44 insertions, 35 deletions
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index b105f73e3ca2..287db5c179d8 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -85,6 +85,7 @@ struct svc_rdma_op_ctxt {
85 enum dma_data_direction direction; 85 enum dma_data_direction direction;
86 int count; 86 int count;
87 unsigned int mapped_sges; 87 unsigned int mapped_sges;
88 struct ib_send_wr send_wr;
88 struct ib_sge sge[RPCSVC_MAXPAGES]; 89 struct ib_sge sge[RPCSVC_MAXPAGES];
89 struct page *pages[RPCSVC_MAXPAGES]; 90 struct page *pages[RPCSVC_MAXPAGES];
90}; 91};
@@ -227,6 +228,9 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
227/* svc_rdma_sendto.c */ 228/* svc_rdma_sendto.c */
228extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *, 229extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *,
229 struct svc_rdma_req_map *, bool); 230 struct svc_rdma_req_map *, bool);
231extern int svc_rdma_post_send_wr(struct svcxprt_rdma *rdma,
232 struct svc_rdma_op_ctxt *ctxt,
233 int num_sge, u32 inv_rkey);
230extern int svc_rdma_sendto(struct svc_rqst *); 234extern int svc_rdma_sendto(struct svc_rqst *);
231extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *, 235extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
232 int); 236 int);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index ff1df40f0d26..f12f39c189c3 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -104,7 +104,6 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
104 struct xdr_buf *sndbuf = &rqst->rq_snd_buf; 104 struct xdr_buf *sndbuf = &rqst->rq_snd_buf;
105 struct svc_rdma_op_ctxt *ctxt; 105 struct svc_rdma_op_ctxt *ctxt;
106 struct svc_rdma_req_map *vec; 106 struct svc_rdma_req_map *vec;
107 struct ib_send_wr send_wr;
108 int ret; 107 int ret;
109 108
110 vec = svc_rdma_get_req_map(rdma); 109 vec = svc_rdma_get_req_map(rdma);
@@ -132,15 +131,7 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
132 } 131 }
133 svc_rdma_count_mappings(rdma, ctxt); 132 svc_rdma_count_mappings(rdma, ctxt);
134 133
135 memset(&send_wr, 0, sizeof(send_wr)); 134 ret = svc_rdma_post_send_wr(rdma, ctxt, 1, 0);
136 ctxt->cqe.done = svc_rdma_wc_send;
137 send_wr.wr_cqe = &ctxt->cqe;
138 send_wr.sg_list = ctxt->sge;
139 send_wr.num_sge = 1;
140 send_wr.opcode = IB_WR_SEND;
141 send_wr.send_flags = IB_SEND_SIGNALED;
142
143 ret = svc_rdma_send(rdma, &send_wr);
144 if (ret) { 135 if (ret) {
145 ret = -EIO; 136 ret = -EIO;
146 goto out_unmap; 137 goto out_unmap;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 515221b16d09..f90b40d0932f 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -435,6 +435,43 @@ out_err:
435 return -EIO; 435 return -EIO;
436} 436}
437 437
438/**
439 * svc_rdma_post_send_wr - Set up and post one Send Work Request
440 * @rdma: controlling transport
441 * @ctxt: op_ctxt for transmitting the Send WR
442 * @num_sge: number of SGEs to send
443 * @inv_rkey: R_key argument to Send With Invalidate, or zero
444 *
445 * Returns:
446 * %0 if the Send* was posted successfully,
447 * %-ENOTCONN if the connection was lost or dropped,
448 * %-EINVAL if there was a problem with the Send we built,
449 * %-ENOMEM if ib_post_send failed.
450 */
451int svc_rdma_post_send_wr(struct svcxprt_rdma *rdma,
452 struct svc_rdma_op_ctxt *ctxt, int num_sge,
453 u32 inv_rkey)
454{
455 struct ib_send_wr *send_wr = &ctxt->send_wr;
456
457 dprintk("svcrdma: posting Send WR with %u sge(s)\n", num_sge);
458
459 send_wr->next = NULL;
460 ctxt->cqe.done = svc_rdma_wc_send;
461 send_wr->wr_cqe = &ctxt->cqe;
462 send_wr->sg_list = ctxt->sge;
463 send_wr->num_sge = num_sge;
464 send_wr->send_flags = IB_SEND_SIGNALED;
465 if (inv_rkey) {
466 send_wr->opcode = IB_WR_SEND_WITH_INV;
467 send_wr->ex.invalidate_rkey = inv_rkey;
468 } else {
469 send_wr->opcode = IB_WR_SEND;
470 }
471
472 return svc_rdma_send(rdma, send_wr);
473}
474
438/* This function prepares the portion of the RPCRDMA message to be 475/* This function prepares the portion of the RPCRDMA message to be
439 * sent in the RDMA_SEND. This function is called after data sent via 476 * sent in the RDMA_SEND. This function is called after data sent via
440 * RDMA has already been transmitted. There are three cases: 477 * RDMA has already been transmitted. There are three cases:
@@ -460,7 +497,6 @@ static int send_reply(struct svcxprt_rdma *rdma,
460 u32 inv_rkey) 497 u32 inv_rkey)
461{ 498{
462 struct svc_rdma_op_ctxt *ctxt; 499 struct svc_rdma_op_ctxt *ctxt;
463 struct ib_send_wr send_wr;
464 u32 xdr_off; 500 u32 xdr_off;
465 int sge_no; 501 int sge_no;
466 int sge_bytes; 502 int sge_bytes;
@@ -524,19 +560,8 @@ static int send_reply(struct svcxprt_rdma *rdma,
524 pr_err("svcrdma: Too many sges (%d)\n", sge_no); 560 pr_err("svcrdma: Too many sges (%d)\n", sge_no);
525 goto err; 561 goto err;
526 } 562 }
527 memset(&send_wr, 0, sizeof send_wr);
528 ctxt->cqe.done = svc_rdma_wc_send;
529 send_wr.wr_cqe = &ctxt->cqe;
530 send_wr.sg_list = ctxt->sge;
531 send_wr.num_sge = sge_no;
532 if (inv_rkey) {
533 send_wr.opcode = IB_WR_SEND_WITH_INV;
534 send_wr.ex.invalidate_rkey = inv_rkey;
535 } else
536 send_wr.opcode = IB_WR_SEND;
537 send_wr.send_flags = IB_SEND_SIGNALED;
538 563
539 ret = svc_rdma_send(rdma, &send_wr); 564 ret = svc_rdma_post_send_wr(rdma, ctxt, sge_no, inv_rkey);
540 if (ret) 565 if (ret)
541 goto err; 566 goto err;
542 567
@@ -652,7 +677,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
652void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, 677void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
653 int status) 678 int status)
654{ 679{
655 struct ib_send_wr err_wr;
656 struct page *p; 680 struct page *p;
657 struct svc_rdma_op_ctxt *ctxt; 681 struct svc_rdma_op_ctxt *ctxt;
658 enum rpcrdma_errcode err; 682 enum rpcrdma_errcode err;
@@ -692,17 +716,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
692 } 716 }
693 svc_rdma_count_mappings(xprt, ctxt); 717 svc_rdma_count_mappings(xprt, ctxt);
694 718
695 /* Prepare SEND WR */ 719 ret = svc_rdma_post_send_wr(xprt, ctxt, 1, 0);
696 memset(&err_wr, 0, sizeof(err_wr));
697 ctxt->cqe.done = svc_rdma_wc_send;
698 err_wr.wr_cqe = &ctxt->cqe;
699 err_wr.sg_list = ctxt->sge;
700 err_wr.num_sge = 1;
701 err_wr.opcode = IB_WR_SEND;
702 err_wr.send_flags = IB_SEND_SIGNALED;
703
704 /* Post It */
705 ret = svc_rdma_send(xprt, &err_wr);
706 if (ret) { 720 if (ret) {
707 dprintk("svcrdma: Error %d posting send for protocol error\n", 721 dprintk("svcrdma: Error %d posting send for protocol error\n",
708 ret); 722 ret);