aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChuck Lever <chuck.lever@oracle.com>2018-05-07 15:27:32 -0400
committerJ. Bruce Fields <bfields@redhat.com>2018-05-11 15:48:57 -0400
commit1e5f4160745690a0476929d128a336cae95c1df9 (patch)
treed8a7e8ad1aab3a1e343ca0f1de79a3806b057dd4
parent2c577bfea85e421bfa91df16ccf5156361aa8d4b (diff)
svcrdma: Simplify svc_rdma_recv_ctxt_put
Currently svc_rdma_recv_ctxt_put's callers have to know whether they want to free the ctxt's pages or not. This means the human developers have to know when and why to set that free_pages argument. Instead, the ctxt should carry that information with it so that svc_rdma_recv_ctxt_put does the right thing no matter who is calling. We want to keep track of the number of pages in the Receive buffer separately from the number of pages pulled over by RDMA Read. This is so that the correct number of pages can be freed properly and that number is well-documented. So now, rc_hdr_count is the number of pages consumed by head[0] (i.e., the page index where the Read chunk should start); and rc_page_count is always the number of pages that need to be released when the ctxt is put. The @free_pages argument is no longer needed. Signed-off-by: Chuck Lever <chuck.lever@oracle.com> Signed-off-by: J. Bruce Fields <bfields@redhat.com>
-rw-r--r--include/linux/sunrpc/svc_rdma.h3
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c41
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_rw.c4
3 files changed, 25 insertions, 23 deletions
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 3cb66319a814..f0bd0b6d8931 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -173,8 +173,7 @@ extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
173extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma); 173extern void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma);
174extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma); 174extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma);
175extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma, 175extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
176 struct svc_rdma_recv_ctxt *ctxt, 176 struct svc_rdma_recv_ctxt *ctxt);
177 int free_pages);
178extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma); 177extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma);
179extern int svc_rdma_recvfrom(struct svc_rqst *); 178extern int svc_rdma_recvfrom(struct svc_rqst *);
180 179
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index b7d9c55ee896..ecfe7c90a268 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -175,18 +175,15 @@ static void svc_rdma_recv_ctxt_unmap(struct svcxprt_rdma *rdma,
175 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list 175 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
176 * @rdma: controlling svcxprt_rdma 176 * @rdma: controlling svcxprt_rdma
177 * @ctxt: object to return to the free list 177 * @ctxt: object to return to the free list
178 * @free_pages: Non-zero if rc_pages should be freed
179 * 178 *
180 */ 179 */
181void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma, 180void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
182 struct svc_rdma_recv_ctxt *ctxt, 181 struct svc_rdma_recv_ctxt *ctxt)
183 int free_pages)
184{ 182{
185 unsigned int i; 183 unsigned int i;
186 184
187 if (free_pages) 185 for (i = 0; i < ctxt->rc_page_count; i++)
188 for (i = 0; i < ctxt->rc_page_count; i++) 186 put_page(ctxt->rc_pages[i]);
189 put_page(ctxt->rc_pages[i]);
190 spin_lock(&rdma->sc_recv_lock); 187 spin_lock(&rdma->sc_recv_lock);
191 list_add(&ctxt->rc_list, &rdma->sc_recv_ctxts); 188 list_add(&ctxt->rc_list, &rdma->sc_recv_ctxts);
192 spin_unlock(&rdma->sc_recv_lock); 189 spin_unlock(&rdma->sc_recv_lock);
@@ -243,11 +240,11 @@ static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
243 240
244err_put_ctxt: 241err_put_ctxt:
245 svc_rdma_recv_ctxt_unmap(rdma, ctxt); 242 svc_rdma_recv_ctxt_unmap(rdma, ctxt);
246 svc_rdma_recv_ctxt_put(rdma, ctxt, 1); 243 svc_rdma_recv_ctxt_put(rdma, ctxt);
247 return -ENOMEM; 244 return -ENOMEM;
248err_post: 245err_post:
249 svc_rdma_recv_ctxt_unmap(rdma, ctxt); 246 svc_rdma_recv_ctxt_unmap(rdma, ctxt);
250 svc_rdma_recv_ctxt_put(rdma, ctxt, 1); 247 svc_rdma_recv_ctxt_put(rdma, ctxt);
251 svc_xprt_put(&rdma->sc_xprt); 248 svc_xprt_put(&rdma->sc_xprt);
252 return ret; 249 return ret;
253} 250}
@@ -316,7 +313,7 @@ flushed:
316 ib_wc_status_msg(wc->status), 313 ib_wc_status_msg(wc->status),
317 wc->status, wc->vendor_err); 314 wc->status, wc->vendor_err);
318post_err: 315post_err:
319 svc_rdma_recv_ctxt_put(rdma, ctxt, 1); 316 svc_rdma_recv_ctxt_put(rdma, ctxt);
320 set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); 317 set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
321 svc_xprt_enqueue(&rdma->sc_xprt); 318 svc_xprt_enqueue(&rdma->sc_xprt);
322out: 319out:
@@ -334,11 +331,11 @@ void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
334 331
335 while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) { 332 while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
336 list_del(&ctxt->rc_list); 333 list_del(&ctxt->rc_list);
337 svc_rdma_recv_ctxt_put(rdma, ctxt, 1); 334 svc_rdma_recv_ctxt_put(rdma, ctxt);
338 } 335 }
339 while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) { 336 while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
340 list_del(&ctxt->rc_list); 337 list_del(&ctxt->rc_list);
341 svc_rdma_recv_ctxt_put(rdma, ctxt, 1); 338 svc_rdma_recv_ctxt_put(rdma, ctxt);
342 } 339 }
343} 340}
344 341
@@ -383,16 +380,19 @@ static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
383 len -= min_t(u32, len, ctxt->rc_sges[sge_no].length); 380 len -= min_t(u32, len, ctxt->rc_sges[sge_no].length);
384 sge_no++; 381 sge_no++;
385 } 382 }
383 ctxt->rc_hdr_count = sge_no;
386 rqstp->rq_respages = &rqstp->rq_pages[sge_no]; 384 rqstp->rq_respages = &rqstp->rq_pages[sge_no];
387 rqstp->rq_next_page = rqstp->rq_respages + 1; 385 rqstp->rq_next_page = rqstp->rq_respages + 1;
388 386
389 /* If not all pages were used from the SGL, free the remaining ones */ 387 /* If not all pages were used from the SGL, free the remaining ones */
390 len = sge_no;
391 while (sge_no < ctxt->rc_recv_wr.num_sge) { 388 while (sge_no < ctxt->rc_recv_wr.num_sge) {
392 page = ctxt->rc_pages[sge_no++]; 389 page = ctxt->rc_pages[sge_no++];
393 put_page(page); 390 put_page(page);
394 } 391 }
395 ctxt->rc_page_count = len; 392
393 /* @ctxt's pages have all been released or moved to @rqstp->rq_pages.
394 */
395 ctxt->rc_page_count = 0;
396 396
397 /* Set up tail */ 397 /* Set up tail */
398 rqstp->rq_arg.tail[0].iov_base = NULL; 398 rqstp->rq_arg.tail[0].iov_base = NULL;
@@ -602,11 +602,14 @@ static void rdma_read_complete(struct svc_rqst *rqstp,
602{ 602{
603 int page_no; 603 int page_no;
604 604
605 /* Copy RPC pages */ 605 /* Move Read chunk pages to rqstp so that they will be released
606 * when svc_process is done with them.
607 */
606 for (page_no = 0; page_no < head->rc_page_count; page_no++) { 608 for (page_no = 0; page_no < head->rc_page_count; page_no++) {
607 put_page(rqstp->rq_pages[page_no]); 609 put_page(rqstp->rq_pages[page_no]);
608 rqstp->rq_pages[page_no] = head->rc_pages[page_no]; 610 rqstp->rq_pages[page_no] = head->rc_pages[page_no];
609 } 611 }
612 head->rc_page_count = 0;
610 613
611 /* Point rq_arg.pages past header */ 614 /* Point rq_arg.pages past header */
612 rqstp->rq_arg.pages = &rqstp->rq_pages[head->rc_hdr_count]; 615 rqstp->rq_arg.pages = &rqstp->rq_pages[head->rc_hdr_count];
@@ -777,7 +780,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
777 if (svc_rdma_is_backchannel_reply(xprt, p)) { 780 if (svc_rdma_is_backchannel_reply(xprt, p)) {
778 ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p, 781 ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p,
779 &rqstp->rq_arg); 782 &rqstp->rq_arg);
780 svc_rdma_recv_ctxt_put(rdma_xprt, ctxt, 0); 783 svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
781 return ret; 784 return ret;
782 } 785 }
783 786
@@ -786,7 +789,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
786 goto out_readchunk; 789 goto out_readchunk;
787 790
788complete: 791complete:
789 svc_rdma_recv_ctxt_put(rdma_xprt, ctxt, 0); 792 svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
790 rqstp->rq_prot = IPPROTO_MAX; 793 rqstp->rq_prot = IPPROTO_MAX;
791 svc_xprt_copy_addrs(rqstp, xprt); 794 svc_xprt_copy_addrs(rqstp, xprt);
792 return rqstp->rq_arg.len; 795 return rqstp->rq_arg.len;
@@ -799,16 +802,16 @@ out_readchunk:
799 802
800out_err: 803out_err:
801 svc_rdma_send_error(rdma_xprt, p, ret); 804 svc_rdma_send_error(rdma_xprt, p, ret);
802 svc_rdma_recv_ctxt_put(rdma_xprt, ctxt, 0); 805 svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
803 return 0; 806 return 0;
804 807
805out_postfail: 808out_postfail:
806 if (ret == -EINVAL) 809 if (ret == -EINVAL)
807 svc_rdma_send_error(rdma_xprt, p, ret); 810 svc_rdma_send_error(rdma_xprt, p, ret);
808 svc_rdma_recv_ctxt_put(rdma_xprt, ctxt, 1); 811 svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
809 return ret; 812 return ret;
810 813
811out_drop: 814out_drop:
812 svc_rdma_recv_ctxt_put(rdma_xprt, ctxt, 1); 815 svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
813 return 0; 816 return 0;
814} 817}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index c080ce20ff40..8242aa318ac1 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -282,7 +282,7 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
282 pr_err("svcrdma: read ctx: %s (%u/0x%x)\n", 282 pr_err("svcrdma: read ctx: %s (%u/0x%x)\n",
283 ib_wc_status_msg(wc->status), 283 ib_wc_status_msg(wc->status),
284 wc->status, wc->vendor_err); 284 wc->status, wc->vendor_err);
285 svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt, 1); 285 svc_rdma_recv_ctxt_put(rdma, info->ri_readctxt);
286 } else { 286 } else {
287 spin_lock(&rdma->sc_rq_dto_lock); 287 spin_lock(&rdma->sc_rq_dto_lock);
288 list_add_tail(&info->ri_readctxt->rc_list, 288 list_add_tail(&info->ri_readctxt->rc_list,
@@ -834,7 +834,7 @@ int svc_rdma_recv_read_chunk(struct svcxprt_rdma *rdma, struct svc_rqst *rqstp,
834 * head->rc_arg. Pages involved with RDMA Read I/O are 834 * head->rc_arg. Pages involved with RDMA Read I/O are
835 * transferred there. 835 * transferred there.
836 */ 836 */
837 head->rc_hdr_count = head->rc_page_count; 837 head->rc_page_count = head->rc_hdr_count;
838 head->rc_arg.head[0] = rqstp->rq_arg.head[0]; 838 head->rc_arg.head[0] = rqstp->rq_arg.head[0];
839 head->rc_arg.tail[0] = rqstp->rq_arg.tail[0]; 839 head->rc_arg.tail[0] = rqstp->rq_arg.tail[0];
840 head->rc_arg.pages = head->rc_pages; 840 head->rc_arg.pages = head->rc_pages;