diff options
author | Chuck Lever <chuck.lever@oracle.com> | 2016-05-04 10:53:05 -0400 |
---|---|---|
committer | J. Bruce Fields <bfields@redhat.com> | 2016-05-13 15:53:05 -0400 |
commit | 9ec64052060f972101b49f47631f5072f075426c (patch) | |
tree | de3b068b304535de86b20e7eb495e1e23b00b51c | |
parent | 6625d0913771df5f12b9531c8cb8414e55f1c21d (diff) |
svcrdma: svc_rdma_put_context() is invoked twice in Send error path
Get a fresh op_ctxt in send_reply() instead of in svc_rdma_sendto().
This ensures that svc_rdma_put_context() is invoked only once if
send_reply() fails.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_sendto.c | 28 |
1 file changed, 13 insertions, 15 deletions
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 4f1b1c4f45f9..54d533300620 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c | |||
@@ -463,25 +463,21 @@ static int send_reply(struct svcxprt_rdma *rdma, | |||
463 | struct svc_rqst *rqstp, | 463 | struct svc_rqst *rqstp, |
464 | struct page *page, | 464 | struct page *page, |
465 | struct rpcrdma_msg *rdma_resp, | 465 | struct rpcrdma_msg *rdma_resp, |
466 | struct svc_rdma_op_ctxt *ctxt, | ||
467 | struct svc_rdma_req_map *vec, | 466 | struct svc_rdma_req_map *vec, |
468 | int byte_count) | 467 | int byte_count) |
469 | { | 468 | { |
469 | struct svc_rdma_op_ctxt *ctxt; | ||
470 | struct ib_send_wr send_wr; | 470 | struct ib_send_wr send_wr; |
471 | u32 xdr_off; | 471 | u32 xdr_off; |
472 | int sge_no; | 472 | int sge_no; |
473 | int sge_bytes; | 473 | int sge_bytes; |
474 | int page_no; | 474 | int page_no; |
475 | int pages; | 475 | int pages; |
476 | int ret; | 476 | int ret = -EIO; |
477 | |||
478 | ret = svc_rdma_repost_recv(rdma, GFP_KERNEL); | ||
479 | if (ret) { | ||
480 | svc_rdma_put_context(ctxt, 0); | ||
481 | return -ENOTCONN; | ||
482 | } | ||
483 | 477 | ||
484 | /* Prepare the context */ | 478 | /* Prepare the context */ |
479 | ctxt = svc_rdma_get_context(rdma); | ||
480 | ctxt->direction = DMA_TO_DEVICE; | ||
485 | ctxt->pages[0] = page; | 481 | ctxt->pages[0] = page; |
486 | ctxt->count = 1; | 482 | ctxt->count = 1; |
487 | 483 | ||
@@ -565,8 +561,7 @@ static int send_reply(struct svcxprt_rdma *rdma, | |||
565 | err: | 561 | err: |
566 | svc_rdma_unmap_dma(ctxt); | 562 | svc_rdma_unmap_dma(ctxt); |
567 | svc_rdma_put_context(ctxt, 1); | 563 | svc_rdma_put_context(ctxt, 1); |
568 | pr_err("svcrdma: failed to send reply, rc=%d\n", ret); | 564 | return ret; |
569 | return -EIO; | ||
570 | } | 565 | } |
571 | 566 | ||
572 | void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp) | 567 | void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp) |
@@ -585,7 +580,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) | |||
585 | int ret; | 580 | int ret; |
586 | int inline_bytes; | 581 | int inline_bytes; |
587 | struct page *res_page; | 582 | struct page *res_page; |
588 | struct svc_rdma_op_ctxt *ctxt; | ||
589 | struct svc_rdma_req_map *vec; | 583 | struct svc_rdma_req_map *vec; |
590 | 584 | ||
591 | dprintk("svcrdma: sending response for rqstp=%p\n", rqstp); | 585 | dprintk("svcrdma: sending response for rqstp=%p\n", rqstp); |
@@ -598,8 +592,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) | |||
598 | rp_ary = svc_rdma_get_reply_array(rdma_argp, wr_ary); | 592 | rp_ary = svc_rdma_get_reply_array(rdma_argp, wr_ary); |
599 | 593 | ||
600 | /* Build an req vec for the XDR */ | 594 | /* Build an req vec for the XDR */ |
601 | ctxt = svc_rdma_get_context(rdma); | ||
602 | ctxt->direction = DMA_TO_DEVICE; | ||
603 | vec = svc_rdma_get_req_map(rdma); | 595 | vec = svc_rdma_get_req_map(rdma); |
604 | ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec, wr_ary != NULL); | 596 | ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec, wr_ary != NULL); |
605 | if (ret) | 597 | if (ret) |
@@ -635,7 +627,12 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) | |||
635 | inline_bytes -= ret; | 627 | inline_bytes -= ret; |
636 | } | 628 | } |
637 | 629 | ||
638 | ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec, | 630 | /* Post a fresh Receive buffer _before_ sending the reply */ |
631 | ret = svc_rdma_post_recv(rdma, GFP_KERNEL); | ||
632 | if (ret) | ||
633 | goto err1; | ||
634 | |||
635 | ret = send_reply(rdma, rqstp, res_page, rdma_resp, vec, | ||
639 | inline_bytes); | 636 | inline_bytes); |
640 | if (ret < 0) | 637 | if (ret < 0) |
641 | goto err1; | 638 | goto err1; |
@@ -648,7 +645,8 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) | |||
648 | put_page(res_page); | 645 | put_page(res_page); |
649 | err0: | 646 | err0: |
650 | svc_rdma_put_req_map(rdma, vec); | 647 | svc_rdma_put_req_map(rdma, vec); |
651 | svc_rdma_put_context(ctxt, 0); | 648 | pr_err("svcrdma: Could not send reply, err=%d. Closing transport.\n", |
649 | ret); | ||
652 | set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); | 650 | set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); |
653 | return -ENOTCONN; | 651 | return -ENOTCONN; |
654 | } | 652 | } |