author      Chuck Lever <chuck.lever@oracle.com>            2017-10-20 10:47:55 -0400
committer   Anna Schumaker <Anna.Schumaker@Netapp.com>      2017-11-17 13:47:56 -0500
commit      857f9acab9343788fe59f7be3a4710131b705db4 (patch)
tree        812b379159f43f949caca0783ed06aca895c30dd /net/sunrpc
parent      394b2c77cb761fb1382b0e97b7cdff2dd717b5ee (diff)
xprtrdma: Change return value of rpcrdma_prepare_send_sges()
Clean up: Make rpcrdma_prepare_send_sges() return a negative errno
instead of a bool. Soon callers will want distinct treatments of
different types of failures.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
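
For illustration: once the return value is a negative errno, a caller can give
different failures different treatment instead of collapsing everything to
-EIO. The branch below is a hypothetical sketch, not part of this patch; at
this point in the series both call sites still treat any failure as fatal, and
the -ENOBUFS case is an assumed example of a "distinct treatment".

	ret = rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
					&rqst->rq_snd_buf, rpcrdma_noch);
	switch (ret) {
	case 0:
		break;		/* SGEs ready; post the Send WR */
	case -ENOBUFS:		/* assumed: transient shortage, worth retrying */
		return -EAGAIN;
	default:		/* anything else is fatal for this RPC */
		return -EIO;
	}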
Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/xprtrdma/backchannel.c |  4
-rw-r--r--  net/sunrpc/xprtrdma/rpc_rdma.c    | 52
-rw-r--r--  net/sunrpc/xprtrdma/xprt_rdma.h   |  6
3 files changed, 38 insertions(+), 24 deletions(-)
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index d31d0ac5ada9..f0d5998330fe 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -222,8 +222,8 @@ int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
 	*p++ = xdr_zero;
 	*p = xdr_zero;
 
-	if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req, RPCRDMA_HDRLEN_MIN,
-				       &rqst->rq_snd_buf, rpcrdma_noch))
+	if (rpcrdma_prepare_send_sges(r_xprt, req, RPCRDMA_HDRLEN_MIN,
+				      &rqst->rq_snd_buf, rpcrdma_noch))
 		return -EIO;
 	return 0;
 }
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 4f6c5395d198..e3ece9843f9d 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -544,7 +544,7 @@ rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
 
 	if (unlikely(!rpcrdma_regbuf_is_mapped(rb))) {
 		if (!__rpcrdma_dma_map_regbuf(ia, rb))
-			return false;
+			goto out_regbuf;
 		sge->addr = rdmab_addr(rb);
 		sge->lkey = rdmab_lkey(rb);
 	}
@@ -554,6 +554,10 @@ rpcrdma_prepare_hdr_sge(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
 				      sge->length, DMA_TO_DEVICE);
 	req->rl_send_wr.num_sge++;
 	return true;
+
+out_regbuf:
+	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
+	return false;
 }
 
 /* Prepare the Send SGEs. The head and tail iovec, and each entry
@@ -574,7 +578,7 @@ rpcrdma_prepare_msg_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
 	 * DMA-mapped. Sync the content that has changed.
 	 */
 	if (!rpcrdma_dma_map_regbuf(ia, rb))
-		return false;
+		goto out_regbuf;
 	sge_no = 1;
 	sge[sge_no].addr = rdmab_addr(rb);
 	sge[sge_no].length = xdr->head[0].iov_len;
@@ -662,6 +666,10 @@ out:
 	req->rl_send_wr.num_sge += sge_no;
 	return true;
 
+out_regbuf:
+	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
+	return false;
+
 out_mapping_overflow:
 	rpcrdma_unmap_sges(ia, req);
 	pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
@@ -673,26 +681,32 @@ out_mapping_err:
 	return false;
 }
 
-bool
-rpcrdma_prepare_send_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req,
-			  u32 hdrlen, struct xdr_buf *xdr,
-			  enum rpcrdma_chunktype rtype)
+/**
+ * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
+ * @r_xprt: controlling transport
+ * @req: context of RPC Call being marshalled
+ * @hdrlen: size of transport header, in bytes
+ * @xdr: xdr_buf containing RPC Call
+ * @rtype: chunk type being encoded
+ *
+ * Returns 0 on success; otherwise a negative errno is returned.
+ */
+int
+rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
+			  struct rpcrdma_req *req, u32 hdrlen,
+			  struct xdr_buf *xdr, enum rpcrdma_chunktype rtype)
 {
 	req->rl_send_wr.num_sge = 0;
 	req->rl_mapped_sges = 0;
 
-	if (!rpcrdma_prepare_hdr_sge(ia, req, hdrlen))
-		goto out_map;
+	if (!rpcrdma_prepare_hdr_sge(&r_xprt->rx_ia, req, hdrlen))
+		return -EIO;
 
 	if (rtype != rpcrdma_areadch)
-		if (!rpcrdma_prepare_msg_sges(ia, req, xdr, rtype))
-			goto out_map;
-
-	return true;
+		if (!rpcrdma_prepare_msg_sges(&r_xprt->rx_ia, req, xdr, rtype))
+			return -EIO;
 
-out_map:
-	pr_err("rpcrdma: failed to DMA map a Send buffer\n");
-	return false;
+	return 0;
 }
 
 /**
@@ -843,12 +857,10 @@ rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
 		transfertypes[rtype], transfertypes[wtype],
 		xdr_stream_pos(xdr));
 
-	if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req,
-				       xdr_stream_pos(xdr),
-				       &rqst->rq_snd_buf, rtype)) {
-		ret = -EIO;
+	ret = rpcrdma_prepare_send_sges(r_xprt, req, xdr_stream_pos(xdr),
+					&rqst->rq_snd_buf, rtype);
+	if (ret)
 		goto out_err;
-	}
 	return 0;
 
 out_err:
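
Note that both helpers above also adopt the kernel's usual goto-to-error-label
pattern, so each function now reports its own DMA-mapping failure instead of
leaving the pr_err() to the caller. A minimal standalone illustration of the
idiom follows; the function and all names in it are hypothetical, not from the
kernel tree.

	struct example_ctx {
		bool mapped;
	};

	static bool example_prepare(struct example_ctx *ctx)
	{
		if (!example_map_buffer(ctx))	/* hypothetical helper */
			goto out_unmapped;	/* one shared failure exit */
		ctx->mapped = true;
		return true;

	out_unmapped:
		pr_err("example: failed to map a buffer\n");
		return false;
	}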
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 0e0ae6195a5b..0b8ca5e5c706 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -613,8 +613,10 @@ enum rpcrdma_chunktype {
 	rpcrdma_replych
 };
 
-bool rpcrdma_prepare_send_sges(struct rpcrdma_ia *, struct rpcrdma_req *,
-			       u32, struct xdr_buf *, enum rpcrdma_chunktype);
+int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
+			      struct rpcrdma_req *req, u32 hdrlen,
+			      struct xdr_buf *xdr,
+			      enum rpcrdma_chunktype rtype);
 void rpcrdma_unmap_sges(struct rpcrdma_ia *, struct rpcrdma_req *);
 int rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst);
 void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *);
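
For reference, the prototype change flips call sites from a boolean test that
has to invent an errno to plain errno propagation. A condensed before/after
drawn from the hunks above (surrounding context elided; hdrlen stands in for
the per-caller length argument):

	/* before: bool return, so the caller picks the errno */
	if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req, hdrlen,
				       &rqst->rq_snd_buf, rtype))
		return -EIO;

	/* after: the function reports its own errno */
	ret = rpcrdma_prepare_send_sges(r_xprt, req, hdrlen,
					&rqst->rq_snd_buf, rtype);
	if (ret)
		return ret;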