aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChuck Lever <chuck.lever@oracle.com>2016-06-29 13:54:58 -0400
committerAnna Schumaker <Anna.Schumaker@Netapp.com>2016-07-11 15:50:43 -0400
commit64695bde6c289a62250eb0a078916703c8cf639a (patch)
treeb82a989d586d9e3174f4d14f65c9da220d8ac5de
parentcfabe2c634e617765af39ea1cb2920bdcbc5bb7e (diff)
xprtrdma: Clean up fixup_copy_count accounting
fixup_copy_count should count only the number of bytes copied to the
page list. The head and tail are now always handled without a data
copy.

And the debugging at the end of rpcrdma_inline_fixup() is also no
longer necessary, since copy_len will be non-zero when there is reply
data in the tail (a normal and valid case).

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
-rw-r--r--	net/sunrpc/xprtrdma/rpc_rdma.c	26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index a0e811dd7b84..dac2990ae2f7 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -755,11 +755,14 @@ rpcrdma_count_chunks(struct rpcrdma_rep *rep, int wrchunk, __be32 **iptrp)
  * many cases this function simply updates iov_base pointers in
  * rq_rcv_buf to point directly to the received reply data, to
  * avoid copying reply data.
+ *
+ * Returns the count of bytes which had to be memcopied.
  */
-static void
+static unsigned long
 rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
 {
-	int i, npages, curlen, olen;
+	unsigned long fixup_copy_count;
+	int i, npages, curlen;
 	char *destp;
 	struct page **ppages;
 	int page_base;
@@ -781,13 +784,10 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
 	srcp += curlen;
 	copy_len -= curlen;
 
-	olen = copy_len;
-	i = 0;
-	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
 	page_base = rqst->rq_rcv_buf.page_base;
 	ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
 	page_base &= ~PAGE_MASK;
-
+	fixup_copy_count = 0;
 	if (copy_len && rqst->rq_rcv_buf.page_len) {
 		int pagelist_len;
 
@@ -795,7 +795,7 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
 		if (pagelist_len > copy_len)
 			pagelist_len = copy_len;
 		npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
-		for (; i < npages; i++) {
+		for (i = 0; i < npages; i++) {
 			curlen = PAGE_SIZE - page_base;
 			if (curlen > pagelist_len)
 				curlen = pagelist_len;
@@ -809,6 +809,7 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
 			kunmap_atomic(destp);
 			srcp += curlen;
 			copy_len -= curlen;
+			fixup_copy_count += curlen;
 			pagelist_len -= curlen;
 			if (!pagelist_len)
 				break;
@@ -833,10 +834,7 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
 		rqst->rq_private_buf.tail[0].iov_base = srcp;
 	}
 
-	if (copy_len)
-		dprintk("RPC:       %s: %d bytes in"
-			" %d extra segments (%d lost)\n",
-			__func__, olen, i, copy_len);
+	return fixup_copy_count;
 }
 
 void
@@ -999,8 +997,10 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
 		rep->rr_len -= RPCRDMA_HDRLEN_MIN;
 		status = rep->rr_len;
 	}
-	/* Fix up the rpc results for upper layer */
-	rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
+
+	r_xprt->rx_stats.fixup_copy_count +=
+		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len,
+				     rdmalen);
 	break;
 
 	case rdma_nomsg: