author		Chuck Lever <chuck.lever@oracle.com>	2016-03-01 13:05:54 -0500
committer	J. Bruce Fields <bfields@redhat.com>	2016-03-01 16:06:34 -0500
commit		f6763c29ab86c3ee27760a06e07bbeab47635b61
tree		34252ca2fcf9334f18cb2be875ecf340bb0502ba
parent		cf570a93748ab95cf5d13d3d8058875f970f3a66
svcrdma: Do not send Write chunk XDR pad with inline content
The NFS server's XDR encoder adds an XDR pad for content in the
xdr_buf page list at the beginning of the xdr_buf's tail buffer.
On RDMA transports, Write chunks are sent separately and without an
XDR pad.
If a Write chunk is being sent, strip off the pad in the tail buffer
so that inline content following the Write chunk remains XDR-aligned
when it is sent to the client.
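For reference, the pad in question is the standard XDR 4-byte alignment
pad: variable-length data is rounded up to a multiple of four octets, so
page-list content of length page_len is followed by 4 - (page_len & 3)
pad bytes whenever page_len is not already aligned. A minimal user-space
sketch of that computation, equivalent to the xdr_padsize() helper that
appears in the svc_rdma_sendto.c hunk below (the sample lengths are
made up for illustration):

#include <stdio.h>

/* Same 4-byte round-up rule the kernel's xdr_padsize() helper implements. */
static unsigned int pad_size(unsigned int len)
{
	return (len & 3) ? (4 - (len & 3)) : 0;
}

int main(void)
{
	/* Hypothetical page-list lengths, e.g. READ payload sizes. */
	unsigned int lens[] = { 4096, 1001, 1002, 1003 };

	for (int i = 0; i < 4; i++)
		printf("page_len %u -> XDR pad %u bytes\n",
		       lens[i], pad_size(lens[i]));
	return 0;
}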
BugLink: https://bugzilla.linux-nfs.org/show_bug.cgi?id=294
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
-rw-r--r--   include/linux/sunrpc/svc_rdma.h             |  2
-rw-r--r--   net/sunrpc/xprtrdma/svc_rdma_backchannel.c  |  2
-rw-r--r--   net/sunrpc/xprtrdma/svc_rdma_sendto.c       | 22
3 files changed, 19 insertions(+), 7 deletions(-)
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 5322fea6fe4c..40b678584041 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -224,7 +224,7 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
 
 /* svc_rdma_sendto.c */
 extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *,
-			    struct svc_rdma_req_map *);
+			    struct svc_rdma_req_map *, bool);
 extern int svc_rdma_sendto(struct svc_rqst *);
 extern struct rpcrdma_read_chunk *
 	svc_rdma_get_read_chunk(struct rpcrdma_msg *);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index 65a7c232a345..de3919624fac 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -107,7 +107,7 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
 	int ret;
 
 	vec = svc_rdma_get_req_map(rdma);
-	ret = svc_rdma_map_xdr(rdma, sndbuf, vec);
+	ret = svc_rdma_map_xdr(rdma, sndbuf, vec, false);
 	if (ret)
 		goto out_err;
 
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 86fea5c59125..a8fab9968891 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -57,7 +57,8 @@ static u32 xdr_padsize(u32 len)
 
 int svc_rdma_map_xdr(struct svcxprt_rdma *xprt,
 		     struct xdr_buf *xdr,
-		     struct svc_rdma_req_map *vec)
+		     struct svc_rdma_req_map *vec,
+		     bool write_chunk_present)
 {
 	int sge_no;
 	u32 sge_bytes;
@@ -97,9 +98,20 @@ int svc_rdma_map_xdr(struct svcxprt_rdma *xprt,
 
 	/* Tail SGE */
 	if (xdr->tail[0].iov_len) {
-		vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
-		vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
-		sge_no++;
+		unsigned char *base = xdr->tail[0].iov_base;
+		size_t len = xdr->tail[0].iov_len;
+		u32 xdr_pad = xdr_padsize(xdr->page_len);
+
+		if (write_chunk_present && xdr_pad) {
+			base += xdr_pad;
+			len -= xdr_pad;
+		}
+
+		if (len) {
+			vec->sge[sge_no].iov_base = base;
+			vec->sge[sge_no].iov_len = len;
+			sge_no++;
+		}
 	}
 
 	dprintk("svcrdma: %s: sge_no %d page_no %d "
@@ -594,7 +606,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
 	ctxt = svc_rdma_get_context(rdma);
 	ctxt->direction = DMA_TO_DEVICE;
 	vec = svc_rdma_get_req_map(rdma);
-	ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec);
+	ret = svc_rdma_map_xdr(rdma, &rqstp->rq_res, vec, wr_ary != NULL);
 	if (ret)
 		goto err0;
 	inline_bytes = rqstp->rq_res.len;
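Note the caller in svc_rdma_sendto(): write_chunk_present is passed as
wr_ary != NULL, so the tail pad is stripped only when the reply actually
carries a Write chunk. A toy user-space illustration of why this matters
(the lengths are hypothetical, not taken from the patch): once the page
content travels out-of-band in the Write chunk, the inline stream is the
head followed directly by the tail, and leaving the pad in place would
shift every following tail byte off its 4-byte boundary.

#include <stdio.h>

/* Same 4-byte round-up rule as xdr_padsize() in the patch above. */
static unsigned int pad_size(unsigned int len)
{
	return (len & 3) ? (4 - (len & 3)) : 0;
}

int main(void)
{
	unsigned int head_len = 104;	/* hypothetical inline reply header, 4-aligned */
	unsigned int page_len = 1001;	/* payload carried in the Write chunk */
	unsigned int pad = pad_size(page_len);

	/* Offset of the first tail XDR word within the inline stream. */
	unsigned int with_pad = head_len + pad;
	unsigned int without_pad = head_len;

	printf("pad kept:     tail at offset %u (%saligned)\n",
	       with_pad, with_pad % 4 ? "mis" : "");
	printf("pad stripped: tail at offset %u (%saligned)\n",
	       without_pad, without_pad % 4 ? "mis" : "");
	return 0;
}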