author	Chuck Lever <chuck.lever@oracle.com>	2015-06-04 11:20:39 -0400
committer	J. Bruce Fields <bfields@redhat.com>	2015-06-04 16:55:58 -0400
commit	70747c25a701b563a54c20c4a77efe8292aad151 (patch)
tree	166199a6b9e088a745f2334b9ff7f73d1cfd2fde
parent	276f03e3ba242ebf2cf201cc3c7058d2884912b7 (diff)
svcrdma: Fix byte-swapping in svc_rdma_sendto.c
In send_write_chunks(), we have:

	for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
	     xfer_len && chunk_no < arg_ary->wc_nchunks;
	     chunk_no++) {
		. . .
	}

Note that arg_ary->wc_nchunks is in network byte-order. For the
comparison to work correctly, both sides have to be in native
byte-order.

In send_reply_chunks(), we have:

	write_len = min(xfer_len, htonl(ch->rs_length));

xfer_len is in native byte-order, and ch->rs_length is in network
byte-order. be32_to_cpu() is the correct byte swap for ch->rs_length.

As an additional clean-up, replace ntohl() with be32_to_cpu() in a
few other places.

This appears to address a problem with large-rsize hangs while using
PHYSICAL memory registration. I suspect that is the only registration
mode that uses more than one chunk element.

BugLink: https://bugzilla.linux-nfs.org/show_bug.cgi?id=248
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
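[Editor's note] A minimal userspace sketch (not part of the patch) of why the
unconverted comparison misbehaves: ntohl() stands in here for the kernel's
be32_to_cpu(), and wire_nchunks is a hypothetical stand-in for
arg_ary->wc_nchunks.

	#include <stdint.h>
	#include <stdio.h>
	#include <arpa/inet.h>

	int main(void)
	{
		/* A two-chunk count as it appears on the wire (big-endian). */
		uint32_t wire_nchunks = htonl(2);

		/* Buggy form: on a little-endian host the raw value reads as
		 * 0x02000000 (33554432), so "chunk_no < wire_nchunks" would
		 * loop far past the two real chunks. */
		printf("raw wire value: %u\n", wire_nchunks);

		/* Fixed form: convert to native byte-order first, as the
		 * patch does with be32_to_cpu(arg_ary->wc_nchunks). */
		uint32_t nchunks = ntohl(wire_nchunks);
		for (uint32_t chunk_no = 0; chunk_no < nchunks; chunk_no++)
			printf("processing chunk %u\n", chunk_no);

		return 0;
	}

In kernel code, be32_to_cpu() is also preferred over ntohl() because it pairs
with the __be32 sparse annotation, so static checking can flag a missing or
wrong-direction conversion.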
 net/sunrpc/xprtrdma/svc_rdma_sendto.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 7de33d1af9b6..109e9670be8c 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -240,6 +240,7 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
 	u32 xdr_off;
 	int chunk_off;
 	int chunk_no;
+	int nchunks;
 	struct rpcrdma_write_array *arg_ary;
 	struct rpcrdma_write_array *res_ary;
 	int ret;
@@ -251,14 +252,15 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
 		&rdma_resp->rm_body.rm_chunks[1];
 
 	/* Write chunks start at the pagelist */
+	nchunks = be32_to_cpu(arg_ary->wc_nchunks);
 	for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
-	     xfer_len && chunk_no < arg_ary->wc_nchunks;
+	     xfer_len && chunk_no < nchunks;
 	     chunk_no++) {
 		struct rpcrdma_segment *arg_ch;
 		u64 rs_offset;
 
 		arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
-		write_len = min(xfer_len, ntohl(arg_ch->rs_length));
+		write_len = min(xfer_len, be32_to_cpu(arg_ch->rs_length));
 
 		/* Prepare the response chunk given the length actually
 		 * written */
@@ -270,7 +272,7 @@ static int send_write_chunks(struct svcxprt_rdma *xprt,
 		chunk_off = 0;
 		while (write_len) {
 			ret = send_write(xprt, rqstp,
-					 ntohl(arg_ch->rs_handle),
+					 be32_to_cpu(arg_ch->rs_handle),
 					 rs_offset + chunk_off,
 					 xdr_off,
 					 write_len,
@@ -318,13 +320,13 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
 		&rdma_resp->rm_body.rm_chunks[2];
 
 	/* xdr offset starts at RPC message */
-	nchunks = ntohl(arg_ary->wc_nchunks);
+	nchunks = be32_to_cpu(arg_ary->wc_nchunks);
 	for (xdr_off = 0, chunk_no = 0;
 	     xfer_len && chunk_no < nchunks;
 	     chunk_no++) {
 		u64 rs_offset;
 		ch = &arg_ary->wc_array[chunk_no].wc_target;
-		write_len = min(xfer_len, htonl(ch->rs_length));
+		write_len = min(xfer_len, be32_to_cpu(ch->rs_length));
 
 		/* Prepare the reply chunk given the length actually
 		 * written */
@@ -335,7 +337,7 @@ static int send_reply_chunks(struct svcxprt_rdma *xprt,
 		chunk_off = 0;
 		while (write_len) {
 			ret = send_write(xprt, rqstp,
-					 ntohl(ch->rs_handle),
+					 be32_to_cpu(ch->rs_handle),
 					 rs_offset + chunk_off,
 					 xdr_off,
 					 write_len,