path: root/net/sunrpc/xprtrdma
author	Chuck Lever <chuck.lever@oracle.com>	2015-08-03 13:03:58 -0400
committer	Anna Schumaker <Anna.Schumaker@Netapp.com>	2015-08-05 16:21:27 -0400
commit	02eb57d8f44caa582e297f51f3555d47767c5fe9 (patch)
tree	498f9292c46c4a0a824e82da9d2c9444e59645ca /net/sunrpc/xprtrdma
parent	5457ced0b504b41afe9439a6533066dea2fc0e1a (diff)
xprtrdma: Always provide a write list when sending NFS READ
The client has been setting up a reply chunk for NFS READs that are smaller than the inline threshold. This is not efficient: both the server and client CPUs have to copy the reply's data payload into and out of the memory region that is then transferred via RDMA.

Using the write list, the data payload is moved by the device and no extra data copying is necessary.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Devesh Sharma <devesh.sharma@avagotech.com>
Reviewed-By: Sagi Grimberg <sagig@mellanox.com>
Tested-by: Devesh Sharma <devesh.sharma@avagotech.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
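For context, here is a minimal stand-alone C sketch of the chunk-selection order described above, assuming a 1 KB inline threshold for illustration. It shows the post-patch ordering: an NFS READ reply always gets a write list, a small reply stays inline, and any other large reply falls back to a reply chunk. The enum, struct, and helper names are illustrative stand-ins, not the kernel's actual rpcrdma code; the real change is in the diff below.

/*
 * Simplified stand-ins for the kernel's rpcrdma chunk types and
 * receive buffer -- not the real implementation.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdbool.h>

enum chunk_type { NOCH, WRITECH, REPLYCH };

struct reply_buf {
	bool is_read;      /* models the XDRBUF_READ flag on rq_rcv_buf */
	size_t expected;   /* models the expected reply size in bytes */
};

#define INLINE_THRESHOLD 1024	/* assumed value, for illustration only */

/*
 * Post-patch ordering: READ replies always use write chunk(s), so the
 * payload is placed by the RDMA device instead of being copied through
 * a reply chunk; small replies stay inline; everything else returns as
 * a single reply chunk.
 */
static enum chunk_type choose_wtype(const struct reply_buf *buf)
{
	if (buf->is_read)
		return WRITECH;
	if (buf->expected <= INLINE_THRESHOLD)
		return NOCH;
	return REPLYCH;
}

int main(void)
{
	struct reply_buf small_read = { .is_read = true,  .expected = 512 };
	struct reply_buf small_attr = { .is_read = false, .expected = 128 };
	struct reply_buf large_acl  = { .is_read = false, .expected = 8192 };

	printf("small READ    -> %d (WRITECH)\n", choose_wtype(&small_read));
	printf("small GETATTR -> %d (NOCH)\n",    choose_wtype(&small_attr));
	printf("large GETACL  -> %d (REPLYCH)\n", choose_wtype(&large_acl));
	return 0;
}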
Diffstat (limited to 'net/sunrpc/xprtrdma')
-rw-r--r--	net/sunrpc/xprtrdma/rpc_rdma.c	21
1 file changed, 4 insertions(+), 17 deletions(-)
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 950b654bad80..e7cf976aff47 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -418,28 +418,15 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	/*
 	 * Chunks needed for results?
 	 *
+	 * o Read ops return data as write chunk(s), header as inline.
 	 * o If the expected result is under the inline threshold, all ops
 	 *   return as inline (but see later).
 	 * o Large non-read ops return as a single reply chunk.
-	 * o Large read ops return data as write chunk(s), header as inline.
-	 *
-	 * Note: the NFS code sending down multiple result segments implies
-	 * the op is one of read, readdir[plus], readlink or NFSv4 getacl.
-	 */
-
-	/*
-	 * This code can handle read chunks, write chunks OR reply
-	 * chunks -- only one type. If the request is too big to fit
-	 * inline, then we will choose read chunks. If the request is
-	 * a READ, then use write chunks to separate the file data
-	 * into pages; otherwise use reply chunks.
 	 */
-	if (rpcrdma_results_inline(rqst))
-		wtype = rpcrdma_noch;
-	else if (rqst->rq_rcv_buf.page_len == 0)
-		wtype = rpcrdma_replych;
-	else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
+	if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
 		wtype = rpcrdma_writech;
+	else if (rpcrdma_results_inline(rqst))
+		wtype = rpcrdma_noch;
 	else
 		wtype = rpcrdma_replych;
 