author		Chuck Lever <chuck.lever@oracle.com>	2015-01-13 11:03:37 -0500
committer	J. Bruce Fields <bfields@redhat.com>	2015-01-15 15:01:47 -0500
commit		0b056c224bea63060ce8a981e84193c93fac6f5d (patch)
tree		001cbe890f90f8624ae71c6fb0bc19d447896c06 /net/sunrpc
parent		61edbcb7c7f4efb65df4ad793d007237f9fa311f (diff)
svcrdma: Support RDMA_NOMSG requests
Currently the Linux server cannot decode RDMA_NOMSG type requests.
Operations whose length exceeds the fixed size of RDMA SEND buffers,
like large NFSv4 CREATE(NF4LNK) operations, must be conveyed via
RDMA_NOMSG.

For an RDMA_MSG type request, the client sends the RPC/RDMA header,
the RPC header, and some or all of the NFS arguments via RDMA SEND.
For an RDMA_NOMSG type request, the client sends just the RPC/RDMA
header via RDMA SEND. The request's read list contains elements for
the entire RPC message, including the RPC header.

NFSD expects the RPC/RDMA header and RPC header to be contiguous in
page zero of the XDR buffer. Add logic in the RDMA READ path to make
the read list contents land where the server prefers, when the
incoming message is an RDMA_NOMSG type message.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
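For context, the receiver learns which framing the client chose from the
rm_type field of the RPC/RDMA header, which always arrives in the RDMA
SEND (RECV) buffer. Below is a minimal, self-contained sketch of that
classification; the struct and constants are hypothetical stand-ins for
the kernel's struct rpcrdma_msg and enum rpcrdma_proc (RDMA_MSG = 0,
RDMA_NOMSG = 1 per RFC 5666), not the actual NFSD code path.

	/* Sketch: classify an inbound RPC-over-RDMA message by its proc
	 * type. Layout mirrors the fixed portion of the RPC/RDMA header;
	 * names are illustrative, not the kernel's. */
	#include <stdint.h>
	#include <arpa/inet.h>		/* ntohl() */

	enum { SKETCH_RDMA_MSG = 0, SKETCH_RDMA_NOMSG = 1 };

	struct rpcrdma_hdr_fixed {
		uint32_t rm_xid;	/* mirrors the RPC XID */
		uint32_t rm_vers;	/* RPC/RDMA version (1) */
		uint32_t rm_credit;	/* flow-control credit value */
		uint32_t rm_type;	/* RDMA_MSG, RDMA_NOMSG, ... */
	};

	/* Nonzero when the RPC header itself travels in the read list
	 * (RDMA_NOMSG) instead of inline in the SEND buffer (RDMA_MSG). */
	static int rpc_header_is_in_read_list(const void *recv_buf)
	{
		const struct rpcrdma_hdr_fixed *hdr = recv_buf;

		return ntohl(hdr->rm_type) == SKETCH_RDMA_NOMSG;
	}

An RDMA_NOMSG receiver must therefore arrange page zero so the pulled-in
RPC header ends up contiguous with the RPC/RDMA header, which is what the
patch below does.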
Diffstat (limited to 'net/sunrpc')
-rw-r--r--	net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 39
1 file changed, 36 insertions(+), 3 deletions(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index a67dd1a081dd..36cf51a3eab7 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -60,6 +60,7 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
 			       struct svc_rdma_op_ctxt *ctxt,
 			       u32 byte_count)
 {
+	struct rpcrdma_msg *rmsgp;
 	struct page *page;
 	u32 bc;
 	int sge_no;
@@ -82,7 +83,14 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
 	/* If data remains, store it in the pagelist */
 	rqstp->rq_arg.page_len = bc;
 	rqstp->rq_arg.page_base = 0;
-	rqstp->rq_arg.pages = &rqstp->rq_pages[1];
+
+	/* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data */
+	rmsgp = (struct rpcrdma_msg *)rqstp->rq_arg.head[0].iov_base;
+	if (be32_to_cpu(rmsgp->rm_type) == RDMA_NOMSG)
+		rqstp->rq_arg.pages = &rqstp->rq_pages[0];
+	else
+		rqstp->rq_arg.pages = &rqstp->rq_pages[1];
+
 	sge_no = 1;
 	while (bc && sge_no < ctxt->count) {
 		page = ctxt->pages[sge_no];
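The hunk above is the RECV-side half of the fix: when rm_type says
RDMA_NOMSG, the argument buffer's page list starts at page zero, the page
that already holds the RPC/RDMA header, so the subsequent RDMA READ
payload lands contiguously behind it; otherwise inline data begins at
page one as before. A hedged model of just that decision, with
illustrative names standing in for the kernel's svc_rqst fields:

	/* Sketch of the page-list choice in rdma_build_arg_xdr():
	 *
	 *   RDMA_MSG:   page 0 = RPC/RDMA hdr + RPC hdr + inline args,
	 *               payload pages begin at rq_pages[1]
	 *   RDMA_NOMSG: page 0 = RPC/RDMA hdr only,
	 *               READ payload must begin at rq_pages[0]
	 */
	#include <stdbool.h>

	struct arg_buf_model {
		void	*rq_pages[257];		/* page 0 = RECV buffer */
		void	**payload_pages;	/* start of payload pages */
	};

	static void choose_payload_start(struct arg_buf_model *arg,
					 bool nomsg)
	{
		arg->payload_pages = nomsg ? &arg->rq_pages[0]
					   : &arg->rq_pages[1];
	}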
@@ -383,7 +391,6 @@ static int rdma_read_chunks(struct svcxprt_rdma *xprt,
 	 */
 	head->arg.head[0] = rqstp->rq_arg.head[0];
 	head->arg.tail[0] = rqstp->rq_arg.tail[0];
-	head->arg.pages = &head->pages[head->count];
 	head->hdr_count = head->count;
 	head->arg.page_base = 0;
 	head->arg.page_len = 0;
@@ -393,9 +400,17 @@ static int rdma_read_chunks(struct svcxprt_rdma *xprt,
 	ch = (struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];
 	position = be32_to_cpu(ch->rc_position);
 
+	/* RDMA_NOMSG: RDMA READ data should land just after RDMA RECV data */
+	if (position == 0) {
+		head->arg.pages = &head->pages[0];
+		page_offset = head->byte_len;
+	} else {
+		head->arg.pages = &head->pages[head->count];
+		page_offset = 0;
+	}
+
 	ret = 0;
 	page_no = 0;
-	page_offset = 0;
 	for (; ch->rc_discrim != xdr_zero; ch++) {
 		if (be32_to_cpu(ch->rc_position) != position)
 			goto err;
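In rdma_read_chunks() the discriminator is the chunk position rather than
rm_type: a read chunk at XDR position 0 can only be carrying the RPC
message from its first byte, which is exactly the RDMA_NOMSG case. The
first READ must then be offset past the bytes already received via RDMA
SEND. A small sketch of that offset rule (head->byte_len in the hunk
above is the RECV'd byte count; the function name here is illustrative):

	/* Sketch: where in page 0 should the first RDMA READ land?
	 * recv_byte_len is what already arrived via RDMA SEND (for an
	 * RDMA_NOMSG request, just the RPC/RDMA header). */
	static unsigned int first_read_page_offset(unsigned int chunk_position,
						   unsigned int recv_byte_len)
	{
		/* Position 0 means the chunk holds the whole RPC message,
		 * so the READ payload starts right after the RECV data. */
		return chunk_position == 0 ? recv_byte_len : 0;
	}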
@@ -418,7 +433,10 @@ static int rdma_read_chunks(struct svcxprt_rdma *xprt,
 			head->arg.buflen += ret;
 		}
 	}
+
 	ret = 1;
+	head->position = position;
+
 err:
 	/* Detach arg pages. svc_recv will replenish them */
 	for (page_no = 0;
@@ -465,6 +483,21 @@ static int rdma_read_complete(struct svc_rqst *rqstp,
 		put_page(rqstp->rq_pages[page_no]);
 		rqstp->rq_pages[page_no] = head->pages[page_no];
 	}
+
+	/* Adjustments made for RDMA_NOMSG type requests */
+	if (head->position == 0) {
+		if (head->arg.len <= head->sge[0].length) {
+			head->arg.head[0].iov_len = head->arg.len -
+						    head->byte_len;
+			head->arg.page_len = 0;
+		} else {
+			head->arg.head[0].iov_len = head->sge[0].length -
+						    head->byte_len;
+			head->arg.page_len = head->arg.len -
+					     head->sge[0].length;
+		}
+	}
+
 	/* Point rq_arg.pages past header */
 	rdma_fix_xdr_pad(&head->arg);
 	rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count];
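Finally, rdma_read_complete() rebalances the head and page lengths once
all READs have finished. As a hedged worked example with made-up sizes:
if the RECV'd RPC/RDMA header is 104 bytes (head->byte_len), the first
SGE maps one 4096-byte page (head->sge[0].length), and the reassembled
message totals 5000 bytes (head->arg.len), the message overflows page
zero, so head[0].iov_len becomes 4096 - 104 = 3992 and page_len becomes
5000 - 4096 = 904. The same arithmetic, restated as a standalone check:

	/* Hypothetical re-statement of the RDMA_NOMSG length fixup in the
	 * hunk above; field names are shortened for illustration. */
	#include <assert.h>

	struct nomsg_split { unsigned int head_iov_len, page_len; };

	static struct nomsg_split
	nomsg_fixup(unsigned int arg_len, unsigned int sge0_len,
		    unsigned int byte_len)
	{
		struct nomsg_split s;

		if (arg_len <= sge0_len) {	/* fits in the first page */
			s.head_iov_len = arg_len - byte_len;
			s.page_len = 0;
		} else {			/* spills onto later pages */
			s.head_iov_len = sge0_len - byte_len;
			s.page_len = arg_len - sge0_len;
		}
		return s;
	}

	int main(void)
	{
		struct nomsg_split s = nomsg_fixup(5000, 4096, 104);

		assert(s.head_iov_len == 3992 && s.page_len == 904);
		return 0;
	}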