diff options
author | Chuck Lever <chuck.lever@oracle.com> | 2015-01-13 11:03:45 -0500 |
---|---|---|
committer | J. Bruce Fields <bfields@redhat.com> | 2015-01-15 15:01:48 -0500 |
commit | fcbeced5b4df5e7f05ed8a18b69acfac733aab11 (patch) | |
tree | 081d0b131a10270b55746c4568fcb80085b3edf2 /net/sunrpc | |
parent | 0b056c224bea63060ce8a981e84193c93fac6f5d (diff) |
svcrdma: Move read list XDR round-up logic
This is a pre-requisite for a subsequent patch.
Read list XDR round-up needs to be done _before_ additional inline
content is copied to the end of the XDR buffer's page list. Move
the logic added by commit e560e3b510d2 ("svcrdma: Add zero padding
if the client doesn't send it").
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Diffstat (limited to 'net/sunrpc')
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 37 |
1 file changed, 9 insertions, 28 deletions
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 36cf51a3eab7..a345cadad4dd 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -43,7 +43,6 @@ | |||
43 | #include <linux/sunrpc/debug.h> | 43 | #include <linux/sunrpc/debug.h> |
44 | #include <linux/sunrpc/rpc_rdma.h> | 44 | #include <linux/sunrpc/rpc_rdma.h> |
45 | #include <linux/spinlock.h> | 45 | #include <linux/spinlock.h> |
46 | #include <linux/highmem.h> | ||
47 | #include <asm/unaligned.h> | 46 | #include <asm/unaligned.h> |
48 | #include <rdma/ib_verbs.h> | 47 | #include <rdma/ib_verbs.h> |
49 | #include <rdma/rdma_cm.h> | 48 | #include <rdma/rdma_cm.h> |
@@ -434,6 +433,15 @@ static int rdma_read_chunks(struct svcxprt_rdma *xprt, | |||
434 | } | 433 | } |
435 | } | 434 | } |
436 | 435 | ||
436 | /* Read list may need XDR round-up (see RFC 5666, s. 3.7) */ | ||
437 | if (page_offset & 3) { | ||
438 | u32 pad = 4 - (page_offset & 3); | ||
439 | |||
440 | head->arg.page_len += pad; | ||
441 | head->arg.len += pad; | ||
442 | head->arg.buflen += pad; | ||
443 | } | ||
444 | |||
437 | ret = 1; | 445 | ret = 1; |
438 | head->position = position; | 446 | head->position = position; |
439 | 447 | ||
@@ -446,32 +454,6 @@ static int rdma_read_chunks(struct svcxprt_rdma *xprt, | |||
446 | return ret; | 454 | return ret; |
447 | } | 455 | } |
448 | 456 | ||
449 | /* | ||
450 | * To avoid a separate RDMA READ just for a handful of zero bytes, | ||
451 | * RFC 5666 section 3.7 allows the client to omit the XDR zero pad | ||
452 | * in chunk lists. | ||
453 | */ | ||
454 | static void | ||
455 | rdma_fix_xdr_pad(struct xdr_buf *buf) | ||
456 | { | ||
457 | unsigned int page_len = buf->page_len; | ||
458 | unsigned int size = (XDR_QUADLEN(page_len) << 2) - page_len; | ||
459 | unsigned int offset, pg_no; | ||
460 | char *p; | ||
461 | |||
462 | if (size == 0) | ||
463 | return; | ||
464 | |||
465 | pg_no = page_len >> PAGE_SHIFT; | ||
466 | offset = page_len & ~PAGE_MASK; | ||
467 | p = page_address(buf->pages[pg_no]); | ||
468 | memset(p + offset, 0, size); | ||
469 | |||
470 | buf->page_len += size; | ||
471 | buf->buflen += size; | ||
472 | buf->len += size; | ||
473 | } | ||
474 | |||
475 | static int rdma_read_complete(struct svc_rqst *rqstp, | 457 | static int rdma_read_complete(struct svc_rqst *rqstp, |
476 | struct svc_rdma_op_ctxt *head) | 458 | struct svc_rdma_op_ctxt *head) |
477 | { | 459 | { |
@@ -499,7 +481,6 @@ static int rdma_read_complete(struct svc_rqst *rqstp, | |||
499 | } | 481 | } |
500 | 482 | ||
501 | /* Point rq_arg.pages past header */ | 483 | /* Point rq_arg.pages past header */ |
502 | rdma_fix_xdr_pad(&head->arg); | ||
503 | rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count]; | 484 | rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count]; |
504 | rqstp->rq_arg.page_len = head->arg.page_len; | 485 | rqstp->rq_arg.page_len = head->arg.page_len; |
505 | rqstp->rq_arg.page_base = head->arg.page_base; | 486 | rqstp->rq_arg.page_base = head->arg.page_base; |