aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSteve Wise <swise@opengridcomputing.com>2015-09-28 17:46:06 -0400
committerJ. Bruce Fields <bfields@redhat.com>2015-09-29 12:55:44 -0400
commitc91aed9896946721bb30705ea2904edb3725dd61 (patch)
tree85ffa36824c43392ef66206e858afec69f9470ac
parent9ffecb10283508260936b96022d4ee43a7798b4c (diff)
svcrdma: handle rdma read with a non-zero initial page offset
The server rdma_read_chunk_lcl() and rdma_read_chunk_frmr() functions were not taking into account the initial page_offset when determining the rdma read length. This resulted in a read whose starting address and length exceeded the base/bounds of the frmr. The server gets an async error from the rdma device and kills the connection, and the client then reconnects and resends. This repeats indefinitely, and the application hangs. Most work loads don't tickle this bug apparently, but one test hit it every time: building the linux kernel on a 16 core node with 'make -j 16 O=/mnt/0' where /mnt/0 is a ramdisk mounted via NFSRDMA. This bug seems to only be tripped with devices having small fastreg page list depths. I didn't see it with mlx4, for instance. Fixes: 0bf4828983df ('svcrdma: refactor marshalling logic') Signed-off-by: Steve Wise <swise@opengridcomputing.com> Tested-by: Chuck Lever <chuck.lever@oracle.com> Cc: stable@vger.kernel.org Signed-off-by: J. Bruce Fields <bfields@redhat.com>
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index cb5174284074..5f6ca47092b0 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -136,7 +136,8 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
 	ctxt->direction = DMA_FROM_DEVICE;
 	ctxt->read_hdr = head;
 	pages_needed = min_t(int, pages_needed, xprt->sc_max_sge_rd);
-	read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);
+	read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
+		     rs_length);
 
 	for (pno = 0; pno < pages_needed; pno++) {
 		int len = min_t(int, rs_length, PAGE_SIZE - pg_off);
@@ -235,7 +236,8 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
 	ctxt->direction = DMA_FROM_DEVICE;
 	ctxt->frmr = frmr;
 	pages_needed = min_t(int, pages_needed, xprt->sc_frmr_pg_list_len);
-	read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);
+	read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
+		     rs_length);
 
 	frmr->kva = page_address(rqstp->rq_arg.pages[pg_no]);
 	frmr->direction = DMA_FROM_DEVICE;