author     Steve Wise <swise@opengridcomputing.com>      2009-05-14 17:34:28 -0400
committer  J. Bruce Fields <bfields@citi.umich.edu>      2009-05-27 18:57:24 -0400
commit     98779be861a05c4cb75bed916df72ec0cba8b53d
tree       6c7f51832c835cc3e98796fff83abc440edb3255
parent     7f4218354fe312b327af06c3d8c95ed5f214c8ca
svcrdma: dma unmap the correct length for the RPCRDMA header page.
The svcrdma module was incorrectly unmapping the RPCRDMA header page. On IBM
pserver systems this causes a resource leak that results in running out of bus
address space (10 cthon iterations will reproduce it). The code was mapping the
full page but only unmapping the actual header length. The fix is to only map
the header length.

I also cleaned up the use of ib_dma_map_page() calls, since the unmap logic
always uses ib_dma_unmap_single(). I made these symmetrical.

Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
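For context, the symmetry the patch establishes can be sketched as a matched
pair of helpers. This is a minimal illustration, not code from the patch:
map_reply_hdr() and unmap_reply_hdr() are hypothetical names invented here,
while the svcxprt_rdma fields, svc_rdma_xdr_get_reply_hdr_len(), and the
ib_dma_* calls are taken from the patched code below.

	/*
	 * Sketch only. The leak came from an asymmetric pair: the header
	 * page was mapped for PAGE_SIZE but unmapped for only the actual
	 * header length, so part of the bus address range was never
	 * released. Mapping exactly the header length keeps map and unmap
	 * symmetrical.
	 */
	static int map_reply_hdr(struct svcxprt_rdma *rdma,
				 struct svc_rdma_op_ctxt *ctxt,
				 struct page *page,
				 struct rpcrdma_msg *rdma_resp)
	{
		/* Map only the header bytes, not the whole page ... */
		ctxt->sge[0].lkey = rdma->sc_dma_lkey;
		ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
		ctxt->sge[0].addr =
			ib_dma_map_single(rdma->sc_cm_id->device,
					  page_address(page),
					  ctxt->sge[0].length, DMA_TO_DEVICE);
		if (ib_dma_mapping_error(rdma->sc_cm_id->device,
					 ctxt->sge[0].addr))
			return -ENOMEM;
		return 0;
	}

	static void unmap_reply_hdr(struct svcxprt_rdma *rdma,
				    struct svc_rdma_op_ctxt *ctxt)
	{
		/* ... so the unmap length always matches the map length. */
		ib_dma_unmap_single(rdma->sc_cm_id->device, ctxt->sge[0].addr,
				    ctxt->sge[0].length, DMA_TO_DEVICE);
	}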
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_sendto.c    | 12
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c | 10
2 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 8b510c5e8777..f11be72a1a80 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -128,7 +128,8 @@ static int fast_reg_xdr(struct svcxprt_rdma *xprt,
 		page_bytes -= sge_bytes;
 
 		frmr->page_list->page_list[page_no] =
-			ib_dma_map_page(xprt->sc_cm_id->device, page, 0,
+			ib_dma_map_single(xprt->sc_cm_id->device,
+					  page_address(page),
 					PAGE_SIZE, DMA_TO_DEVICE);
 		if (ib_dma_mapping_error(xprt->sc_cm_id->device,
 					 frmr->page_list->page_list[page_no]))
@@ -532,18 +533,17 @@ static int send_reply(struct svcxprt_rdma *rdma,
 	clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
 
 	/* Prepare the SGE for the RPCRDMA Header */
+	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
+	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
 	ctxt->sge[0].addr =
-		ib_dma_map_page(rdma->sc_cm_id->device,
-				page, 0, PAGE_SIZE, DMA_TO_DEVICE);
+		ib_dma_map_single(rdma->sc_cm_id->device, page_address(page),
+				  ctxt->sge[0].length, DMA_TO_DEVICE);
 	if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
 		goto err;
 	atomic_inc(&rdma->sc_dma_used);
 
 	ctxt->direction = DMA_TO_DEVICE;
 
-	ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
-	ctxt->sge[0].lkey = rdma->sc_dma_lkey;
-
 	/* Determine how many of our SGE are to be transmitted */
 	for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
 		sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 4b0c2fa15e0b..5151f9f6c573 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -500,8 +500,8 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 		BUG_ON(sge_no >= xprt->sc_max_sge);
 		page = svc_rdma_get_page();
 		ctxt->pages[sge_no] = page;
-		pa = ib_dma_map_page(xprt->sc_cm_id->device,
-				     page, 0, PAGE_SIZE,
+		pa = ib_dma_map_single(xprt->sc_cm_id->device,
+				     page_address(page), PAGE_SIZE,
 				     DMA_FROM_DEVICE);
 		if (ib_dma_mapping_error(xprt->sc_cm_id->device, pa))
 			goto err_put_ctxt;
@@ -1315,8 +1315,8 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
 
 	/* Prepare SGE for local address */
-	sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
-				   p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	sge.addr = ib_dma_map_single(xprt->sc_cm_id->device,
+				   page_address(p), PAGE_SIZE, DMA_FROM_DEVICE);
 	if (ib_dma_mapping_error(xprt->sc_cm_id->device, sge.addr)) {
 		put_page(p);
 		return;
@@ -1343,7 +1343,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	if (ret) {
 		dprintk("svcrdma: Error %d posting send for protocol error\n",
 			ret);
-		ib_dma_unmap_page(xprt->sc_cm_id->device,
+		ib_dma_unmap_single(xprt->sc_cm_id->device,
 				  sge.addr, PAGE_SIZE,
 				  DMA_FROM_DEVICE);
 		svc_rdma_put_context(ctxt, 1);