path: root/net/sunrpc/xprtrdma
author     Tom Tucker <tom@ogc.us>                 2010-10-12 16:33:57 -0400
committer  J. Bruce Fields <bfields@redhat.com>    2010-10-18 19:51:32 -0400
commit     4a84386fc27fdc7d2ea69fdbc641008e8f943159 (patch)
tree       4d237d58f695713c72a01631d010e6387db820c7 /net/sunrpc/xprtrdma
parent     b432e6b3d9c1b4271c43f02b45136f33a8ed5820 (diff)
svcrdma: Cleanup DMA unmapping in error paths.
There are several error paths in the code that do not unmap DMA. This patch adds calls to svc_rdma_unmap_dma to free these DMA contexts.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
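For context, the leak being fixed follows a familiar shape: a context's pages are DMA-mapped up front, but an early error return skips the unmap before the context is recycled. The following is a minimal, self-contained user-space sketch of that cleanup ordering, not the kernel's svcrdma code; the names op_ctxt, dma_map, dma_unmap, post_wr, and send_one are illustrative stand-ins.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for an svcrdma-style context and its DMA mapping. */
struct op_ctxt {
	void *dma_mapping;
};

static int dma_map(struct op_ctxt *ctxt)
{
	ctxt->dma_mapping = malloc(64);
	return ctxt->dma_mapping ? 0 : -1;
}

static void dma_unmap(struct op_ctxt *ctxt)
{
	free(ctxt->dma_mapping);
	ctxt->dma_mapping = NULL;
}

static int post_wr(struct op_ctxt *ctxt)
{
	(void)ctxt;
	return -1;	/* simulate a failed post of the work request */
}

/* The shape of the fix: once the mapping exists, every exit path that
 * gives up on the request must unmap before the context is recycled. */
static int send_one(struct op_ctxt *ctxt)
{
	if (dma_map(ctxt))
		return -1;	/* nothing mapped yet, nothing to undo */
	if (post_wr(ctxt))
		goto err;
	return 0;
 err:
	dma_unmap(ctxt);	/* analogous to the added svc_rdma_unmap_dma() */
	return -1;		/* caller may now safely recycle ctxt */
}

int main(void)
{
	struct op_ctxt ctxt = { 0 };
	int ret = send_one(&ctxt);

	printf("send_one returned %d, mapping leaked: %s\n",
	       ret, ctxt.dma_mapping ? "yes" : "no");
	return 0;
}

The point mirrored from the patch is simply the ordering: in each error path below, svc_rdma_unmap_dma(ctxt) runs before svc_rdma_put_context(), so neither the mapping nor the sc_dma_used accounting is leaked.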
Diffstat (limited to 'net/sunrpc/xprtrdma')
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_recvfrom.c   |  1
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_sendto.c     |  2
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c  | 29
3 files changed, 17 insertions(+), 15 deletions(-)
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 926bdb44f3de..df67211c4baf 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -495,6 +495,7 @@ next_sge:
 		printk(KERN_ERR "svcrdma: Error %d posting RDMA_READ\n",
 		       err);
 		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
+		svc_rdma_unmap_dma(ctxt);
 		svc_rdma_put_context(ctxt, 0);
 		goto out;
 	}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index d4f5e0e43f09..249a835b703f 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -367,6 +367,8 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
 		goto err;
 	return 0;
  err:
+	svc_rdma_unmap_dma(ctxt);
+	svc_rdma_put_frmr(xprt, vec->frmr);
 	svc_rdma_put_context(ctxt, 0);
 	/* Fatal error, close transport */
 	return -EIO;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index e87e000e984c..22f65cc46fe5 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -512,9 +512,9 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 		ctxt->sge[sge_no].addr = pa;
 		ctxt->sge[sge_no].length = PAGE_SIZE;
 		ctxt->sge[sge_no].lkey = xprt->sc_dma_lkey;
+		ctxt->count = sge_no + 1;
 		buflen += PAGE_SIZE;
 	}
-	ctxt->count = sge_no;
 	recv_wr.next = NULL;
 	recv_wr.sg_list = &ctxt->sge[0];
 	recv_wr.num_sge = ctxt->count;
@@ -530,6 +530,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
 	return ret;
 
  err_put_ctxt:
+	svc_rdma_unmap_dma(ctxt);
 	svc_rdma_put_context(ctxt, 1);
 	return -ENOMEM;
 }
@@ -1308,7 +1309,6 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 			 enum rpcrdma_errcode err)
 {
 	struct ib_send_wr err_wr;
-	struct ib_sge sge;
 	struct page *p;
 	struct svc_rdma_op_ctxt *ctxt;
 	u32 *va;
@@ -1321,26 +1321,27 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	/* XDR encode error */
 	length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
 
+	ctxt = svc_rdma_get_context(xprt);
+	ctxt->direction = DMA_FROM_DEVICE;
+	ctxt->count = 1;
+	ctxt->pages[0] = p;
+
 	/* Prepare SGE for local address */
-	sge.addr = ib_dma_map_page(xprt->sc_cm_id->device,
-				   p, 0, PAGE_SIZE, DMA_FROM_DEVICE);
-	if (ib_dma_mapping_error(xprt->sc_cm_id->device, sge.addr)) {
+	ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
+					    p, 0, length, DMA_FROM_DEVICE);
+	if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
 		put_page(p);
 		return;
 	}
 	atomic_inc(&xprt->sc_dma_used);
-	sge.lkey = xprt->sc_dma_lkey;
-	sge.length = length;
-
-	ctxt = svc_rdma_get_context(xprt);
-	ctxt->count = 1;
-	ctxt->pages[0] = p;
+	ctxt->sge[0].lkey = xprt->sc_dma_lkey;
+	ctxt->sge[0].length = length;
 
 	/* Prepare SEND WR */
 	memset(&err_wr, 0, sizeof err_wr);
 	ctxt->wr_op = IB_WR_SEND;
 	err_wr.wr_id = (unsigned long)ctxt;
-	err_wr.sg_list = &sge;
+	err_wr.sg_list = ctxt->sge;
 	err_wr.num_sge = 1;
 	err_wr.opcode = IB_WR_SEND;
 	err_wr.send_flags = IB_SEND_SIGNALED;
@@ -1350,9 +1351,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
 	if (ret) {
 		dprintk("svcrdma: Error %d posting send for protocol error\n",
 			ret);
-		ib_dma_unmap_page(xprt->sc_cm_id->device,
-				  sge.addr, PAGE_SIZE,
-				  DMA_FROM_DEVICE);
+		svc_rdma_unmap_dma(ctxt);
 		svc_rdma_put_context(ctxt, 1);
 	}
 }