author     Chuck Lever <chuck.lever@oracle.com>           2017-10-20 10:47:47 -0400
committer  Anna Schumaker <Anna.Schumaker@Netapp.com>     2017-11-17 13:47:55 -0500
commit     394b2c77cb761fb1382b0e97b7cdff2dd717b5ee (patch)
tree       7d59d2232665888050bed5a714e304550fc56128 /net/sunrpc
parent     ad99f0530710af72b5bbecda9e770c736e92b328 (diff)
xprtrdma: Fix error handling in rpcrdma_prepare_msg_sges()
When this function fails, it needs to undo the DMA mappings it has
done so far. Otherwise those mappings are leaked.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
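The fix follows the standard unwind-on-partial-failure pattern: every DMA
mapping made before the failure point must be reversed before the function
returns an error. Below is a minimal sketch of that pattern using the
kernel's ib_dma_* verbs; the helper map_send_pages() and its parameters are
illustrative assumptions, not part of xprtrdma.

    /* Sketch only, not xprtrdma code: map each page of a Send buffer
     * list into sge[], and unwind every mapping made so far if any
     * single ib_dma_map_page() call fails.
     */
    #include <linux/dma-direction.h>
    #include <linux/errno.h>
    #include <rdma/ib_verbs.h>

    static int map_send_pages(struct ib_device *device, struct page **pages,
                              unsigned int npages, size_t seg_len,
                              struct ib_sge *sge)
    {
            unsigned int i;

            for (i = 0; i < npages; i++) {
                    sge[i].addr = ib_dma_map_page(device, pages[i], 0,
                                                  seg_len, DMA_TO_DEVICE);
                    if (ib_dma_mapping_error(device, sge[i].addr))
                            goto out_unmap;
                    sge[i].length = seg_len;
            }
            return 0;

    out_unmap:
            /* Unwind only the i mappings that succeeded, newest first. */
            while (i--)
                    ib_dma_unmap_page(device, sge[i].addr, sge[i].length,
                                      DMA_TO_DEVICE);
            return -EIO;
    }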
Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/xprtrdma/rpc_rdma.c  38
1 file changed, 24 insertions(+), 14 deletions(-)
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 3c9255824d94..4f6c5395d198 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -511,6 +511,28 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
 	return 0;
 }
 
+/**
+ * rpcrdma_unmap_sges - DMA-unmap Send buffers
+ * @ia: interface adapter (device)
+ * @req: req with possibly some SGEs to be DMA unmapped
+ *
+ */
+void
+rpcrdma_unmap_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
+{
+	struct ib_sge *sge;
+	unsigned int count;
+
+	/* The first two SGEs contain the transport header and
+	 * the inline buffer. These are always left mapped so
+	 * they can be cheaply re-used.
+	 */
+	sge = &req->rl_send_sge[2];
+	for (count = req->rl_mapped_sges; count--; sge++)
+		ib_dma_unmap_page(ia->ri_device,
+				  sge->addr, sge->length, DMA_TO_DEVICE);
+}
+
 /* Prepare the RPC-over-RDMA header SGE.
  */
 static bool
@@ -641,10 +663,12 @@ out:
 	return true;
 
 out_mapping_overflow:
+	rpcrdma_unmap_sges(ia, req);
 	pr_err("rpcrdma: too many Send SGEs (%u)\n", sge_no);
 	return false;
 
 out_mapping_err:
+	rpcrdma_unmap_sges(ia, req);
 	pr_err("rpcrdma: Send mapping error\n");
 	return false;
 }
@@ -671,20 +695,6 @@ out_map:
 	return false;
 }
 
-void
-rpcrdma_unmap_sges(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
-{
-	struct ib_device *device = ia->ri_device;
-	struct ib_sge *sge;
-	int count;
-
-	sge = &req->rl_send_sge[2];
-	for (count = req->rl_mapped_sges; count--; sge++)
-		ib_dma_unmap_page(device, sge->addr, sge->length,
-				  DMA_TO_DEVICE);
-	req->rl_mapped_sges = 0;
-}
-
 /**
  * rpcrdma_marshal_req - Marshal and send one RPC request
  * @r_xprt: controlling transport
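A note on the design the new helper's comment records: rl_send_sge[0] and
rl_send_sge[1] (the transport header and the inline buffer) stay DMA-mapped
for the lifetime of the buffers, so only the page SGEs from index 2 onward
are ever unwound. The following hedged sketch shows that persistent/transient
split; struct send_buf and its fields are invented for illustration and are
not the xprtrdma structures.

    #include <linux/dma-direction.h>
    #include <rdma/ib_verbs.h>

    /* Illustrative only: sge[0..1] hold long-lived mappings set up when
     * the buffers are allocated; sge[2..] hold per-RPC page mappings
     * counted by 'mapped'.
     */
    struct send_buf {
            struct ib_sge sge[16];
            unsigned int mapped;    /* transient entries in sge[2..] */
    };

    static void unmap_transient_sges(struct ib_device *device,
                                     struct send_buf *buf)
    {
            struct ib_sge *sge = &buf->sge[2];
            unsigned int count;

            /* Skip sge[0] and sge[1]: remapping the header and inline
             * buffer would cost a DMA map/unmap round trip on every Send.
             */
            for (count = buf->mapped; count--; sge++)
                    ib_dma_unmap_page(device, sge->addr, sge->length,
                                      DMA_TO_DEVICE);
            buf->mapped = 0;        /* a second call becomes a no-op */
    }

Resetting the count at the end, as the removed copy of rpcrdma_unmap_sges()
did with rl_mapped_sges, makes a repeated unmap call harmless.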