author    Haggai Eran <haggaie@mellanox.com>  2014-05-22 07:50:08 -0400
committer Roland Dreier <roland@purestorage.com>  2014-05-27 14:53:05 -0400
commit    096f7e72c604e983e14b84b84fc37593fc433585 (patch)
tree      540e07929db39577906f4c529f32a57d5b9e7039 /drivers/infiniband/hw/mlx5/mr.c
parent    c7f44fbda68a6b2d6ceb10e45c711750e779bace (diff)
IB/mlx5: Fix error handling in reg_umr
If ib_post_send fails when posting the UMR work request in reg_umr,
the code doesn't release the temporary pas buffer allocated, and
doesn't dma_unmap it.

Signed-off-by: Haggai Eran <haggaie@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
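For readers less familiar with the idiom the fix adopts: the kernel's usual
answer to this class of leak is a chain of cleanup labels, where each failure
jumps to the label that unwinds exactly the steps completed so far. The sketch
below is not mlx5 code; it is a minimal user-space illustration in which
fake_map() and fake_post() are hypothetical stand-ins for dma_map_single()
and ib_post_send(), showing why a late failure such as the ib_post_send one
can no longer leak the earlier allocation or mapping.

    /* Minimal user-space sketch of the labeled-cleanup idiom this patch
     * adopts.  fake_map(), fake_unmap() and fake_post() are hypothetical
     * stand-ins, not mlx5 or kernel APIs. */
    #include <stdio.h>
    #include <stdlib.h>

    static int  fake_map(void *buf)   { return buf ? 0 : -1; } /* pretend mapping */
    static void fake_unmap(void *buf) { (void)buf; }           /* pretend unmapping */
    static int  fake_post(int fail)   { return fail ? -5 : 0; }/* pretend post_send */

    static int do_register(int fail_post)
    {
            void *pas;
            int err = 0;

            pas = malloc(64);               /* like kmalloc() of mr->pas */
            if (!pas)
                    return -12;             /* nothing acquired yet, plain return */

            err = fake_map(pas);            /* like dma_map_single() */
            if (err)
                    goto free_pas;          /* undo the allocation only */

            err = fake_post(fail_post);     /* like ib_post_send() */
            if (err)
                    goto unmap;             /* undo the mapping, then the alloc */

            /* the real code waits for completion here and may still set err;
             * success and failure both fall through the same cleanup */
    unmap:
            fake_unmap(pas);                /* like dma_unmap_single() */
    free_pas:
            free(pas);                      /* like kfree(mr->pas) */
            return err;
    }

    int main(void)
    {
            printf("ok:   %d\n", do_register(0));  /* 0, everything released */
            printf("fail: %d\n", do_register(1));  /* -5, still no leak */
            return 0;
    }

The diff below applies the same shape to reg_umr: the unmap_dma, free_pas and
free_mr labels mirror the setup steps in reverse, and even the success path
now flows through the unmap and free, since the pas buffer is only needed
while the UMR work request executes.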
Diffstat (limited to 'drivers/infiniband/hw/mlx5/mr.c')
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 31
1 file changed, 16 insertions(+), 15 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 81392b26d078..ad5898592016 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -730,7 +730,7 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
         struct mlx5_ib_mr *mr;
         struct ib_sge sg;
         int size = sizeof(u64) * npages;
-        int err;
+        int err = 0;
         int i;
 
         for (i = 0; i < 1; i++) {
@@ -751,7 +751,7 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
         mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
         if (!mr->pas) {
                 err = -ENOMEM;
-                goto error;
+                goto free_mr;
         }
 
         mlx5_ib_populate_pas(dev, umem, page_shift,
@@ -760,9 +760,8 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
         mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
                                  DMA_TO_DEVICE);
         if (dma_mapping_error(ddev, mr->dma)) {
-                kfree(mr->pas);
                 err = -ENOMEM;
-                goto error;
+                goto free_pas;
         }
 
         memset(&wr, 0, sizeof(wr));
@@ -778,26 +777,28 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
         err = ib_post_send(umrc->qp, &wr, &bad);
         if (err) {
                 mlx5_ib_warn(dev, "post send failed, err %d\n", err);
-                up(&umrc->sem);
-                goto error;
+                goto unmap_dma;
         }
         wait_for_completion(&mr->done);
-        up(&umrc->sem);
+        if (mr->status != IB_WC_SUCCESS) {
+                mlx5_ib_warn(dev, "reg umr failed\n");
+                err = -EFAULT;
+        }
 
+unmap_dma:
+        up(&umrc->sem);
         dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
+
+free_pas:
         kfree(mr->pas);
 
-        if (mr->status != IB_WC_SUCCESS) {
-                mlx5_ib_warn(dev, "reg umr failed\n");
-                err = -EFAULT;
-                goto error;
+free_mr:
+        if (err) {
+                free_cached_mr(dev, mr);
+                return ERR_PTR(err);
         }
 
         return mr;
-
-error:
-        free_cached_mr(dev, mr);
-        return ERR_PTR(err);
 }
 
 static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,