author		Shachar Raindel <raindel@mellanox.com>		2014-05-22 07:50:12 -0400
committer	Roland Dreier <roland@purestorage.com>		2014-05-27 14:53:09 -0400
commit		a74d24168d2df78e7a532567eb0e7538e6b09568 (patch)
tree		1748a31b03e4bb85e0e33edeb0400d0001bc7065 /drivers/infiniband/hw
parent		48fea837bb2709bda73cd4ae8bbd57cb277f7b90 (diff)
IB/mlx5: Refactor UMR to have its own context struct
Instead of having the UMR context be part of each memory region, allocate
a struct on the stack. This allows queuing multiple UMRs that access the
same memory region.
Signed-off-by: Shachar Raindel <raindel@mellanox.com>
Signed-off-by: Haggai Eran <haggaie@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
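The pattern at the heart of the patch is worth spelling out: completion state
moves out of the long-lived struct mlx5_ib_mr into a small struct on the
poster's stack, and wr_id carries a pointer to it so the CQ handler can wake
exactly the waiter that posted the work request. Below is a minimal sketch of
that pattern, not the driver code itself; struct umr_ctx, umr_wc_done() and
post_umr_and_wait() are hypothetical names used for illustration.

#include <linux/completion.h>
#include <rdma/ib_verbs.h>

/* Hypothetical per-call context, analogous to mlx5_ib_umr_context:
 * one instance per posted work request, owned by the poster's stack.
 */
struct umr_ctx {
	enum ib_wc_status	status;
	struct completion	done;
};

/* Completion-handler side: recover the per-call context from wr_id. */
static void umr_wc_done(struct ib_wc *wc)
{
	struct umr_ctx *ctx = (struct umr_ctx *)(unsigned long)wc->wr_id;

	ctx->status = wc->status;
	complete(&ctx->done);
}

/* Poster side: because the context is per call rather than embedded in
 * the MR, two threads may have UMRs in flight against the same memory
 * region without clobbering each other's completion.
 */
static int post_umr_and_wait(struct ib_qp *qp, struct ib_send_wr *wr)
{
	struct umr_ctx ctx;
	struct ib_send_wr *bad;
	int err;

	ctx.status = -1;			/* as in mlx5_ib_init_umr_context() */
	init_completion(&ctx.done);
	wr->wr_id = (u64)(unsigned long)&ctx;

	err = ib_post_send(qp, wr, &bad);
	if (err)
		return err;

	wait_for_completion(&ctx.done);		/* woken by umr_wc_done() */
	return ctx.status == IB_WC_SUCCESS ? 0 : -EFAULT;
}

The stack allocation is safe because post_umr_and_wait() cannot return before
the handler has called complete(), so the context outlives the work request
it describes.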
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--	drivers/infiniband/hw/mlx5/mlx5_ib.h	13
-rw-r--r--	drivers/infiniband/hw/mlx5/mr.c		40
2 files changed, 31 insertions, 22 deletions
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 50541586e0a6..f2ccf1a5a291 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -264,8 +264,6 @@ struct mlx5_ib_mr {
 	__be64			*pas;
 	dma_addr_t		dma;
 	int			npages;
-	struct completion	done;
-	enum ib_wc_status	status;
 	struct mlx5_ib_dev     *dev;
 	struct mlx5_create_mkey_mbox_out out;
 	struct mlx5_core_sig_ctx    *sig;
@@ -277,6 +275,17 @@ struct mlx5_ib_fast_reg_page_list {
 	dma_addr_t		map;
 };
 
+struct mlx5_ib_umr_context {
+	enum ib_wc_status	status;
+	struct completion	done;
+};
+
+static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
+{
+	context->status = -1;
+	init_completion(&context->done);
+}
+
 struct umr_common {
 	struct ib_pd	*pd;
 	struct ib_cq	*cq;
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index f472ab246d94..14ee4fdcf172 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -708,7 +708,7 @@ static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
 
 void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
 {
-	struct mlx5_ib_mr *mr;
+	struct mlx5_ib_umr_context *context;
 	struct ib_wc wc;
 	int err;
 
@@ -721,9 +721,9 @@ void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
 		if (err == 0)
 			break;
 
-		mr = (struct mlx5_ib_mr *)(unsigned long)wc.wr_id;
-		mr->status = wc.status;
-		complete(&mr->done);
+		context = (struct mlx5_ib_umr_context *)(unsigned long)wc.wr_id;
+		context->status = wc.status;
+		complete(&context->done);
 	}
 	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 }
@@ -735,6 +735,7 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 	struct device *ddev = dev->ib_dev.dma_device;
 	struct umr_common *umrc = &dev->umrc;
+	struct mlx5_ib_umr_context umr_context;
 	struct ib_send_wr wr, *bad;
 	struct mlx5_ib_mr *mr;
 	struct ib_sge sg;
@@ -774,24 +775,21 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	}
 
 	memset(&wr, 0, sizeof(wr));
-	wr.wr_id = (u64)(unsigned long)mr;
+	wr.wr_id = (u64)(unsigned long)&umr_context;
 	prep_umr_reg_wqe(pd, &wr, &sg, mr->dma, npages, mr->mmr.key, page_shift, virt_addr, len, access_flags);
 
-	/* We serialize polls so one process does not kidnap another's
-	 * completion. This is not a problem since wr is completed in
-	 * around 1 usec
-	 */
+	mlx5_ib_init_umr_context(&umr_context);
 	down(&umrc->sem);
-	init_completion(&mr->done);
 	err = ib_post_send(umrc->qp, &wr, &bad);
 	if (err) {
 		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
 		goto unmap_dma;
-	}
-	wait_for_completion(&mr->done);
-	if (mr->status != IB_WC_SUCCESS) {
-		mlx5_ib_warn(dev, "reg umr failed\n");
-		err = -EFAULT;
+	} else {
+		wait_for_completion(&umr_context.done);
+		if (umr_context.status != IB_WC_SUCCESS) {
+			mlx5_ib_warn(dev, "reg umr failed\n");
+			err = -EFAULT;
+		}
 	}
 
 	mr->mmr.iova = virt_addr;
@@ -940,24 +938,26 @@ error:
 static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 {
 	struct umr_common *umrc = &dev->umrc;
+	struct mlx5_ib_umr_context umr_context;
 	struct ib_send_wr wr, *bad;
 	int err;
 
 	memset(&wr, 0, sizeof(wr));
-	wr.wr_id = (u64)(unsigned long)mr;
+	wr.wr_id = (u64)(unsigned long)&umr_context;
 	prep_umr_unreg_wqe(dev, &wr, mr->mmr.key);
 
+	mlx5_ib_init_umr_context(&umr_context);
 	down(&umrc->sem);
-	init_completion(&mr->done);
 	err = ib_post_send(umrc->qp, &wr, &bad);
 	if (err) {
 		up(&umrc->sem);
 		mlx5_ib_dbg(dev, "err %d\n", err);
 		goto error;
+	} else {
+		wait_for_completion(&umr_context.done);
+		up(&umrc->sem);
 	}
-	wait_for_completion(&mr->done);
-	up(&umrc->sem);
-	if (mr->status != IB_WC_SUCCESS) {
+	if (umr_context.status != IB_WC_SUCCESS) {
 		mlx5_ib_warn(dev, "unreg umr failed\n");
 		err = -EFAULT;
 		goto error;