author     Christoph Hellwig <hch@lst.de>        2016-03-03 03:38:22 -0500
committer  Doug Ledford <dledford@redhat.com>    2016-03-04 11:58:41 -0500
commit     add08d765e942eab8eb15a592baeb372a3dd6831 (patch)
tree       c59cee42d6debc8b932b109eacb8ec22c8e5feb3
parent     d2370e0a573e5c5ea9c96373558727abb3ea71f7 (diff)
IB/mlx5: Convert UMR CQ to new CQ API
Simplifies the code, and makes it more fair vs other users by using a
softirq for polling.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Haggai Eran <haggaie@mellanox.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c    | 10
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h |  8
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c      | 49
3 files changed, 27 insertions(+), 40 deletions(-)
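The hunks below drop the driver's hand-rolled ib_poll_cq() loop and wr_id casts in favour of the generic CQ API: the CQ is allocated with ib_alloc_cq(..., IB_POLL_SOFTIRQ), each UMR work request carries a struct ib_cqe whose ->done callback recovers its context via container_of(), and the poster waits on a struct completion. The following minimal sketch (not part of the patch; the names my_umr_ctx, my_umr_done and my_post_and_wait are hypothetical) illustrates that pattern, assuming a CQ already created with ib_alloc_cq():

/*
 * Sketch of the ib_cqe completion pattern this commit adopts.
 * The CQ would be allocated elsewhere, e.g.:
 *	cq = ib_alloc_cq(ibdev, NULL, 128, 0, IB_POLL_SOFTIRQ);
 */
#include <rdma/ib_verbs.h>
#include <linux/completion.h>

struct my_umr_ctx {
	struct ib_cqe		cqe;	/* embedded; recovered via container_of() */
	enum ib_wc_status	status;
	struct completion	done;
};

/* Invoked from the CQ polling context (softirq here) for each completion. */
static void my_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_umr_ctx *ctx =
		container_of(wc->wr_cqe, struct my_umr_ctx, cqe);

	ctx->status = wc->status;
	complete(&ctx->done);
}

static int my_post_and_wait(struct ib_qp *qp, struct ib_send_wr *wr)
{
	struct my_umr_ctx ctx = { .cqe.done = my_umr_done, .status = -1 };
	struct ib_send_wr *bad;
	int err;

	init_completion(&ctx.done);
	wr->wr_cqe = &ctx.cqe;		/* replaces the old wr_id cast */

	err = ib_post_send(qp, wr, &bad);
	if (err)
		return err;

	wait_for_completion(&ctx.done);
	return ctx.status == IB_WC_SUCCESS ? 0 : -EFAULT;
}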
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 4d9b7cc4ca73..63c3d21dd471 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1861,7 +1861,7 @@ static void destroy_umrc_res(struct mlx5_ib_dev *dev)
 		mlx5_ib_warn(dev, "mr cache cleanup failed\n");
 
 	mlx5_ib_destroy_qp(dev->umrc.qp);
-	ib_destroy_cq(dev->umrc.cq);
+	ib_free_cq(dev->umrc.cq);
 	ib_dealloc_pd(dev->umrc.pd);
 }
 
@@ -1876,7 +1876,6 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
 	struct ib_pd *pd;
 	struct ib_cq *cq;
 	struct ib_qp *qp;
-	struct ib_cq_init_attr cq_attr = {};
 	int ret;
 
 	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
@@ -1893,15 +1892,12 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
 		goto error_0;
 	}
 
-	cq_attr.cqe = 128;
-	cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL,
-			  &cq_attr);
+	cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
 	if (IS_ERR(cq)) {
 		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
 		ret = PTR_ERR(cq);
 		goto error_2;
 	}
-	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 
 	init_attr->send_cq = cq;
 	init_attr->recv_cq = cq;
@@ -1968,7 +1964,7 @@ error_4:
 	mlx5_ib_destroy_qp(qp);
 
 error_3:
-	ib_destroy_cq(cq);
+	ib_free_cq(cq);
 
 error_2:
 	ib_dealloc_pd(pd);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 648d2e2e445b..3c02b3ce76ae 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -468,16 +468,11 @@ struct mlx5_ib_mw {
 };
 
 struct mlx5_ib_umr_context {
+	struct ib_cqe		cqe;
 	enum ib_wc_status	status;
 	struct completion	done;
 };
 
-static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
-{
-	context->status = -1;
-	init_completion(&context->done);
-}
-
 struct umr_common {
 	struct ib_pd	*pd;
 	struct ib_cq	*cq;
@@ -762,7 +757,6 @@ int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
 int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
 int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
 int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u64 addr, int *count, int *shift);
-void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context);
 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
 			    struct ib_mr_status *mr_status);
 
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 70a047dde69e..dd9231494f63 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -836,26 +836,20 @@ static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
 	return umem;
 }
 
-void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
+static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
 {
-	struct mlx5_ib_umr_context *context;
-	struct ib_wc wc;
-	int err;
+	struct mlx5_ib_umr_context *context =
+		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);
 
-	while (1) {
-		err = ib_poll_cq(cq, 1, &wc);
-		if (err < 0) {
-			pr_warn("poll cq error %d\n", err);
-			return;
-		}
-		if (err == 0)
-			break;
+	context->status = wc->status;
+	complete(&context->done);
+}
 
-		context = (struct mlx5_ib_umr_context *) (unsigned long) wc.wr_id;
-		context->status = wc.status;
-		complete(&context->done);
-	}
-	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
+{
+	context->cqe.done = mlx5_ib_umr_done;
+	context->status = -1;
+	init_completion(&context->done);
 }
 
 static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
@@ -896,12 +890,13 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
 	if (err)
 		goto free_mr;
 
+	mlx5_ib_init_umr_context(&umr_context);
+
 	memset(&umrwr, 0, sizeof(umrwr));
-	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
+	umrwr.wr.wr_cqe = &umr_context.cqe;
 	prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
 			 page_shift, virt_addr, len, access_flags);
 
-	mlx5_ib_init_umr_context(&umr_context);
 	down(&umrc->sem);
 	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
 	if (err) {
@@ -1013,8 +1008,10 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
 
 	dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);
 
+	mlx5_ib_init_umr_context(&umr_context);
+
 	memset(&wr, 0, sizeof(wr));
-	wr.wr.wr_id = (u64)(unsigned long)&umr_context;
+	wr.wr.wr_cqe = &umr_context.cqe;
 
 	sg.addr = dma;
 	sg.length = ALIGN(npages * sizeof(u64),
@@ -1031,7 +1028,6 @@ int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
 	wr.mkey = mr->mmkey.key;
 	wr.target.offset = start_page_index;
 
-	mlx5_ib_init_umr_context(&umr_context);
 	down(&umrc->sem);
 	err = ib_post_send(umrc->qp, &wr.wr, &bad);
 	if (err) {
@@ -1204,11 +1200,12 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 	struct ib_send_wr *bad;
 	int err;
 
+	mlx5_ib_init_umr_context(&umr_context);
+
 	memset(&umrwr.wr, 0, sizeof(umrwr));
-	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
+	umrwr.wr.wr_cqe = &umr_context.cqe;
 	prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key);
 
-	mlx5_ib_init_umr_context(&umr_context);
 	down(&umrc->sem);
 	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
 	if (err) {
@@ -1246,7 +1243,9 @@ static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
 	int size;
 	int err;
 
-	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
+	mlx5_ib_init_umr_context(&umr_context);
+
+	umrwr.wr.wr_cqe = &umr_context.cqe;
 	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;
 
 	if (flags & IB_MR_REREG_TRANS) {
@@ -1273,8 +1272,6 @@ static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
 		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_ACCESS;
 	}
 
-	mlx5_ib_init_umr_context(&umr_context);
-
 	/* post send request to UMR QP */
 	down(&umrc->sem);
 	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);