author		Sagi Grimberg <sagig@mellanox.com>	2015-10-13 12:11:42 -0400
committer	Doug Ledford <dledford@redhat.com>	2015-10-28 22:27:19 -0400
commit		dd01e66a6c532a8cd183cbc02ebaef99f186345f (patch)
tree		3226946c62148c6a1ce593a45422e970fbda9133
parent		9a21be531cacecce6c897faacd66ed4c7dbbe88b (diff)
IB/mlx5: Remove old FRWR API support

No ULP uses it anymore, go ahead and remove it.
Keep only the local invalidate part of the handlers.

Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Doug Ledford <dledford@redhat.com>
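For reference, the registration flow that replaces the removed FRWR page-list calls is ib_alloc_mr() plus ib_map_mr_sg(), followed by an IB_WR_REG_MR work request. A minimal sketch, assuming the 4.4-era verbs signatures; pd, qp, sgl, nents, and the access flags below are illustrative placeholders, not taken from this patch:

	/*
	 * Hedged sketch of the new registration API that obsoletes the
	 * removed ib_alloc_fast_reg_page_list()/IB_WR_FAST_REG_MR path.
	 * pd, qp, sgl and nents are caller-supplied placeholders.
	 */
	struct ib_send_wr *bad_wr;
	struct ib_reg_wr reg_wr = {};
	struct ib_mr *mr;
	int n, err;

	/* One allocation; no separate page list to manage. */
	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, nents);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* The driver walks the S/G list itself (mlx5_ib_map_mr_sg below). */
	n = ib_map_mr_sg(mr, sgl, nents, PAGE_SIZE);
	if (n < nents)
		return -EINVAL;

	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;
	err = ib_post_send(qp, &reg_wr.wr, &bad_wr);

The page list that alloc_fast_reg_page_list() used to expose is gone; the driver builds its translation table inside its map_mr_sg handler instead.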
-rw-r--r--	drivers/infiniband/hw/mlx5/cq.c		|  3
-rw-r--r--	drivers/infiniband/hw/mlx5/main.c	|  2
-rw-r--r--	drivers/infiniband/hw/mlx5/mlx5_ib.h	| 14
-rw-r--r--	drivers/infiniband/hw/mlx5/mr.c		| 42
-rw-r--r--	drivers/infiniband/hw/mlx5/qp.c		| 97
5 files changed, 9 insertions(+), 149 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 206930096d56..3dfd287256d6 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -112,9 +112,6 @@ static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
 	case IB_WR_REG_MR:
 		return IB_WC_REG_MR;
 
-	case IB_WR_FAST_REG_MR:
-		return IB_WC_FAST_REG_MR;
-
 	default:
 		pr_warn("unknown completion status\n");
 		return 0;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 7e93044ea6ce..bdd60a69be2d 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1426,8 +1426,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	dev->ib_dev.process_mad		= mlx5_ib_process_mad;
 	dev->ib_dev.alloc_mr		= mlx5_ib_alloc_mr;
 	dev->ib_dev.map_mr_sg		= mlx5_ib_map_mr_sg;
-	dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list;
-	dev->ib_dev.free_fast_reg_page_list  = mlx5_ib_free_fast_reg_page_list;
 	dev->ib_dev.check_mr_status	= mlx5_ib_check_mr_status;
 	dev->ib_dev.get_port_immutable	= mlx5_port_immutable;
 
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index a29b28c31c44..633347260b79 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -338,12 +338,6 @@ struct mlx5_ib_mr {
 	void			*descs_alloc;
 };
 
-struct mlx5_ib_fast_reg_page_list {
-	struct ib_fast_reg_page_list	ibfrpl;
-	__be64			       *mapped_page_list;
-	dma_addr_t			map;
-};
-
 struct mlx5_ib_umr_context {
 	enum ib_wc_status	status;
 	struct completion	done;
@@ -494,11 +488,6 @@ static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
 	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
 }
 
-static inline struct mlx5_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)
-{
-	return container_of(ibfrpl, struct mlx5_ib_fast_reg_page_list, ibfrpl);
-}
-
 struct mlx5_ib_ah {
 	struct ib_ah		ibah;
 	struct mlx5_av		av;
@@ -569,9 +558,6 @@ struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
 int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
 		      struct scatterlist *sg,
 		      int sg_nents);
-struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
-							       int page_list_len);
-void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list);
 int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
 			const struct ib_mad_hdr *in, size_t in_mad_size,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 6177e8053888..ec8993a7b3be 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1383,48 +1383,6 @@ err_free:
 	return ERR_PTR(err);
 }
 
-struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
-							       int page_list_len)
-{
-	struct mlx5_ib_fast_reg_page_list *mfrpl;
-	int size = page_list_len * sizeof(u64);
-
-	mfrpl = kmalloc(sizeof(*mfrpl), GFP_KERNEL);
-	if (!mfrpl)
-		return ERR_PTR(-ENOMEM);
-
-	mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
-	if (!mfrpl->ibfrpl.page_list)
-		goto err_free;
-
-	mfrpl->mapped_page_list = dma_alloc_coherent(ibdev->dma_device,
-						     size, &mfrpl->map,
-						     GFP_KERNEL);
-	if (!mfrpl->mapped_page_list)
-		goto err_free;
-
-	WARN_ON(mfrpl->map & 0x3f);
-
-	return &mfrpl->ibfrpl;
-
-err_free:
-	kfree(mfrpl->ibfrpl.page_list);
-	kfree(mfrpl);
-	return ERR_PTR(-ENOMEM);
-}
-
-void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
-{
-	struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
-	struct mlx5_ib_dev *dev = to_mdev(page_list->device);
-	int size = page_list->max_page_list_len * sizeof(u64);
-
-	dma_free_coherent(&dev->mdev->pdev->dev, size, mfrpl->mapped_page_list,
-			  mfrpl->map);
-	kfree(mfrpl->ibfrpl.page_list);
-	kfree(mfrpl);
-}
-
 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
 			    struct ib_mr_status *mr_status)
 {
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index da2b46c2624a..307bdbca8938 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -64,7 +64,6 @@ static const u32 mlx5_ib_opcode[] = {
 	[IB_WR_ATOMIC_FETCH_AND_ADD]		= MLX5_OPCODE_ATOMIC_FA,
 	[IB_WR_SEND_WITH_INV]			= MLX5_OPCODE_SEND_INVAL,
 	[IB_WR_LOCAL_INV]			= MLX5_OPCODE_UMR,
-	[IB_WR_FAST_REG_MR]			= MLX5_OPCODE_UMR,
 	[IB_WR_REG_MR]				= MLX5_OPCODE_UMR,
 	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= MLX5_OPCODE_ATOMIC_MASKED_CS,
 	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= MLX5_OPCODE_ATOMIC_MASKED_FA,
@@ -1908,20 +1907,11 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
 	umr->mkey_mask = frwr_mkey_mask();
 }
 
-static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
-				 struct ib_send_wr *wr, int li)
+static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
 {
 	memset(umr, 0, sizeof(*umr));
-
-	if (li) {
-		umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
-		umr->flags = 1 << 7;
-		return;
-	}
-
-	umr->flags = (1 << 5); /* fail if not free */
-	umr->klm_octowords = get_klm_octo(fast_reg_wr(wr)->page_list_len);
-	umr->mkey_mask = frwr_mkey_mask();
+	umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+	umr->flags = 1 << 7;
 }
 
 static __be64 get_umr_reg_mr_mask(void)
1926 1916
1927static __be64 get_umr_reg_mr_mask(void) 1917static __be64 get_umr_reg_mr_mask(void)
@@ -2015,24 +2005,10 @@ static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
 	seg->log2_page_size = ilog2(mr->ibmr.page_size);
 }
 
-static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
-			     int li, int *writ)
+static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
 {
 	memset(seg, 0, sizeof(*seg));
-	if (li) {
-		seg->status = MLX5_MKEY_STATUS_FREE;
-		return;
-	}
-
-	seg->flags = get_umr_flags(fast_reg_wr(wr)->access_flags) |
-		     MLX5_ACCESS_MODE_MTT;
-	*writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
-	seg->qpn_mkey7_0 = cpu_to_be32((fast_reg_wr(wr)->rkey & 0xff) | 0xffffff00);
-	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
-	seg->start_addr = cpu_to_be64(fast_reg_wr(wr)->iova_start);
-	seg->len = cpu_to_be64(fast_reg_wr(wr)->length);
-	seg->xlt_oct_size = cpu_to_be32((fast_reg_wr(wr)->page_list_len + 1) / 2);
-	seg->log2_page_size = fast_reg_wr(wr)->page_shift;
+	seg->status = MLX5_MKEY_STATUS_FREE;
 }
 
 static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
@@ -2067,24 +2043,6 @@ static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
 	dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
 }
 
-static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
-			   struct ib_send_wr *wr,
-			   struct mlx5_core_dev *mdev,
-			   struct mlx5_ib_pd *pd,
-			   int writ)
-{
-	struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(fast_reg_wr(wr)->page_list);
-	u64 *page_list = fast_reg_wr(wr)->page_list->page_list;
-	u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0);
-	int i;
-
-	for (i = 0; i < fast_reg_wr(wr)->page_list_len; i++)
-		mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
-	dseg->addr = cpu_to_be64(mfrpl->map);
-	dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * fast_reg_wr(wr)->page_list_len, 64));
-	dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
-}
-
 static __be32 send_ieth(struct ib_send_wr *wr)
 {
 	switch (wr->opcode) {
@@ -2504,36 +2462,18 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
 	return 0;
 }
 
-static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
-			  struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp)
+static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size)
 {
-	int writ = 0;
-	int li;
-
-	li = wr->opcode == IB_WR_LOCAL_INV ? 1 : 0;
-	if (unlikely(wr->send_flags & IB_SEND_INLINE))
-		return -EINVAL;
-
-	set_frwr_umr_segment(*seg, wr, li);
+	set_linv_umr_seg(*seg);
 	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
 	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
 	if (unlikely((*seg == qp->sq.qend)))
 		*seg = mlx5_get_send_wqe(qp, 0);
-	set_mkey_segment(*seg, wr, li, &writ);
+	set_linv_mkey_seg(*seg);
 	*seg += sizeof(struct mlx5_mkey_seg);
 	*size += sizeof(struct mlx5_mkey_seg) / 16;
 	if (unlikely((*seg == qp->sq.qend)))
 		*seg = mlx5_get_send_wqe(qp, 0);
-	if (!li) {
-		if (unlikely(fast_reg_wr(wr)->page_list_len >
-			     fast_reg_wr(wr)->page_list->max_page_list_len))
-			return -ENOMEM;
-
-		set_frwr_pages(*seg, wr, mdev, pd, writ);
-		*seg += sizeof(struct mlx5_wqe_data_seg);
-		*size += (sizeof(struct mlx5_wqe_data_seg) / 16);
-	}
-	return 0;
 }
 
 static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
@@ -2649,7 +2589,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 {
 	struct mlx5_wqe_ctrl_seg *ctrl = NULL;  /* compiler warning */
 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
-	struct mlx5_core_dev *mdev = dev->mdev;
 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
 	struct mlx5_ib_mr *mr;
 	struct mlx5_wqe_data_seg *dpseg;
@@ -2724,25 +2663,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
 			qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
 			ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
-			err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
-			if (err) {
-				mlx5_ib_warn(dev, "\n");
-				*bad_wr = wr;
-				goto out;
-			}
-			num_sge = 0;
-			break;
-
-		case IB_WR_FAST_REG_MR:
-			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
-			qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR;
-			ctrl->imm = cpu_to_be32(fast_reg_wr(wr)->rkey);
-			err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
-			if (err) {
-				mlx5_ib_warn(dev, "\n");
-				*bad_wr = wr;
-				goto out;
-			}
+			set_linv_wr(qp, &seg, &size);
 			num_sge = 0;
 			break;
 
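What survives of the old handlers is the local-invalidate path (set_linv_umr_seg(), set_linv_mkey_seg(), set_linv_wr() above). A minimal sketch of the work request that drives it, assuming mr is a previously registered ib_mr and qp/err are illustrative placeholders:

	/* Sketch: posting the local invalidate served by the kept handlers. */
	struct ib_send_wr inv_wr = {}, *bad_wr;
	int err;

	inv_wr.opcode = IB_WR_LOCAL_INV;	/* dispatched to set_linv_wr() */
	inv_wr.ex.invalidate_rkey = mr->rkey;	/* becomes ctrl->imm in mlx5_ib_post_send() */
	err = ib_post_send(qp, &inv_wr, &bad_wr);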