author		Sagi Grimberg <sagig@mellanox.com>	2015-10-13 12:11:42 -0400
committer	Doug Ledford <dledford@redhat.com>	2015-10-28 22:27:19 -0400
commit		dd01e66a6c532a8cd183cbc02ebaef99f186345f (patch)
tree		3226946c62148c6a1ce593a45422e970fbda9133 /drivers/infiniband/hw/mlx5/qp.c
parent		9a21be531cacecce6c897faacd66ed4c7dbbe88b (diff)
IB/mlx5: Remove old FRWR API support
No ULP uses it anymore, go ahead and remove it.  Keep only the local
invalidate part of the handlers.

Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Doug Ledford <dledford@redhat.com>
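For reference (not part of this patch): the removed IB_WR_FAST_REG_MR path was
superseded by the IB_WR_REG_MR work request plus ib_map_mr_sg(), introduced
earlier in this series, where the core maps a scatterlist into the MR instead
of the ULP hand-building a page list. A rough sketch of the replacement usage
from a ULP's point of view — qp, mr, sgl and nents stand in for caller state,
and error handling is trimmed:

	#include <rdma/ib_verbs.h>

	/* Sketch: map 'nents' scatterlist entries into 'mr' and post an
	 * IB_WR_REG_MR work request (the path set_reg_wr() serves below). */
	static int example_reg_mr(struct ib_qp *qp, struct ib_mr *mr,
				  struct scatterlist *sgl, int nents)
	{
		struct ib_reg_wr reg_wr;
		struct ib_send_wr *bad_wr;
		int n;

		n = ib_map_mr_sg(mr, sgl, nents, PAGE_SIZE);
		if (n < nents)
			return n < 0 ? n : -EINVAL;

		memset(&reg_wr, 0, sizeof(reg_wr));
		reg_wr.wr.opcode = IB_WR_REG_MR;
		reg_wr.wr.send_flags = IB_SEND_SIGNALED;
		reg_wr.mr = mr;
		reg_wr.key = mr->rkey;
		reg_wr.access = IB_ACCESS_LOCAL_WRITE |
				IB_ACCESS_REMOTE_READ |
				IB_ACCESS_REMOTE_WRITE;

		return ib_post_send(qp, &reg_wr.wr, &bad_wr);
	}

Because the core now builds the page list from the scatterlist, the driver's
hand-rolled page-list handling (set_frwr_pages() and friends) removed below
has no remaining users.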
Diffstat (limited to 'drivers/infiniband/hw/mlx5/qp.c')
-rw-r--r--	drivers/infiniband/hw/mlx5/qp.c	97
1 file changed, 9 insertions(+), 88 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index da2b46c2624a..307bdbca8938 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -64,7 +64,6 @@ static const u32 mlx5_ib_opcode[] = {
 	[IB_WR_ATOMIC_FETCH_AND_ADD]		= MLX5_OPCODE_ATOMIC_FA,
 	[IB_WR_SEND_WITH_INV]			= MLX5_OPCODE_SEND_INVAL,
 	[IB_WR_LOCAL_INV]			= MLX5_OPCODE_UMR,
-	[IB_WR_FAST_REG_MR]			= MLX5_OPCODE_UMR,
 	[IB_WR_REG_MR]				= MLX5_OPCODE_UMR,
 	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= MLX5_OPCODE_ATOMIC_MASKED_CS,
 	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= MLX5_OPCODE_ATOMIC_MASKED_FA,
@@ -1908,20 +1907,11 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
 	umr->mkey_mask = frwr_mkey_mask();
 }
 
-static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
-				 struct ib_send_wr *wr, int li)
+static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)
 {
 	memset(umr, 0, sizeof(*umr));
-
-	if (li) {
-		umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
-		umr->flags = 1 << 7;
-		return;
-	}
-
-	umr->flags = (1 << 5); /* fail if not free */
-	umr->klm_octowords = get_klm_octo(fast_reg_wr(wr)->page_list_len);
-	umr->mkey_mask = frwr_mkey_mask();
+	umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+	umr->flags = 1 << 7;
 }
 
 static __be64 get_umr_reg_mr_mask(void)
@@ -2015,24 +2005,10 @@ static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
 	seg->log2_page_size = ilog2(mr->ibmr.page_size);
 }
 
-static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
-			     int li, int *writ)
+static void set_linv_mkey_seg(struct mlx5_mkey_seg *seg)
 {
 	memset(seg, 0, sizeof(*seg));
-	if (li) {
-		seg->status = MLX5_MKEY_STATUS_FREE;
-		return;
-	}
-
-	seg->flags = get_umr_flags(fast_reg_wr(wr)->access_flags) |
-		     MLX5_ACCESS_MODE_MTT;
-	*writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
-	seg->qpn_mkey7_0 = cpu_to_be32((fast_reg_wr(wr)->rkey & 0xff) | 0xffffff00);
-	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
-	seg->start_addr = cpu_to_be64(fast_reg_wr(wr)->iova_start);
-	seg->len = cpu_to_be64(fast_reg_wr(wr)->length);
-	seg->xlt_oct_size = cpu_to_be32((fast_reg_wr(wr)->page_list_len + 1) / 2);
-	seg->log2_page_size = fast_reg_wr(wr)->page_shift;
+	seg->status = MLX5_MKEY_STATUS_FREE;
 }
 
 static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
@@ -2067,24 +2043,6 @@ static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
 	dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
 }
 
-static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
-			   struct ib_send_wr *wr,
-			   struct mlx5_core_dev *mdev,
-			   struct mlx5_ib_pd *pd,
-			   int writ)
-{
-	struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(fast_reg_wr(wr)->page_list);
-	u64 *page_list = fast_reg_wr(wr)->page_list->page_list;
-	u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0);
-	int i;
-
-	for (i = 0; i < fast_reg_wr(wr)->page_list_len; i++)
-		mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
-	dseg->addr = cpu_to_be64(mfrpl->map);
-	dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * fast_reg_wr(wr)->page_list_len, 64));
-	dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
-}
-
 static __be32 send_ieth(struct ib_send_wr *wr)
 {
 	switch (wr->opcode) {
@@ -2504,36 +2462,18 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
 	return 0;
 }
 
-static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
-			  struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp)
+static void set_linv_wr(struct mlx5_ib_qp *qp, void **seg, int *size)
 {
-	int writ = 0;
-	int li;
-
-	li = wr->opcode == IB_WR_LOCAL_INV ? 1 : 0;
-	if (unlikely(wr->send_flags & IB_SEND_INLINE))
-		return -EINVAL;
-
-	set_frwr_umr_segment(*seg, wr, li);
+	set_linv_umr_seg(*seg);
 	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
 	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
 	if (unlikely((*seg == qp->sq.qend)))
 		*seg = mlx5_get_send_wqe(qp, 0);
-	set_mkey_segment(*seg, wr, li, &writ);
+	set_linv_mkey_seg(*seg);
 	*seg += sizeof(struct mlx5_mkey_seg);
 	*size += sizeof(struct mlx5_mkey_seg) / 16;
 	if (unlikely((*seg == qp->sq.qend)))
 		*seg = mlx5_get_send_wqe(qp, 0);
-	if (!li) {
-		if (unlikely(fast_reg_wr(wr)->page_list_len >
-			     fast_reg_wr(wr)->page_list->max_page_list_len))
-			return -ENOMEM;
-
-		set_frwr_pages(*seg, wr, mdev, pd, writ);
-		*seg += sizeof(struct mlx5_wqe_data_seg);
-		*size += (sizeof(struct mlx5_wqe_data_seg) / 16);
-	}
-	return 0;
 }
 
 static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
@@ -2649,7 +2589,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 {
 	struct mlx5_wqe_ctrl_seg *ctrl = NULL;  /* compiler warning */
 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
-	struct mlx5_core_dev *mdev = dev->mdev;
 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
 	struct mlx5_ib_mr *mr;
 	struct mlx5_wqe_data_seg *dpseg;
@@ -2724,25 +2663,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
 				qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
 				ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
-				err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
-				if (err) {
-					mlx5_ib_warn(dev, "\n");
-					*bad_wr = wr;
-					goto out;
-				}
-				num_sge = 0;
-				break;
-
-			case IB_WR_FAST_REG_MR:
-				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
-				qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR;
-				ctrl->imm = cpu_to_be32(fast_reg_wr(wr)->rkey);
-				err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
-				if (err) {
-					mlx5_ib_warn(dev, "\n");
-					*bad_wr = wr;
-					goto out;
-				}
+				set_linv_wr(qp, &seg, &size);
 				num_sge = 0;
 				break;
 
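The retained local-invalidate path above is driven by an ordinary
IB_WR_LOCAL_INV work request from the ULP. A minimal sketch of the caller
side, assuming qp and mr belong to the caller (the rkey posted here is what
lands in ctrl->imm and is consumed by set_linv_wr() above):

	/* Sketch: invalidate a previously registered rkey. */
	static int example_local_inv(struct ib_qp *qp, struct ib_mr *mr)
	{
		struct ib_send_wr inv_wr, *bad_wr;

		memset(&inv_wr, 0, sizeof(inv_wr));
		inv_wr.opcode = IB_WR_LOCAL_INV;	/* dispatched to set_linv_wr() */
		inv_wr.send_flags = IB_SEND_SIGNALED;
		inv_wr.ex.invalidate_rkey = mr->rkey;	/* becomes ctrl->imm above */

		return ib_post_send(qp, &inv_wr, &bad_wr);
	}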