about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorYonatan Cohen <yonatanc@mellanox.com>2018-10-09 05:05:15 -0400
committerDoug Ledford <dledford@redhat.com>2018-10-17 11:25:41 -0400
commit6f4bc0ea682b59d7013cbc5ced2d4dd73067a33f (patch)
tree33698021af90822b34bc9345b8d270aee82c8e88
parent2e43bb31b8df662f591a7e80270ca3acda44bb48 (diff)
IB/mlx5: Allow scatter to CQE without global signaled WRs
Requester scatter to CQE is restricted to QPs configured to signal all WRs.

This patch adds the ability to enable scatter to CQE (force enable) in the
requester without sig_all, for users who do not want all WRs signaled but
rather just the ones whose data is found in the CQE.

Signed-off-by: Yonatan Cohen <yonatanc@mellanox.com>
Reviewed-by: Guy Levi <guyle@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c14
-rw-r--r--include/uapi/rdma/mlx5-abi.h1
2 files changed, 12 insertions, 3 deletions
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 5b1811be6677..368728e6f980 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1706,15 +1706,20 @@ static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr,
 
 static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
 					 struct ib_qp_init_attr *init_attr,
+					 struct mlx5_ib_create_qp *ucmd,
 					 void *qpc)
 {
 	enum ib_qp_type qpt = init_attr->qp_type;
 	int scqe_sz;
+	bool allow_scat_cqe = 0;
 
 	if (qpt == IB_QPT_UC || qpt == IB_QPT_UD)
 		return;
 
-	if (init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
+	if (ucmd)
+		allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
+
+	if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
 		return;
 
 	scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq);
@@ -1836,7 +1841,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 				      MLX5_QP_FLAG_TUNNEL_OFFLOADS |
 				      MLX5_QP_FLAG_BFREG_INDEX |
 				      MLX5_QP_FLAG_TYPE_DCT |
-				      MLX5_QP_FLAG_TYPE_DCI))
+				      MLX5_QP_FLAG_TYPE_DCI |
+				      MLX5_QP_FLAG_ALLOW_SCATTER_CQE))
 			return -EINVAL;
 
 		err = get_qp_user_index(to_mucontext(pd->uobject->context),
@@ -1971,7 +1977,9 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 
 	if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
 		configure_responder_scat_cqe(init_attr, qpc);
-		configure_requester_scat_cqe(dev, init_attr, qpc);
+		configure_requester_scat_cqe(dev, init_attr,
+					     (pd && pd->uobject) ? &ucmd : NULL,
+					     qpc);
 	}
 
 	if (qp->rq.wqe_cnt) {
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index 6056625237cf..8fa9f90e2bb1 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -47,6 +47,7 @@ enum {
 	MLX5_QP_FLAG_TYPE_DCI		= 1 << 5,
 	MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC	= 1 << 6,
 	MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC	= 1 << 7,
+	MLX5_QP_FLAG_ALLOW_SCATTER_CQE	= 1 << 8,
 };
 
 enum {