diff options
author | Majd Dibbiny <majd@mellanox.com> | 2016-02-14 11:35:51 -0500 |
---|---|---|
committer | Doug Ledford <dledford@redhat.com> | 2016-03-03 10:00:03 -0500 |
commit | 85d9691ccc96d95629939a877fd6c1f8c4724f56 (patch) | |
tree | 03325bdfe51f55708b61046de40e5275f278a6dd | |
parent | 5adebafb75bdfbbe4ec69f14c3613e70f6ed7f6f (diff) |
IB/mlx5: Avoid using user-index for SRQs
Normal SRQs, unlike XRC SRQs, don't have a user-index; therefore,
avoid verifying it and using it.
Fixes: cfb5e088e26a ('IB/mlx5: Add CQE version 1 support to user QPs and SRQs')
Signed-off-by: Majd Dibbiny <majd@mellanox.com>
Reviewed-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r-- | drivers/infiniband/hw/mlx5/srq.c | 30 |
1 file changed, 19 insertions, 11 deletions
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 4659256cd95e..a1b7122c2ad6 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c | |||
@@ -75,7 +75,8 @@ static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type) | |||
75 | 75 | ||
76 | static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, | 76 | static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, |
77 | struct mlx5_create_srq_mbox_in **in, | 77 | struct mlx5_create_srq_mbox_in **in, |
78 | struct ib_udata *udata, int buf_size, int *inlen) | 78 | struct ib_udata *udata, int buf_size, int *inlen, |
79 | int is_xrc) | ||
79 | { | 80 | { |
80 | struct mlx5_ib_dev *dev = to_mdev(pd->device); | 81 | struct mlx5_ib_dev *dev = to_mdev(pd->device); |
81 | struct mlx5_ib_create_srq ucmd = {}; | 82 | struct mlx5_ib_create_srq ucmd = {}; |
@@ -108,10 +109,12 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, | |||
108 | drv_data - sizeof(ucmd))) | 109 | drv_data - sizeof(ucmd))) |
109 | return -EINVAL; | 110 | return -EINVAL; |
110 | 111 | ||
111 | err = get_srq_user_index(to_mucontext(pd->uobject->context), | 112 | if (is_xrc) { |
112 | &ucmd, udata->inlen, &uidx); | 113 | err = get_srq_user_index(to_mucontext(pd->uobject->context), |
113 | if (err) | 114 | &ucmd, udata->inlen, &uidx); |
114 | return err; | 115 | if (err) |
116 | return err; | ||
117 | } | ||
115 | 118 | ||
116 | srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE); | 119 | srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE); |
117 | 120 | ||
@@ -151,7 +154,8 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, | |||
151 | (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; | 154 | (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; |
152 | (*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26); | 155 | (*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26); |
153 | 156 | ||
154 | if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) { | 157 | if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) && |
158 | is_xrc){ | ||
155 | xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in, | 159 | xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in, |
156 | xrc_srq_context_entry); | 160 | xrc_srq_context_entry); |
157 | MLX5_SET(xrc_srqc, xsrqc, user_index, uidx); | 161 | MLX5_SET(xrc_srqc, xsrqc, user_index, uidx); |
@@ -170,7 +174,7 @@ err_umem: | |||
170 | 174 | ||
171 | static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq, | 175 | static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq, |
172 | struct mlx5_create_srq_mbox_in **in, int buf_size, | 176 | struct mlx5_create_srq_mbox_in **in, int buf_size, |
173 | int *inlen) | 177 | int *inlen, int is_xrc) |
174 | { | 178 | { |
175 | int err; | 179 | int err; |
176 | int i; | 180 | int i; |
@@ -224,7 +228,8 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq, | |||
224 | 228 | ||
225 | (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; | 229 | (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; |
226 | 230 | ||
227 | if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) { | 231 | if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) && |
232 | is_xrc){ | ||
228 | xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in, | 233 | xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in, |
229 | xrc_srq_context_entry); | 234 | xrc_srq_context_entry); |
230 | /* 0xffffff means we ask to work with cqe version 0 */ | 235 | /* 0xffffff means we ask to work with cqe version 0 */ |
@@ -302,10 +307,14 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, | |||
302 | desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs, | 307 | desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs, |
303 | srq->msrq.max_avail_gather); | 308 | srq->msrq.max_avail_gather); |
304 | 309 | ||
310 | is_xrc = (init_attr->srq_type == IB_SRQT_XRC); | ||
311 | |||
305 | if (pd->uobject) | 312 | if (pd->uobject) |
306 | err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen); | 313 | err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen, |
314 | is_xrc); | ||
307 | else | 315 | else |
308 | err = create_srq_kernel(dev, srq, &in, buf_size, &inlen); | 316 | err = create_srq_kernel(dev, srq, &in, buf_size, &inlen, |
317 | is_xrc); | ||
309 | 318 | ||
310 | if (err) { | 319 | if (err) { |
311 | mlx5_ib_warn(dev, "create srq %s failed, err %d\n", | 320 | mlx5_ib_warn(dev, "create srq %s failed, err %d\n", |
@@ -313,7 +322,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, | |||
313 | goto err_srq; | 322 | goto err_srq; |
314 | } | 323 | } |
315 | 324 | ||
316 | is_xrc = (init_attr->srq_type == IB_SRQT_XRC); | ||
317 | in->ctx.state_log_sz = ilog2(srq->msrq.max); | 325 | in->ctx.state_log_sz = ilog2(srq->msrq.max); |
318 | flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24; | 326 | flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24; |
319 | xrcdn = 0; | 327 | xrcdn = 0; |