diff options
author | Kamal Heib <kamalheib1@gmail.com> | 2018-07-10 04:56:50 -0400 |
---|---|---|
committer | Jason Gunthorpe <jgg@mellanox.com> | 2018-07-11 16:16:13 -0400 |
commit | d63c46734c545ad0488761059004a65c46efdde3 (patch) | |
tree | 71e29caa5e21937df10c67a67a859bbd703d1294 | |
parent | fe48aecb4df837540f13b5216f27ddb306aaf4b9 (diff) |
RDMA/mlx5: Fix memory leak in mlx5_ib_create_srq() error path
Fix memory leak in the error path of mlx5_ib_create_srq() by making sure
to free the allocated srq.
Fixes: c2b37f76485f ("IB/mlx5: Fix integer overflows in mlx5_ib_create_srq")
Signed-off-by: Kamal Heib <kamalheib1@gmail.com>
Acked-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
-rw-r--r-- | drivers/infiniband/hw/mlx5/srq.c | 18 |
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c index 0af7b7905550..f5de5adc9b1a 100644 --- a/drivers/infiniband/hw/mlx5/srq.c +++ b/drivers/infiniband/hw/mlx5/srq.c | |||
@@ -266,18 +266,24 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd, | |||
266 | 266 | ||
267 | desc_size = sizeof(struct mlx5_wqe_srq_next_seg) + | 267 | desc_size = sizeof(struct mlx5_wqe_srq_next_seg) + |
268 | srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg); | 268 | srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg); |
269 | if (desc_size == 0 || srq->msrq.max_gs > desc_size) | 269 | if (desc_size == 0 || srq->msrq.max_gs > desc_size) { |
270 | return ERR_PTR(-EINVAL); | 270 | err = -EINVAL; |
271 | goto err_srq; | ||
272 | } | ||
271 | desc_size = roundup_pow_of_two(desc_size); | 273 | desc_size = roundup_pow_of_two(desc_size); |
272 | desc_size = max_t(size_t, 32, desc_size); | 274 | desc_size = max_t(size_t, 32, desc_size); |
273 | if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) | 275 | if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) { |
274 | return ERR_PTR(-EINVAL); | 276 | err = -EINVAL; |
277 | goto err_srq; | ||
278 | } | ||
275 | srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) / | 279 | srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) / |
276 | sizeof(struct mlx5_wqe_data_seg); | 280 | sizeof(struct mlx5_wqe_data_seg); |
277 | srq->msrq.wqe_shift = ilog2(desc_size); | 281 | srq->msrq.wqe_shift = ilog2(desc_size); |
278 | buf_size = srq->msrq.max * desc_size; | 282 | buf_size = srq->msrq.max * desc_size; |
279 | if (buf_size < desc_size) | 283 | if (buf_size < desc_size) { |
280 | return ERR_PTR(-EINVAL); | 284 | err = -EINVAL; |
285 | goto err_srq; | ||
286 | } | ||
281 | in.type = init_attr->srq_type; | 287 | in.type = init_attr->srq_type; |
282 | 288 | ||
283 | if (pd->uobject) | 289 | if (pd->uobject) |