Diffstat (limited to 'drivers/infiniband/hw/mlx5')
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c       10
-rw-r--r--  drivers/infiniband/hw/mlx5/ib_rep.c    9
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c     51
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h   7
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c       14
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c       34
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c      15
7 files changed, 88 insertions(+), 52 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index c4c7b82f4ac1..94a27d89a303 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -221,7 +221,6 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
 		wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
 		break;
 	}
-	wc->slid = be16_to_cpu(cqe->slid);
 	wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
 	wc->dlid_path_bits = cqe->ml_path;
 	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
@@ -236,10 +235,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
 	}
 
 	if (ll != IB_LINK_LAYER_ETHERNET) {
+		wc->slid = be16_to_cpu(cqe->slid);
 		wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
 		return;
 	}
 
+	wc->slid = 0;
 	vlan_present = cqe->l4_l3_hdr_type & 0x1;
 	roce_packet_type = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
 	if (vlan_present) {
@@ -1188,7 +1189,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 	if (ucmd.reserved0 || ucmd.reserved1)
 		return -EINVAL;
 
-	umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size,
+	/* check multiplication overflow */
+	if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)
+		return -EINVAL;
+
+	umem = ib_umem_get(context, ucmd.buf_addr,
+			   (size_t)ucmd.cqe_size * entries,
 			   IB_ACCESS_LOCAL_WRITE, 1);
 	if (IS_ERR(umem)) {
 		err = PTR_ERR(umem);
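
Note on the resize_user() hunk above: the added check rejects user-supplied (entries, cqe_size) pairs whose product would overflow before the length reaches ib_umem_get(). The following stand-alone userspace sketch shows the same guard pattern; it is illustrative only, uses a hypothetical helper name (umem_len_checked), and is not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch of the overflow guard added in resize_user():
 * reject (entries, cqe_size) pairs whose product does not fit in size_t.
 * Assumes entries >= 1, as on the kernel path. */
static int umem_len_checked(size_t entries, size_t cqe_size, size_t *len)
{
	if (cqe_size && SIZE_MAX / cqe_size <= entries - 1)
		return -1;	/* entries * cqe_size would overflow */
	*len = cqe_size * entries;
	return 0;
}

int main(void)
{
	size_t len;

	if (umem_len_checked(1 << 20, 64, &len) == 0)
		printf("ok: %zu bytes\n", len);
	if (umem_len_checked(SIZE_MAX / 2, 64, &len) != 0)
		printf("rejected: multiplication would overflow\n");
	return 0;
}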
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 61cc3d7db257..0e04fdddf670 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -30,12 +30,15 @@ static const struct mlx5_ib_profile rep_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
 		     mlx5_ib_stage_bfrag_init,
 		     mlx5_ib_stage_bfrag_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
+		     NULL,
+		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
 		     mlx5_ib_stage_ib_reg_init,
 		     mlx5_ib_stage_ib_reg_cleanup),
-	STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
-		     mlx5_ib_stage_umr_res_init,
-		     mlx5_ib_stage_umr_res_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
+		     mlx5_ib_stage_post_ib_reg_umr_init,
+		     NULL),
 	STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
 		     mlx5_ib_stage_class_attr_init,
 		     NULL),
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d9474b95d8e5..390e4375647e 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -256,12 +256,16 @@ struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
 	struct mlx5_ib_multiport_info *mpi;
 	struct mlx5_ib_port *port;
 
+	if (!mlx5_core_mp_enabled(ibdev->mdev) ||
+	    ll != IB_LINK_LAYER_ETHERNET) {
+		if (native_port_num)
+			*native_port_num = ib_port_num;
+		return ibdev->mdev;
+	}
+
 	if (native_port_num)
 		*native_port_num = 1;
 
-	if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
-		return ibdev->mdev;
-
 	port = &ibdev->port[ib_port_num - 1];
 	if (!port)
 		return NULL;
@@ -3297,7 +3301,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 	struct mlx5_ib_dev *ibdev;
 	struct ib_event ibev;
 	bool fatal = false;
-	u8 port = 0;
+	u8 port = (u8)work->param;
 
 	if (mlx5_core_is_mp_slave(work->dev)) {
 		ibdev = mlx5_ib_get_ibdev_from_mpi(work->context);
@@ -3317,8 +3321,6 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 	case MLX5_DEV_EVENT_PORT_UP:
 	case MLX5_DEV_EVENT_PORT_DOWN:
 	case MLX5_DEV_EVENT_PORT_INITIALIZED:
-		port = (u8)work->param;
-
 		/* In RoCE, port up/down events are handled in
 		 * mlx5_netdev_event().
 		 */
@@ -3332,24 +3334,19 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 
 	case MLX5_DEV_EVENT_LID_CHANGE:
 		ibev.event = IB_EVENT_LID_CHANGE;
-		port = (u8)work->param;
 		break;
 
 	case MLX5_DEV_EVENT_PKEY_CHANGE:
 		ibev.event = IB_EVENT_PKEY_CHANGE;
-		port = (u8)work->param;
-
 		schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work);
 		break;
 
 	case MLX5_DEV_EVENT_GUID_CHANGE:
 		ibev.event = IB_EVENT_GID_CHANGE;
-		port = (u8)work->param;
 		break;
 
 	case MLX5_DEV_EVENT_CLIENT_REREG:
 		ibev.event = IB_EVENT_CLIENT_REREGISTER;
-		port = (u8)work->param;
 		break;
 	case MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT:
 		schedule_work(&ibdev->delay_drop.delay_drop_work);
@@ -3361,7 +3358,7 @@ static void mlx5_ib_handle_event(struct work_struct *_work)
 	ibev.device = &ibdev->ib_dev;
 	ibev.element.port_num = port;
 
-	if (port < 1 || port > ibdev->num_ports) {
+	if (!rdma_is_port_valid(&ibdev->ib_dev, port)) {
 		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
 		goto out;
 	}
@@ -4999,19 +4996,19 @@ int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
 	return ib_register_device(&dev->ib_dev, NULL);
 }
 
-void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
+void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
 {
-	ib_unregister_device(&dev->ib_dev);
+	destroy_umrc_res(dev);
 }
 
-int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev)
+void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
 {
-	return create_umr_res(dev);
+	ib_unregister_device(&dev->ib_dev);
 }
 
-void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev)
+int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
 {
-	destroy_umrc_res(dev);
+	return create_umr_res(dev);
 }
 
 static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
@@ -5130,12 +5127,15 @@ static const struct mlx5_ib_profile pf_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
 		     mlx5_ib_stage_bfrag_init,
 		     mlx5_ib_stage_bfrag_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
+		     NULL,
+		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
 		     mlx5_ib_stage_ib_reg_init,
 		     mlx5_ib_stage_ib_reg_cleanup),
-	STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
-		     mlx5_ib_stage_umr_res_init,
-		     mlx5_ib_stage_umr_res_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
+		     mlx5_ib_stage_post_ib_reg_umr_init,
+		     NULL),
 	STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
 		     mlx5_ib_stage_delay_drop_init,
 		     mlx5_ib_stage_delay_drop_cleanup),
@@ -5172,12 +5172,15 @@ static const struct mlx5_ib_profile nic_rep_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_BFREG,
 		     mlx5_ib_stage_bfrag_init,
 		     mlx5_ib_stage_bfrag_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
+		     NULL,
+		     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
 	STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
 		     mlx5_ib_stage_ib_reg_init,
 		     mlx5_ib_stage_ib_reg_cleanup),
-	STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
-		     mlx5_ib_stage_umr_res_init,
-		     mlx5_ib_stage_umr_res_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
+		     mlx5_ib_stage_post_ib_reg_umr_init,
+		     NULL),
 	STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
 		     mlx5_ib_stage_class_attr_init,
 		     NULL),
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index e0bad28e0f09..c33bf1523d67 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -742,8 +742,9 @@ enum mlx5_ib_stages {
 	MLX5_IB_STAGE_CONG_DEBUGFS,
 	MLX5_IB_STAGE_UAR,
 	MLX5_IB_STAGE_BFREG,
+	MLX5_IB_STAGE_PRE_IB_REG_UMR,
 	MLX5_IB_STAGE_IB_REG,
-	MLX5_IB_STAGE_UMR_RESOURCES,
+	MLX5_IB_STAGE_POST_IB_REG_UMR,
 	MLX5_IB_STAGE_DELAY_DROP,
 	MLX5_IB_STAGE_CLASS_ATTR,
 	MLX5_IB_STAGE_REP_REG,
@@ -1068,10 +1069,10 @@ int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev);
 void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev);
 int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev);
 void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev);
+void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev);
 int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev);
 void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev);
-int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev);
-void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev);
+int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev);
 int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev);
 void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
 		      const struct mlx5_ib_profile *profile,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index a5fad3e87ff7..95a36e9ea552 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -839,7 +839,8 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
 	*umem = ib_umem_get(pd->uobject->context, start, length,
 			   access_flags, 0);
 	err = PTR_ERR_OR_ZERO(*umem);
-	if (err < 0) {
+	if (err) {
+		*umem = NULL;
 		mlx5_ib_err(dev, "umem get failed (%d)\n", err);
 		return err;
 	}
@@ -1416,6 +1417,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 	if (err) {
 		mlx5_ib_warn(dev, "Failed to rereg UMR\n");
 		ib_umem_release(mr->umem);
+		mr->umem = NULL;
 		clean_mr(dev, mr);
 		return err;
 	}
@@ -1499,14 +1501,11 @@ static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		u32 key = mr->mmkey.key;
 
 		err = destroy_mkey(dev, mr);
-		kfree(mr);
 		if (err) {
 			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
 				     key, err);
 			return err;
 		}
-	} else {
-		mlx5_mr_cache_free(dev, mr);
 	}
 
 	return 0;
@@ -1549,6 +1548,11 @@ static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
 		atomic_sub(npages, &dev->mdev->priv.reg_pages);
 	}
 
+	if (!mr->allocated_from_cache)
+		kfree(mr);
+	else
+		mlx5_mr_cache_free(dev, mr);
+
 	return 0;
 }
 
@@ -1817,7 +1821,6 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
 
 	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
 	mr->ibmr.length = 0;
-	mr->ndescs = sg_nents;
 
 	for_each_sg(sgl, sg, sg_nents, i) {
 		if (unlikely(i >= mr->max_descs))
@@ -1829,6 +1832,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
 
 		sg_offset = 0;
 	}
+	mr->ndescs = i;
 
 	if (sg_offset_p)
 		*sg_offset_p = sg_offset;
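
The last two mr.c hunks move the mr->ndescs assignment so it records how many scatter entries the loop actually consumed (the loop stops at mr->max_descs) instead of trusting sg_nents up front. Below is a stand-alone sketch of that counting pattern; the names (map_descs) are hypothetical and nothing is assumed about the real mlx5 data structures.

#include <stdio.h>

/* Illustrative only: write up to max_descs entries and report how many were
 * actually consumed, mirroring the "mr->ndescs = i" fix in mlx5_ib_sg_to_klms(). */
static int map_descs(const unsigned int *lengths, int nents,
		     unsigned int *descs, int max_descs)
{
	int i;

	for (i = 0; i < nents; i++) {
		if (i >= max_descs)
			break;		/* table full: stop, do not overcount */
		descs[i] = lengths[i];
	}
	return i;			/* number of descriptors written */
}

int main(void)
{
	unsigned int lengths[] = { 4096, 4096, 8192, 4096 };
	unsigned int descs[2];
	int n = map_descs(lengths, 4, descs, 2);

	printf("mapped %d of 4 entries\n", n);	/* prints 2, not 4 */
	return 0;
}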
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 0e67e3682bca..85c612ac547a 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1177,7 +1177,7 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
 	ib_umem_release(sq->ubuffer.umem);
 }
 
-static int get_rq_pas_size(void *qpc)
+static size_t get_rq_pas_size(void *qpc)
 {
 	u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;
 	u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride);
@@ -1193,7 +1193,8 @@ static int get_rq_pas_size(void *qpc)
 }
 
 static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
-				   struct mlx5_ib_rq *rq, void *qpin)
+				   struct mlx5_ib_rq *rq, void *qpin,
+				   size_t qpinlen)
 {
 	struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
 	__be64 *pas;
@@ -1202,9 +1203,12 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
 	void *rqc;
 	void *wq;
 	void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
-	int inlen;
+	size_t rq_pas_size = get_rq_pas_size(qpc);
+	size_t inlen;
 	int err;
-	u32 rq_pas_size = get_rq_pas_size(qpc);
+
+	if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas))
+		return -EINVAL;
 
 	inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
 	in = kvzalloc(inlen, GFP_KERNEL);
@@ -1297,7 +1301,7 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
 }
 
 static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
-				u32 *in,
+				u32 *in, size_t inlen,
 				struct ib_pd *pd)
 {
 	struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
@@ -1329,7 +1333,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 		rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
 	if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
 		rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
-	err = create_raw_packet_qp_rq(dev, rq, in);
+	err = create_raw_packet_qp_rq(dev, rq, in, inlen);
 	if (err)
 		goto err_destroy_sq;
 
@@ -1608,6 +1612,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	u32 uidx = MLX5_IB_DEFAULT_UIDX;
 	struct mlx5_ib_create_qp ucmd;
 	struct mlx5_ib_qp_base *base;
+	int mlx5_st;
 	void *qpc;
 	u32 *in;
 	int err;
@@ -1616,6 +1621,10 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	spin_lock_init(&qp->sq.lock);
 	spin_lock_init(&qp->rq.lock);
 
+	mlx5_st = to_mlx5_st(init_attr->qp_type);
+	if (mlx5_st < 0)
+		return -EINVAL;
+
 	if (init_attr->rwq_ind_tbl) {
 		if (!udata)
 			return -ENOSYS;
@@ -1777,7 +1786,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 
 	qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
 
-	MLX5_SET(qpc, qpc, st, to_mlx5_st(init_attr->qp_type));
+	MLX5_SET(qpc, qpc, st, mlx5_st);
 	MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
 
 	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
@@ -1891,11 +1900,16 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		}
 	}
 
+	if (inlen < 0) {
+		err = -EINVAL;
+		goto err;
+	}
+
 	if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
 	    qp->flags & MLX5_IB_QP_UNDERLAY) {
 		qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
 		raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
-		err = create_raw_packet_qp(dev, qp, in, pd);
+		err = create_raw_packet_qp(dev, qp, in, inlen, pd);
 	} else {
 		err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
 	}
@@ -3116,8 +3130,10 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 		goto out;
 
 	if (mlx5_cur >= MLX5_QP_NUM_STATE || mlx5_new >= MLX5_QP_NUM_STATE ||
-	    !optab[mlx5_cur][mlx5_new])
+	    !optab[mlx5_cur][mlx5_new]) {
+		err = -EINVAL;
 		goto out;
+	}
 
 	op = optab[mlx5_cur][mlx5_new];
 	optpar = ib_mask_to_mlx5_opt(attr_mask);
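
The final qp.c hunk makes __mlx5_ib_modify_qp() return -EINVAL when the transition table has no opcode for the requested state change, instead of jumping to the exit path with a stale error value. A stand-alone sketch of that table-lookup-with-validation pattern follows; the states, opcodes, and lookup_op helper are hypothetical, not the driver's real tables.

#include <errno.h>
#include <stdio.h>

/* Illustrative only: a transition table where a zero entry means "unsupported";
 * the lookup fails loudly rather than silently proceeding. */
enum { ST_RESET, ST_INIT, ST_RTR, NUM_STATES };

static const int optab[NUM_STATES][NUM_STATES] = {
	[ST_RESET][ST_INIT] = 1,	/* nonzero values stand in for opcodes */
	[ST_INIT][ST_RTR]   = 2,
};

static int lookup_op(int cur, int next)
{
	if (cur >= NUM_STATES || next >= NUM_STATES || !optab[cur][next])
		return -EINVAL;		/* unsupported transition */
	return optab[cur][next];
}

int main(void)
{
	printf("RESET->INIT: %d\n", lookup_op(ST_RESET, ST_INIT));	/* opcode 1 */
	printf("RESET->RTR:  %d\n", lookup_op(ST_RESET, ST_RTR));	/* -EINVAL  */
	return 0;
}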
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 6d5fadad9090..3c7522d025f2 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -241,8 +241,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 	struct mlx5_ib_srq *srq;
-	int desc_size;
-	int buf_size;
+	size_t desc_size;
+	size_t buf_size;
 	int err;
 	struct mlx5_srq_attr in = {0};
 	__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
@@ -266,15 +266,18 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 
 	desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
 		    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
+	if (desc_size == 0 || srq->msrq.max_gs > desc_size)
+		return ERR_PTR(-EINVAL);
 	desc_size = roundup_pow_of_two(desc_size);
-	desc_size = max_t(int, 32, desc_size);
+	desc_size = max_t(size_t, 32, desc_size);
+	if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
+		return ERR_PTR(-EINVAL);
 	srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
 		sizeof(struct mlx5_wqe_data_seg);
 	srq->msrq.wqe_shift = ilog2(desc_size);
 	buf_size = srq->msrq.max * desc_size;
-	mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n",
-		    desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
-		    srq->msrq.max_avail_gather);
+	if (buf_size < desc_size)
+		return ERR_PTR(-EINVAL);
 	in.type = init_attr->srq_type;
 
 	if (pd->uobject)