about summary refs log tree commit diff stats
path: root/drivers/infiniband/hw/mlx5/qp.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/infiniband/hw/mlx5/qp.c')
-rw-r--r-- drivers/infiniband/hw/mlx5/qp.c | 89
1 file changed, 41 insertions(+), 48 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index d35f62d4f4c5..203c8a45e095 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -220,13 +220,11 @@ static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
220static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, 220static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
221 int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd) 221 int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
222{ 222{
223 struct mlx5_general_caps *gen;
224 int wqe_size; 223 int wqe_size;
225 int wq_size; 224 int wq_size;
226 225
227 gen = &dev->mdev->caps.gen;
228 /* Sanity check RQ size before proceeding */ 226 /* Sanity check RQ size before proceeding */
229 if (cap->max_recv_wr > gen->max_wqes) 227 if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
230 return -EINVAL; 228 return -EINVAL;
231 229
232 if (!has_rq) { 230 if (!has_rq) {
@@ -246,10 +244,11 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
246 wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size; 244 wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
247 wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB); 245 wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
248 qp->rq.wqe_cnt = wq_size / wqe_size; 246 qp->rq.wqe_cnt = wq_size / wqe_size;
249 if (wqe_size > gen->max_rq_desc_sz) { 247 if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
250 mlx5_ib_dbg(dev, "wqe_size %d, max %d\n", 248 mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
251 wqe_size, 249 wqe_size,
252 gen->max_rq_desc_sz); 250 MLX5_CAP_GEN(dev->mdev,
251 max_wqe_sz_rq));
253 return -EINVAL; 252 return -EINVAL;
254 } 253 }
255 qp->rq.wqe_shift = ilog2(wqe_size); 254 qp->rq.wqe_shift = ilog2(wqe_size);
@@ -330,11 +329,9 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
330static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr, 329static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
331 struct mlx5_ib_qp *qp) 330 struct mlx5_ib_qp *qp)
332{ 331{
333 struct mlx5_general_caps *gen;
334 int wqe_size; 332 int wqe_size;
335 int wq_size; 333 int wq_size;
336 334
337 gen = &dev->mdev->caps.gen;
338 if (!attr->cap.max_send_wr) 335 if (!attr->cap.max_send_wr)
339 return 0; 336 return 0;
340 337
@@ -343,9 +340,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
343 if (wqe_size < 0) 340 if (wqe_size < 0)
344 return wqe_size; 341 return wqe_size;
345 342
346 if (wqe_size > gen->max_sq_desc_sz) { 343 if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
347 mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n", 344 mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
348 wqe_size, gen->max_sq_desc_sz); 345 wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
349 return -EINVAL; 346 return -EINVAL;
350 } 347 }
351 348
@@ -358,9 +355,10 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
358 355
359 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size); 356 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
360 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB; 357 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
361 if (qp->sq.wqe_cnt > gen->max_wqes) { 358 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
362 mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n", 359 mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
363 qp->sq.wqe_cnt, gen->max_wqes); 360 qp->sq.wqe_cnt,
361 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
364 return -ENOMEM; 362 return -ENOMEM;
365 } 363 }
366 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); 364 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
@@ -375,13 +373,11 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
375 struct mlx5_ib_qp *qp, 373 struct mlx5_ib_qp *qp,
376 struct mlx5_ib_create_qp *ucmd) 374 struct mlx5_ib_create_qp *ucmd)
377{ 375{
378 struct mlx5_general_caps *gen;
379 int desc_sz = 1 << qp->sq.wqe_shift; 376 int desc_sz = 1 << qp->sq.wqe_shift;
380 377
381 gen = &dev->mdev->caps.gen; 378 if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
382 if (desc_sz > gen->max_sq_desc_sz) {
383 mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n", 379 mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
384 desc_sz, gen->max_sq_desc_sz); 380 desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
385 return -EINVAL; 381 return -EINVAL;
386 } 382 }
387 383
@@ -393,9 +389,10 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
393 389
394 qp->sq.wqe_cnt = ucmd->sq_wqe_count; 390 qp->sq.wqe_cnt = ucmd->sq_wqe_count;
395 391
396 if (qp->sq.wqe_cnt > gen->max_wqes) { 392 if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
397 mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n", 393 mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
398 qp->sq.wqe_cnt, gen->max_wqes); 394 qp->sq.wqe_cnt,
395 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
399 return -EINVAL; 396 return -EINVAL;
400 } 397 }
401 398
@@ -768,7 +765,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
768 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; 765 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
769 qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift); 766 qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
770 767
771 err = mlx5_buf_alloc(dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf); 768 err = mlx5_buf_alloc(dev->mdev, qp->buf_size, &qp->buf);
772 if (err) { 769 if (err) {
773 mlx5_ib_dbg(dev, "err %d\n", err); 770 mlx5_ib_dbg(dev, "err %d\n", err);
774 goto err_uuar; 771 goto err_uuar;
@@ -866,22 +863,21 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
866 struct ib_udata *udata, struct mlx5_ib_qp *qp) 863 struct ib_udata *udata, struct mlx5_ib_qp *qp)
867{ 864{
868 struct mlx5_ib_resources *devr = &dev->devr; 865 struct mlx5_ib_resources *devr = &dev->devr;
866 struct mlx5_core_dev *mdev = dev->mdev;
869 struct mlx5_ib_create_qp_resp resp; 867 struct mlx5_ib_create_qp_resp resp;
870 struct mlx5_create_qp_mbox_in *in; 868 struct mlx5_create_qp_mbox_in *in;
871 struct mlx5_general_caps *gen;
872 struct mlx5_ib_create_qp ucmd; 869 struct mlx5_ib_create_qp ucmd;
873 int inlen = sizeof(*in); 870 int inlen = sizeof(*in);
874 int err; 871 int err;
875 872
876 mlx5_ib_odp_create_qp(qp); 873 mlx5_ib_odp_create_qp(qp);
877 874
878 gen = &dev->mdev->caps.gen;
879 mutex_init(&qp->mutex); 875 mutex_init(&qp->mutex);
880 spin_lock_init(&qp->sq.lock); 876 spin_lock_init(&qp->sq.lock);
881 spin_lock_init(&qp->rq.lock); 877 spin_lock_init(&qp->rq.lock);
882 878
883 if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) { 879 if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
884 if (!(gen->flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) { 880 if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
885 mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n"); 881 mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
886 return -EINVAL; 882 return -EINVAL;
887 } else { 883 } else {
@@ -914,15 +910,17 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
914 910
915 if (pd) { 911 if (pd) {
916 if (pd->uobject) { 912 if (pd->uobject) {
913 __u32 max_wqes =
914 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
917 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count); 915 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
918 if (ucmd.rq_wqe_shift != qp->rq.wqe_shift || 916 if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
919 ucmd.rq_wqe_count != qp->rq.wqe_cnt) { 917 ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
920 mlx5_ib_dbg(dev, "invalid rq params\n"); 918 mlx5_ib_dbg(dev, "invalid rq params\n");
921 return -EINVAL; 919 return -EINVAL;
922 } 920 }
923 if (ucmd.sq_wqe_count > gen->max_wqes) { 921 if (ucmd.sq_wqe_count > max_wqes) {
924 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n", 922 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
925 ucmd.sq_wqe_count, gen->max_wqes); 923 ucmd.sq_wqe_count, max_wqes);
926 return -EINVAL; 924 return -EINVAL;
927 } 925 }
928 err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen); 926 err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
@@ -1014,7 +1012,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
1014 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn); 1012 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
1015 } else { 1013 } else {
1016 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn); 1014 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
1017 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn); 1015 in->ctx.rq_type_srqn |=
1016 cpu_to_be32(to_msrq(devr->s1)->msrq.srqn);
1018 } 1017 }
1019 } 1018 }
1020 1019
@@ -1226,7 +1225,6 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
1226 struct ib_qp_init_attr *init_attr, 1225 struct ib_qp_init_attr *init_attr,
1227 struct ib_udata *udata) 1226 struct ib_udata *udata)
1228{ 1227{
1229 struct mlx5_general_caps *gen;
1230 struct mlx5_ib_dev *dev; 1228 struct mlx5_ib_dev *dev;
1231 struct mlx5_ib_qp *qp; 1229 struct mlx5_ib_qp *qp;
1232 u16 xrcdn = 0; 1230 u16 xrcdn = 0;
@@ -1244,12 +1242,11 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
1244 } 1242 }
1245 dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device); 1243 dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
1246 } 1244 }
1247 gen = &dev->mdev->caps.gen;
1248 1245
1249 switch (init_attr->qp_type) { 1246 switch (init_attr->qp_type) {
1250 case IB_QPT_XRC_TGT: 1247 case IB_QPT_XRC_TGT:
1251 case IB_QPT_XRC_INI: 1248 case IB_QPT_XRC_INI:
1252 if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC)) { 1249 if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
1253 mlx5_ib_dbg(dev, "XRC not supported\n"); 1250 mlx5_ib_dbg(dev, "XRC not supported\n");
1254 return ERR_PTR(-ENOSYS); 1251 return ERR_PTR(-ENOSYS);
1255 } 1252 }
@@ -1356,9 +1353,6 @@ enum {
1356 1353
1357static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate) 1354static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
1358{ 1355{
1359 struct mlx5_general_caps *gen;
1360
1361 gen = &dev->mdev->caps.gen;
1362 if (rate == IB_RATE_PORT_CURRENT) { 1356 if (rate == IB_RATE_PORT_CURRENT) {
1363 return 0; 1357 return 0;
1364 } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) { 1358 } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
@@ -1366,7 +1360,7 @@ static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
1366 } else { 1360 } else {
1367 while (rate != IB_RATE_2_5_GBPS && 1361 while (rate != IB_RATE_2_5_GBPS &&
1368 !(1 << (rate + MLX5_STAT_RATE_OFFSET) & 1362 !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
1369 gen->stat_rate_support)) 1363 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
1370 --rate; 1364 --rate;
1371 } 1365 }
1372 1366
@@ -1377,10 +1371,8 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
1377 struct mlx5_qp_path *path, u8 port, int attr_mask, 1371 struct mlx5_qp_path *path, u8 port, int attr_mask,
1378 u32 path_flags, const struct ib_qp_attr *attr) 1372 u32 path_flags, const struct ib_qp_attr *attr)
1379{ 1373{
1380 struct mlx5_general_caps *gen;
1381 int err; 1374 int err;
1382 1375
1383 gen = &dev->mdev->caps.gen;
1384 path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0; 1376 path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
1385 path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0; 1377 path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;
1386 1378
@@ -1391,9 +1383,11 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
1391 path->rlid = cpu_to_be16(ah->dlid); 1383 path->rlid = cpu_to_be16(ah->dlid);
1392 1384
1393 if (ah->ah_flags & IB_AH_GRH) { 1385 if (ah->ah_flags & IB_AH_GRH) {
1394 if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) { 1386 if (ah->grh.sgid_index >=
1387 dev->mdev->port_caps[port - 1].gid_table_len) {
1395 pr_err("sgid_index (%u) too large. max is %d\n", 1388 pr_err("sgid_index (%u) too large. max is %d\n",
1396 ah->grh.sgid_index, gen->port[port - 1].gid_table_len); 1389 ah->grh.sgid_index,
1390 dev->mdev->port_caps[port - 1].gid_table_len);
1397 return -EINVAL; 1391 return -EINVAL;
1398 } 1392 }
1399 path->grh_mlid |= 1 << 7; 1393 path->grh_mlid |= 1 << 7;
@@ -1570,7 +1564,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
1570 struct mlx5_ib_qp *qp = to_mqp(ibqp); 1564 struct mlx5_ib_qp *qp = to_mqp(ibqp);
1571 struct mlx5_ib_cq *send_cq, *recv_cq; 1565 struct mlx5_ib_cq *send_cq, *recv_cq;
1572 struct mlx5_qp_context *context; 1566 struct mlx5_qp_context *context;
1573 struct mlx5_general_caps *gen;
1574 struct mlx5_modify_qp_mbox_in *in; 1567 struct mlx5_modify_qp_mbox_in *in;
1575 struct mlx5_ib_pd *pd; 1568 struct mlx5_ib_pd *pd;
1576 enum mlx5_qp_state mlx5_cur, mlx5_new; 1569 enum mlx5_qp_state mlx5_cur, mlx5_new;
@@ -1579,7 +1572,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
1579 int mlx5_st; 1572 int mlx5_st;
1580 int err; 1573 int err;
1581 1574
1582 gen = &dev->mdev->caps.gen;
1583 in = kzalloc(sizeof(*in), GFP_KERNEL); 1575 in = kzalloc(sizeof(*in), GFP_KERNEL);
1584 if (!in) 1576 if (!in)
1585 return -ENOMEM; 1577 return -ENOMEM;
@@ -1619,7 +1611,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
1619 err = -EINVAL; 1611 err = -EINVAL;
1620 goto out; 1612 goto out;
1621 } 1613 }
1622 context->mtu_msgmax = (attr->path_mtu << 5) | gen->log_max_msg; 1614 context->mtu_msgmax = (attr->path_mtu << 5) |
1615 (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
1623 } 1616 }
1624 1617
1625 if (attr_mask & IB_QP_DEST_QPN) 1618 if (attr_mask & IB_QP_DEST_QPN)
@@ -1777,11 +1770,9 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1777 struct mlx5_ib_dev *dev = to_mdev(ibqp->device); 1770 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1778 struct mlx5_ib_qp *qp = to_mqp(ibqp); 1771 struct mlx5_ib_qp *qp = to_mqp(ibqp);
1779 enum ib_qp_state cur_state, new_state; 1772 enum ib_qp_state cur_state, new_state;
1780 struct mlx5_general_caps *gen;
1781 int err = -EINVAL; 1773 int err = -EINVAL;
1782 int port; 1774 int port;
1783 1775
1784 gen = &dev->mdev->caps.gen;
1785 mutex_lock(&qp->mutex); 1776 mutex_lock(&qp->mutex);
1786 1777
1787 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state; 1778 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
@@ -1793,21 +1784,25 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1793 goto out; 1784 goto out;
1794 1785
1795 if ((attr_mask & IB_QP_PORT) && 1786 if ((attr_mask & IB_QP_PORT) &&
1796 (attr->port_num == 0 || attr->port_num > gen->num_ports)) 1787 (attr->port_num == 0 ||
1788 attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)))
1797 goto out; 1789 goto out;
1798 1790
1799 if (attr_mask & IB_QP_PKEY_INDEX) { 1791 if (attr_mask & IB_QP_PKEY_INDEX) {
1800 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port; 1792 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1801 if (attr->pkey_index >= gen->port[port - 1].pkey_table_len) 1793 if (attr->pkey_index >=
1794 dev->mdev->port_caps[port - 1].pkey_table_len)
1802 goto out; 1795 goto out;
1803 } 1796 }
1804 1797
1805 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && 1798 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
1806 attr->max_rd_atomic > (1 << gen->log_max_ra_res_qp)) 1799 attr->max_rd_atomic >
1800 (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp)))
1807 goto out; 1801 goto out;
1808 1802
1809 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && 1803 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
1810 attr->max_dest_rd_atomic > (1 << gen->log_max_ra_req_qp)) 1804 attr->max_dest_rd_atomic >
1805 (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp)))
1811 goto out; 1806 goto out;
1812 1807
1813 if (cur_state == new_state && cur_state == IB_QPS_RESET) { 1808 if (cur_state == new_state && cur_state == IB_QPS_RESET) {
@@ -3009,7 +3004,7 @@ static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_at
3009 ib_ah_attr->port_num = path->port; 3004 ib_ah_attr->port_num = path->port;
3010 3005
3011 if (ib_ah_attr->port_num == 0 || 3006 if (ib_ah_attr->port_num == 0 ||
3012 ib_ah_attr->port_num > dev->caps.gen.num_ports) 3007 ib_ah_attr->port_num > MLX5_CAP_GEN(dev, num_ports))
3013 return; 3008 return;
3014 3009
3015 ib_ah_attr->sl = path->sl & 0xf; 3010 ib_ah_attr->sl = path->sl & 0xf;
@@ -3135,12 +3130,10 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
3135 struct ib_udata *udata) 3130 struct ib_udata *udata)
3136{ 3131{
3137 struct mlx5_ib_dev *dev = to_mdev(ibdev); 3132 struct mlx5_ib_dev *dev = to_mdev(ibdev);
3138 struct mlx5_general_caps *gen;
3139 struct mlx5_ib_xrcd *xrcd; 3133 struct mlx5_ib_xrcd *xrcd;
3140 int err; 3134 int err;
3141 3135
3142 gen = &dev->mdev->caps.gen; 3136 if (!MLX5_CAP_GEN(dev->mdev, xrc))
3143 if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC))
3144 return ERR_PTR(-ENOSYS); 3137 return ERR_PTR(-ENOSYS);
3145 3138
3146 xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL); 3139 xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);