author		Eli Cohen <eli@mellanox.com>	2014-10-02 05:19:42 -0400
committer	David S. Miller <davem@davemloft.net>	2014-10-03 18:42:31 -0400
commit		c7a08ac7ee68b9af0d5af99c7b34b574cac4d144
tree		5c8ee3cd2058f2230df4b5c9fe21f44d47cfbf7b /drivers/infiniband/hw/mlx5/qp.c
parent		55a93b3ea780908b7d1b3a8cf1976223a9268d78
net/mlx5_core: Update device capabilities handling
Rearrange struct mlx5_caps so it has a "gen" field to represent the current
capabilities configured for the device. Max capabilities can also be queried
from the device. Also update the capabilities struct to contain more fields
as per the latest revision of the firmware specification.

Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
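The pattern repeated throughout the diff below is mechanical: instead of
reading capability fields directly off dev->mdev->caps, each function takes a
pointer to the new caps.gen group once and reads limits through it. A minimal
sketch of the before/after shape (check_rq_wr is a hypothetical helper used
only for illustration; the field names come from the diff itself):

	/* Hypothetical helper illustrating the access-pattern change this
	 * patch applies throughout qp.c.  "gen" is the new general-caps
	 * group inside struct mlx5_caps. */
	static int check_rq_wr(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap)
	{
		struct mlx5_general_caps *gen = &dev->mdev->caps.gen;

		/* was: if (cap->max_recv_wr > dev->mdev->caps.max_wqes) */
		if (cap->max_recv_wr > gen->max_wqes)
			return -EINVAL;
		return 0;
	}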
Diffstat (limited to 'drivers/infiniband/hw/mlx5/qp.c')
-rw-r--r--	drivers/infiniband/hw/mlx5/qp.c	72
1 file changed, 47 insertions(+), 25 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 8c574b63d77b..dbfe498870c1 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -158,11 +158,13 @@ static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
 static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
 		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
 {
+	struct mlx5_general_caps *gen;
 	int wqe_size;
 	int wq_size;
 
+	gen = &dev->mdev->caps.gen;
 	/* Sanity check RQ size before proceeding */
-	if (cap->max_recv_wr > dev->mdev->caps.max_wqes)
+	if (cap->max_recv_wr > gen->max_wqes)
 		return -EINVAL;
 
 	if (!has_rq) {
@@ -182,10 +184,10 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
 		wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
 		wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
 		qp->rq.wqe_cnt = wq_size / wqe_size;
-		if (wqe_size > dev->mdev->caps.max_rq_desc_sz) {
+		if (wqe_size > gen->max_rq_desc_sz) {
 			mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
 				    wqe_size,
-				    dev->mdev->caps.max_rq_desc_sz);
+				    gen->max_rq_desc_sz);
 			return -EINVAL;
 		}
 		qp->rq.wqe_shift = ilog2(wqe_size);
@@ -266,9 +268,11 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
 static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 			struct mlx5_ib_qp *qp)
 {
+	struct mlx5_general_caps *gen;
 	int wqe_size;
 	int wq_size;
 
+	gen = &dev->mdev->caps.gen;
 	if (!attr->cap.max_send_wr)
 		return 0;
 
@@ -277,9 +281,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 	if (wqe_size < 0)
 		return wqe_size;
 
-	if (wqe_size > dev->mdev->caps.max_sq_desc_sz) {
+	if (wqe_size > gen->max_sq_desc_sz) {
 		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
-			    wqe_size, dev->mdev->caps.max_sq_desc_sz);
+			    wqe_size, gen->max_sq_desc_sz);
 		return -EINVAL;
 	}
 
@@ -292,9 +296,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 
 	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
 	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
-	if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) {
+	if (qp->sq.wqe_cnt > gen->max_wqes) {
 		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
-			    qp->sq.wqe_cnt, dev->mdev->caps.max_wqes);
+			    qp->sq.wqe_cnt, gen->max_wqes);
 		return -ENOMEM;
 	}
 	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
@@ -309,11 +313,13 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
 			    struct mlx5_ib_qp *qp,
 			    struct mlx5_ib_create_qp *ucmd)
 {
+	struct mlx5_general_caps *gen;
 	int desc_sz = 1 << qp->sq.wqe_shift;
 
-	if (desc_sz > dev->mdev->caps.max_sq_desc_sz) {
+	gen = &dev->mdev->caps.gen;
+	if (desc_sz > gen->max_sq_desc_sz) {
 		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
-			     desc_sz, dev->mdev->caps.max_sq_desc_sz);
+			     desc_sz, gen->max_sq_desc_sz);
 		return -EINVAL;
 	}
 
@@ -325,9 +331,9 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
 
 	qp->sq.wqe_cnt = ucmd->sq_wqe_count;
 
-	if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) {
+	if (qp->sq.wqe_cnt > gen->max_wqes) {
 		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
-			     qp->sq.wqe_cnt, dev->mdev->caps.max_wqes);
+			     qp->sq.wqe_cnt, gen->max_wqes);
 		return -EINVAL;
 	}
 
@@ -803,16 +809,18 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	struct mlx5_ib_resources *devr = &dev->devr;
 	struct mlx5_ib_create_qp_resp resp;
 	struct mlx5_create_qp_mbox_in *in;
+	struct mlx5_general_caps *gen;
 	struct mlx5_ib_create_qp ucmd;
 	int inlen = sizeof(*in);
 	int err;
 
+	gen = &dev->mdev->caps.gen;
 	mutex_init(&qp->mutex);
 	spin_lock_init(&qp->sq.lock);
 	spin_lock_init(&qp->rq.lock);
 
 	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
-		if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
+		if (!(gen->flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
 			mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
 			return -EINVAL;
 		} else {
@@ -851,9 +859,9 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 			mlx5_ib_dbg(dev, "invalid rq params\n");
 			return -EINVAL;
 		}
-		if (ucmd.sq_wqe_count > dev->mdev->caps.max_wqes) {
+		if (ucmd.sq_wqe_count > gen->max_wqes) {
 			mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
-				    ucmd.sq_wqe_count, dev->mdev->caps.max_wqes);
+				    ucmd.sq_wqe_count, gen->max_wqes);
 			return -EINVAL;
 		}
 		err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
@@ -1144,6 +1152,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
 				struct ib_qp_init_attr *init_attr,
 				struct ib_udata *udata)
 {
+	struct mlx5_general_caps *gen;
 	struct mlx5_ib_dev *dev;
 	struct mlx5_ib_qp *qp;
 	u16 xrcdn = 0;
@@ -1161,11 +1170,12 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
 		}
 		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
 	}
+	gen = &dev->mdev->caps.gen;
 
 	switch (init_attr->qp_type) {
 	case IB_QPT_XRC_TGT:
 	case IB_QPT_XRC_INI:
-		if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC)) {
+		if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC)) {
 			mlx5_ib_dbg(dev, "XRC not supported\n");
 			return ERR_PTR(-ENOSYS);
 		}
@@ -1272,6 +1282,9 @@ enum {
 
 static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
 {
+	struct mlx5_general_caps *gen;
+
+	gen = &dev->mdev->caps.gen;
 	if (rate == IB_RATE_PORT_CURRENT) {
 		return 0;
 	} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
@@ -1279,7 +1292,7 @@ static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
 	} else {
 		while (rate != IB_RATE_2_5_GBPS &&
 		       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
-			 dev->mdev->caps.stat_rate_support))
+			 gen->stat_rate_support))
 			--rate;
 	}
 
@@ -1290,8 +1303,10 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
 			 struct mlx5_qp_path *path, u8 port, int attr_mask,
 			 u32 path_flags, const struct ib_qp_attr *attr)
 {
+	struct mlx5_general_caps *gen;
 	int err;
 
+	gen = &dev->mdev->caps.gen;
 	path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
 	path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;
 
@@ -1318,9 +1333,9 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
 	path->port = port;
 
 	if (ah->ah_flags & IB_AH_GRH) {
-		if (ah->grh.sgid_index >= dev->mdev->caps.port[port - 1].gid_table_len) {
+		if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) {
 			pr_err(KERN_ERR "sgid_index (%u) too large. max is %d\n",
-			       ah->grh.sgid_index, dev->mdev->caps.port[port - 1].gid_table_len);
+			       ah->grh.sgid_index, gen->port[port - 1].gid_table_len);
 			return -EINVAL;
 		}
 
@@ -1492,6 +1507,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
 	struct mlx5_ib_cq *send_cq, *recv_cq;
 	struct mlx5_qp_context *context;
+	struct mlx5_general_caps *gen;
 	struct mlx5_modify_qp_mbox_in *in;
 	struct mlx5_ib_pd *pd;
 	enum mlx5_qp_state mlx5_cur, mlx5_new;
@@ -1500,6 +1516,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	int mlx5_st;
 	int err;
 
+	gen = &dev->mdev->caps.gen;
 	in = kzalloc(sizeof(*in), GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
@@ -1539,7 +1556,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 			err = -EINVAL;
 			goto out;
 		}
-		context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev->caps.log_max_msg;
+		context->mtu_msgmax = (attr->path_mtu << 5) | gen->log_max_msg;
 	}
 
 	if (attr_mask & IB_QP_DEST_QPN)
@@ -1685,9 +1702,11 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
 	enum ib_qp_state cur_state, new_state;
+	struct mlx5_general_caps *gen;
 	int err = -EINVAL;
 	int port;
 
+	gen = &dev->mdev->caps.gen;
 	mutex_lock(&qp->mutex);
 
 	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
@@ -1699,21 +1718,21 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		goto out;
 
 	if ((attr_mask & IB_QP_PORT) &&
-	    (attr->port_num == 0 || attr->port_num > dev->mdev->caps.num_ports))
+	    (attr->port_num == 0 || attr->port_num > gen->num_ports))
 		goto out;
 
 	if (attr_mask & IB_QP_PKEY_INDEX) {
 		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
-		if (attr->pkey_index >= dev->mdev->caps.port[port - 1].pkey_table_len)
+		if (attr->pkey_index >= gen->port[port - 1].pkey_table_len)
 			goto out;
 	}
 
 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
-	    attr->max_rd_atomic > dev->mdev->caps.max_ra_res_qp)
+	    attr->max_rd_atomic > (1 << gen->log_max_ra_res_qp))
 		goto out;
 
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
-	    attr->max_dest_rd_atomic > dev->mdev->caps.max_ra_req_qp)
+	    attr->max_dest_rd_atomic > (1 << gen->log_max_ra_req_qp))
 		goto out;
 
 	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
@@ -2893,7 +2912,8 @@ static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_at
 	memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
 	ib_ah_attr->port_num = path->port;
 
-	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
+	if (ib_ah_attr->port_num == 0 ||
+	    ib_ah_attr->port_num > dev->caps.gen.num_ports)
 		return;
 
 	ib_ah_attr->sl = path->sl & 0xf;
@@ -3011,10 +3031,12 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
 					  struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+	struct mlx5_general_caps *gen;
 	struct mlx5_ib_xrcd *xrcd;
 	int err;
 
-	if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC))
+	gen = &dev->mdev->caps.gen;
+	if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC))
 		return ERR_PTR(-ENOSYS);
 
 	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
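Note that two checks in mlx5_ib_modify_qp() change semantics as well as
spelling: the absolute limits caps.max_ra_res_qp and caps.max_ra_req_qp are
replaced by log2-encoded fields, presumably matching how the updated firmware
interface reports them, so the bound is reconstructed with a shift:

	/* Sketch of the new comparison, assuming the log2 encoding shown
	 * in the mlx5_ib_modify_qp() hunk above: 1 << log_max_ra_res_qp
	 * is the absolute maximum the device supports. */
	if (attr->max_rd_atomic > (1 << gen->log_max_ra_res_qp))
		goto out;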