-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c                |   8
-rw-r--r--  drivers/infiniband/hw/mlx5/mad.c               |   2
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c              |  83
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c                |  72
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c               |   6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c  |  24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c   |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c   |  81
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 219
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/uar.c  |   4
-rw-r--r--  include/linux/mlx5/device.h                    |  24
-rw-r--r--  include/linux/mlx5/driver.h                    |  28
12 files changed, 331 insertions(+), 222 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index e4056279166d..10cfce5119a9 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -752,7 +752,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
 		return ERR_PTR(-EINVAL);
 
 	entries = roundup_pow_of_two(entries + 1);
-	if (entries > dev->mdev->caps.max_cqes)
+	if (entries > dev->mdev->caps.gen.max_cqes)
 		return ERR_PTR(-EINVAL);
 
 	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -919,7 +919,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 	int err;
 	u32 fsel;
 
-	if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
+	if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
 		return -ENOSYS;
 
 	in = kzalloc(sizeof(*in), GFP_KERNEL);
@@ -1074,7 +1074,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 	int uninitialized_var(cqe_size);
 	unsigned long flags;
 
-	if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
+	if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
 		pr_info("Firmware does not support resize CQ\n");
 		return -ENOSYS;
 	}
@@ -1083,7 +1083,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 		return -EINVAL;
 
 	entries = roundup_pow_of_two(entries + 1);
-	if (entries > dev->mdev->caps.max_cqes + 1)
+	if (entries > dev->mdev->caps.gen.max_cqes + 1)
 		return -EINVAL;
 
 	if (entries == ibcq->cqe + 1)
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index b514bbb5610f..657af9a1167c 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -129,7 +129,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
 
 	packet_error = be16_to_cpu(out_mad->status);
 
-	dev->mdev->caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
+	dev->mdev->caps.gen.ext_port_cap[port - 1] = (!err && !packet_error) ?
 		MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;
 
 out:
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d8907b20522a..f3114d1132fb 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -157,11 +157,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
 	struct ib_smp *in_mad = NULL;
 	struct ib_smp *out_mad = NULL;
+	struct mlx5_general_caps *gen;
 	int err = -ENOMEM;
 	int max_rq_sg;
 	int max_sq_sg;
 	u64 flags;
 
+	gen = &dev->mdev->caps.gen;
 	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
 	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
 	if (!in_mad || !out_mad)
@@ -183,7 +185,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 		IB_DEVICE_PORT_ACTIVE_EVENT |
 		IB_DEVICE_SYS_IMAGE_GUID |
 		IB_DEVICE_RC_RNR_NAK_GEN;
-	flags = dev->mdev->caps.flags;
+	flags = gen->flags;
 	if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
 	if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR)
@@ -213,30 +215,31 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
 
 	props->max_mr_size = ~0ull;
-	props->page_size_cap = dev->mdev->caps.min_page_sz;
-	props->max_qp = 1 << dev->mdev->caps.log_max_qp;
-	props->max_qp_wr = dev->mdev->caps.max_wqes;
-	max_rq_sg = dev->mdev->caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
-	max_sq_sg = (dev->mdev->caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
+	props->page_size_cap = gen->min_page_sz;
+	props->max_qp = 1 << gen->log_max_qp;
+	props->max_qp_wr = gen->max_wqes;
+	max_rq_sg = gen->max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
+	max_sq_sg = (gen->max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
 		sizeof(struct mlx5_wqe_data_seg);
 	props->max_sge = min(max_rq_sg, max_sq_sg);
-	props->max_cq = 1 << dev->mdev->caps.log_max_cq;
-	props->max_cqe = dev->mdev->caps.max_cqes - 1;
-	props->max_mr = 1 << dev->mdev->caps.log_max_mkey;
-	props->max_pd = 1 << dev->mdev->caps.log_max_pd;
-	props->max_qp_rd_atom = dev->mdev->caps.max_ra_req_qp;
-	props->max_qp_init_rd_atom = dev->mdev->caps.max_ra_res_qp;
+	props->max_cq = 1 << gen->log_max_cq;
+	props->max_cqe = gen->max_cqes - 1;
+	props->max_mr = 1 << gen->log_max_mkey;
+	props->max_pd = 1 << gen->log_max_pd;
+	props->max_qp_rd_atom = 1 << gen->log_max_ra_req_qp;
+	props->max_qp_init_rd_atom = 1 << gen->log_max_ra_res_qp;
+	props->max_srq = 1 << gen->log_max_srq;
+	props->max_srq_wr = gen->max_srq_wqes - 1;
+	props->local_ca_ack_delay = gen->local_ca_ack_delay;
 	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
-	props->max_srq = 1 << dev->mdev->caps.log_max_srq;
-	props->max_srq_wr = dev->mdev->caps.max_srq_wqes - 1;
 	props->max_srq_sge = max_rq_sg - 1;
 	props->max_fast_reg_page_list_len = (unsigned int)-1;
-	props->local_ca_ack_delay = dev->mdev->caps.local_ca_ack_delay;
+	props->local_ca_ack_delay = gen->local_ca_ack_delay;
 	props->atomic_cap = IB_ATOMIC_NONE;
 	props->masked_atomic_cap = IB_ATOMIC_NONE;
 	props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
-	props->max_mcast_grp = 1 << dev->mdev->caps.log_max_mcg;
-	props->max_mcast_qp_attach = dev->mdev->caps.max_qp_mcg;
+	props->max_mcast_grp = 1 << gen->log_max_mcg;
+	props->max_mcast_qp_attach = gen->max_qp_mcg;
 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
 		props->max_mcast_grp;
 	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
@@ -254,10 +257,12 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
 	struct ib_smp *in_mad = NULL;
 	struct ib_smp *out_mad = NULL;
+	struct mlx5_general_caps *gen;
 	int ext_active_speed;
 	int err = -ENOMEM;
 
-	if (port < 1 || port > dev->mdev->caps.num_ports) {
+	gen = &dev->mdev->caps.gen;
+	if (port < 1 || port > gen->num_ports) {
 		mlx5_ib_warn(dev, "invalid port number %d\n", port);
 		return -EINVAL;
 	}
@@ -288,8 +293,8 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
 	props->phys_state = out_mad->data[33] >> 4;
 	props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
 	props->gid_tbl_len = out_mad->data[50];
-	props->max_msg_sz = 1 << to_mdev(ibdev)->mdev->caps.log_max_msg;
-	props->pkey_tbl_len = to_mdev(ibdev)->mdev->caps.port[port - 1].pkey_table_len;
+	props->max_msg_sz = 1 << gen->log_max_msg;
+	props->pkey_tbl_len = gen->port[port - 1].pkey_table_len;
 	props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
 	props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
 	props->active_width = out_mad->data[31] & 0xf;
@@ -316,7 +321,7 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
 
 	/* If reported active speed is QDR, check if is FDR-10 */
 	if (props->active_speed == 4) {
-		if (dev->mdev->caps.ext_port_cap[port - 1] &
+		if (gen->ext_port_cap[port - 1] &
 		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
 			init_query_mad(in_mad);
 			in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
@@ -470,6 +475,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	struct mlx5_ib_alloc_ucontext_req_v2 req;
 	struct mlx5_ib_alloc_ucontext_resp resp;
 	struct mlx5_ib_ucontext *context;
+	struct mlx5_general_caps *gen;
 	struct mlx5_uuar_info *uuari;
 	struct mlx5_uar *uars;
 	int gross_uuars;
@@ -480,6 +486,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	int i;
 	size_t reqlen;
 
+	gen = &dev->mdev->caps.gen;
 	if (!dev->ib_active)
 		return ERR_PTR(-EAGAIN);
 
@@ -512,14 +519,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 
 	num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
 	gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
-	resp.qp_tab_size = 1 << dev->mdev->caps.log_max_qp;
-	resp.bf_reg_size = dev->mdev->caps.bf_reg_size;
+	resp.qp_tab_size = 1 << gen->log_max_qp;
+	resp.bf_reg_size = gen->bf_reg_size;
 	resp.cache_line_size = L1_CACHE_BYTES;
-	resp.max_sq_desc_sz = dev->mdev->caps.max_sq_desc_sz;
-	resp.max_rq_desc_sz = dev->mdev->caps.max_rq_desc_sz;
-	resp.max_send_wqebb = dev->mdev->caps.max_wqes;
-	resp.max_recv_wr = dev->mdev->caps.max_wqes;
-	resp.max_srq_recv_wr = dev->mdev->caps.max_srq_wqes;
+	resp.max_sq_desc_sz = gen->max_sq_desc_sz;
+	resp.max_rq_desc_sz = gen->max_rq_desc_sz;
+	resp.max_send_wqebb = gen->max_wqes;
+	resp.max_recv_wr = gen->max_wqes;
+	resp.max_srq_recv_wr = gen->max_srq_wqes;
 
 	context = kzalloc(sizeof(*context), GFP_KERNEL);
 	if (!context)
@@ -565,7 +572,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	mutex_init(&context->db_page_mutex);
 
 	resp.tot_uuars = req.total_num_uuars;
-	resp.num_ports = dev->mdev->caps.num_ports;
+	resp.num_ports = gen->num_ports;
 	err = ib_copy_to_udata(udata, &resp,
 			       sizeof(resp) - sizeof(resp.reserved));
 	if (err)
@@ -967,9 +974,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
 
 static void get_ext_port_caps(struct mlx5_ib_dev *dev)
 {
+	struct mlx5_general_caps *gen;
 	int port;
 
-	for (port = 1; port <= dev->mdev->caps.num_ports; port++)
+	gen = &dev->mdev->caps.gen;
+	for (port = 1; port <= gen->num_ports; port++)
 		mlx5_query_ext_port_caps(dev, port);
 }
 
@@ -977,9 +986,11 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
 {
 	struct ib_device_attr *dprops = NULL;
 	struct ib_port_attr *pprops = NULL;
+	struct mlx5_general_caps *gen;
 	int err = 0;
 	int port;
 
+	gen = &dev->mdev->caps.gen;
 	pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
 	if (!pprops)
 		goto out;
@@ -994,14 +1005,14 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
 		goto out;
 	}
 
-	for (port = 1; port <= dev->mdev->caps.num_ports; port++) {
+	for (port = 1; port <= gen->num_ports; port++) {
 		err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
 		if (err) {
 			mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err);
 			break;
 		}
-		dev->mdev->caps.port[port - 1].pkey_table_len = dprops->max_pkeys;
-		dev->mdev->caps.port[port - 1].gid_table_len = pprops->gid_tbl_len;
+		gen->port[port - 1].pkey_table_len = dprops->max_pkeys;
+		gen->port[port - 1].gid_table_len = pprops->gid_tbl_len;
 		mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
 			    dprops->max_pkeys, pprops->gid_tbl_len);
 	}
@@ -1279,8 +1290,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
 	dev->ib_dev.owner = THIS_MODULE;
 	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
-	dev->ib_dev.local_dma_lkey = mdev->caps.reserved_lkey;
-	dev->num_ports = mdev->caps.num_ports;
+	dev->ib_dev.local_dma_lkey = mdev->caps.gen.reserved_lkey;
+	dev->num_ports = mdev->caps.gen.num_ports;
 	dev->ib_dev.phys_port_cnt = dev->num_ports;
 	dev->ib_dev.num_comp_vectors = dev->num_comp_vectors;
 	dev->ib_dev.dma_device = &mdev->pdev->dev;
@@ -1355,7 +1366,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list;
 	dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
 
-	if (mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC) {
+	if (mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_XRC) {
 		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
 		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
 		dev->ib_dev.uverbs_cmd_mask |=
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 8c574b63d77b..dbfe498870c1 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -158,11 +158,13 @@ static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
 static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
 		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
 {
+	struct mlx5_general_caps *gen;
 	int wqe_size;
 	int wq_size;
 
+	gen = &dev->mdev->caps.gen;
 	/* Sanity check RQ size before proceeding */
-	if (cap->max_recv_wr > dev->mdev->caps.max_wqes)
+	if (cap->max_recv_wr > gen->max_wqes)
 		return -EINVAL;
 
 	if (!has_rq) {
@@ -182,10 +184,10 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
 		wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
 		wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
 		qp->rq.wqe_cnt = wq_size / wqe_size;
-		if (wqe_size > dev->mdev->caps.max_rq_desc_sz) {
+		if (wqe_size > gen->max_rq_desc_sz) {
 			mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
 				    wqe_size,
-				    dev->mdev->caps.max_rq_desc_sz);
+				    gen->max_rq_desc_sz);
 			return -EINVAL;
 		}
 		qp->rq.wqe_shift = ilog2(wqe_size);
@@ -266,9 +268,11 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
 static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 			struct mlx5_ib_qp *qp)
 {
+	struct mlx5_general_caps *gen;
 	int wqe_size;
 	int wq_size;
 
+	gen = &dev->mdev->caps.gen;
 	if (!attr->cap.max_send_wr)
 		return 0;
 
@@ -277,9 +281,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 	if (wqe_size < 0)
 		return wqe_size;
 
-	if (wqe_size > dev->mdev->caps.max_sq_desc_sz) {
+	if (wqe_size > gen->max_sq_desc_sz) {
 		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
-			    wqe_size, dev->mdev->caps.max_sq_desc_sz);
+			    wqe_size, gen->max_sq_desc_sz);
 		return -EINVAL;
 	}
 
@@ -292,9 +296,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 
 	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
 	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
-	if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) {
+	if (qp->sq.wqe_cnt > gen->max_wqes) {
 		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
-			    qp->sq.wqe_cnt, dev->mdev->caps.max_wqes);
+			    qp->sq.wqe_cnt, gen->max_wqes);
 		return -ENOMEM;
 	}
 	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
@@ -309,11 +313,13 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
 			    struct mlx5_ib_qp *qp,
 			    struct mlx5_ib_create_qp *ucmd)
 {
+	struct mlx5_general_caps *gen;
 	int desc_sz = 1 << qp->sq.wqe_shift;
 
-	if (desc_sz > dev->mdev->caps.max_sq_desc_sz) {
+	gen = &dev->mdev->caps.gen;
+	if (desc_sz > gen->max_sq_desc_sz) {
 		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
-			     desc_sz, dev->mdev->caps.max_sq_desc_sz);
+			     desc_sz, gen->max_sq_desc_sz);
 		return -EINVAL;
 	}
 
@@ -325,9 +331,9 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
 
 	qp->sq.wqe_cnt = ucmd->sq_wqe_count;
 
-	if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) {
+	if (qp->sq.wqe_cnt > gen->max_wqes) {
 		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
-			     qp->sq.wqe_cnt, dev->mdev->caps.max_wqes);
+			     qp->sq.wqe_cnt, gen->max_wqes);
 		return -EINVAL;
 	}
 
@@ -803,16 +809,18 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	struct mlx5_ib_resources *devr = &dev->devr;
 	struct mlx5_ib_create_qp_resp resp;
 	struct mlx5_create_qp_mbox_in *in;
+	struct mlx5_general_caps *gen;
 	struct mlx5_ib_create_qp ucmd;
 	int inlen = sizeof(*in);
 	int err;
 
+	gen = &dev->mdev->caps.gen;
 	mutex_init(&qp->mutex);
 	spin_lock_init(&qp->sq.lock);
 	spin_lock_init(&qp->rq.lock);
 
 	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
-		if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
+		if (!(gen->flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
 			mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
 			return -EINVAL;
 		} else {
@@ -851,9 +859,9 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 			mlx5_ib_dbg(dev, "invalid rq params\n");
 			return -EINVAL;
 		}
-		if (ucmd.sq_wqe_count > dev->mdev->caps.max_wqes) {
+		if (ucmd.sq_wqe_count > gen->max_wqes) {
 			mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
-				    ucmd.sq_wqe_count, dev->mdev->caps.max_wqes);
+				    ucmd.sq_wqe_count, gen->max_wqes);
 			return -EINVAL;
 		}
 		err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
@@ -1144,6 +1152,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
 				struct ib_qp_init_attr *init_attr,
 				struct ib_udata *udata)
 {
+	struct mlx5_general_caps *gen;
 	struct mlx5_ib_dev *dev;
 	struct mlx5_ib_qp *qp;
 	u16 xrcdn = 0;
@@ -1161,11 +1170,12 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
 		}
 		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
 	}
+	gen = &dev->mdev->caps.gen;
 
 	switch (init_attr->qp_type) {
 	case IB_QPT_XRC_TGT:
 	case IB_QPT_XRC_INI:
-		if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC)) {
+		if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC)) {
 			mlx5_ib_dbg(dev, "XRC not supported\n");
 			return ERR_PTR(-ENOSYS);
 		}
@@ -1272,6 +1282,9 @@ enum {
 
 static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
 {
+	struct mlx5_general_caps *gen;
+
+	gen = &dev->mdev->caps.gen;
 	if (rate == IB_RATE_PORT_CURRENT) {
 		return 0;
 	} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
@@ -1279,7 +1292,7 @@ static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
 	} else {
 		while (rate != IB_RATE_2_5_GBPS &&
 		       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
-			 dev->mdev->caps.stat_rate_support))
+			 gen->stat_rate_support))
 			--rate;
 	}
 
@@ -1290,8 +1303,10 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
 			 struct mlx5_qp_path *path, u8 port, int attr_mask,
 			 u32 path_flags, const struct ib_qp_attr *attr)
 {
+	struct mlx5_general_caps *gen;
 	int err;
 
+	gen = &dev->mdev->caps.gen;
 	path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
 	path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;
 
@@ -1318,9 +1333,9 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
 	path->port = port;
 
 	if (ah->ah_flags & IB_AH_GRH) {
-		if (ah->grh.sgid_index >= dev->mdev->caps.port[port - 1].gid_table_len) {
+		if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) {
 			pr_err(KERN_ERR "sgid_index (%u) too large. max is %d\n",
-			       ah->grh.sgid_index, dev->mdev->caps.port[port - 1].gid_table_len);
+			       ah->grh.sgid_index, gen->port[port - 1].gid_table_len);
 			return -EINVAL;
 		}
 
@@ -1492,6 +1507,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
 	struct mlx5_ib_cq *send_cq, *recv_cq;
 	struct mlx5_qp_context *context;
+	struct mlx5_general_caps *gen;
 	struct mlx5_modify_qp_mbox_in *in;
 	struct mlx5_ib_pd *pd;
 	enum mlx5_qp_state mlx5_cur, mlx5_new;
@@ -1500,6 +1516,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	int mlx5_st;
 	int err;
 
+	gen = &dev->mdev->caps.gen;
 	in = kzalloc(sizeof(*in), GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
@@ -1539,7 +1556,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 			err = -EINVAL;
 			goto out;
 		}
-		context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev->caps.log_max_msg;
+		context->mtu_msgmax = (attr->path_mtu << 5) | gen->log_max_msg;
 	}
 
 	if (attr_mask & IB_QP_DEST_QPN)
@@ -1685,9 +1702,11 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
 	enum ib_qp_state cur_state, new_state;
+	struct mlx5_general_caps *gen;
 	int err = -EINVAL;
 	int port;
 
+	gen = &dev->mdev->caps.gen;
 	mutex_lock(&qp->mutex);
 
 	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
@@ -1699,21 +1718,21 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		goto out;
 
 	if ((attr_mask & IB_QP_PORT) &&
-	    (attr->port_num == 0 || attr->port_num > dev->mdev->caps.num_ports))
+	    (attr->port_num == 0 || attr->port_num > gen->num_ports))
 		goto out;
 
 	if (attr_mask & IB_QP_PKEY_INDEX) {
 		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
-		if (attr->pkey_index >= dev->mdev->caps.port[port - 1].pkey_table_len)
+		if (attr->pkey_index >= gen->port[port - 1].pkey_table_len)
 			goto out;
 	}
 
 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
-	    attr->max_rd_atomic > dev->mdev->caps.max_ra_res_qp)
+	    attr->max_rd_atomic > (1 << gen->log_max_ra_res_qp))
 		goto out;
 
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
-	    attr->max_dest_rd_atomic > dev->mdev->caps.max_ra_req_qp)
+	    attr->max_dest_rd_atomic > (1 << gen->log_max_ra_req_qp))
 		goto out;
 
 	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
@@ -2893,7 +2912,8 @@ static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_at
 	memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
 	ib_ah_attr->port_num = path->port;
 
-	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
+	if (ib_ah_attr->port_num == 0 ||
+	    ib_ah_attr->port_num > dev->caps.gen.num_ports)
 		return;
 
 	ib_ah_attr->sl = path->sl & 0xf;
@@ -3011,10 +3031,12 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
 					  struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+	struct mlx5_general_caps *gen;
 	struct mlx5_ib_xrcd *xrcd;
 	int err;
 
-	if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC))
+	gen = &dev->mdev->caps.gen;
+	if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC))
 		return ERR_PTR(-ENOSYS);
 
 	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 70bd131ba646..97cc1baaa8e3 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -238,6 +238,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 				  struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+	struct mlx5_general_caps *gen;
 	struct mlx5_ib_srq *srq;
 	int desc_size;
 	int buf_size;
@@ -247,11 +248,12 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 	int is_xrc;
 	u32 flgs, xrcdn;
 
+	gen = &dev->mdev->caps.gen;
 	/* Sanity check SRQ size before proceeding */
-	if (init_attr->attr.max_wr >= dev->mdev->caps.max_srq_wqes) {
+	if (init_attr->attr.max_wr >= gen->max_srq_wqes) {
 		mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
 			    init_attr->attr.max_wr,
-			    dev->mdev->caps.max_srq_wqes);
+			    gen->max_srq_wqes);
 		return ERR_PTR(-EINVAL);
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 65a7da69e2ac..6eb0f85cf872 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1538,16 +1538,9 @@ static const char *cmd_status_str(u8 status)
 	}
 }
 
-int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
+static int cmd_status_to_err(u8 status)
 {
-	if (!hdr->status)
-		return 0;
-
-	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
-		cmd_status_str(hdr->status), hdr->status,
-		be32_to_cpu(hdr->syndrome));
-
-	switch (hdr->status) {
+	switch (status) {
 	case MLX5_CMD_STAT_OK:			return 0;
 	case MLX5_CMD_STAT_INT_ERR:		return -EIO;
 	case MLX5_CMD_STAT_BAD_OP_ERR:		return -EINVAL;
@@ -1567,3 +1560,16 @@ int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
 	default:				return -EIO;
 	}
 }
+
+/* this will be available till all the commands use set/get macros */
+int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
+{
+	if (!hdr->status)
+		return 0;
+
+	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
+		cmd_status_str(hdr->status), hdr->status,
+		be32_to_cpu(hdr->syndrome));
+
+	return cmd_status_to_err(hdr->status);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 4e8bd0b34bb0..11b9b840ad4d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -468,7 +468,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
 
 	err = mlx5_create_map_eq(dev, &table->pages_eq,
 				 MLX5_EQ_VEC_PAGES,
-				 dev->caps.max_vf + 1,
+				 dev->caps.gen.max_vf + 1,
 				 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
 				 &dev->priv.uuari.uars[0]);
 	if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index f012658b6a92..087c4c797deb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -64,86 +64,9 @@ out_out:
 	return err;
 }
 
-int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
-			   struct mlx5_caps *caps)
+int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, struct mlx5_caps *caps)
 {
-	struct mlx5_cmd_query_hca_cap_mbox_out *out;
-	struct mlx5_cmd_query_hca_cap_mbox_in in;
-	struct mlx5_query_special_ctxs_mbox_out ctx_out;
-	struct mlx5_query_special_ctxs_mbox_in ctx_in;
-	int err;
-	u16 t16;
-
-	out = kzalloc(sizeof(*out), GFP_KERNEL);
-	if (!out)
-		return -ENOMEM;
-
-	memset(&in, 0, sizeof(in));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP);
-	in.hdr.opmod = cpu_to_be16(0x1);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
-	if (err)
-		goto out_out;
-
-	if (out->hdr.status) {
-		err = mlx5_cmd_status_to_err(&out->hdr);
-		goto out_out;
-	}
-
-
-	caps->log_max_eq = out->hca_cap.log_max_eq & 0xf;
-	caps->max_cqes = 1 << out->hca_cap.log_max_cq_sz;
-	caps->max_wqes = 1 << out->hca_cap.log_max_qp_sz;
-	caps->max_sq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_sq);
-	caps->max_rq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_rq);
-	caps->flags = be64_to_cpu(out->hca_cap.flags);
-	caps->stat_rate_support = be16_to_cpu(out->hca_cap.stat_rate_support);
-	caps->log_max_msg = out->hca_cap.log_max_msg & 0x1f;
-	caps->num_ports = out->hca_cap.num_ports & 0xf;
-	caps->log_max_cq = out->hca_cap.log_max_cq & 0x1f;
-	if (caps->num_ports > MLX5_MAX_PORTS) {
-		mlx5_core_err(dev, "device has %d ports while the driver supports max %d ports\n",
-			      caps->num_ports, MLX5_MAX_PORTS);
-		err = -EINVAL;
-		goto out_out;
-	}
-	caps->log_max_qp = out->hca_cap.log_max_qp & 0x1f;
-	caps->log_max_mkey = out->hca_cap.log_max_mkey & 0x3f;
-	caps->log_max_pd = out->hca_cap.log_max_pd & 0x1f;
-	caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
-	caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
-	caps->log_max_mcg = out->hca_cap.log_max_mcg;
-	caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff;
-	caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f);
-	caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f);
-	caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
-	t16 = be16_to_cpu(out->hca_cap.bf_log_bf_reg_size);
-	if (t16 & 0x8000) {
-		caps->bf_reg_size = 1 << (t16 & 0x1f);
-		caps->bf_regs_per_page = MLX5_BF_REGS_PER_PAGE;
-	} else {
-		caps->bf_reg_size = 0;
-		caps->bf_regs_per_page = 0;
-	}
-	caps->min_page_sz = ~(u32)((1 << out->hca_cap.log_pg_sz) - 1);
-
-	memset(&ctx_in, 0, sizeof(ctx_in));
-	memset(&ctx_out, 0, sizeof(ctx_out));
-	ctx_in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
-	err = mlx5_cmd_exec(dev, &ctx_in, sizeof(ctx_in),
-			    &ctx_out, sizeof(ctx_out));
-	if (err)
-		goto out_out;
-
-	if (ctx_out.hdr.status)
-		err = mlx5_cmd_status_to_err(&ctx_out.hdr);
-
-	caps->reserved_lkey = be32_to_cpu(ctx_out.reserved_lkey);
-
-out_out:
-	kfree(out);
-
-	return err;
+	return mlx5_core_get_caps(dev, caps, HCA_CAP_OPMOD_GET_CUR);
 }
 
 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index f2716cc1f51d..d9f74618befa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -207,11 +207,11 @@ static void release_bar(struct pci_dev *pdev)
 static int mlx5_enable_msix(struct mlx5_core_dev *dev)
 {
 	struct mlx5_eq_table *table = &dev->priv.eq_table;
-	int num_eqs = 1 << dev->caps.log_max_eq;
+	int num_eqs = 1 << dev->caps.gen.log_max_eq;
 	int nvec;
 	int i;
 
-	nvec = dev->caps.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
+	nvec = dev->caps.gen.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
 	nvec = min_t(int, nvec, num_eqs);
 	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
 		return -ENOMEM;
@@ -250,13 +250,34 @@ struct mlx5_reg_host_endianess {
 #define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))
 
 enum {
 	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
-				CAP_MASK(MLX5_CAP_OFF_DCT, 1),
+				MLX5_DEV_CAP_FLAG_DCT,
 };
 
+static u16 to_fw_pkey_sz(u32 size)
+{
+	switch (size) {
+	case 128:
+		return 0;
+	case 256:
+		return 1;
+	case 512:
+		return 2;
+	case 1024:
+		return 3;
+	case 2048:
+		return 4;
+	case 4096:
+		return 5;
+	default:
+		pr_warn("invalid pkey table size %d\n", size);
+		return 0;
+	}
+}
+
 /* selectively copy writable fields clearing any reserved area
  */
-static void copy_rw_fields(struct mlx5_hca_cap *to, struct mlx5_hca_cap *from)
+static void copy_rw_fields(struct mlx5_hca_cap *to, struct mlx5_general_caps *from)
 {
 	u64 v64;
 
@@ -265,76 +286,172 @@ static void copy_rw_fields(struct mlx5_hca_cap *to, struct mlx5_hca_cap *from)
 	to->log_max_ra_res_dc = from->log_max_ra_res_dc & 0x3f;
 	to->log_max_ra_req_qp = from->log_max_ra_req_qp & 0x3f;
 	to->log_max_ra_res_qp = from->log_max_ra_res_qp & 0x3f;
-	to->log_max_atomic_size_qp = from->log_max_atomic_size_qp;
-	to->log_max_atomic_size_dc = from->log_max_atomic_size_dc;
-	v64 = be64_to_cpu(from->flags) & MLX5_CAP_BITS_RW_MASK;
+	to->pkey_table_size = cpu_to_be16(to_fw_pkey_sz(from->pkey_table_size));
+	v64 = from->flags & MLX5_CAP_BITS_RW_MASK;
 	to->flags = cpu_to_be64(v64);
 }
 
-enum {
-	HCA_CAP_OPMOD_GET_MAX	= 0,
-	HCA_CAP_OPMOD_GET_CUR	= 1,
-};
+static u16 get_pkey_table_size(int pkey)
+{
+	if (pkey > MLX5_MAX_LOG_PKEY_TABLE)
+		return 0;
 
-static int handle_hca_cap(struct mlx5_core_dev *dev)
+	return MLX5_MIN_PKEY_TABLE_SIZE << pkey;
+}
+
+static void fw2drv_caps(struct mlx5_caps *caps,
+			struct mlx5_cmd_query_hca_cap_mbox_out *out)
 {
-	struct mlx5_cmd_query_hca_cap_mbox_out *query_out = NULL;
-	struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL;
-	struct mlx5_cmd_query_hca_cap_mbox_in query_ctx;
-	struct mlx5_cmd_set_hca_cap_mbox_out set_out;
-	u64 flags;
+	struct mlx5_general_caps *gen = &caps->gen;
+	u16 t16;
+
+	gen->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
+	gen->max_wqes = 1 << out->hca_cap.log_max_qp_sz;
+	gen->log_max_qp = out->hca_cap.log_max_qp & 0x1f;
+	gen->log_max_strq = out->hca_cap.log_max_strq_sz;
+	gen->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
+	gen->max_cqes = 1 << out->hca_cap.log_max_cq_sz;
+	gen->log_max_cq = out->hca_cap.log_max_cq & 0x1f;
+	gen->max_eqes = out->hca_cap.log_max_eq_sz;
+	gen->log_max_mkey = out->hca_cap.log_max_mkey & 0x3f;
+	gen->log_max_eq = out->hca_cap.log_max_eq & 0xf;
+	gen->max_indirection = out->hca_cap.max_indirection;
+	gen->log_max_mrw_sz = out->hca_cap.log_max_mrw_sz;
+	gen->log_max_bsf_list_size = 0;
+	gen->log_max_klm_list_size = 0;
+	gen->log_max_ra_req_dc = out->hca_cap.log_max_ra_req_dc;
+	gen->log_max_ra_res_dc = out->hca_cap.log_max_ra_res_dc;
+	gen->log_max_ra_req_qp = out->hca_cap.log_max_ra_req_qp;
+	gen->log_max_ra_res_qp = out->hca_cap.log_max_ra_res_qp;
+	gen->max_qp_counters = be16_to_cpu(out->hca_cap.max_qp_count);
+	gen->pkey_table_size = get_pkey_table_size(be16_to_cpu(out->hca_cap.pkey_table_size));
+	gen->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
+	gen->num_ports = out->hca_cap.num_ports & 0xf;
+	gen->log_max_msg = out->hca_cap.log_max_msg & 0x1f;
+	gen->stat_rate_support = be16_to_cpu(out->hca_cap.stat_rate_support);
+	gen->flags = be64_to_cpu(out->hca_cap.flags);
+	pr_debug("flags = 0x%llx\n", gen->flags);
+	gen->uar_sz = out->hca_cap.uar_sz;
+	gen->min_log_pg_sz = out->hca_cap.log_pg_sz;
+
+	t16 = be16_to_cpu(out->hca_cap.bf_log_bf_reg_size);
+	if (t16 & 0x8000) {
+		gen->bf_reg_size = 1 << (t16 & 0x1f);
+		gen->bf_regs_per_page = MLX5_BF_REGS_PER_PAGE;
+	} else {
+		gen->bf_reg_size = 0;
+		gen->bf_regs_per_page = 0;
+	}
+	gen->max_sq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_sq);
+	gen->max_rq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_rq);
+	gen->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff;
+	gen->log_max_pd = out->hca_cap.log_max_pd & 0x1f;
+	gen->log_max_xrcd = out->hca_cap.log_max_xrcd;
+	gen->log_uar_page_sz = be16_to_cpu(out->hca_cap.log_uar_page_sz);
+}
+
+static const char *caps_opmod_str(u16 opmod)
+{
+	switch (opmod) {
+	case HCA_CAP_OPMOD_GET_MAX:
+		return "GET_MAX";
+	case HCA_CAP_OPMOD_GET_CUR:
+		return "GET_CUR";
+	default:
+		return "Invalid";
+	}
+}
+
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
+		       u16 opmod)
+{
+	struct mlx5_cmd_query_hca_cap_mbox_out *out;
+	struct mlx5_cmd_query_hca_cap_mbox_in in;
 	int err;
 
-	memset(&query_ctx, 0, sizeof(query_ctx));
-	query_out = kzalloc(sizeof(*query_out), GFP_KERNEL);
-	if (!query_out)
+	memset(&in, 0, sizeof(in));
+	out = kzalloc(sizeof(*out), GFP_KERNEL);
+	if (!out)
 		return -ENOMEM;
 
-	set_ctx = kzalloc(sizeof(*set_ctx), GFP_KERNEL);
-	if (!set_ctx) {
-		err = -ENOMEM;
+	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP);
+	in.hdr.opmod = cpu_to_be16(opmod);
+	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
+
+	err = mlx5_cmd_status_to_err(&out->hdr);
+	if (err) {
+		mlx5_core_warn(dev, "query max hca cap failed, %d\n", err);
 		goto query_ex;
 	}
+	mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod));
+	fw2drv_caps(caps, out);
 
-	query_ctx.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP);
-	query_ctx.hdr.opmod = cpu_to_be16(HCA_CAP_OPMOD_GET_CUR);
-	err = mlx5_cmd_exec(dev, &query_ctx, sizeof(query_ctx),
-			    query_out, sizeof(*query_out));
+query_ex:
+	kfree(out);
+	return err;
+}
+
+static int set_caps(struct mlx5_core_dev *dev,
+		    struct mlx5_cmd_set_hca_cap_mbox_in *in)
+{
+	struct mlx5_cmd_set_hca_cap_mbox_out out;
+	int err;
+
+	memset(&out, 0, sizeof(out));
+
+	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP);
+	err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
 	if (err)
-		goto query_ex;
+		return err;
 
-	err = mlx5_cmd_status_to_err(&query_out->hdr);
-	if (err) {
-		mlx5_core_warn(dev, "query hca cap failed, %d\n", err);
+	err = mlx5_cmd_status_to_err(&out.hdr);
+
+	return err;
+}
+
+static int handle_hca_cap(struct mlx5_core_dev *dev)
+{
+	struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL;
+	struct mlx5_profile *prof = dev->profile;
+	struct mlx5_caps *cur_caps = NULL;
+	struct mlx5_caps *max_caps = NULL;
+	int err = -ENOMEM;
+
+	set_ctx = kzalloc(sizeof(*set_ctx), GFP_KERNEL);
+	if (!set_ctx)
 		goto query_ex;
-	}
 
-	copy_rw_fields(&set_ctx->hca_cap, &query_out->hca_cap);
+	max_caps = kzalloc(sizeof(*max_caps), GFP_KERNEL);
+	if (!max_caps)
+		goto query_ex;
 
-	if (dev->profile && dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
-		set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;
+	cur_caps = kzalloc(sizeof(*cur_caps), GFP_KERNEL);
+	if (!cur_caps)
+		goto query_ex;
 
-	flags = be64_to_cpu(query_out->hca_cap.flags);
-	/* disable checksum */
-	flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
-
-	set_ctx->hca_cap.flags = cpu_to_be64(flags);
-	memset(&set_out, 0, sizeof(set_out));
-	set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12);
-	set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP);
-	err = mlx5_cmd_exec(dev, set_ctx, sizeof(*set_ctx),
-			    &set_out, sizeof(set_out));
-	if (err) {
-		mlx5_core_warn(dev, "set hca cap failed, %d\n", err);
+	err = mlx5_core_get_caps(dev, max_caps, HCA_CAP_OPMOD_GET_MAX);
+	if (err)
 		goto query_ex;
-	}
 
-	err = mlx5_cmd_status_to_err(&set_out.hdr);
+	err = mlx5_core_get_caps(dev, cur_caps, HCA_CAP_OPMOD_GET_CUR);
 	if (err)
 		goto query_ex;
 
+	/* we limit the size of the pkey table to 128 entries for now */
+	cur_caps->gen.pkey_table_size = 128;
+
+	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
+		cur_caps->gen.log_max_qp = prof->log_max_qp;
+
+	/* disable checksum */
+	cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
+
+	copy_rw_fields(&set_ctx->hca_cap, &cur_caps->gen);
+	err = set_caps(dev, set_ctx);
+
 query_ex:
-	kfree(query_out);
+	kfree(cur_caps);
+	kfree(max_caps);
 	kfree(set_ctx);
 
 	return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 68f5d9c77c7b..0a6348cefc01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -174,11 +174,11 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
 	for (i = 0; i < tot_uuars; i++) {
 		bf = &uuari->bfs[i];
 
-		bf->buf_size = dev->caps.bf_reg_size / 2;
+		bf->buf_size = dev->caps.gen.bf_reg_size / 2;
 		bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE];
 		bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map;
 		bf->reg = NULL; /* Add WC support */
-		bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.bf_reg_size +
+		bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.gen.bf_reg_size +
 			     MLX5_BF_OFFSET;
 		bf->need_lock = need_uuar_lock(i);
 		spin_lock_init(&bf->lock);
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 334947151dfc..dce01fd854a8 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -71,6 +71,11 @@ enum {
 };
 
 enum {
+	MLX5_MIN_PKEY_TABLE_SIZE = 128,
+	MLX5_MAX_LOG_PKEY_TABLE  = 5,
+};
+
+enum {
 	MLX5_PERM_LOCAL_READ	= 1 << 2,
 	MLX5_PERM_LOCAL_WRITE	= 1 << 3,
 	MLX5_PERM_REMOTE_READ	= 1 << 4,
@@ -184,10 +189,10 @@ enum {
 	MLX5_DEV_CAP_FLAG_CQ_MODER	= 1LL << 29,
 	MLX5_DEV_CAP_FLAG_RESIZE_CQ	= 1LL << 30,
 	MLX5_DEV_CAP_FLAG_RESIZE_SRQ	= 1LL << 32,
+	MLX5_DEV_CAP_FLAG_DCT		= 1LL << 37,
 	MLX5_DEV_CAP_FLAG_REMOTE_FENCE	= 1LL << 38,
 	MLX5_DEV_CAP_FLAG_TLP_HINTS	= 1LL << 39,
 	MLX5_DEV_CAP_FLAG_SIG_HAND_OVER	= 1LL << 40,
-	MLX5_DEV_CAP_FLAG_DCT		= 1LL << 41,
 	MLX5_DEV_CAP_FLAG_CMDIF_CSUM	= 3LL << 46,
 };
 
@@ -243,10 +248,14 @@ enum {
 };
 
 enum {
-	MLX5_CAP_OFF_DCT		= 41,
 	MLX5_CAP_OFF_CMDIF_CSUM		= 46,
 };
 
+enum {
+	HCA_CAP_OPMOD_GET_MAX	= 0,
+	HCA_CAP_OPMOD_GET_CUR	= 1,
+};
+
 struct mlx5_inbox_hdr {
 	__be16 opcode;
 	u8 rsvd[4];
@@ -303,9 +312,10 @@ struct mlx5_hca_cap {
 	u8 log_max_ra_req_qp;
 	u8 rsvd10;
 	u8 log_max_ra_res_qp;
-	u8 rsvd11[4];
+	u8 pad_cap;
+	u8 rsvd11[3];
 	__be16 max_qp_count;
-	__be16 rsvd12;
+	__be16 pkey_table_size;
 	u8 rsvd13;
 	u8 local_ca_ack_delay;
 	u8 rsvd14;
@@ -335,11 +345,7 @@ struct mlx5_hca_cap {
 	u8 log_max_xrcd;
 	u8 rsvd25[42];
 	__be16 log_uar_page_sz;
-	u8 rsvd26[28];
-	u8 log_max_atomic_size_qp;
-	u8 rsvd27[2];
-	u8 log_max_atomic_size_dc;
-	u8 rsvd28[76];
+	u8 rsvd26[108];
 };
 
 
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index b88e9b46d957..45a2add747e0 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -335,23 +335,30 @@ struct mlx5_port_caps {
 	int	pkey_table_len;
 };
 
-struct mlx5_caps {
+struct mlx5_general_caps {
 	u8	log_max_eq;
 	u8	log_max_cq;
 	u8	log_max_qp;
 	u8	log_max_mkey;
 	u8	log_max_pd;
 	u8	log_max_srq;
+	u8	log_max_strq;
+	u8	log_max_mrw_sz;
+	u8	log_max_bsf_list_size;
+	u8	log_max_klm_list_size;
 	u32	max_cqes;
 	int	max_wqes;
+	u32	max_eqes;
+	u32	max_indirection;
 	int	max_sq_desc_sz;
 	int	max_rq_desc_sz;
+	int	max_dc_sq_desc_sz;
 	u64	flags;
 	u16	stat_rate_support;
 	int	log_max_msg;
 	int	num_ports;
-	int	max_ra_res_qp;
-	int	max_ra_req_qp;
+	u8	log_max_ra_res_qp;
+	u8	log_max_ra_req_qp;
 	int	max_srq_wqes;
 	int	bf_reg_size;
 	int	bf_regs_per_page;
@@ -363,6 +370,19 @@ struct mlx5_caps {
 	u8	log_max_mcg;
 	u32	max_qp_mcg;
 	int	min_page_sz;
+	int	pd_cap;
+	u32	max_qp_counters;
+	u32	pkey_table_size;
+	u8	log_max_ra_req_dc;
+	u8	log_max_ra_res_dc;
+	u32	uar_sz;
+	u8	min_log_pg_sz;
+	u8	log_max_xrcd;
+	u16	log_uar_page_sz;
+};
+
+struct mlx5_caps {
+	struct mlx5_general_caps gen;
 };
 
 struct mlx5_cmd_mailbox {
@@ -695,6 +715,8 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
 void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
 int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr);
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
+		       u16 opmod);
 int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
 		  int out_size);
 int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,