author		David S. Miller <davem@davemloft.net>	2014-10-03 18:42:37 -0400
committer	David S. Miller <davem@davemloft.net>	2014-10-03 18:42:37 -0400
commit		48fea861c9fbee5aae17897fb9acc4d348a9abac (patch)
tree		84f297a4df4c18b5ec6e589528c8f2d978d8abae
parent		55a93b3ea780908b7d1b3a8cf1976223a9268d78 (diff)
parent		f832dc820fe8fca561933e8fa734adca75bba5a0 (diff)
Merge branch 'mlx5-next'
Eli Cohen says:
====================
mlx5 update for 3.18
This series integrates a new mechanism for populating and extracting field values
used in the driver/firmware interaction around command mailboxes.
Changes from V1:
- Remove unused definition of memcpy_cpu_to_be32()
- Remove definitions of non_existent_*() and use BUILD_BUG_ON() instead.
- Added a one-line patch to add support for ConnectX-4 devices.
Changes from V0:
- trimmed the auto-generated file to a minimum, as required by the reviewers.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
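
The "mechanism for populating and extracting field values" mentioned in the cover letter above is the MLX5_SET()/MLX5_GET()-style accessor family that shows up later in this diff (see copy_rw_fields() and fw2drv_caps() in mlx5/core/main.c), driven by the field layouts in the new include/linux/mlx5/mlx5_ifc.h. As a rough illustration only (this is not the kernel's actual macro implementation, and the helper names and field descriptors below are made up), such accessors boil down to masking and shifting within big-endian 32-bit words of the command mailbox:

#include <stdint.h>
#include <arpa/inet.h>	/* htonl()/ntohl() stand in for cpu_to_be32()/be32_to_cpu() */

struct field_desc {
	unsigned int dw;	/* 32-bit word index inside the mailbox */
	unsigned int shift;	/* bit offset within that word */
	unsigned int width;	/* field width in bits */
};

static void mbox_set(uint32_t *mbox, struct field_desc f, uint32_t val)
{
	uint32_t mask = (f.width >= 32) ? 0xffffffffu : ((1u << f.width) - 1);
	uint32_t dw = ntohl(mbox[f.dw]);

	dw &= ~(mask << f.shift);	/* clear the old field bits */
	dw |= (val & mask) << f.shift;	/* splice in the new value */
	mbox[f.dw] = htonl(dw);
}

static uint32_t mbox_get(const uint32_t *mbox, struct field_desc f)
{
	uint32_t mask = (f.width >= 32) ? 0xffffffffu : ((1u << f.width) - 1);

	return (ntohl(mbox[f.dw]) >> f.shift) & mask;
}

/*
 * Hypothetical usage, roughly what MLX5_SET(query_hca_cap_in, in, opcode, ...)
 * does for the command opcode in the top half of the first dword:
 *
 *	mbox_set(in, (struct field_desc){ .dw = 0, .shift = 16, .width = 16 }, opcode);
 */

In the driver itself the offsets and widths come from the auto-generated structure descriptions in mlx5_ifc.h rather than hand-written tables, and, per the V1 changes above, BUILD_BUG_ON() is used to catch misuse at compile time instead of the old non_existent_*() trick.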
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c                 |   8
-rw-r--r--  drivers/infiniband/hw/mlx5/mad.c                |   2
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c               |  83
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c                 |  72
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c                |   6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c   |  77
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c    |  14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c    |  81
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c  | 230
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/qp.c    |  60
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/uar.c   |   4
-rw-r--r--  include/linux/mlx5/device.h                     | 152
-rw-r--r--  include/linux/mlx5/driver.h                     | 118
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h                   | 349
-rw-r--r--  include/linux/mlx5/qp.h                         |   3
15 files changed, 804 insertions(+), 455 deletions(-)
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index e4056279166d..10cfce5119a9 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -752,7 +752,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
 		return ERR_PTR(-EINVAL);
 
 	entries = roundup_pow_of_two(entries + 1);
-	if (entries > dev->mdev->caps.max_cqes)
+	if (entries > dev->mdev->caps.gen.max_cqes)
 		return ERR_PTR(-EINVAL);
 
 	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -919,7 +919,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 	int err;
 	u32 fsel;
 
-	if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
+	if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
 		return -ENOSYS;
 
 	in = kzalloc(sizeof(*in), GFP_KERNEL);
@@ -1074,7 +1074,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 	int uninitialized_var(cqe_size);
 	unsigned long flags;
 
-	if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
+	if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
 		pr_info("Firmware does not support resize CQ\n");
 		return -ENOSYS;
 	}
@@ -1083,7 +1083,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 		return -EINVAL;
 
 	entries = roundup_pow_of_two(entries + 1);
-	if (entries > dev->mdev->caps.max_cqes + 1)
+	if (entries > dev->mdev->caps.gen.max_cqes + 1)
 		return -EINVAL;
 
 	if (entries == ibcq->cqe + 1)
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index b514bbb5610f..657af9a1167c 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -129,7 +129,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
 
 	packet_error = be16_to_cpu(out_mad->status);
 
-	dev->mdev->caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
+	dev->mdev->caps.gen.ext_port_cap[port - 1] = (!err && !packet_error) ?
 		MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;
 
 out:
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index d8907b20522a..f3114d1132fb 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -157,11 +157,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
 	struct ib_smp *in_mad = NULL;
 	struct ib_smp *out_mad = NULL;
+	struct mlx5_general_caps *gen;
 	int err = -ENOMEM;
 	int max_rq_sg;
 	int max_sq_sg;
 	u64 flags;
 
+	gen = &dev->mdev->caps.gen;
 	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
 	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
 	if (!in_mad || !out_mad)
@@ -183,7 +185,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 		IB_DEVICE_PORT_ACTIVE_EVENT |
 		IB_DEVICE_SYS_IMAGE_GUID |
 		IB_DEVICE_RC_RNR_NAK_GEN;
-	flags = dev->mdev->caps.flags;
+	flags = gen->flags;
 	if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
 	if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR)
@@ -213,30 +215,31 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
 
 	props->max_mr_size = ~0ull;
-	props->page_size_cap = dev->mdev->caps.min_page_sz;
-	props->max_qp = 1 << dev->mdev->caps.log_max_qp;
-	props->max_qp_wr = dev->mdev->caps.max_wqes;
-	max_rq_sg = dev->mdev->caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
-	max_sq_sg = (dev->mdev->caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
+	props->page_size_cap = gen->min_page_sz;
+	props->max_qp = 1 << gen->log_max_qp;
+	props->max_qp_wr = gen->max_wqes;
+	max_rq_sg = gen->max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
+	max_sq_sg = (gen->max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
 		sizeof(struct mlx5_wqe_data_seg);
 	props->max_sge = min(max_rq_sg, max_sq_sg);
-	props->max_cq = 1 << dev->mdev->caps.log_max_cq;
-	props->max_cqe = dev->mdev->caps.max_cqes - 1;
-	props->max_mr = 1 << dev->mdev->caps.log_max_mkey;
-	props->max_pd = 1 << dev->mdev->caps.log_max_pd;
-	props->max_qp_rd_atom = dev->mdev->caps.max_ra_req_qp;
-	props->max_qp_init_rd_atom = dev->mdev->caps.max_ra_res_qp;
+	props->max_cq = 1 << gen->log_max_cq;
+	props->max_cqe = gen->max_cqes - 1;
+	props->max_mr = 1 << gen->log_max_mkey;
+	props->max_pd = 1 << gen->log_max_pd;
+	props->max_qp_rd_atom = 1 << gen->log_max_ra_req_qp;
+	props->max_qp_init_rd_atom = 1 << gen->log_max_ra_res_qp;
+	props->max_srq = 1 << gen->log_max_srq;
+	props->max_srq_wr = gen->max_srq_wqes - 1;
+	props->local_ca_ack_delay = gen->local_ca_ack_delay;
 	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
-	props->max_srq = 1 << dev->mdev->caps.log_max_srq;
-	props->max_srq_wr = dev->mdev->caps.max_srq_wqes - 1;
 	props->max_srq_sge = max_rq_sg - 1;
 	props->max_fast_reg_page_list_len = (unsigned int)-1;
-	props->local_ca_ack_delay = dev->mdev->caps.local_ca_ack_delay;
+	props->local_ca_ack_delay = gen->local_ca_ack_delay;
 	props->atomic_cap = IB_ATOMIC_NONE;
 	props->masked_atomic_cap = IB_ATOMIC_NONE;
 	props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
-	props->max_mcast_grp = 1 << dev->mdev->caps.log_max_mcg;
-	props->max_mcast_qp_attach = dev->mdev->caps.max_qp_mcg;
+	props->max_mcast_grp = 1 << gen->log_max_mcg;
+	props->max_mcast_qp_attach = gen->max_qp_mcg;
 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
 		props->max_mcast_grp;
 	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
@@ -254,10 +257,12 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
 	struct ib_smp *in_mad = NULL;
 	struct ib_smp *out_mad = NULL;
+	struct mlx5_general_caps *gen;
 	int ext_active_speed;
 	int err = -ENOMEM;
 
-	if (port < 1 || port > dev->mdev->caps.num_ports) {
+	gen = &dev->mdev->caps.gen;
+	if (port < 1 || port > gen->num_ports) {
 		mlx5_ib_warn(dev, "invalid port number %d\n", port);
 		return -EINVAL;
 	}
@@ -288,8 +293,8 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
 	props->phys_state = out_mad->data[33] >> 4;
 	props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
 	props->gid_tbl_len = out_mad->data[50];
-	props->max_msg_sz = 1 << to_mdev(ibdev)->mdev->caps.log_max_msg;
-	props->pkey_tbl_len = to_mdev(ibdev)->mdev->caps.port[port - 1].pkey_table_len;
+	props->max_msg_sz = 1 << gen->log_max_msg;
+	props->pkey_tbl_len = gen->port[port - 1].pkey_table_len;
 	props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
 	props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
 	props->active_width = out_mad->data[31] & 0xf;
@@ -316,7 +321,7 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
 
 	/* If reported active speed is QDR, check if is FDR-10 */
 	if (props->active_speed == 4) {
-		if (dev->mdev->caps.ext_port_cap[port - 1] &
+		if (gen->ext_port_cap[port - 1] &
 		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
 			init_query_mad(in_mad);
 			in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
@@ -470,6 +475,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	struct mlx5_ib_alloc_ucontext_req_v2 req;
 	struct mlx5_ib_alloc_ucontext_resp resp;
 	struct mlx5_ib_ucontext *context;
+	struct mlx5_general_caps *gen;
 	struct mlx5_uuar_info *uuari;
 	struct mlx5_uar *uars;
 	int gross_uuars;
@@ -480,6 +486,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	int i;
 	size_t reqlen;
 
+	gen = &dev->mdev->caps.gen;
 	if (!dev->ib_active)
 		return ERR_PTR(-EAGAIN);
 
@@ -512,14 +519,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 
 	num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
 	gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
-	resp.qp_tab_size = 1 << dev->mdev->caps.log_max_qp;
-	resp.bf_reg_size = dev->mdev->caps.bf_reg_size;
+	resp.qp_tab_size = 1 << gen->log_max_qp;
+	resp.bf_reg_size = gen->bf_reg_size;
 	resp.cache_line_size = L1_CACHE_BYTES;
-	resp.max_sq_desc_sz = dev->mdev->caps.max_sq_desc_sz;
-	resp.max_rq_desc_sz = dev->mdev->caps.max_rq_desc_sz;
-	resp.max_send_wqebb = dev->mdev->caps.max_wqes;
-	resp.max_recv_wr = dev->mdev->caps.max_wqes;
-	resp.max_srq_recv_wr = dev->mdev->caps.max_srq_wqes;
+	resp.max_sq_desc_sz = gen->max_sq_desc_sz;
+	resp.max_rq_desc_sz = gen->max_rq_desc_sz;
+	resp.max_send_wqebb = gen->max_wqes;
+	resp.max_recv_wr = gen->max_wqes;
+	resp.max_srq_recv_wr = gen->max_srq_wqes;
 
 	context = kzalloc(sizeof(*context), GFP_KERNEL);
 	if (!context)
@@ -565,7 +572,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	mutex_init(&context->db_page_mutex);
 
 	resp.tot_uuars = req.total_num_uuars;
-	resp.num_ports = dev->mdev->caps.num_ports;
+	resp.num_ports = gen->num_ports;
 	err = ib_copy_to_udata(udata, &resp,
 			       sizeof(resp) - sizeof(resp.reserved));
 	if (err)
@@ -967,9 +974,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
 
 static void get_ext_port_caps(struct mlx5_ib_dev *dev)
 {
+	struct mlx5_general_caps *gen;
 	int port;
 
-	for (port = 1; port <= dev->mdev->caps.num_ports; port++)
+	gen = &dev->mdev->caps.gen;
+	for (port = 1; port <= gen->num_ports; port++)
 		mlx5_query_ext_port_caps(dev, port);
 }
 
@@ -977,9 +986,11 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
 {
 	struct ib_device_attr *dprops = NULL;
 	struct ib_port_attr *pprops = NULL;
+	struct mlx5_general_caps *gen;
 	int err = 0;
 	int port;
 
+	gen = &dev->mdev->caps.gen;
 	pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
 	if (!pprops)
 		goto out;
@@ -994,14 +1005,14 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
 		goto out;
 	}
 
-	for (port = 1; port <= dev->mdev->caps.num_ports; port++) {
+	for (port = 1; port <= gen->num_ports; port++) {
 		err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
 		if (err) {
 			mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err);
 			break;
 		}
-		dev->mdev->caps.port[port - 1].pkey_table_len = dprops->max_pkeys;
-		dev->mdev->caps.port[port - 1].gid_table_len = pprops->gid_tbl_len;
+		gen->port[port - 1].pkey_table_len = dprops->max_pkeys;
+		gen->port[port - 1].gid_table_len = pprops->gid_tbl_len;
 		mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
 			    dprops->max_pkeys, pprops->gid_tbl_len);
 	}
@@ -1279,8 +1290,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
 	dev->ib_dev.owner = THIS_MODULE;
 	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
-	dev->ib_dev.local_dma_lkey = mdev->caps.reserved_lkey;
-	dev->num_ports = mdev->caps.num_ports;
+	dev->ib_dev.local_dma_lkey = mdev->caps.gen.reserved_lkey;
+	dev->num_ports = mdev->caps.gen.num_ports;
 	dev->ib_dev.phys_port_cnt = dev->num_ports;
 	dev->ib_dev.num_comp_vectors = dev->num_comp_vectors;
 	dev->ib_dev.dma_device = &mdev->pdev->dev;
@@ -1355,7 +1366,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list;
 	dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
 
-	if (mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC) {
+	if (mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_XRC) {
 		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
 		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
 		dev->ib_dev.uverbs_cmd_mask |=
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 8c574b63d77b..dbfe498870c1 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -158,11 +158,13 @@ static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
 static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
 		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
 {
+	struct mlx5_general_caps *gen;
 	int wqe_size;
 	int wq_size;
 
+	gen = &dev->mdev->caps.gen;
 	/* Sanity check RQ size before proceeding */
-	if (cap->max_recv_wr > dev->mdev->caps.max_wqes)
+	if (cap->max_recv_wr > gen->max_wqes)
 		return -EINVAL;
 
 	if (!has_rq) {
@@ -182,10 +184,10 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
 		wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
 		wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
 		qp->rq.wqe_cnt = wq_size / wqe_size;
-		if (wqe_size > dev->mdev->caps.max_rq_desc_sz) {
+		if (wqe_size > gen->max_rq_desc_sz) {
 			mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
 				    wqe_size,
-				    dev->mdev->caps.max_rq_desc_sz);
+				    gen->max_rq_desc_sz);
 			return -EINVAL;
 		}
 		qp->rq.wqe_shift = ilog2(wqe_size);
@@ -266,9 +268,11 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
 static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 			struct mlx5_ib_qp *qp)
 {
+	struct mlx5_general_caps *gen;
 	int wqe_size;
 	int wq_size;
 
+	gen = &dev->mdev->caps.gen;
 	if (!attr->cap.max_send_wr)
 		return 0;
 
@@ -277,9 +281,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 	if (wqe_size < 0)
 		return wqe_size;
 
-	if (wqe_size > dev->mdev->caps.max_sq_desc_sz) {
+	if (wqe_size > gen->max_sq_desc_sz) {
 		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
-			    wqe_size, dev->mdev->caps.max_sq_desc_sz);
+			    wqe_size, gen->max_sq_desc_sz);
 		return -EINVAL;
 	}
 
@@ -292,9 +296,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 
 	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
 	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
-	if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) {
+	if (qp->sq.wqe_cnt > gen->max_wqes) {
 		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
-			    qp->sq.wqe_cnt, dev->mdev->caps.max_wqes);
+			    qp->sq.wqe_cnt, gen->max_wqes);
 		return -ENOMEM;
 	}
 	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
@@ -309,11 +313,13 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
 			    struct mlx5_ib_qp *qp,
 			    struct mlx5_ib_create_qp *ucmd)
 {
+	struct mlx5_general_caps *gen;
 	int desc_sz = 1 << qp->sq.wqe_shift;
 
-	if (desc_sz > dev->mdev->caps.max_sq_desc_sz) {
+	gen = &dev->mdev->caps.gen;
+	if (desc_sz > gen->max_sq_desc_sz) {
 		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
-			     desc_sz, dev->mdev->caps.max_sq_desc_sz);
+			     desc_sz, gen->max_sq_desc_sz);
 		return -EINVAL;
 	}
 
@@ -325,9 +331,9 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
 
 	qp->sq.wqe_cnt = ucmd->sq_wqe_count;
 
-	if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) {
+	if (qp->sq.wqe_cnt > gen->max_wqes) {
 		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
-			     qp->sq.wqe_cnt, dev->mdev->caps.max_wqes);
+			     qp->sq.wqe_cnt, gen->max_wqes);
 		return -EINVAL;
 	}
 
@@ -803,16 +809,18 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	struct mlx5_ib_resources *devr = &dev->devr;
 	struct mlx5_ib_create_qp_resp resp;
 	struct mlx5_create_qp_mbox_in *in;
+	struct mlx5_general_caps *gen;
 	struct mlx5_ib_create_qp ucmd;
 	int inlen = sizeof(*in);
 	int err;
 
+	gen = &dev->mdev->caps.gen;
 	mutex_init(&qp->mutex);
 	spin_lock_init(&qp->sq.lock);
 	spin_lock_init(&qp->rq.lock);
 
 	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
-		if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
+		if (!(gen->flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
 			mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
 			return -EINVAL;
 		} else {
@@ -851,9 +859,9 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 			mlx5_ib_dbg(dev, "invalid rq params\n");
 			return -EINVAL;
 		}
-		if (ucmd.sq_wqe_count > dev->mdev->caps.max_wqes) {
+		if (ucmd.sq_wqe_count > gen->max_wqes) {
 			mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
-				    ucmd.sq_wqe_count, dev->mdev->caps.max_wqes);
+				    ucmd.sq_wqe_count, gen->max_wqes);
 			return -EINVAL;
 		}
 		err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
@@ -1144,6 +1152,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
 				struct ib_qp_init_attr *init_attr,
 				struct ib_udata *udata)
 {
+	struct mlx5_general_caps *gen;
 	struct mlx5_ib_dev *dev;
 	struct mlx5_ib_qp *qp;
 	u16 xrcdn = 0;
@@ -1161,11 +1170,12 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
 		}
 		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
 	}
+	gen = &dev->mdev->caps.gen;
 
 	switch (init_attr->qp_type) {
 	case IB_QPT_XRC_TGT:
 	case IB_QPT_XRC_INI:
-		if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC)) {
+		if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC)) {
 			mlx5_ib_dbg(dev, "XRC not supported\n");
 			return ERR_PTR(-ENOSYS);
 		}
@@ -1272,6 +1282,9 @@ enum {
 
 static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
 {
+	struct mlx5_general_caps *gen;
+
+	gen = &dev->mdev->caps.gen;
 	if (rate == IB_RATE_PORT_CURRENT) {
 		return 0;
 	} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
@@ -1279,7 +1292,7 @@ static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
 	} else {
 		while (rate != IB_RATE_2_5_GBPS &&
 		       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
-			 dev->mdev->caps.stat_rate_support))
+			 gen->stat_rate_support))
 			--rate;
 	}
 
@@ -1290,8 +1303,10 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
 			 struct mlx5_qp_path *path, u8 port, int attr_mask,
 			 u32 path_flags, const struct ib_qp_attr *attr)
 {
+	struct mlx5_general_caps *gen;
 	int err;
 
+	gen = &dev->mdev->caps.gen;
 	path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
 	path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;
 
@@ -1318,9 +1333,9 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
 	path->port = port;
 
 	if (ah->ah_flags & IB_AH_GRH) {
-		if (ah->grh.sgid_index >= dev->mdev->caps.port[port - 1].gid_table_len) {
+		if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) {
 			pr_err(KERN_ERR "sgid_index (%u) too large. max is %d\n",
-			       ah->grh.sgid_index, dev->mdev->caps.port[port - 1].gid_table_len);
+			       ah->grh.sgid_index, gen->port[port - 1].gid_table_len);
 			return -EINVAL;
 		}
 
@@ -1492,6 +1507,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
 	struct mlx5_ib_cq *send_cq, *recv_cq;
 	struct mlx5_qp_context *context;
+	struct mlx5_general_caps *gen;
 	struct mlx5_modify_qp_mbox_in *in;
 	struct mlx5_ib_pd *pd;
 	enum mlx5_qp_state mlx5_cur, mlx5_new;
@@ -1500,6 +1516,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	int mlx5_st;
 	int err;
 
+	gen = &dev->mdev->caps.gen;
 	in = kzalloc(sizeof(*in), GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
@@ -1539,7 +1556,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 			err = -EINVAL;
 			goto out;
 		}
-		context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev->caps.log_max_msg;
+		context->mtu_msgmax = (attr->path_mtu << 5) | gen->log_max_msg;
 	}
 
 	if (attr_mask & IB_QP_DEST_QPN)
@@ -1685,9 +1702,11 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
 	enum ib_qp_state cur_state, new_state;
+	struct mlx5_general_caps *gen;
 	int err = -EINVAL;
 	int port;
 
+	gen = &dev->mdev->caps.gen;
 	mutex_lock(&qp->mutex);
 
 	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
@@ -1699,21 +1718,21 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		goto out;
 
 	if ((attr_mask & IB_QP_PORT) &&
-	    (attr->port_num == 0 || attr->port_num > dev->mdev->caps.num_ports))
+	    (attr->port_num == 0 || attr->port_num > gen->num_ports))
 		goto out;
 
 	if (attr_mask & IB_QP_PKEY_INDEX) {
 		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
-		if (attr->pkey_index >= dev->mdev->caps.port[port - 1].pkey_table_len)
+		if (attr->pkey_index >= gen->port[port - 1].pkey_table_len)
 			goto out;
 	}
 
 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
-	    attr->max_rd_atomic > dev->mdev->caps.max_ra_res_qp)
+	    attr->max_rd_atomic > (1 << gen->log_max_ra_res_qp))
 		goto out;
 
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
-	    attr->max_dest_rd_atomic > dev->mdev->caps.max_ra_req_qp)
+	    attr->max_dest_rd_atomic > (1 << gen->log_max_ra_req_qp))
 		goto out;
 
 	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
@@ -2893,7 +2912,8 @@ static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_at
 	memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
 	ib_ah_attr->port_num = path->port;
 
-	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
+	if (ib_ah_attr->port_num == 0 ||
+	    ib_ah_attr->port_num > dev->caps.gen.num_ports)
 		return;
 
 	ib_ah_attr->sl = path->sl & 0xf;
@@ -3011,10 +3031,12 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
 					  struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+	struct mlx5_general_caps *gen;
 	struct mlx5_ib_xrcd *xrcd;
 	int err;
 
-	if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC))
+	gen = &dev->mdev->caps.gen;
+	if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC))
 		return ERR_PTR(-ENOSYS);
 
 	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 70bd131ba646..97cc1baaa8e3 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -238,6 +238,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 			    struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+	struct mlx5_general_caps *gen;
 	struct mlx5_ib_srq *srq;
 	int desc_size;
 	int buf_size;
@@ -247,11 +248,12 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 	int is_xrc;
 	u32 flgs, xrcdn;
 
+	gen = &dev->mdev->caps.gen;
 	/* Sanity check SRQ size before proceeding */
-	if (init_attr->attr.max_wr >= dev->mdev->caps.max_srq_wqes) {
+	if (init_attr->attr.max_wr >= gen->max_srq_wqes) {
 		mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
 			    init_attr->attr.max_wr,
-			    dev->mdev->caps.max_srq_wqes);
+			    gen->max_srq_wqes);
 		return ERR_PTR(-EINVAL);
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 65a7da69e2ac..368c6c5ea014 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -357,60 +357,24 @@ const char *mlx5_command_str(int command)
 	case MLX5_CMD_OP_2ERR_QP:
 		return "2ERR_QP";
 
-	case MLX5_CMD_OP_RTS2SQD_QP:
-		return "RTS2SQD_QP";
-
-	case MLX5_CMD_OP_SQD2RTS_QP:
-		return "SQD2RTS_QP";
-
 	case MLX5_CMD_OP_2RST_QP:
 		return "2RST_QP";
 
 	case MLX5_CMD_OP_QUERY_QP:
 		return "QUERY_QP";
 
-	case MLX5_CMD_OP_CONF_SQP:
-		return "CONF_SQP";
-
 	case MLX5_CMD_OP_MAD_IFC:
 		return "MAD_IFC";
 
 	case MLX5_CMD_OP_INIT2INIT_QP:
 		return "INIT2INIT_QP";
 
-	case MLX5_CMD_OP_SUSPEND_QP:
-		return "SUSPEND_QP";
-
-	case MLX5_CMD_OP_UNSUSPEND_QP:
-		return "UNSUSPEND_QP";
-
-	case MLX5_CMD_OP_SQD2SQD_QP:
-		return "SQD2SQD_QP";
-
-	case MLX5_CMD_OP_ALLOC_QP_COUNTER_SET:
-		return "ALLOC_QP_COUNTER_SET";
-
-	case MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET:
-		return "DEALLOC_QP_COUNTER_SET";
-
-	case MLX5_CMD_OP_QUERY_QP_COUNTER_SET:
-		return "QUERY_QP_COUNTER_SET";
-
 	case MLX5_CMD_OP_CREATE_PSV:
 		return "CREATE_PSV";
 
 	case MLX5_CMD_OP_DESTROY_PSV:
 		return "DESTROY_PSV";
 
-	case MLX5_CMD_OP_QUERY_PSV:
-		return "QUERY_PSV";
-
-	case MLX5_CMD_OP_QUERY_SIG_RULE_TABLE:
-		return "QUERY_SIG_RULE_TABLE";
-
-	case MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE:
-		return "QUERY_BLOCK_SIZE_TABLE";
-
 	case MLX5_CMD_OP_CREATE_SRQ:
 		return "CREATE_SRQ";
 
@@ -1538,16 +1502,9 @@ static const char *cmd_status_str(u8 status)
 	}
 }
 
-int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
+static int cmd_status_to_err(u8 status)
 {
-	if (!hdr->status)
-		return 0;
-
-	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
-		cmd_status_str(hdr->status), hdr->status,
-		be32_to_cpu(hdr->syndrome));
-
-	switch (hdr->status) {
+	switch (status) {
 	case MLX5_CMD_STAT_OK: return 0;
 	case MLX5_CMD_STAT_INT_ERR: return -EIO;
 	case MLX5_CMD_STAT_BAD_OP_ERR: return -EINVAL;
@@ -1567,3 +1524,33 @@ int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
 	default: return -EIO;
 	}
 }
+
+/* this will be available till all the commands use set/get macros */
+int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
+{
+	if (!hdr->status)
+		return 0;
+
+	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
+		cmd_status_str(hdr->status), hdr->status,
+		be32_to_cpu(hdr->syndrome));
+
+	return cmd_status_to_err(hdr->status);
+}
+
+int mlx5_cmd_status_to_err_v2(void *ptr)
+{
+	u32 syndrome;
+	u8 status;
+
+	status = be32_to_cpu(*(__be32 *)ptr) >> 24;
+	if (!status)
+		return 0;
+
+	syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));
+
+	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
+		cmd_status_str(status), status, syndrome);
+
+	return cmd_status_to_err(status);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 4e8bd0b34bb0..ed53291468f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -198,7 +198,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 	int eqes_found = 0;
 	int set_ci = 0;
 	u32 cqn;
-	u32 srqn;
+	u32 rsn;
 	u8 port;
 
 	while ((eqe = next_eqe_sw(eq))) {
@@ -224,18 +224,18 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
 		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
 		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
+			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
 			mlx5_core_dbg(dev, "event %s(%d) arrived\n",
 				      eqe_type_str(eqe->type), eqe->type);
-			mlx5_qp_event(dev, be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff,
-				      eqe->type);
+			mlx5_rsc_event(dev, rsn, eqe->type);
 			break;
 
 		case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
 		case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
-			srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
+			rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
 			mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n",
-				      eqe_type_str(eqe->type), eqe->type, srqn);
-			mlx5_srq_event(dev, srqn, eqe->type);
+				      eqe_type_str(eqe->type), eqe->type, rsn);
+			mlx5_srq_event(dev, rsn, eqe->type);
 			break;
 
 		case MLX5_EVENT_TYPE_CMD:
@@ -468,7 +468,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
 
 	err = mlx5_create_map_eq(dev, &table->pages_eq,
 				 MLX5_EQ_VEC_PAGES,
-				 dev->caps.max_vf + 1,
+				 dev->caps.gen.max_vf + 1,
 				 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
 				 &dev->priv.uuari.uars[0]);
 	if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index f012658b6a92..087c4c797deb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -64,86 +64,9 @@ out_out:
 	return err;
 }
 
-int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
-			   struct mlx5_caps *caps)
+int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, struct mlx5_caps *caps)
 {
-	struct mlx5_cmd_query_hca_cap_mbox_out *out;
-	struct mlx5_cmd_query_hca_cap_mbox_in in;
-	struct mlx5_query_special_ctxs_mbox_out ctx_out;
-	struct mlx5_query_special_ctxs_mbox_in ctx_in;
-	int err;
-	u16 t16;
-
-	out = kzalloc(sizeof(*out), GFP_KERNEL);
-	if (!out)
-		return -ENOMEM;
-
-	memset(&in, 0, sizeof(in));
-	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP);
-	in.hdr.opmod = cpu_to_be16(0x1);
-	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, sizeof(*out));
-	if (err)
-		goto out_out;
-
-	if (out->hdr.status) {
-		err = mlx5_cmd_status_to_err(&out->hdr);
-		goto out_out;
-	}
-
-
-	caps->log_max_eq = out->hca_cap.log_max_eq & 0xf;
-	caps->max_cqes = 1 << out->hca_cap.log_max_cq_sz;
-	caps->max_wqes = 1 << out->hca_cap.log_max_qp_sz;
-	caps->max_sq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_sq);
-	caps->max_rq_desc_sz = be16_to_cpu(out->hca_cap.max_desc_sz_rq);
-	caps->flags = be64_to_cpu(out->hca_cap.flags);
-	caps->stat_rate_support = be16_to_cpu(out->hca_cap.stat_rate_support);
-	caps->log_max_msg = out->hca_cap.log_max_msg & 0x1f;
-	caps->num_ports = out->hca_cap.num_ports & 0xf;
-	caps->log_max_cq = out->hca_cap.log_max_cq & 0x1f;
-	if (caps->num_ports > MLX5_MAX_PORTS) {
-		mlx5_core_err(dev, "device has %d ports while the driver supports max %d ports\n",
-			      caps->num_ports, MLX5_MAX_PORTS);
-		err = -EINVAL;
-		goto out_out;
-	}
-	caps->log_max_qp = out->hca_cap.log_max_qp & 0x1f;
-	caps->log_max_mkey = out->hca_cap.log_max_mkey & 0x3f;
-	caps->log_max_pd = out->hca_cap.log_max_pd & 0x1f;
-	caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
-	caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
-	caps->log_max_mcg = out->hca_cap.log_max_mcg;
-	caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff;
-	caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f);
-	caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f);
-	caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
-	t16 = be16_to_cpu(out->hca_cap.bf_log_bf_reg_size);
-	if (t16 & 0x8000) {
-		caps->bf_reg_size = 1 << (t16 & 0x1f);
-		caps->bf_regs_per_page = MLX5_BF_REGS_PER_PAGE;
-	} else {
-		caps->bf_reg_size = 0;
-		caps->bf_regs_per_page = 0;
-	}
-	caps->min_page_sz = ~(u32)((1 << out->hca_cap.log_pg_sz) - 1);
-
-	memset(&ctx_in, 0, sizeof(ctx_in));
-	memset(&ctx_out, 0, sizeof(ctx_out));
-	ctx_in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
-	err = mlx5_cmd_exec(dev, &ctx_in, sizeof(ctx_in),
-			    &ctx_out, sizeof(ctx_out));
-	if (err)
-		goto out_out;
-
-	if (ctx_out.hdr.status)
-		err = mlx5_cmd_status_to_err(&ctx_out.hdr);
-
-	caps->reserved_lkey = be32_to_cpu(ctx_out.reserved_lkey);
-
-out_out:
-	kfree(out);
-
-	return err;
+	return mlx5_core_get_caps(dev, caps, HCA_CAP_OPMOD_GET_CUR);
 }
 
 int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index f2716cc1f51d..3d8e8e489b2d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -43,6 +43,7 @@
 #include <linux/mlx5/qp.h>
 #include <linux/mlx5/srq.h>
 #include <linux/debugfs.h>
+#include <linux/mlx5/mlx5_ifc.h>
 #include "mlx5_core.h"
 
 #define DRIVER_NAME "mlx5_core"
@@ -207,11 +208,11 @@ static void release_bar(struct pci_dev *pdev)
 static int mlx5_enable_msix(struct mlx5_core_dev *dev)
 {
 	struct mlx5_eq_table *table = &dev->priv.eq_table;
-	int num_eqs = 1 << dev->caps.log_max_eq;
+	int num_eqs = 1 << dev->caps.gen.log_max_eq;
 	int nvec;
 	int i;
 
-	nvec = dev->caps.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
+	nvec = dev->caps.gen.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
 	nvec = min_t(int, nvec, num_eqs);
 	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
 		return -ENOMEM;
@@ -250,91 +251,205 @@ struct mlx5_reg_host_endianess {
 #define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))
 
 enum {
 	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
-				CAP_MASK(MLX5_CAP_OFF_DCT, 1),
+				MLX5_DEV_CAP_FLAG_DCT,
 };
 
+static u16 to_fw_pkey_sz(u32 size)
+{
+	switch (size) {
+	case 128:
+		return 0;
+	case 256:
+		return 1;
+	case 512:
+		return 2;
+	case 1024:
+		return 3;
+	case 2048:
+		return 4;
+	case 4096:
+		return 5;
+	default:
+		pr_warn("invalid pkey table size %d\n", size);
+		return 0;
+	}
+}
+
 /* selectively copy writable fields clearing any reserved area
  */
-static void copy_rw_fields(struct mlx5_hca_cap *to, struct mlx5_hca_cap *from)
+static void copy_rw_fields(void *to, struct mlx5_caps *from)
 {
+	__be64 *flags_off = (__be64 *)MLX5_ADDR_OF(cmd_hca_cap, to, reserved_22);
 	u64 v64;
 
-	to->log_max_qp = from->log_max_qp & 0x1f;
-	to->log_max_ra_req_dc = from->log_max_ra_req_dc & 0x3f;
-	to->log_max_ra_res_dc = from->log_max_ra_res_dc & 0x3f;
-	to->log_max_ra_req_qp = from->log_max_ra_req_qp & 0x3f;
-	to->log_max_ra_res_qp = from->log_max_ra_res_qp & 0x3f;
-	to->log_max_atomic_size_qp = from->log_max_atomic_size_qp;
-	to->log_max_atomic_size_dc = from->log_max_atomic_size_dc;
-	v64 = be64_to_cpu(from->flags) & MLX5_CAP_BITS_RW_MASK;
-	to->flags = cpu_to_be64(v64);
+	MLX5_SET(cmd_hca_cap, to, log_max_qp, from->gen.log_max_qp);
+	MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp);
+	MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp);
+	MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size);
+	MLX5_SET(cmd_hca_cap, to, log_max_ra_req_dc, from->gen.log_max_ra_req_dc);
+	MLX5_SET(cmd_hca_cap, to, log_max_ra_res_dc, from->gen.log_max_ra_res_dc);
+	MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size));
+	v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK;
+	*flags_off = cpu_to_be64(v64);
 }
 
-enum {
-	HCA_CAP_OPMOD_GET_MAX	= 0,
-	HCA_CAP_OPMOD_GET_CUR	= 1,
-};
+static u16 get_pkey_table_size(int pkey)
+{
+	if (pkey > MLX5_MAX_LOG_PKEY_TABLE)
+		return 0;
 
-static int handle_hca_cap(struct mlx5_core_dev *dev)
+	return MLX5_MIN_PKEY_TABLE_SIZE << pkey;
+}
+
+static void fw2drv_caps(struct mlx5_caps *caps, void *out)
+{
+	struct mlx5_general_caps *gen = &caps->gen;
+
+	gen->max_srq_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_srq_sz);
+	gen->max_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_qp_sz);
+	gen->log_max_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_qp);
+	gen->log_max_strq = MLX5_GET_PR(cmd_hca_cap, out, log_max_strq_sz);
+	gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srqs);
+	gen->max_cqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_cq_sz);
+	gen->log_max_cq = MLX5_GET_PR(cmd_hca_cap, out, log_max_cq);
+	gen->max_eqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_eq_sz);
+	gen->log_max_mkey = MLX5_GET_PR(cmd_hca_cap, out, log_max_mkey);
+	gen->log_max_eq = MLX5_GET_PR(cmd_hca_cap, out, log_max_eq);
+	gen->max_indirection = MLX5_GET_PR(cmd_hca_cap, out, max_indirection);
+	gen->log_max_mrw_sz = MLX5_GET_PR(cmd_hca_cap, out, log_max_mrw_sz);
+	gen->log_max_bsf_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_bsf_list_size);
+	gen->log_max_klm_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_klm_list_size);
+	gen->log_max_ra_req_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_dc);
+	gen->log_max_ra_res_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_dc);
+	gen->log_max_ra_req_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_qp);
+	gen->log_max_ra_res_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_qp);
+	gen->max_qp_counters = MLX5_GET_PR(cmd_hca_cap, out, max_qp_cnt);
+	gen->pkey_table_size = get_pkey_table_size(MLX5_GET_PR(cmd_hca_cap, out, pkey_table_size));
+	gen->local_ca_ack_delay = MLX5_GET_PR(cmd_hca_cap, out, local_ca_ack_delay);
+	gen->num_ports = MLX5_GET_PR(cmd_hca_cap, out, num_ports);
+	gen->log_max_msg = MLX5_GET_PR(cmd_hca_cap, out, log_max_msg);
+	gen->stat_rate_support = MLX5_GET_PR(cmd_hca_cap, out, stat_rate_support);
+	gen->flags = be64_to_cpu(*(__be64 *)MLX5_ADDR_OF(cmd_hca_cap, out, reserved_22));
+	pr_debug("flags = 0x%llx\n", gen->flags);
+	gen->uar_sz = MLX5_GET_PR(cmd_hca_cap, out, uar_sz);
+	gen->min_log_pg_sz = MLX5_GET_PR(cmd_hca_cap, out, log_pg_sz);
+	gen->bf_reg_size = MLX5_GET_PR(cmd_hca_cap, out, bf);
+	gen->bf_reg_size = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_bf_reg_size);
+	gen->max_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq);
+	gen->max_rq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_rq);
+	gen->max_dc_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq_dc);
+	gen->max_qp_mcg = MLX5_GET_PR(cmd_hca_cap, out, max_qp_mcg);
+	gen->log_max_pd = MLX5_GET_PR(cmd_hca_cap, out, log_max_pd);
+	gen->log_max_xrcd = MLX5_GET_PR(cmd_hca_cap, out, log_max_xrcd);
+	gen->log_uar_page_sz = MLX5_GET_PR(cmd_hca_cap, out, log_uar_page_sz);
+}
+
+static const char *caps_opmod_str(u16 opmod)
 {
-	struct mlx5_cmd_query_hca_cap_mbox_out *query_out = NULL;
-	struct mlx5_cmd_set_hca_cap_mbox_in *set_ctx = NULL;
-	struct mlx5_cmd_query_hca_cap_mbox_in query_ctx;
-	struct mlx5_cmd_set_hca_cap_mbox_out set_out;
-	u64 flags;
+	switch (opmod) {
+	case HCA_CAP_OPMOD_GET_MAX:
+		return "GET_MAX";
+	case HCA_CAP_OPMOD_GET_CUR:
+		return "GET_CUR";
+	default:
+		return "Invalid";
+	}
+}
+
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
+		       u16 opmod)
+{
+	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
+	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+	void *out;
 	int err;
 
-	memset(&query_ctx, 0, sizeof(query_ctx));
-	query_out = kzalloc(sizeof(*query_out), GFP_KERNEL);
-	if (!query_out)
+	memset(in, 0, sizeof(in));
+	out = kzalloc(out_sz, GFP_KERNEL);
+	if (!out)
 		return -ENOMEM;
+	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
+	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+	if (err)
+		goto query_ex;
 
-	set_ctx = kzalloc(sizeof(*set_ctx), GFP_KERNEL);
-	if (!set_ctx) {
-		err = -ENOMEM;
+	err = mlx5_cmd_status_to_err_v2(out);
+	if (err) {
+		mlx5_core_warn(dev, "query max hca cap failed, %d\n", err);
 		goto query_ex;
 	}
+	mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod));
+	fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct));
+
+query_ex:
+	kfree(out);
+	return err;
+}
 
-	query_ctx.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_HCA_CAP);
-	query_ctx.hdr.opmod = cpu_to_be16(HCA_CAP_OPMOD_GET_CUR);
-	err = mlx5_cmd_exec(dev, &query_ctx, sizeof(query_ctx),
-			    query_out, sizeof(*query_out));
+static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz)
+{
+	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
+	int err;
+
+	memset(out, 0, sizeof(out));
397 | |||
398 | MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP); | ||
399 | err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out)); | ||
303 | if (err) | 400 | if (err) |
304 | goto query_ex; | 401 | return err; |
305 | 402 | ||
306 | err = mlx5_cmd_status_to_err(&query_out->hdr); | 403 | err = mlx5_cmd_status_to_err_v2(out); |
307 | if (err) { | 404 | |
308 | mlx5_core_warn(dev, "query hca cap failed, %d\n", err); | 405 | return err; |
406 | } | ||
407 | |||
408 | static int handle_hca_cap(struct mlx5_core_dev *dev) | ||
409 | { | ||
410 | void *set_ctx = NULL; | ||
411 | struct mlx5_profile *prof = dev->profile; | ||
412 | struct mlx5_caps *cur_caps = NULL; | ||
413 | struct mlx5_caps *max_caps = NULL; | ||
414 | int err = -ENOMEM; | ||
415 | int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); | ||
416 | |||
417 | set_ctx = kzalloc(set_sz, GFP_KERNEL); | ||
418 | if (!set_ctx) | ||
309 | goto query_ex; | 419 | goto query_ex; |
310 | } | ||
311 | 420 | ||
312 | copy_rw_fields(&set_ctx->hca_cap, &query_out->hca_cap); | 421 | max_caps = kzalloc(sizeof(*max_caps), GFP_KERNEL); |
422 | if (!max_caps) | ||
423 | goto query_ex; | ||
313 | 424 | ||
314 | if (dev->profile && dev->profile->mask & MLX5_PROF_MASK_QP_SIZE) | 425 | cur_caps = kzalloc(sizeof(*cur_caps), GFP_KERNEL); |
315 | set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp; | 426 | if (!cur_caps) |
427 | goto query_ex; | ||
316 | 428 | ||
317 | flags = be64_to_cpu(query_out->hca_cap.flags); | 429 | err = mlx5_core_get_caps(dev, max_caps, HCA_CAP_OPMOD_GET_MAX); |
318 | /* disable checksum */ | 430 | if (err) |
319 | flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM; | ||
320 | |||
321 | set_ctx->hca_cap.flags = cpu_to_be64(flags); | ||
322 | memset(&set_out, 0, sizeof(set_out)); | ||
323 | set_ctx->hca_cap.log_uar_page_sz = cpu_to_be16(PAGE_SHIFT - 12); | ||
324 | set_ctx->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_SET_HCA_CAP); | ||
325 | err = mlx5_cmd_exec(dev, set_ctx, sizeof(*set_ctx), | ||
326 | &set_out, sizeof(set_out)); | ||
327 | if (err) { | ||
328 | mlx5_core_warn(dev, "set hca cap failed, %d\n", err); | ||
329 | goto query_ex; | 431 | goto query_ex; |
330 | } | ||
331 | 432 | ||
332 | err = mlx5_cmd_status_to_err(&set_out.hdr); | 433 | err = mlx5_core_get_caps(dev, cur_caps, HCA_CAP_OPMOD_GET_CUR); |
333 | if (err) | 434 | if (err) |
334 | goto query_ex; | 435 | goto query_ex; |
335 | 436 | ||
437 | /* we limit the size of the pkey table to 128 entries for now */ | ||
438 | cur_caps->gen.pkey_table_size = 128; | ||
439 | |||
440 | if (prof->mask & MLX5_PROF_MASK_QP_SIZE) | ||
441 | cur_caps->gen.log_max_qp = prof->log_max_qp; | ||
442 | |||
443 | /* disable checksum */ | ||
444 | cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM; | ||
445 | |||
446 | copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, hca_capability_struct), | ||
447 | cur_caps); | ||
448 | err = set_caps(dev, set_ctx, set_sz); | ||
449 | |||
336 | query_ex: | 450 | query_ex: |
337 | kfree(query_out); | 451 | kfree(cur_caps); |
452 | kfree(max_caps); | ||
338 | kfree(set_ctx); | 453 | kfree(set_ctx); |
339 | 454 | ||
340 | return err; | 455 | return err; |
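The GET_MAX/GET_CUR split above queries the firmware's maximum and currently enabled capabilities separately, and get_pkey_table_size() turns the log-scaled index the firmware reports into an entry count. Below is a minimal user-space sketch of that decoding, assuming the MLX5_MIN_PKEY_TABLE_SIZE/MLX5_MAX_LOG_PKEY_TABLE values introduced by this series; the helper name is illustrative, not the driver symbol.

#include <stdio.h>

#define MLX5_MIN_PKEY_TABLE_SIZE 128
#define MLX5_MAX_LOG_PKEY_TABLE  5

/* Mirror of get_pkey_table_size(): the firmware field is a shift applied
 * to the 128-entry minimum, so valid indexes map to 128..4096 entries. */
static unsigned int pkey_table_entries(int pkey)
{
	if (pkey > MLX5_MAX_LOG_PKEY_TABLE)
		return 0;			/* out of range */
	return MLX5_MIN_PKEY_TABLE_SIZE << pkey;
}

int main(void)
{
	int i;

	for (i = 0; i <= MLX5_MAX_LOG_PKEY_TABLE; i++)
		printf("pkey_table_size index %d -> %u entries\n",
		       i, pkey_table_entries(i));
	return 0;
}

handle_hca_cap() above then clamps the current value to 128 entries before writing it back, matching the "128 entries for now" comment in the hunk.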
@@ -782,6 +897,7 @@ static void remove_one(struct pci_dev *pdev) | |||
782 | 897 | ||
783 | static const struct pci_device_id mlx5_core_pci_table[] = { | 898 | static const struct pci_device_id mlx5_core_pci_table[] = { |
784 | { PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */ | 899 | { PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */ |
900 | { PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */ | ||
785 | { 0, } | 901 | { 0, } |
786 | }; | 902 | }; |
787 | 903 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index 8145b4668229..5261a2b0da43 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c | |||
@@ -39,28 +39,53 @@ | |||
39 | 39 | ||
40 | #include "mlx5_core.h" | 40 | #include "mlx5_core.h" |
41 | 41 | ||
42 | void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type) | 42 | static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev, |
43 | u32 rsn) | ||
43 | { | 44 | { |
44 | struct mlx5_qp_table *table = &dev->priv.qp_table; | 45 | struct mlx5_qp_table *table = &dev->priv.qp_table; |
45 | struct mlx5_core_qp *qp; | 46 | struct mlx5_core_rsc_common *common; |
46 | 47 | ||
47 | spin_lock(&table->lock); | 48 | spin_lock(&table->lock); |
48 | 49 | ||
49 | qp = radix_tree_lookup(&table->tree, qpn); | 50 | common = radix_tree_lookup(&table->tree, rsn); |
50 | if (qp) | 51 | if (common) |
51 | atomic_inc(&qp->refcount); | 52 | atomic_inc(&common->refcount); |
52 | 53 | ||
53 | spin_unlock(&table->lock); | 54 | spin_unlock(&table->lock); |
54 | 55 | ||
55 | if (!qp) { | 56 | if (!common) { |
56 | mlx5_core_warn(dev, "Async event for bogus QP 0x%x\n", qpn); | 57 | mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n", |
57 | return; | 58 | rsn); |
59 | return NULL; | ||
58 | } | 60 | } |
61 | return common; | ||
62 | } | ||
59 | 63 | ||
60 | qp->event(qp, event_type); | 64 | void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common) |
65 | { | ||
66 | if (atomic_dec_and_test(&common->refcount)) | ||
67 | complete(&common->free); | ||
68 | } | ||
69 | |||
70 | void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type) | ||
71 | { | ||
72 | struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn); | ||
73 | struct mlx5_core_qp *qp; | ||
74 | |||
75 | if (!common) | ||
76 | return; | ||
77 | |||
78 | switch (common->res) { | ||
79 | case MLX5_RES_QP: | ||
80 | qp = (struct mlx5_core_qp *)common; | ||
81 | qp->event(qp, event_type); | ||
82 | break; | ||
83 | |||
84 | default: | ||
85 | mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn); | ||
86 | } | ||
61 | 87 | ||
62 | if (atomic_dec_and_test(&qp->refcount)) | 88 | mlx5_core_put_rsc(common); |
63 | complete(&qp->free); | ||
64 | } | 89 | } |
65 | 90 | ||
66 | int mlx5_core_create_qp(struct mlx5_core_dev *dev, | 91 | int mlx5_core_create_qp(struct mlx5_core_dev *dev, |
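mlx5_get_rsc() takes a reference under the table lock, mlx5_core_put_rsc() drops it, and the last drop signals the resource's free completion, so the destroy path can wait until any in-flight event handlers are done. A user-space analogy of that lifetime pattern follows (a sketch only: pthread primitives stand in for the kernel's spinlock/atomic/struct completion, and all names are made up).

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct rsc_common {
	atomic_int      refcount;
	pthread_mutex_t lock;
	pthread_cond_t  freed;
	int             is_free;
};

static void rsc_init(struct rsc_common *c)
{
	atomic_init(&c->refcount, 1);	/* creator holds the initial reference */
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->freed, NULL);
	c->is_free = 0;
}

static void rsc_put(struct rsc_common *c)
{
	if (atomic_fetch_sub(&c->refcount, 1) == 1) {	/* last reference dropped */
		pthread_mutex_lock(&c->lock);
		c->is_free = 1;
		pthread_cond_signal(&c->freed);
		pthread_mutex_unlock(&c->lock);
	}
}

static void rsc_destroy(struct rsc_common *c)
{
	rsc_put(c);			/* drop the creator's reference ... */
	pthread_mutex_lock(&c->lock);
	while (!c->is_free)		/* ... and wait for in-flight users */
		pthread_cond_wait(&c->freed, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct rsc_common rsc;

	rsc_init(&rsc);
	atomic_fetch_add(&rsc.refcount, 1);	/* an event handler looked us up */
	rsc_put(&rsc);				/* handler finished */
	rsc_destroy(&rsc);			/* safe: waits for the last put */
	printf("resource torn down safely\n");
	return 0;
}

mlx5_core_destroy_qp() below follows the same order: it deletes the radix-tree entry first so no new lookups can succeed, drops its reference via mlx5_core_put_rsc(), then waits on qp->common.free.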
@@ -92,6 +117,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev, | |||
92 | qp->qpn = be32_to_cpu(out.qpn) & 0xffffff; | 117 | qp->qpn = be32_to_cpu(out.qpn) & 0xffffff; |
93 | mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); | 118 | mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); |
94 | 119 | ||
120 | qp->common.res = MLX5_RES_QP; | ||
95 | spin_lock_irq(&table->lock); | 121 | spin_lock_irq(&table->lock); |
96 | err = radix_tree_insert(&table->tree, qp->qpn, qp); | 122 | err = radix_tree_insert(&table->tree, qp->qpn, qp); |
97 | spin_unlock_irq(&table->lock); | 123 | spin_unlock_irq(&table->lock); |
@@ -106,9 +132,9 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev, | |||
106 | qp->qpn); | 132 | qp->qpn); |
107 | 133 | ||
108 | qp->pid = current->pid; | 134 | qp->pid = current->pid; |
109 | atomic_set(&qp->refcount, 1); | 135 | atomic_set(&qp->common.refcount, 1); |
110 | atomic_inc(&dev->num_qps); | 136 | atomic_inc(&dev->num_qps); |
111 | init_completion(&qp->free); | 137 | init_completion(&qp->common.free); |
112 | 138 | ||
113 | return 0; | 139 | return 0; |
114 | 140 | ||
@@ -138,9 +164,8 @@ int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, | |||
138 | radix_tree_delete(&table->tree, qp->qpn); | 164 | radix_tree_delete(&table->tree, qp->qpn); |
139 | spin_unlock_irqrestore(&table->lock, flags); | 165 | spin_unlock_irqrestore(&table->lock, flags); |
140 | 166 | ||
141 | if (atomic_dec_and_test(&qp->refcount)) | 167 | mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp); |
142 | complete(&qp->free); | 168 | wait_for_completion(&qp->common.free); |
143 | wait_for_completion(&qp->free); | ||
144 | 169 | ||
145 | memset(&in, 0, sizeof(in)); | 170 | memset(&in, 0, sizeof(in)); |
146 | memset(&out, 0, sizeof(out)); | 171 | memset(&out, 0, sizeof(out)); |
@@ -184,13 +209,10 @@ int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state, | |||
184 | [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, | 209 | [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, |
185 | [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, | 210 | [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, |
186 | [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP, | 211 | [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP, |
187 | [MLX5_QP_STATE_SQD] = MLX5_CMD_OP_RTS2SQD_QP, | ||
188 | }, | 212 | }, |
189 | [MLX5_QP_STATE_SQD] = { | 213 | [MLX5_QP_STATE_SQD] = { |
190 | [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, | 214 | [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, |
191 | [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, | 215 | [MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP, |
192 | [MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQD2RTS_QP, | ||
193 | [MLX5_QP_STATE_SQD] = MLX5_CMD_OP_SQD2SQD_QP, | ||
194 | }, | 216 | }, |
195 | [MLX5_QP_STATE_SQER] = { | 217 | [MLX5_QP_STATE_SQER] = { |
196 | [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, | 218 | [MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c index 68f5d9c77c7b..0a6348cefc01 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c | |||
@@ -174,11 +174,11 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari) | |||
174 | for (i = 0; i < tot_uuars; i++) { | 174 | for (i = 0; i < tot_uuars; i++) { |
175 | bf = &uuari->bfs[i]; | 175 | bf = &uuari->bfs[i]; |
176 | 176 | ||
177 | bf->buf_size = dev->caps.bf_reg_size / 2; | 177 | bf->buf_size = dev->caps.gen.bf_reg_size / 2; |
178 | bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE]; | 178 | bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE]; |
179 | bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map; | 179 | bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map; |
180 | bf->reg = NULL; /* Add WC support */ | 180 | bf->reg = NULL; /* Add WC support */ |
181 | bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.bf_reg_size + | 181 | bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.gen.bf_reg_size + |
182 | MLX5_BF_OFFSET; | 182 | MLX5_BF_OFFSET; |
183 | bf->need_lock = need_uuar_lock(i); | 183 | bf->need_lock = need_uuar_lock(i); |
184 | spin_lock_init(&bf->lock); | 184 | spin_lock_init(&bf->lock); |
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 334947151dfc..1d67fd32e71c 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h | |||
@@ -44,6 +44,50 @@ | |||
44 | #error Host endianness not defined | 44 | #error Host endianness not defined |
45 | #endif | 45 | #endif |
46 | 46 | ||
47 | /* helper macros */ | ||
48 | #define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0) | ||
49 | #define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld) | ||
50 | #define __mlx5_bit_off(typ, fld) ((unsigned)(unsigned long)(&(__mlx5_nullp(typ)->fld))) | ||
51 | #define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32) | ||
52 | #define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64) | ||
53 | #define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f)) | ||
54 | #define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1)) | ||
55 | #define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld)) | ||
56 | #define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits) | ||
57 | |||
58 | #define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8) | ||
59 | #define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8) | ||
60 | #define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32) | ||
61 | #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8) | ||
62 | #define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld)) | ||
63 | |||
64 | /* insert a value to a struct */ | ||
65 | #define MLX5_SET(typ, p, fld, v) do { \ | ||
66 | BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \ | ||
67 | *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \ | ||
68 | cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \ | ||
69 | (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \ | ||
70 | << __mlx5_dw_bit_off(typ, fld))); \ | ||
71 | } while (0) | ||
72 | |||
73 | #define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\ | ||
74 | __mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \ | ||
75 | __mlx5_mask(typ, fld)) | ||
76 | |||
77 | #define MLX5_GET_PR(typ, p, fld) ({ \ | ||
78 | u32 ___t = MLX5_GET(typ, p, fld); \ | ||
79 | pr_debug(#fld " = 0x%x\n", ___t); \ | ||
80 | ___t; \ | ||
81 | }) | ||
82 | |||
83 | #define MLX5_SET64(typ, p, fld, v) do { \ | ||
84 | BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \ | ||
85 | BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \ | ||
86 | *((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \ | ||
87 | } while (0) | ||
88 | |||
89 | #define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld))) | ||
90 | |||
47 | enum { | 91 | enum { |
48 | MLX5_MAX_COMMANDS = 32, | 92 | MLX5_MAX_COMMANDS = 32, |
49 | MLX5_CMD_DATA_BLOCK_SIZE = 512, | 93 | MLX5_CMD_DATA_BLOCK_SIZE = 512, |
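The helper macros above derive everything from the mlx5_ifc *_bits layout structs: sizeof() a field gives its width in bits, its byte offset doubles as its bit offset, and from those the dword index, the shift inside the big-endian dword, and the mask all follow. Here is a self-contained sketch of that arithmetic on a made-up layout; the TOY_* names and the struct are illustrative only, while the math mirrors what MLX5_SET()/MLX5_GET() do.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>	/* htonl/ntohl stand in for cpu_to_be32/be32_to_cpu */

/* Each u8 element represents one bit, so array lengths are bit counts
 * and byte offsets are bit offsets. */
struct toy_bits {
	uint8_t flags[0x10];		/* 16 bits */
	uint8_t log_max_thing[0x5];	/*  5 bits */
	uint8_t reserved_0[0xb];	/* 11 bits -> one full dword */
};

#define BIT_SZ(fld)	sizeof(((struct toy_bits *)0)->fld)
#define BIT_OFF(fld)	offsetof(struct toy_bits, fld)
#define DW_OFF(fld)	(BIT_OFF(fld) / 32)
#define DW_SHIFT(fld)	(32 - BIT_SZ(fld) - (BIT_OFF(fld) & 0x1f))
#define FLD_MASK(fld)	((uint32_t)((1ull << BIT_SZ(fld)) - 1))

#define TOY_SET(p, fld, v) do {						\
	uint32_t *dw = (uint32_t *)(p) + DW_OFF(fld);			\
	uint32_t cur = ntohl(*dw);					\
	cur &= ~(FLD_MASK(fld) << DW_SHIFT(fld));			\
	cur |= ((uint32_t)(v) & FLD_MASK(fld)) << DW_SHIFT(fld);	\
	*dw = htonl(cur);						\
} while (0)

#define TOY_GET(p, fld) \
	((ntohl(*((uint32_t *)(p) + DW_OFF(fld))) >> DW_SHIFT(fld)) & FLD_MASK(fld))

int main(void)
{
	uint32_t buf[1] = { 0 };

	TOY_SET(buf, log_max_thing, 0x13);
	printf("dword %lu, shift %lu, mask 0x%x, readback 0x%x\n",
	       (unsigned long)DW_OFF(log_max_thing),
	       (unsigned long)DW_SHIFT(log_max_thing),
	       FLD_MASK(log_max_thing), TOY_GET(buf, log_max_thing));
	return 0;
}

fw2drv_caps() and copy_rw_fields() earlier in the series lean entirely on this: instead of the hand-maintained struct mlx5_hca_cap with byte-exact padding (removed below), fields are addressed by name in the generated layout.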
@@ -71,6 +115,11 @@ enum { | |||
71 | }; | 115 | }; |
72 | 116 | ||
73 | enum { | 117 | enum { |
118 | MLX5_MIN_PKEY_TABLE_SIZE = 128, | ||
119 | MLX5_MAX_LOG_PKEY_TABLE = 5, | ||
120 | }; | ||
121 | |||
122 | enum { | ||
74 | MLX5_PERM_LOCAL_READ = 1 << 2, | 123 | MLX5_PERM_LOCAL_READ = 1 << 2, |
75 | MLX5_PERM_LOCAL_WRITE = 1 << 3, | 124 | MLX5_PERM_LOCAL_WRITE = 1 << 3, |
76 | MLX5_PERM_REMOTE_READ = 1 << 4, | 125 | MLX5_PERM_REMOTE_READ = 1 << 4, |
@@ -184,10 +233,10 @@ enum { | |||
184 | MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29, | 233 | MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29, |
185 | MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30, | 234 | MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30, |
186 | MLX5_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32, | 235 | MLX5_DEV_CAP_FLAG_RESIZE_SRQ = 1LL << 32, |
236 | MLX5_DEV_CAP_FLAG_DCT = 1LL << 37, | ||
187 | MLX5_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38, | 237 | MLX5_DEV_CAP_FLAG_REMOTE_FENCE = 1LL << 38, |
188 | MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39, | 238 | MLX5_DEV_CAP_FLAG_TLP_HINTS = 1LL << 39, |
189 | MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, | 239 | MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, |
190 | MLX5_DEV_CAP_FLAG_DCT = 1LL << 41, | ||
191 | MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46, | 240 | MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46, |
192 | }; | 241 | }; |
193 | 242 | ||
@@ -243,10 +292,14 @@ enum { | |||
243 | }; | 292 | }; |
244 | 293 | ||
245 | enum { | 294 | enum { |
246 | MLX5_CAP_OFF_DCT = 41, | ||
247 | MLX5_CAP_OFF_CMDIF_CSUM = 46, | 295 | MLX5_CAP_OFF_CMDIF_CSUM = 46, |
248 | }; | 296 | }; |
249 | 297 | ||
298 | enum { | ||
299 | HCA_CAP_OPMOD_GET_MAX = 0, | ||
300 | HCA_CAP_OPMOD_GET_CUR = 1, | ||
301 | }; | ||
302 | |||
250 | struct mlx5_inbox_hdr { | 303 | struct mlx5_inbox_hdr { |
251 | __be16 opcode; | 304 | __be16 opcode; |
252 | u8 rsvd[4]; | 305 | u8 rsvd[4]; |
@@ -274,101 +327,6 @@ struct mlx5_cmd_query_adapter_mbox_out { | |||
274 | u8 vsd_psid[16]; | 327 | u8 vsd_psid[16]; |
275 | }; | 328 | }; |
276 | 329 | ||
277 | struct mlx5_hca_cap { | ||
278 | u8 rsvd1[16]; | ||
279 | u8 log_max_srq_sz; | ||
280 | u8 log_max_qp_sz; | ||
281 | u8 rsvd2; | ||
282 | u8 log_max_qp; | ||
283 | u8 log_max_strq_sz; | ||
284 | u8 log_max_srqs; | ||
285 | u8 rsvd4[2]; | ||
286 | u8 rsvd5; | ||
287 | u8 log_max_cq_sz; | ||
288 | u8 rsvd6; | ||
289 | u8 log_max_cq; | ||
290 | u8 log_max_eq_sz; | ||
291 | u8 log_max_mkey; | ||
292 | u8 rsvd7; | ||
293 | u8 log_max_eq; | ||
294 | u8 max_indirection; | ||
295 | u8 log_max_mrw_sz; | ||
296 | u8 log_max_bsf_list_sz; | ||
297 | u8 log_max_klm_list_sz; | ||
298 | u8 rsvd_8_0; | ||
299 | u8 log_max_ra_req_dc; | ||
300 | u8 rsvd_8_1; | ||
301 | u8 log_max_ra_res_dc; | ||
302 | u8 rsvd9; | ||
303 | u8 log_max_ra_req_qp; | ||
304 | u8 rsvd10; | ||
305 | u8 log_max_ra_res_qp; | ||
306 | u8 rsvd11[4]; | ||
307 | __be16 max_qp_count; | ||
308 | __be16 rsvd12; | ||
309 | u8 rsvd13; | ||
310 | u8 local_ca_ack_delay; | ||
311 | u8 rsvd14; | ||
312 | u8 num_ports; | ||
313 | u8 log_max_msg; | ||
314 | u8 rsvd15[3]; | ||
315 | __be16 stat_rate_support; | ||
316 | u8 rsvd16[2]; | ||
317 | __be64 flags; | ||
318 | u8 rsvd17; | ||
319 | u8 uar_sz; | ||
320 | u8 rsvd18; | ||
321 | u8 log_pg_sz; | ||
322 | __be16 bf_log_bf_reg_size; | ||
323 | u8 rsvd19[4]; | ||
324 | __be16 max_desc_sz_sq; | ||
325 | u8 rsvd20[2]; | ||
326 | __be16 max_desc_sz_rq; | ||
327 | u8 rsvd21[2]; | ||
328 | __be16 max_desc_sz_sq_dc; | ||
329 | __be32 max_qp_mcg; | ||
330 | u8 rsvd22[3]; | ||
331 | u8 log_max_mcg; | ||
332 | u8 rsvd23; | ||
333 | u8 log_max_pd; | ||
334 | u8 rsvd24; | ||
335 | u8 log_max_xrcd; | ||
336 | u8 rsvd25[42]; | ||
337 | __be16 log_uar_page_sz; | ||
338 | u8 rsvd26[28]; | ||
339 | u8 log_max_atomic_size_qp; | ||
340 | u8 rsvd27[2]; | ||
341 | u8 log_max_atomic_size_dc; | ||
342 | u8 rsvd28[76]; | ||
343 | }; | ||
344 | |||
345 | |||
346 | struct mlx5_cmd_query_hca_cap_mbox_in { | ||
347 | struct mlx5_inbox_hdr hdr; | ||
348 | u8 rsvd[8]; | ||
349 | }; | ||
350 | |||
351 | |||
352 | struct mlx5_cmd_query_hca_cap_mbox_out { | ||
353 | struct mlx5_outbox_hdr hdr; | ||
354 | u8 rsvd0[8]; | ||
355 | struct mlx5_hca_cap hca_cap; | ||
356 | }; | ||
357 | |||
358 | |||
359 | struct mlx5_cmd_set_hca_cap_mbox_in { | ||
360 | struct mlx5_inbox_hdr hdr; | ||
361 | u8 rsvd[8]; | ||
362 | struct mlx5_hca_cap hca_cap; | ||
363 | }; | ||
364 | |||
365 | |||
366 | struct mlx5_cmd_set_hca_cap_mbox_out { | ||
367 | struct mlx5_outbox_hdr hdr; | ||
368 | u8 rsvd0[8]; | ||
369 | }; | ||
370 | |||
371 | |||
372 | struct mlx5_cmd_init_hca_mbox_in { | 330 | struct mlx5_cmd_init_hca_mbox_in { |
373 | struct mlx5_inbox_hdr hdr; | 331 | struct mlx5_inbox_hdr hdr; |
374 | u8 rsvd0[2]; | 332 | u8 rsvd0[2]; |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index b88e9b46d957..246310dc8bef 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
@@ -44,6 +44,7 @@ | |||
44 | 44 | ||
45 | #include <linux/mlx5/device.h> | 45 | #include <linux/mlx5/device.h> |
46 | #include <linux/mlx5/doorbell.h> | 46 | #include <linux/mlx5/doorbell.h> |
47 | #include <linux/mlx5/mlx5_ifc.h> | ||
47 | 48 | ||
48 | enum { | 49 | enum { |
49 | MLX5_BOARD_ID_LEN = 64, | 50 | MLX5_BOARD_ID_LEN = 64, |
@@ -99,81 +100,6 @@ enum { | |||
99 | }; | 100 | }; |
100 | 101 | ||
101 | enum { | 102 | enum { |
102 | MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, | ||
103 | MLX5_CMD_OP_QUERY_ADAPTER = 0x101, | ||
104 | MLX5_CMD_OP_INIT_HCA = 0x102, | ||
105 | MLX5_CMD_OP_TEARDOWN_HCA = 0x103, | ||
106 | MLX5_CMD_OP_ENABLE_HCA = 0x104, | ||
107 | MLX5_CMD_OP_DISABLE_HCA = 0x105, | ||
108 | MLX5_CMD_OP_QUERY_PAGES = 0x107, | ||
109 | MLX5_CMD_OP_MANAGE_PAGES = 0x108, | ||
110 | MLX5_CMD_OP_SET_HCA_CAP = 0x109, | ||
111 | |||
112 | MLX5_CMD_OP_CREATE_MKEY = 0x200, | ||
113 | MLX5_CMD_OP_QUERY_MKEY = 0x201, | ||
114 | MLX5_CMD_OP_DESTROY_MKEY = 0x202, | ||
115 | MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203, | ||
116 | |||
117 | MLX5_CMD_OP_CREATE_EQ = 0x301, | ||
118 | MLX5_CMD_OP_DESTROY_EQ = 0x302, | ||
119 | MLX5_CMD_OP_QUERY_EQ = 0x303, | ||
120 | |||
121 | MLX5_CMD_OP_CREATE_CQ = 0x400, | ||
122 | MLX5_CMD_OP_DESTROY_CQ = 0x401, | ||
123 | MLX5_CMD_OP_QUERY_CQ = 0x402, | ||
124 | MLX5_CMD_OP_MODIFY_CQ = 0x403, | ||
125 | |||
126 | MLX5_CMD_OP_CREATE_QP = 0x500, | ||
127 | MLX5_CMD_OP_DESTROY_QP = 0x501, | ||
128 | MLX5_CMD_OP_RST2INIT_QP = 0x502, | ||
129 | MLX5_CMD_OP_INIT2RTR_QP = 0x503, | ||
130 | MLX5_CMD_OP_RTR2RTS_QP = 0x504, | ||
131 | MLX5_CMD_OP_RTS2RTS_QP = 0x505, | ||
132 | MLX5_CMD_OP_SQERR2RTS_QP = 0x506, | ||
133 | MLX5_CMD_OP_2ERR_QP = 0x507, | ||
134 | MLX5_CMD_OP_RTS2SQD_QP = 0x508, | ||
135 | MLX5_CMD_OP_SQD2RTS_QP = 0x509, | ||
136 | MLX5_CMD_OP_2RST_QP = 0x50a, | ||
137 | MLX5_CMD_OP_QUERY_QP = 0x50b, | ||
138 | MLX5_CMD_OP_CONF_SQP = 0x50c, | ||
139 | MLX5_CMD_OP_MAD_IFC = 0x50d, | ||
140 | MLX5_CMD_OP_INIT2INIT_QP = 0x50e, | ||
141 | MLX5_CMD_OP_SUSPEND_QP = 0x50f, | ||
142 | MLX5_CMD_OP_UNSUSPEND_QP = 0x510, | ||
143 | MLX5_CMD_OP_SQD2SQD_QP = 0x511, | ||
144 | MLX5_CMD_OP_ALLOC_QP_COUNTER_SET = 0x512, | ||
145 | MLX5_CMD_OP_DEALLOC_QP_COUNTER_SET = 0x513, | ||
146 | MLX5_CMD_OP_QUERY_QP_COUNTER_SET = 0x514, | ||
147 | |||
148 | MLX5_CMD_OP_CREATE_PSV = 0x600, | ||
149 | MLX5_CMD_OP_DESTROY_PSV = 0x601, | ||
150 | MLX5_CMD_OP_QUERY_PSV = 0x602, | ||
151 | MLX5_CMD_OP_QUERY_SIG_RULE_TABLE = 0x603, | ||
152 | MLX5_CMD_OP_QUERY_BLOCK_SIZE_TABLE = 0x604, | ||
153 | |||
154 | MLX5_CMD_OP_CREATE_SRQ = 0x700, | ||
155 | MLX5_CMD_OP_DESTROY_SRQ = 0x701, | ||
156 | MLX5_CMD_OP_QUERY_SRQ = 0x702, | ||
157 | MLX5_CMD_OP_ARM_RQ = 0x703, | ||
158 | MLX5_CMD_OP_RESIZE_SRQ = 0x704, | ||
159 | |||
160 | MLX5_CMD_OP_ALLOC_PD = 0x800, | ||
161 | MLX5_CMD_OP_DEALLOC_PD = 0x801, | ||
162 | MLX5_CMD_OP_ALLOC_UAR = 0x802, | ||
163 | MLX5_CMD_OP_DEALLOC_UAR = 0x803, | ||
164 | |||
165 | MLX5_CMD_OP_ATTACH_TO_MCG = 0x806, | ||
166 | MLX5_CMD_OP_DETACH_FROM_MCG = 0x807, | ||
167 | |||
168 | |||
169 | MLX5_CMD_OP_ALLOC_XRCD = 0x80e, | ||
170 | MLX5_CMD_OP_DEALLOC_XRCD = 0x80f, | ||
171 | |||
172 | MLX5_CMD_OP_ACCESS_REG = 0x805, | ||
173 | MLX5_CMD_OP_MAX = 0x810, | ||
174 | }; | ||
175 | |||
176 | enum { | ||
177 | MLX5_REG_PCAP = 0x5001, | 103 | MLX5_REG_PCAP = 0x5001, |
178 | MLX5_REG_PMTU = 0x5003, | 104 | MLX5_REG_PMTU = 0x5003, |
179 | MLX5_REG_PTYS = 0x5004, | 105 | MLX5_REG_PTYS = 0x5004, |
@@ -335,23 +261,30 @@ struct mlx5_port_caps { | |||
335 | int pkey_table_len; | 261 | int pkey_table_len; |
336 | }; | 262 | }; |
337 | 263 | ||
338 | struct mlx5_caps { | 264 | struct mlx5_general_caps { |
339 | u8 log_max_eq; | 265 | u8 log_max_eq; |
340 | u8 log_max_cq; | 266 | u8 log_max_cq; |
341 | u8 log_max_qp; | 267 | u8 log_max_qp; |
342 | u8 log_max_mkey; | 268 | u8 log_max_mkey; |
343 | u8 log_max_pd; | 269 | u8 log_max_pd; |
344 | u8 log_max_srq; | 270 | u8 log_max_srq; |
271 | u8 log_max_strq; | ||
272 | u8 log_max_mrw_sz; | ||
273 | u8 log_max_bsf_list_size; | ||
274 | u8 log_max_klm_list_size; | ||
345 | u32 max_cqes; | 275 | u32 max_cqes; |
346 | int max_wqes; | 276 | int max_wqes; |
277 | u32 max_eqes; | ||
278 | u32 max_indirection; | ||
347 | int max_sq_desc_sz; | 279 | int max_sq_desc_sz; |
348 | int max_rq_desc_sz; | 280 | int max_rq_desc_sz; |
281 | int max_dc_sq_desc_sz; | ||
349 | u64 flags; | 282 | u64 flags; |
350 | u16 stat_rate_support; | 283 | u16 stat_rate_support; |
351 | int log_max_msg; | 284 | int log_max_msg; |
352 | int num_ports; | 285 | int num_ports; |
353 | int max_ra_res_qp; | 286 | u8 log_max_ra_res_qp; |
354 | int max_ra_req_qp; | 287 | u8 log_max_ra_req_qp; |
355 | int max_srq_wqes; | 288 | int max_srq_wqes; |
356 | int bf_reg_size; | 289 | int bf_reg_size; |
357 | int bf_regs_per_page; | 290 | int bf_regs_per_page; |
@@ -363,6 +296,19 @@ struct mlx5_caps { | |||
363 | u8 log_max_mcg; | 296 | u8 log_max_mcg; |
364 | u32 max_qp_mcg; | 297 | u32 max_qp_mcg; |
365 | int min_page_sz; | 298 | int min_page_sz; |
299 | int pd_cap; | ||
300 | u32 max_qp_counters; | ||
301 | u32 pkey_table_size; | ||
302 | u8 log_max_ra_req_dc; | ||
303 | u8 log_max_ra_res_dc; | ||
304 | u32 uar_sz; | ||
305 | u8 min_log_pg_sz; | ||
306 | u8 log_max_xrcd; | ||
307 | u16 log_uar_page_sz; | ||
308 | }; | ||
309 | |||
310 | struct mlx5_caps { | ||
311 | struct mlx5_general_caps gen; | ||
366 | }; | 312 | }; |
367 | 313 | ||
368 | struct mlx5_cmd_mailbox { | 314 | struct mlx5_cmd_mailbox { |
@@ -429,6 +375,16 @@ struct mlx5_core_mr { | |||
429 | u32 pd; | 375 | u32 pd; |
430 | }; | 376 | }; |
431 | 377 | ||
378 | enum mlx5_res_type { | ||
379 | MLX5_RES_QP, | ||
380 | }; | ||
381 | |||
382 | struct mlx5_core_rsc_common { | ||
383 | enum mlx5_res_type res; | ||
384 | atomic_t refcount; | ||
385 | struct completion free; | ||
386 | }; | ||
387 | |||
432 | struct mlx5_core_srq { | 388 | struct mlx5_core_srq { |
433 | u32 srqn; | 389 | u32 srqn; |
434 | int max; | 390 | int max; |
@@ -695,6 +651,9 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); | |||
695 | void mlx5_cmd_use_events(struct mlx5_core_dev *dev); | 651 | void mlx5_cmd_use_events(struct mlx5_core_dev *dev); |
696 | void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); | 652 | void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); |
697 | int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr); | 653 | int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr); |
654 | int mlx5_cmd_status_to_err_v2(void *ptr); | ||
655 | int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps, | ||
656 | u16 opmod); | ||
698 | int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, | 657 | int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, |
699 | int out_size); | 658 | int out_size); |
700 | int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, | 659 | int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, |
@@ -751,7 +710,7 @@ int mlx5_eq_init(struct mlx5_core_dev *dev); | |||
751 | void mlx5_eq_cleanup(struct mlx5_core_dev *dev); | 710 | void mlx5_eq_cleanup(struct mlx5_core_dev *dev); |
752 | void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); | 711 | void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas); |
753 | void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); | 712 | void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn); |
754 | void mlx5_qp_event(struct mlx5_core_dev *dev, u32 qpn, int event_type); | 713 | void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type); |
755 | void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); | 714 | void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type); |
756 | struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); | 715 | struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn); |
757 | void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector); | 716 | void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector); |
@@ -788,6 +747,7 @@ void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); | |||
788 | int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, | 747 | int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn, |
789 | int npsvs, u32 *sig_index); | 748 | int npsvs, u32 *sig_index); |
790 | int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num); | 749 | int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num); |
750 | void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common); | ||
791 | 751 | ||
792 | static inline u32 mlx5_mkey_to_idx(u32 mkey) | 752 | static inline u32 mlx5_mkey_to_idx(u32 mkey) |
793 | { | 753 | { |
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h new file mode 100644 index 000000000000..5f48b8f592c5 --- /dev/null +++ b/include/linux/mlx5/mlx5_ifc.h | |||
@@ -0,0 +1,349 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2014, Mellanox Technologies inc. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #ifndef MLX5_IFC_H | ||
34 | #define MLX5_IFC_H | ||
35 | |||
36 | enum { | ||
37 | MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, | ||
38 | MLX5_CMD_OP_QUERY_ADAPTER = 0x101, | ||
39 | MLX5_CMD_OP_INIT_HCA = 0x102, | ||
40 | MLX5_CMD_OP_TEARDOWN_HCA = 0x103, | ||
41 | MLX5_CMD_OP_ENABLE_HCA = 0x104, | ||
42 | MLX5_CMD_OP_DISABLE_HCA = 0x105, | ||
43 | MLX5_CMD_OP_QUERY_PAGES = 0x107, | ||
44 | MLX5_CMD_OP_MANAGE_PAGES = 0x108, | ||
45 | MLX5_CMD_OP_SET_HCA_CAP = 0x109, | ||
46 | MLX5_CMD_OP_CREATE_MKEY = 0x200, | ||
47 | MLX5_CMD_OP_QUERY_MKEY = 0x201, | ||
48 | MLX5_CMD_OP_DESTROY_MKEY = 0x202, | ||
49 | MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS = 0x203, | ||
50 | MLX5_CMD_OP_PAGE_FAULT_RESUME = 0x204, | ||
51 | MLX5_CMD_OP_CREATE_EQ = 0x301, | ||
52 | MLX5_CMD_OP_DESTROY_EQ = 0x302, | ||
53 | MLX5_CMD_OP_QUERY_EQ = 0x303, | ||
54 | MLX5_CMD_OP_GEN_EQE = 0x304, | ||
55 | MLX5_CMD_OP_CREATE_CQ = 0x400, | ||
56 | MLX5_CMD_OP_DESTROY_CQ = 0x401, | ||
57 | MLX5_CMD_OP_QUERY_CQ = 0x402, | ||
58 | MLX5_CMD_OP_MODIFY_CQ = 0x403, | ||
59 | MLX5_CMD_OP_CREATE_QP = 0x500, | ||
60 | MLX5_CMD_OP_DESTROY_QP = 0x501, | ||
61 | MLX5_CMD_OP_RST2INIT_QP = 0x502, | ||
62 | MLX5_CMD_OP_INIT2RTR_QP = 0x503, | ||
63 | MLX5_CMD_OP_RTR2RTS_QP = 0x504, | ||
64 | MLX5_CMD_OP_RTS2RTS_QP = 0x505, | ||
65 | MLX5_CMD_OP_SQERR2RTS_QP = 0x506, | ||
66 | MLX5_CMD_OP_2ERR_QP = 0x507, | ||
67 | MLX5_CMD_OP_2RST_QP = 0x50a, | ||
68 | MLX5_CMD_OP_QUERY_QP = 0x50b, | ||
69 | MLX5_CMD_OP_INIT2INIT_QP = 0x50e, | ||
70 | MLX5_CMD_OP_CREATE_PSV = 0x600, | ||
71 | MLX5_CMD_OP_DESTROY_PSV = 0x601, | ||
72 | MLX5_CMD_OP_CREATE_SRQ = 0x700, | ||
73 | MLX5_CMD_OP_DESTROY_SRQ = 0x701, | ||
74 | MLX5_CMD_OP_QUERY_SRQ = 0x702, | ||
75 | MLX5_CMD_OP_ARM_RQ = 0x703, | ||
76 | MLX5_CMD_OP_RESIZE_SRQ = 0x704, | ||
77 | MLX5_CMD_OP_CREATE_DCT = 0x710, | ||
78 | MLX5_CMD_OP_DESTROY_DCT = 0x711, | ||
79 | MLX5_CMD_OP_DRAIN_DCT = 0x712, | ||
80 | MLX5_CMD_OP_QUERY_DCT = 0x713, | ||
81 | MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION = 0x714, | ||
82 | MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750, | ||
83 | MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751, | ||
84 | MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752, | ||
85 | MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT = 0x753, | ||
86 | MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT = 0x754, | ||
87 | MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT = 0x755, | ||
88 | MLX5_CMD_OP_QUERY_RCOE_ADDRESS = 0x760, | ||
89 | MLX5_CMD_OP_SET_ROCE_ADDRESS = 0x761, | ||
90 | MLX5_CMD_OP_QUERY_VPORT_COUNTER = 0x770, | ||
91 | MLX5_CMD_OP_ALLOC_Q_COUNTER = 0x771, | ||
92 | MLX5_CMD_OP_DEALLOC_Q_COUNTER = 0x772, | ||
93 | MLX5_CMD_OP_QUERY_Q_COUNTER = 0x773, | ||
94 | MLX5_CMD_OP_ALLOC_PD = 0x800, | ||
95 | MLX5_CMD_OP_DEALLOC_PD = 0x801, | ||
96 | MLX5_CMD_OP_ALLOC_UAR = 0x802, | ||
97 | MLX5_CMD_OP_DEALLOC_UAR = 0x803, | ||
98 | MLX5_CMD_OP_CONFIG_INT_MODERATION = 0x804, | ||
99 | MLX5_CMD_OP_ACCESS_REG = 0x805, | ||
100 | MLX5_CMD_OP_ATTACH_TO_MCG = 0x806, | ||
101 | MLX5_CMD_OP_DETACH_FROM_MCG = 0x807, | ||
102 | MLX5_CMD_OP_GET_DROPPED_PACKET_LOG = 0x80a, | ||
103 | MLX5_CMD_OP_MAD_IFC = 0x50d, | ||
104 | MLX5_CMD_OP_QUERY_MAD_DEMUX = 0x80b, | ||
105 | MLX5_CMD_OP_SET_MAD_DEMUX = 0x80c, | ||
106 | MLX5_CMD_OP_NOP = 0x80d, | ||
107 | MLX5_CMD_OP_ALLOC_XRCD = 0x80e, | ||
108 | MLX5_CMD_OP_DEALLOC_XRCD = 0x80f, | ||
109 | MLX5_CMD_OP_SET_BURST_SIZE = 0x812, | ||
110 | MLX5_CMD_OP_QUERY_BURST_SZIE = 0x813, | ||
111 | MLX5_CMD_OP_ACTIVATE_TRACER = 0x814, | ||
112 | MLX5_CMD_OP_DEACTIVATE_TRACER = 0x815, | ||
113 | MLX5_CMD_OP_CREATE_SNIFFER_RULE = 0x820, | ||
114 | MLX5_CMD_OP_DESTROY_SNIFFER_RULE = 0x821, | ||
115 | MLX5_CMD_OP_QUERY_CONG_PARAMS = 0x822, | ||
116 | MLX5_CMD_OP_MODIFY_CONG_PARAMS = 0x823, | ||
117 | MLX5_CMD_OP_QUERY_CONG_STATISTICS = 0x824, | ||
118 | MLX5_CMD_OP_CREATE_TIR = 0x900, | ||
119 | MLX5_CMD_OP_MODIFY_TIR = 0x901, | ||
120 | MLX5_CMD_OP_DESTROY_TIR = 0x902, | ||
121 | MLX5_CMD_OP_QUERY_TIR = 0x903, | ||
122 | MLX5_CMD_OP_CREATE_TIS = 0x912, | ||
123 | MLX5_CMD_OP_MODIFY_TIS = 0x913, | ||
124 | MLX5_CMD_OP_DESTROY_TIS = 0x914, | ||
125 | MLX5_CMD_OP_QUERY_TIS = 0x915, | ||
126 | MLX5_CMD_OP_CREATE_SQ = 0x904, | ||
127 | MLX5_CMD_OP_MODIFY_SQ = 0x905, | ||
128 | MLX5_CMD_OP_DESTROY_SQ = 0x906, | ||
129 | MLX5_CMD_OP_QUERY_SQ = 0x907, | ||
130 | MLX5_CMD_OP_CREATE_RQ = 0x908, | ||
131 | MLX5_CMD_OP_MODIFY_RQ = 0x909, | ||
132 | MLX5_CMD_OP_DESTROY_RQ = 0x90a, | ||
133 | MLX5_CMD_OP_QUERY_RQ = 0x90b, | ||
134 | MLX5_CMD_OP_CREATE_RMP = 0x90c, | ||
135 | MLX5_CMD_OP_MODIFY_RMP = 0x90d, | ||
136 | MLX5_CMD_OP_DESTROY_RMP = 0x90e, | ||
137 | MLX5_CMD_OP_QUERY_RMP = 0x90f, | ||
138 | MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY = 0x910, | ||
139 | MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY = 0x911, | ||
140 | MLX5_CMD_OP_MAX = 0x911 | ||
141 | }; | ||
142 | |||
143 | struct mlx5_ifc_cmd_hca_cap_bits { | ||
144 | u8 reserved_0[0x80]; | ||
145 | |||
146 | u8 log_max_srq_sz[0x8]; | ||
147 | u8 log_max_qp_sz[0x8]; | ||
148 | u8 reserved_1[0xb]; | ||
149 | u8 log_max_qp[0x5]; | ||
150 | |||
151 | u8 log_max_strq_sz[0x8]; | ||
152 | u8 reserved_2[0x3]; | ||
153 | u8 log_max_srqs[0x5]; | ||
154 | u8 reserved_3[0x10]; | ||
155 | |||
156 | u8 reserved_4[0x8]; | ||
157 | u8 log_max_cq_sz[0x8]; | ||
158 | u8 reserved_5[0xb]; | ||
159 | u8 log_max_cq[0x5]; | ||
160 | |||
161 | u8 log_max_eq_sz[0x8]; | ||
162 | u8 reserved_6[0x2]; | ||
163 | u8 log_max_mkey[0x6]; | ||
164 | u8 reserved_7[0xc]; | ||
165 | u8 log_max_eq[0x4]; | ||
166 | |||
167 | u8 max_indirection[0x8]; | ||
168 | u8 reserved_8[0x1]; | ||
169 | u8 log_max_mrw_sz[0x7]; | ||
170 | u8 reserved_9[0x2]; | ||
171 | u8 log_max_bsf_list_size[0x6]; | ||
172 | u8 reserved_10[0x2]; | ||
173 | u8 log_max_klm_list_size[0x6]; | ||
174 | |||
175 | u8 reserved_11[0xa]; | ||
176 | u8 log_max_ra_req_dc[0x6]; | ||
177 | u8 reserved_12[0xa]; | ||
178 | u8 log_max_ra_res_dc[0x6]; | ||
179 | |||
180 | u8 reserved_13[0xa]; | ||
181 | u8 log_max_ra_req_qp[0x6]; | ||
182 | u8 reserved_14[0xa]; | ||
183 | u8 log_max_ra_res_qp[0x6]; | ||
184 | |||
185 | u8 pad_cap[0x1]; | ||
186 | u8 cc_query_allowed[0x1]; | ||
187 | u8 cc_modify_allowed[0x1]; | ||
188 | u8 reserved_15[0x1d]; | ||
189 | |||
190 | u8 reserved_16[0x6]; | ||
191 | u8 max_qp_cnt[0xa]; | ||
192 | u8 pkey_table_size[0x10]; | ||
193 | |||
194 | u8 eswitch_owner[0x1]; | ||
195 | u8 reserved_17[0xa]; | ||
196 | u8 local_ca_ack_delay[0x5]; | ||
197 | u8 reserved_18[0x8]; | ||
198 | u8 num_ports[0x8]; | ||
199 | |||
200 | u8 reserved_19[0x3]; | ||
201 | u8 log_max_msg[0x5]; | ||
202 | u8 reserved_20[0x18]; | ||
203 | |||
204 | u8 stat_rate_support[0x10]; | ||
205 | u8 reserved_21[0x10]; | ||
206 | |||
207 | u8 reserved_22[0x10]; | ||
208 | u8 cmdif_checksum[0x2]; | ||
209 | u8 sigerr_cqe[0x1]; | ||
210 | u8 reserved_23[0x1]; | ||
211 | u8 wq_signature[0x1]; | ||
212 | u8 sctr_data_cqe[0x1]; | ||
213 | u8 reserved_24[0x1]; | ||
214 | u8 sho[0x1]; | ||
215 | u8 tph[0x1]; | ||
216 | u8 rf[0x1]; | ||
217 | u8 dc[0x1]; | ||
218 | u8 reserved_25[0x2]; | ||
219 | u8 roce[0x1]; | ||
220 | u8 atomic[0x1]; | ||
221 | u8 rsz_srq[0x1]; | ||
222 | |||
223 | u8 cq_oi[0x1]; | ||
224 | u8 cq_resize[0x1]; | ||
225 | u8 cq_moderation[0x1]; | ||
226 | u8 sniffer_rule_flow[0x1]; | ||
227 | u8 sniffer_rule_vport[0x1]; | ||
228 | u8 sniffer_rule_phy[0x1]; | ||
229 | u8 reserved_26[0x1]; | ||
230 | u8 pg[0x1]; | ||
231 | u8 block_lb_mc[0x1]; | ||
232 | u8 reserved_27[0x3]; | ||
233 | u8 cd[0x1]; | ||
234 | u8 reserved_28[0x1]; | ||
235 | u8 apm[0x1]; | ||
236 | u8 reserved_29[0x7]; | ||
237 | u8 qkv[0x1]; | ||
238 | u8 pkv[0x1]; | ||
239 | u8 reserved_30[0x4]; | ||
240 | u8 xrc[0x1]; | ||
241 | u8 ud[0x1]; | ||
242 | u8 uc[0x1]; | ||
243 | u8 rc[0x1]; | ||
244 | |||
245 | u8 reserved_31[0xa]; | ||
246 | u8 uar_sz[0x6]; | ||
247 | u8 reserved_32[0x8]; | ||
248 | u8 log_pg_sz[0x8]; | ||
249 | |||
250 | u8 bf[0x1]; | ||
251 | u8 reserved_33[0xa]; | ||
252 | u8 log_bf_reg_size[0x5]; | ||
253 | u8 reserved_34[0x10]; | ||
254 | |||
255 | u8 reserved_35[0x10]; | ||
256 | u8 max_wqe_sz_sq[0x10]; | ||
257 | |||
258 | u8 reserved_36[0x10]; | ||
259 | u8 max_wqe_sz_rq[0x10]; | ||
260 | |||
261 | u8 reserved_37[0x10]; | ||
262 | u8 max_wqe_sz_sq_dc[0x10]; | ||
263 | |||
264 | u8 reserved_38[0x7]; | ||
265 | u8 max_qp_mcg[0x19]; | ||
266 | |||
267 | u8 reserved_39[0x18]; | ||
268 | u8 log_max_mcg[0x8]; | ||
269 | |||
270 | u8 reserved_40[0xb]; | ||
271 | u8 log_max_pd[0x5]; | ||
272 | u8 reserved_41[0xb]; | ||
273 | u8 log_max_xrcd[0x5]; | ||
274 | |||
275 | u8 reserved_42[0x20]; | ||
276 | |||
277 | u8 reserved_43[0x3]; | ||
278 | u8 log_max_rq[0x5]; | ||
279 | u8 reserved_44[0x3]; | ||
280 | u8 log_max_sq[0x5]; | ||
281 | u8 reserved_45[0x3]; | ||
282 | u8 log_max_tir[0x5]; | ||
283 | u8 reserved_46[0x3]; | ||
284 | u8 log_max_tis[0x5]; | ||
285 | |||
286 | u8 reserved_47[0x13]; | ||
287 | u8 log_max_rq_per_tir[0x5]; | ||
288 | u8 reserved_48[0x3]; | ||
289 | u8 log_max_tis_per_sq[0x5]; | ||
290 | |||
291 | u8 reserved_49[0xe0]; | ||
292 | |||
293 | u8 reserved_50[0x10]; | ||
294 | u8 log_uar_page_sz[0x10]; | ||
295 | |||
296 | u8 reserved_51[0x100]; | ||
297 | |||
298 | u8 reserved_52[0x1f]; | ||
299 | u8 cqe_zip[0x1]; | ||
300 | |||
301 | u8 cqe_zip_timeout[0x10]; | ||
302 | u8 cqe_zip_max_num[0x10]; | ||
303 | |||
304 | u8 reserved_53[0x220]; | ||
305 | }; | ||
306 | |||
307 | struct mlx5_ifc_set_hca_cap_in_bits { | ||
308 | u8 opcode[0x10]; | ||
309 | u8 reserved_0[0x10]; | ||
310 | |||
311 | u8 reserved_1[0x10]; | ||
312 | u8 op_mod[0x10]; | ||
313 | |||
314 | u8 reserved_2[0x40]; | ||
315 | |||
316 | struct mlx5_ifc_cmd_hca_cap_bits hca_capability_struct; | ||
317 | }; | ||
318 | |||
319 | struct mlx5_ifc_query_hca_cap_in_bits { | ||
320 | u8 opcode[0x10]; | ||
321 | u8 reserved_0[0x10]; | ||
322 | |||
323 | u8 reserved_1[0x10]; | ||
324 | u8 op_mod[0x10]; | ||
325 | |||
326 | u8 reserved_2[0x40]; | ||
327 | }; | ||
328 | |||
329 | struct mlx5_ifc_query_hca_cap_out_bits { | ||
330 | u8 status[0x8]; | ||
331 | u8 reserved_0[0x18]; | ||
332 | |||
333 | u8 syndrome[0x20]; | ||
334 | |||
335 | u8 reserved_1[0x40]; | ||
336 | |||
337 | u8 capability_struct[256][0x8]; | ||
338 | }; | ||
339 | |||
340 | struct mlx5_ifc_set_hca_cap_out_bits { | ||
341 | u8 status[0x8]; | ||
342 | u8 reserved_0[0x18]; | ||
343 | |||
344 | u8 syndrome[0x20]; | ||
345 | |||
346 | u8 reserved_1[0x40]; | ||
347 | }; | ||
348 | |||
349 | #endif /* MLX5_IFC_H */ | ||
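Because every row of a *_bits struct above is built from 1-bit u8 elements, sizeof() yields the structure's width in bits, which is what MLX5_ST_SZ_BYTES()/MLX5_ST_SZ_DW() divide down and what the BUILD_BUG_ON() in MLX5_SET() sanity-checks. A compile-time sketch of that convention using a toy copy of the query_hca_cap_in rows (illustrative only, not the driver's definition):

#include <assert.h>

struct toy_query_hca_cap_in_bits {
	unsigned char opcode[0x10];
	unsigned char reserved_0[0x10];

	unsigned char reserved_1[0x10];
	unsigned char op_mod[0x10];

	unsigned char reserved_2[0x40];
};

/* 0x80 bits -> 16 bytes on the wire, i.e. 4 command dwords */
static_assert(sizeof(struct toy_query_hca_cap_in_bits) % 32 == 0,
	      "rows must close on a dword boundary");
static_assert(sizeof(struct toy_query_hca_cap_in_bits) / 8 == 16,
	      "the MLX5_ST_SZ_BYTES() equivalent");
static_assert(sizeof(struct toy_query_hca_cap_in_bits) / 32 == 4,
	      "the MLX5_ST_SZ_DW() equivalent");

int main(void)
{
	return 0;	/* nothing to run; the checks happen at compile time */
}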
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 9709b30e2d69..7c4c0f1f5805 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h | |||
@@ -342,10 +342,9 @@ struct mlx5_stride_block_ctrl_seg { | |||
342 | }; | 342 | }; |
343 | 343 | ||
344 | struct mlx5_core_qp { | 344 | struct mlx5_core_qp { |
345 | struct mlx5_core_rsc_common common; /* must be first */ | ||
345 | void (*event) (struct mlx5_core_qp *, int); | 346 | void (*event) (struct mlx5_core_qp *, int); |
346 | int qpn; | 347 | int qpn; |
347 | atomic_t refcount; | ||
348 | struct completion free; | ||
349 | struct mlx5_rsc_debug *dbg; | 348 | struct mlx5_rsc_debug *dbg; |
350 | int pid; | 349 | int pid; |
351 | }; | 350 | }; |
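The "/* must be first */" comment above is load-bearing: mlx5_rsc_event() in qp.c looks a resource up by its mlx5_core_rsc_common header and casts straight back to struct mlx5_core_qp, which is only safe while that header sits at offset zero. A small stand-alone sketch of the pattern, with made-up type names:

#include <stddef.h>
#include <stdio.h>

enum res_type { RES_QP };

struct rsc_common {
	enum res_type type;
};

struct core_qp {
	struct rsc_common common;	/* must be first for the cast below */
	int qpn;
};

static void handle_event(struct rsc_common *common)
{
	if (common->type == RES_QP) {
		struct core_qp *qp = (struct core_qp *)common;	/* offset-0 cast */
		printf("event on qpn 0x%x\n", qp->qpn);
	}
}

int main(void)
{
	struct core_qp qp = { .common = { .type = RES_QP }, .qpn = 0x2a };

	/* compile-time guarantee the cast depends on */
	_Static_assert(offsetof(struct core_qp, common) == 0, "common must be first");
	handle_event(&qp.common);
	return 0;
}

A container_of()-style lookup would lift the offset-zero requirement; keeping the header first and casting directly is simply the lighter-weight choice the driver makes here.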