author		Saeed Mahameed <saeedm@mellanox.com>	2015-05-28 15:28:41 -0400
committer	David S. Miller <davem@davemloft.net>	2015-05-30 21:23:22 -0400
commit		938fe83c8dcbbf294d167e6163200a8540ae43c4 (patch)
tree		1867c23ac3d241f620a2b16eeca38d8ea8b2fe73 /drivers/infiniband
parent		e281682bf29438848daac11627216bceb1507b71 (diff)
net/mlx5_core: New device capabilities handling
- Query all supported types of dev caps on driver load.
- Store the Cap data outbox per cap type into driver private data.
- Introduce new Macros to access/dump stored caps (using the auto
  generated data types).
- Obsolete SW representation of dev caps (no need for SW copy for each
  cap).
- Modify IB driver to use new macros for checking caps.

Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
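The conversion pattern is the same at every call site in the diff below: a flag
test such as "gen->flags & MLX5_DEV_CAP_FLAG_XRC" becomes "MLX5_CAP_GEN(mdev, xrc)",
and log2-encoded fields become limits via a shift, e.g.
"1 << MLX5_CAP_GEN(mdev, log_max_qp_sz)". The following stand-alone sketch
illustrates the idea behind such outbox-backed accessor macros; the struct,
dword/bit offsets, and names here are invented for illustration and are not the
kernel's actual MLX5_CAP_GEN()/mlx5_ifc layout.

/*
 * Minimal sketch: keep the raw capability outbox per cap type and read
 * named fields on demand, instead of copying each field into a SW struct.
 * All offsets below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

struct hca_caps {
	uint32_t outbox[4];	/* raw FW outbox, illustrative size */
};

/* generic bit-field extractor over the stored outbox */
#define CAP_GET(caps, dw, shift, width) \
	(((caps)->outbox[dw] >> (shift)) & ((1u << (width)) - 1))

/* pretend log_max_qp_sz lives in bits [4:0] of dword 1 */
#define CAP_LOG_MAX_QP_SZ(caps)	CAP_GET(caps, 1, 0, 5)

int main(void)
{
	/* pretend QUERY_HCA_CAP returned log_max_qp_sz = 12 */
	struct hca_caps gen = { .outbox = { 0, 0x0c, 0, 0 } };

	/* log2-encoded caps become limits via a shift, as in the patch */
	unsigned int max_wqes = 1u << CAP_LOG_MAX_QP_SZ(&gen);

	printf("log_max_qp_sz=%u -> max_wqes=%u\n",
	       (unsigned int)CAP_LOG_MAX_QP_SZ(&gen), max_wqes);
	return 0;
}

The point of the design is that the firmware outbox stays the single source of
truth: adding a new capability check is one macro invocation rather than a new
struct field plus copy code on driver load.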
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/hw/mlx5/cq.c		8
-rw-r--r--	drivers/infiniband/hw/mlx5/mad.c	2
-rw-r--r--	drivers/infiniband/hw/mlx5/main.c	113
-rw-r--r--	drivers/infiniband/hw/mlx5/mlx5_ib.h	6
-rw-r--r--	drivers/infiniband/hw/mlx5/mr.c		3
-rw-r--r--	drivers/infiniband/hw/mlx5/odp.c	47
-rw-r--r--	drivers/infiniband/hw/mlx5/qp.c		84
-rw-r--r--	drivers/infiniband/hw/mlx5/srq.c	7
8 files changed, 124 insertions, 146 deletions
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 4e88b18cf62e..e2bea9ab93b3 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -753,7 +753,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
 		return ERR_PTR(-EINVAL);
 
 	entries = roundup_pow_of_two(entries + 1);
-	if (entries > dev->mdev->caps.gen.max_cqes)
+	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))
 		return ERR_PTR(-EINVAL);
 
 	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -920,7 +920,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 	int err;
 	u32 fsel;
 
-	if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
+	if (!MLX5_CAP_GEN(dev->mdev, cq_moderation))
 		return -ENOSYS;
 
 	in = kzalloc(sizeof(*in), GFP_KERNEL);
@@ -1075,7 +1075,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 	int uninitialized_var(cqe_size);
 	unsigned long flags;
 
-	if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
+	if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
 		pr_info("Firmware does not support resize CQ\n");
 		return -ENOSYS;
 	}
@@ -1084,7 +1084,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 		return -EINVAL;
 
 	entries = roundup_pow_of_two(entries + 1);
-	if (entries > dev->mdev->caps.gen.max_cqes + 1)
+	if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1)
 		return -EINVAL;
 
 	if (entries == ibcq->cqe + 1)
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 9cf9a37bb5ff..f2d9e70818d7 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -129,7 +129,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
 
 	packet_error = be16_to_cpu(out_mad->status);
 
-	dev->mdev->caps.gen.ext_port_cap[port - 1] = (!err && !packet_error) ?
+	dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
 		MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;
 
 out:
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 57c9809e8b87..9075649f30fc 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -66,15 +66,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 				struct ib_device_attr *props)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+	struct mlx5_core_dev *mdev = dev->mdev;
 	struct ib_smp *in_mad = NULL;
 	struct ib_smp *out_mad = NULL;
-	struct mlx5_general_caps *gen;
 	int err = -ENOMEM;
 	int max_rq_sg;
 	int max_sq_sg;
-	u64 flags;
 
-	gen = &dev->mdev->caps.gen;
 	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
 	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
 	if (!in_mad || !out_mad)
@@ -96,18 +94,18 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 		IB_DEVICE_PORT_ACTIVE_EVENT |
 		IB_DEVICE_SYS_IMAGE_GUID |
 		IB_DEVICE_RC_RNR_NAK_GEN;
-	flags = gen->flags;
-	if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
+
+	if (MLX5_CAP_GEN(mdev, pkv))
 		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
-	if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR)
+	if (MLX5_CAP_GEN(mdev, qkv))
 		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
-	if (flags & MLX5_DEV_CAP_FLAG_APM)
+	if (MLX5_CAP_GEN(mdev, apm))
 		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
 	props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
-	if (flags & MLX5_DEV_CAP_FLAG_XRC)
+	if (MLX5_CAP_GEN(mdev, xrc))
 		props->device_cap_flags |= IB_DEVICE_XRC;
 	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
-	if (flags & MLX5_DEV_CAP_FLAG_SIG_HAND_OVER) {
+	if (MLX5_CAP_GEN(mdev, sho)) {
 		props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
 		/* At this stage no support for signature handover */
 		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
@@ -116,7 +114,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
 				       IB_GUARD_T10DIF_CSUM;
 	}
-	if (flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)
+	if (MLX5_CAP_GEN(mdev, block_lb_mc))
 		props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
 
 	props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) &
@@ -126,37 +124,38 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);
 
 	props->max_mr_size = ~0ull;
-	props->page_size_cap = gen->min_page_sz;
-	props->max_qp = 1 << gen->log_max_qp;
-	props->max_qp_wr = gen->max_wqes;
-	max_rq_sg = gen->max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
-	max_sq_sg = (gen->max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
-		sizeof(struct mlx5_wqe_data_seg);
+	props->page_size_cap = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
+	props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
+	props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
+	max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
+		sizeof(struct mlx5_wqe_data_seg);
+	max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
+		     sizeof(struct mlx5_wqe_ctrl_seg)) /
+		sizeof(struct mlx5_wqe_data_seg);
 	props->max_sge = min(max_rq_sg, max_sq_sg);
-	props->max_cq = 1 << gen->log_max_cq;
-	props->max_cqe = gen->max_cqes - 1;
-	props->max_mr = 1 << gen->log_max_mkey;
-	props->max_pd = 1 << gen->log_max_pd;
-	props->max_qp_rd_atom = 1 << gen->log_max_ra_req_qp;
-	props->max_qp_init_rd_atom = 1 << gen->log_max_ra_res_qp;
-	props->max_srq = 1 << gen->log_max_srq;
-	props->max_srq_wr = gen->max_srq_wqes - 1;
-	props->local_ca_ack_delay = gen->local_ca_ack_delay;
+	props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
+	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_eq_sz)) - 1;
+	props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
+	props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
+	props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
+	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
+	props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
+	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
+	props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
 	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
 	props->max_srq_sge = max_rq_sg - 1;
 	props->max_fast_reg_page_list_len = (unsigned int)-1;
-	props->local_ca_ack_delay = gen->local_ca_ack_delay;
 	props->atomic_cap = IB_ATOMIC_NONE;
 	props->masked_atomic_cap = IB_ATOMIC_NONE;
 	props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
-	props->max_mcast_grp = 1 << gen->log_max_mcg;
-	props->max_mcast_qp_attach = gen->max_qp_mcg;
+	props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
+	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
 					   props->max_mcast_grp;
 	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
 
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-	if (dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)
+	if (MLX5_CAP_GEN(mdev, pg))
 		props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
 	props->odp_caps = dev->odp_caps;
 #endif
@@ -172,14 +171,13 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
 		       struct ib_port_attr *props)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+	struct mlx5_core_dev *mdev = dev->mdev;
 	struct ib_smp *in_mad = NULL;
 	struct ib_smp *out_mad = NULL;
-	struct mlx5_general_caps *gen;
 	int ext_active_speed;
 	int err = -ENOMEM;
 
-	gen = &dev->mdev->caps.gen;
-	if (port < 1 || port > gen->num_ports) {
+	if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
 		mlx5_ib_warn(dev, "invalid port number %d\n", port);
 		return -EINVAL;
 	}
@@ -210,8 +208,8 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
 	props->phys_state = out_mad->data[33] >> 4;
 	props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
 	props->gid_tbl_len = out_mad->data[50];
-	props->max_msg_sz = 1 << gen->log_max_msg;
-	props->pkey_tbl_len = gen->port[port - 1].pkey_table_len;
+	props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
+	props->pkey_tbl_len = mdev->port_caps[port - 1].pkey_table_len;
 	props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
 	props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
 	props->active_width = out_mad->data[31] & 0xf;
@@ -238,7 +236,7 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
 
 	/* If reported active speed is QDR, check if is FDR-10 */
 	if (props->active_speed == 4) {
-		if (gen->ext_port_cap[port - 1] &
+		if (mdev->port_caps[port - 1].ext_port_cap &
 		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
 			init_query_mad(in_mad);
 			in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
@@ -392,7 +390,6 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	struct mlx5_ib_alloc_ucontext_req_v2 req;
 	struct mlx5_ib_alloc_ucontext_resp resp;
 	struct mlx5_ib_ucontext *context;
-	struct mlx5_general_caps *gen;
 	struct mlx5_uuar_info *uuari;
 	struct mlx5_uar *uars;
 	int gross_uuars;
@@ -403,7 +400,6 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	int i;
 	size_t reqlen;
 
-	gen = &dev->mdev->caps.gen;
 	if (!dev->ib_active)
 		return ERR_PTR(-EAGAIN);
 
@@ -436,14 +432,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 
 	num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
 	gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
-	resp.qp_tab_size = 1 << gen->log_max_qp;
-	resp.bf_reg_size = gen->bf_reg_size;
+	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
+	resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
 	resp.cache_line_size = L1_CACHE_BYTES;
-	resp.max_sq_desc_sz = gen->max_sq_desc_sz;
-	resp.max_rq_desc_sz = gen->max_rq_desc_sz;
-	resp.max_send_wqebb = gen->max_wqes;
-	resp.max_recv_wr = gen->max_wqes;
-	resp.max_srq_recv_wr = gen->max_srq_wqes;
+	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
+	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
+	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
+	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
+	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
 
 	context = kzalloc(sizeof(*context), GFP_KERNEL);
 	if (!context)
@@ -493,7 +489,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 	mutex_init(&context->db_page_mutex);
 
 	resp.tot_uuars = req.total_num_uuars;
-	resp.num_ports = gen->num_ports;
+	resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
 	err = ib_copy_to_udata(udata, &resp,
 			       sizeof(resp) - sizeof(resp.reserved));
 	if (err)
@@ -895,11 +891,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
 
 static void get_ext_port_caps(struct mlx5_ib_dev *dev)
 {
-	struct mlx5_general_caps *gen;
 	int port;
 
-	gen = &dev->mdev->caps.gen;
-	for (port = 1; port <= gen->num_ports; port++)
+	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
 		mlx5_query_ext_port_caps(dev, port);
 }
 
@@ -907,11 +901,9 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
 {
 	struct ib_device_attr *dprops = NULL;
 	struct ib_port_attr *pprops = NULL;
-	struct mlx5_general_caps *gen;
 	int err = -ENOMEM;
 	int port;
 
-	gen = &dev->mdev->caps.gen;
 	pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
 	if (!pprops)
 		goto out;
@@ -926,14 +918,17 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
 		goto out;
 	}
 
-	for (port = 1; port <= gen->num_ports; port++) {
+	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
 		err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
 		if (err) {
-			mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err);
+			mlx5_ib_warn(dev, "query_port %d failed %d\n",
+				     port, err);
 			break;
 		}
-		gen->port[port - 1].pkey_table_len = dprops->max_pkeys;
-		gen->port[port - 1].gid_table_len = pprops->gid_tbl_len;
+		dev->mdev->port_caps[port - 1].pkey_table_len =
+						dprops->max_pkeys;
+		dev->mdev->port_caps[port - 1].gid_table_len =
+						pprops->gid_tbl_len;
 		mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
 			    dprops->max_pkeys, pprops->gid_tbl_len);
 	}
@@ -1207,8 +1202,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
 	dev->ib_dev.owner = THIS_MODULE;
 	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
-	dev->ib_dev.local_dma_lkey = mdev->caps.gen.reserved_lkey;
-	dev->num_ports = mdev->caps.gen.num_ports;
+	dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
+	dev->num_ports = MLX5_CAP_GEN(mdev, num_ports);
 	dev->ib_dev.phys_port_cnt = dev->num_ports;
 	dev->ib_dev.num_comp_vectors =
 		dev->mdev->priv.eq_table.num_comp_vectors;
@@ -1286,9 +1281,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list;
 	dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
 
-	mlx5_ib_internal_query_odp_caps(dev);
+	mlx5_ib_internal_fill_odp_caps(dev);
 
-	if (mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_XRC) {
+	if (MLX5_CAP_GEN(mdev, xrc)) {
 		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
 		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
 		dev->ib_dev.uverbs_cmd_mask |=
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index dff1cfcdf476..0c441add0464 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -617,7 +617,7 @@ int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 extern struct workqueue_struct *mlx5_ib_page_fault_wq;
 
-int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev);
+void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev);
 void mlx5_ib_mr_pfault_handler(struct mlx5_ib_qp *qp,
 			       struct mlx5_ib_pfault *pfault);
 void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp);
@@ -631,9 +631,9 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
 			      unsigned long end);
 
 #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
-static inline int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev)
+static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 {
-	return 0;
+	return;
 }
 
 static inline void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp) {}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 71c593583864..bc9a0de897cb 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -975,8 +975,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
 	struct mlx5_ib_mr *mr;
 	int inlen;
 	int err;
-	bool pg_cap = !!(dev->mdev->caps.gen.flags &
-			 MLX5_DEV_CAP_FLAG_ON_DMND_PG);
+	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));
 
 	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
 	if (!mr)
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 5099db08afd2..aa8391e75385 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -109,40 +109,33 @@ void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start,
 	ib_umem_odp_unmap_dma_pages(umem, start, end);
 }
 
-#define COPY_ODP_BIT_MLX_TO_IB(reg, ib_caps, field_name, bit_name) do {	\
-	if (be32_to_cpu(reg.field_name) & MLX5_ODP_SUPPORT_##bit_name)	\
-		ib_caps->field_name |= IB_ODP_SUPPORT_##bit_name;	\
-} while (0)
-
-int mlx5_ib_internal_query_odp_caps(struct mlx5_ib_dev *dev)
+void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
 {
-	int err;
-	struct mlx5_odp_caps hw_caps;
 	struct ib_odp_caps *caps = &dev->odp_caps;
 
 	memset(caps, 0, sizeof(*caps));
 
-	if (!(dev->mdev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG))
-		return 0;
-
-	err = mlx5_query_odp_caps(dev->mdev, &hw_caps);
-	if (err)
-		goto out;
+	if (!MLX5_CAP_GEN(dev->mdev, pg))
+		return;
 
 	caps->general_caps = IB_ODP_SUPPORT;
-	COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.ud_odp_caps,
-			       SEND);
-	COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
-			       SEND);
-	COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
-			       RECV);
-	COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
-			       WRITE);
-	COPY_ODP_BIT_MLX_TO_IB(hw_caps, caps, per_transport_caps.rc_odp_caps,
-			       READ);
-
-out:
-	return err;
+
+	if (MLX5_CAP_ODP(dev->mdev, ud_odp_caps.send))
+		caps->per_transport_caps.ud_odp_caps |= IB_ODP_SUPPORT_SEND;
+
+	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.send))
+		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_SEND;
+
+	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.receive))
+		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_RECV;
+
+	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.write))
+		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_WRITE;
+
+	if (MLX5_CAP_ODP(dev->mdev, rc_odp_caps.read))
+		caps->per_transport_caps.rc_odp_caps |= IB_ODP_SUPPORT_READ;
+
+	return;
 }
 
 static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 426eb88dfa49..15fd485d1ad9 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -220,13 +220,11 @@ static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
 static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
 		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
 {
-	struct mlx5_general_caps *gen;
 	int wqe_size;
 	int wq_size;
 
-	gen = &dev->mdev->caps.gen;
 	/* Sanity check RQ size before proceeding */
-	if (cap->max_recv_wr > gen->max_wqes)
+	if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
 		return -EINVAL;
 
 	if (!has_rq) {
@@ -246,10 +244,11 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
 		wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
 		wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
 		qp->rq.wqe_cnt = wq_size / wqe_size;
-		if (wqe_size > gen->max_rq_desc_sz) {
+		if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
 			mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
 				    wqe_size,
-				    gen->max_rq_desc_sz);
+				    MLX5_CAP_GEN(dev->mdev,
+						 max_wqe_sz_rq));
 			return -EINVAL;
 		}
 		qp->rq.wqe_shift = ilog2(wqe_size);
@@ -330,11 +329,9 @@ static int calc_send_wqe(struct ib_qp_init_attr *attr)
 static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 			struct mlx5_ib_qp *qp)
 {
-	struct mlx5_general_caps *gen;
 	int wqe_size;
 	int wq_size;
 
-	gen = &dev->mdev->caps.gen;
 	if (!attr->cap.max_send_wr)
 		return 0;
 
@@ -343,9 +340,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 	if (wqe_size < 0)
 		return wqe_size;
 
-	if (wqe_size > gen->max_sq_desc_sz) {
+	if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
 		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
-			    wqe_size, gen->max_sq_desc_sz);
+			    wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
 		return -EINVAL;
 	}
 
@@ -358,9 +355,10 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 
 	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
 	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
-	if (qp->sq.wqe_cnt > gen->max_wqes) {
+	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
 		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
-			    qp->sq.wqe_cnt, gen->max_wqes);
+			    qp->sq.wqe_cnt,
+			    1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
 		return -ENOMEM;
 	}
 	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
@@ -375,13 +373,11 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
 			    struct mlx5_ib_qp *qp,
 			    struct mlx5_ib_create_qp *ucmd)
 {
-	struct mlx5_general_caps *gen;
 	int desc_sz = 1 << qp->sq.wqe_shift;
 
-	gen = &dev->mdev->caps.gen;
-	if (desc_sz > gen->max_sq_desc_sz) {
+	if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
 		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
-			     desc_sz, gen->max_sq_desc_sz);
+			     desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
 		return -EINVAL;
 	}
 
@@ -393,9 +389,10 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
 
 	qp->sq.wqe_cnt = ucmd->sq_wqe_count;
 
-	if (qp->sq.wqe_cnt > gen->max_wqes) {
+	if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
 		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
-			     qp->sq.wqe_cnt, gen->max_wqes);
+			     qp->sq.wqe_cnt,
+			     1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
 		return -EINVAL;
 	}
 
@@ -866,22 +863,21 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 			    struct ib_udata *udata, struct mlx5_ib_qp *qp)
 {
 	struct mlx5_ib_resources *devr = &dev->devr;
+	struct mlx5_core_dev *mdev = dev->mdev;
 	struct mlx5_ib_create_qp_resp resp;
 	struct mlx5_create_qp_mbox_in *in;
-	struct mlx5_general_caps *gen;
 	struct mlx5_ib_create_qp ucmd;
 	int inlen = sizeof(*in);
 	int err;
 
 	mlx5_ib_odp_create_qp(qp);
 
-	gen = &dev->mdev->caps.gen;
 	mutex_init(&qp->mutex);
 	spin_lock_init(&qp->sq.lock);
 	spin_lock_init(&qp->rq.lock);
 
 	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
-		if (!(gen->flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
+		if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
 			mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
 			return -EINVAL;
 		} else {
@@ -914,15 +910,17 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 
 	if (pd) {
 		if (pd->uobject) {
+			__u32 max_wqes =
+				1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
 			mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
 			if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
 			    ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
 				mlx5_ib_dbg(dev, "invalid rq params\n");
 				return -EINVAL;
 			}
-			if (ucmd.sq_wqe_count > gen->max_wqes) {
+			if (ucmd.sq_wqe_count > max_wqes) {
 				mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
-					    ucmd.sq_wqe_count, gen->max_wqes);
+					    ucmd.sq_wqe_count, max_wqes);
 				return -EINVAL;
 			}
 			err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
@@ -1226,7 +1224,6 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
 				struct ib_qp_init_attr *init_attr,
 				struct ib_udata *udata)
 {
-	struct mlx5_general_caps *gen;
 	struct mlx5_ib_dev *dev;
 	struct mlx5_ib_qp *qp;
 	u16 xrcdn = 0;
@@ -1244,12 +1241,11 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
 		}
 		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
 	}
-	gen = &dev->mdev->caps.gen;
 
 	switch (init_attr->qp_type) {
 	case IB_QPT_XRC_TGT:
 	case IB_QPT_XRC_INI:
-		if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC)) {
+		if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
 			mlx5_ib_dbg(dev, "XRC not supported\n");
 			return ERR_PTR(-ENOSYS);
 		}
@@ -1356,9 +1352,6 @@ enum {
 
 static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
 {
-	struct mlx5_general_caps *gen;
-
-	gen = &dev->mdev->caps.gen;
 	if (rate == IB_RATE_PORT_CURRENT) {
 		return 0;
 	} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
@@ -1366,7 +1359,7 @@ static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
 	} else {
 		while (rate != IB_RATE_2_5_GBPS &&
 		       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
-			 gen->stat_rate_support))
+			 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
 			--rate;
 	}
 
@@ -1377,10 +1370,8 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
 			 struct mlx5_qp_path *path, u8 port, int attr_mask,
 			 u32 path_flags, const struct ib_qp_attr *attr)
 {
-	struct mlx5_general_caps *gen;
 	int err;
 
-	gen = &dev->mdev->caps.gen;
 	path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
 	path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;
 
@@ -1391,9 +1382,11 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
 	path->rlid = cpu_to_be16(ah->dlid);
 
 	if (ah->ah_flags & IB_AH_GRH) {
-		if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) {
+		if (ah->grh.sgid_index >=
+		    dev->mdev->port_caps[port - 1].gid_table_len) {
 			pr_err("sgid_index (%u) too large. max is %d\n",
-			       ah->grh.sgid_index, gen->port[port - 1].gid_table_len);
+			       ah->grh.sgid_index,
+			       dev->mdev->port_caps[port - 1].gid_table_len);
 			return -EINVAL;
 		}
 		path->grh_mlid |= 1 << 7;
@@ -1570,7 +1563,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
 	struct mlx5_ib_cq *send_cq, *recv_cq;
 	struct mlx5_qp_context *context;
-	struct mlx5_general_caps *gen;
 	struct mlx5_modify_qp_mbox_in *in;
 	struct mlx5_ib_pd *pd;
 	enum mlx5_qp_state mlx5_cur, mlx5_new;
@@ -1579,7 +1571,6 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	int mlx5_st;
 	int err;
 
-	gen = &dev->mdev->caps.gen;
 	in = kzalloc(sizeof(*in), GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
@@ -1619,7 +1610,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 			err = -EINVAL;
 			goto out;
 		}
-		context->mtu_msgmax = (attr->path_mtu << 5) | gen->log_max_msg;
+		context->mtu_msgmax = (attr->path_mtu << 5) |
+				      (u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
 	}
 
 	if (attr_mask & IB_QP_DEST_QPN)
@@ -1777,11 +1769,9 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
 	enum ib_qp_state cur_state, new_state;
-	struct mlx5_general_caps *gen;
 	int err = -EINVAL;
 	int port;
 
-	gen = &dev->mdev->caps.gen;
 	mutex_lock(&qp->mutex);
 
 	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
@@ -1793,21 +1783,25 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		goto out;
 
 	if ((attr_mask & IB_QP_PORT) &&
-	    (attr->port_num == 0 || attr->port_num > gen->num_ports))
+	    (attr->port_num == 0 ||
+	     attr->port_num > MLX5_CAP_GEN(dev->mdev, num_ports)))
 		goto out;
 
 	if (attr_mask & IB_QP_PKEY_INDEX) {
 		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
-		if (attr->pkey_index >= gen->port[port - 1].pkey_table_len)
+		if (attr->pkey_index >=
+		    dev->mdev->port_caps[port - 1].pkey_table_len)
 			goto out;
 	}
 
 	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
-	    attr->max_rd_atomic > (1 << gen->log_max_ra_res_qp))
+	    attr->max_rd_atomic >
+	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp)))
 		goto out;
 
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
-	    attr->max_dest_rd_atomic > (1 << gen->log_max_ra_req_qp))
+	    attr->max_dest_rd_atomic >
+	    (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp)))
 		goto out;
 
 	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
@@ -3009,7 +3003,7 @@ static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_at
 	ib_ah_attr->port_num = path->port;
 
 	if (ib_ah_attr->port_num == 0 ||
-	    ib_ah_attr->port_num > dev->caps.gen.num_ports)
+	    ib_ah_attr->port_num > MLX5_CAP_GEN(dev, num_ports))
 		return;
 
 	ib_ah_attr->sl = path->sl & 0xf;
@@ -3135,12 +3129,10 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
 					  struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
-	struct mlx5_general_caps *gen;
 	struct mlx5_ib_xrcd *xrcd;
 	int err;
 
-	gen = &dev->mdev->caps.gen;
-	if (!(gen->flags & MLX5_DEV_CAP_FLAG_XRC))
+	if (!MLX5_CAP_GEN(dev->mdev, xrc))
 		return ERR_PTR(-ENOSYS);
 
 	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 4242e1ded868..e8e8e942fa4a 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -236,7 +236,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 				  struct ib_udata *udata)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
-	struct mlx5_general_caps *gen;
 	struct mlx5_ib_srq *srq;
 	int desc_size;
 	int buf_size;
@@ -245,13 +244,13 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 	int uninitialized_var(inlen);
 	int is_xrc;
 	u32 flgs, xrcdn;
+	__u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
 
-	gen = &dev->mdev->caps.gen;
 	/* Sanity check SRQ size before proceeding */
-	if (init_attr->attr.max_wr >= gen->max_srq_wqes) {
+	if (init_attr->attr.max_wr >= max_srq_wqes) {
 		mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
 			    init_attr->attr.max_wr,
-			    gen->max_srq_wqes);
+			    max_srq_wqes);
 		return ERR_PTR(-EINVAL);
 	}
 