diff options
author | Eli Cohen <eli@mellanox.com> | 2013-07-07 10:25:49 -0400 |
---|---|---|
committer | Roland Dreier <roland@purestorage.com> | 2013-07-08 13:32:24 -0400 |
commit | e126ba97dba9edeb6fafa3665b5f8497fc9cdf8c (patch) | |
tree | c886014a89a8a96b8fb171ad6683dc80ce2ff018 /drivers/infiniband/hw/mlx5/main.c | |
parent | 0134f16bc91cc15a38c867b81568b791c9b626aa (diff) |
mlx5: Add driver for Mellanox Connect-IB adapters
The driver is comprised of two kernel modules: mlx5_ib and mlx5_core.
This partitioning resembles what we have for mlx4, except that mlx5_ib
is the pci device driver and not mlx5_core.
mlx5_core is essentially a library that provides general functionality
that is intended to be used by other Mellanox devices that will be
introduced in the future. mlx5_ib has a similar role as any hardware
device under drivers/infiniband/hw.
Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
[ Merge in coccinelle fixes from Fengguang Wu <fengguang.wu@intel.com>.
- Roland ]
Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers/infiniband/hw/mlx5/main.c')
-rw-r--r-- | drivers/infiniband/hw/mlx5/main.c | 1504 |
1 file changed, 1504 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c new file mode 100644 index 000000000000..6b1007f9bc29 --- /dev/null +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -0,0 +1,1504 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #include <asm-generic/kmap_types.h> | ||
34 | #include <linux/module.h> | ||
35 | #include <linux/init.h> | ||
36 | #include <linux/errno.h> | ||
37 | #include <linux/pci.h> | ||
38 | #include <linux/dma-mapping.h> | ||
39 | #include <linux/slab.h> | ||
40 | #include <linux/io-mapping.h> | ||
41 | #include <linux/sched.h> | ||
42 | #include <rdma/ib_user_verbs.h> | ||
43 | #include <rdma/ib_smi.h> | ||
44 | #include <rdma/ib_umem.h> | ||
45 | #include "user.h" | ||
46 | #include "mlx5_ib.h" | ||
47 | |||
/* Module identification strings. */
#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "1.0"
#define DRIVER_RELDATE	"June 2013"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);

/* Index into profile[] below; read-only after module load (mode 0444). */
static int prof_sel = 2;
module_param_named(prof_sel, prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");

/* Banner string printed when the driver is loaded. */
static char mlx5_version[] =
	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
	DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
64 | |||
65 | struct mlx5_profile profile[] = { | ||
66 | [0] = { | ||
67 | .mask = 0, | ||
68 | }, | ||
69 | [1] = { | ||
70 | .mask = MLX5_PROF_MASK_QP_SIZE, | ||
71 | .log_max_qp = 12, | ||
72 | }, | ||
73 | [2] = { | ||
74 | .mask = MLX5_PROF_MASK_QP_SIZE | | ||
75 | MLX5_PROF_MASK_MR_CACHE, | ||
76 | .log_max_qp = 17, | ||
77 | .mr_cache[0] = { | ||
78 | .size = 500, | ||
79 | .limit = 250 | ||
80 | }, | ||
81 | .mr_cache[1] = { | ||
82 | .size = 500, | ||
83 | .limit = 250 | ||
84 | }, | ||
85 | .mr_cache[2] = { | ||
86 | .size = 500, | ||
87 | .limit = 250 | ||
88 | }, | ||
89 | .mr_cache[3] = { | ||
90 | .size = 500, | ||
91 | .limit = 250 | ||
92 | }, | ||
93 | .mr_cache[4] = { | ||
94 | .size = 500, | ||
95 | .limit = 250 | ||
96 | }, | ||
97 | .mr_cache[5] = { | ||
98 | .size = 500, | ||
99 | .limit = 250 | ||
100 | }, | ||
101 | .mr_cache[6] = { | ||
102 | .size = 500, | ||
103 | .limit = 250 | ||
104 | }, | ||
105 | .mr_cache[7] = { | ||
106 | .size = 500, | ||
107 | .limit = 250 | ||
108 | }, | ||
109 | .mr_cache[8] = { | ||
110 | .size = 500, | ||
111 | .limit = 250 | ||
112 | }, | ||
113 | .mr_cache[9] = { | ||
114 | .size = 500, | ||
115 | .limit = 250 | ||
116 | }, | ||
117 | .mr_cache[10] = { | ||
118 | .size = 500, | ||
119 | .limit = 250 | ||
120 | }, | ||
121 | .mr_cache[11] = { | ||
122 | .size = 500, | ||
123 | .limit = 250 | ||
124 | }, | ||
125 | .mr_cache[12] = { | ||
126 | .size = 64, | ||
127 | .limit = 32 | ||
128 | }, | ||
129 | .mr_cache[13] = { | ||
130 | .size = 32, | ||
131 | .limit = 16 | ||
132 | }, | ||
133 | .mr_cache[14] = { | ||
134 | .size = 16, | ||
135 | .limit = 8 | ||
136 | }, | ||
137 | .mr_cache[15] = { | ||
138 | .size = 8, | ||
139 | .limit = 4 | ||
140 | }, | ||
141 | }, | ||
142 | }; | ||
143 | |||
144 | int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn) | ||
145 | { | ||
146 | struct mlx5_eq_table *table = &dev->mdev.priv.eq_table; | ||
147 | struct mlx5_eq *eq, *n; | ||
148 | int err = -ENOENT; | ||
149 | |||
150 | spin_lock(&table->lock); | ||
151 | list_for_each_entry_safe(eq, n, &dev->eqs_list, list) { | ||
152 | if (eq->index == vector) { | ||
153 | *eqn = eq->eqn; | ||
154 | *irqn = eq->irqn; | ||
155 | err = 0; | ||
156 | break; | ||
157 | } | ||
158 | } | ||
159 | spin_unlock(&table->lock); | ||
160 | |||
161 | return err; | ||
162 | } | ||
163 | |||
/*
 * Create one completion EQ per completion vector reported by the device,
 * name each "mlx5_comp<i>", and link them all on dev->eqs_list.
 *
 * Returns 0 on success.  On failure, every EQ created so far is destroyed
 * and freed, and the first error code is returned.
 */
static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
{
	struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
	struct mlx5_eq *eq, *n;
	int ncomp_vec;
	int nent;
	int err;
	int i;

	INIT_LIST_HEAD(&dev->eqs_list);
	ncomp_vec = table->num_comp_vectors;
	nent = MLX5_COMP_EQ_SIZE;
	for (i = 0; i < ncomp_vec; i++) {
		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
		if (!eq) {
			err = -ENOMEM;
			goto clean;
		}

		snprintf(eq->name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
		err = mlx5_create_map_eq(&dev->mdev, eq,
					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
					 eq->name,
					 &dev->mdev.priv.uuari.uars[0]);
		if (err) {
			kfree(eq);
			goto clean;
		}
		mlx5_ib_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
		eq->index = i;
		/* Publish the new EQ on the list under the table lock. */
		spin_lock(&table->lock);
		list_add_tail(&eq->list, &dev->eqs_list);
		spin_unlock(&table->lock);
	}

	dev->num_comp_vectors = ncomp_vec;
	return 0;

clean:
	/*
	 * Tear down any EQs created before the failure.  Each entry is
	 * unlinked under the lock; the lock is then dropped around the
	 * destroy call (presumably because it may sleep -- TODO confirm)
	 * and reacquired for the next iteration.  The _safe iterator
	 * tolerates this removal-while-walking.
	 */
	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
		list_del(&eq->list);
		spin_unlock(&table->lock);
		if (mlx5_destroy_unmap_eq(&dev->mdev, eq))
			mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
		kfree(eq);
		spin_lock(&table->lock);
	}
	spin_unlock(&table->lock);
	return err;
}
215 | |||
/*
 * Destroy and free every completion EQ on dev->eqs_list.
 *
 * Mirrors the error path of alloc_comp_eqs(): each entry is unlinked
 * under the table lock, the lock is dropped around the destroy call
 * (presumably because it may sleep -- TODO confirm), then reacquired
 * before advancing to the next entry.
 */
static void free_comp_eqs(struct mlx5_ib_dev *dev)
{
	struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
	struct mlx5_eq *eq, *n;

	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
		list_del(&eq->list);
		spin_unlock(&table->lock);
		if (mlx5_destroy_unmap_eq(&dev->mdev, eq))
			mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
		kfree(eq);
		spin_lock(&table->lock);
	}
	spin_unlock(&table->lock);
}
232 | |||
/*
 * Implement ib_device attribute query.  Identity fields come from a
 * NodeInfo MAD sent to the device; limits and capability flags come from
 * the firmware capabilities cached in dev->mdev.caps.
 *
 * Returns 0 on success, -ENOMEM if the MAD buffers cannot be allocated,
 * or the error from mlx5_MAD_IFC().
 */
static int mlx5_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	int max_rq_sg;
	int max_sq_sg;
	u64 flags;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof(*props));

	/* Firmware version packed as major(32) | minor(16) | subminor. */
	props->fw_ver = ((u64)fw_rev_maj(&dev->mdev) << 32) |
		(fw_rev_min(&dev->mdev) << 16) |
		fw_rev_sub(&dev->mdev);
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
	/* Optional capabilities, advertised only if firmware supports them. */
	flags = dev->mdev.caps.flags;
	if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (flags & MLX5_DEV_CAP_FLAG_APM)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
	if (flags & MLX5_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;
	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;

	/* Identity fields decoded from the NodeInfo response at fixed offsets. */
	props->vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id = be16_to_cpup((__be16 *)(out_mad->data + 30));
	props->hw_ver = be32_to_cpup((__be32 *)(out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size = ~0ull;
	props->page_size_cap = dev->mdev.caps.min_page_sz;
	props->max_qp = 1 << dev->mdev.caps.log_max_qp;
	props->max_qp_wr = dev->mdev.caps.max_wqes;
	/*
	 * max_sge is bounded by both the receive WQE size and the send WQE
	 * size (minus its control segment).
	 */
	max_rq_sg = dev->mdev.caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
	max_sq_sg = (dev->mdev.caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	props->max_sge = min(max_rq_sg, max_sq_sg);
	props->max_cq = 1 << dev->mdev.caps.log_max_cq;
	props->max_cqe = dev->mdev.caps.max_cqes - 1;
	props->max_mr = 1 << dev->mdev.caps.log_max_mkey;
	props->max_pd = 1 << dev->mdev.caps.log_max_pd;
	props->max_qp_rd_atom = dev->mdev.caps.max_ra_req_qp;
	props->max_qp_init_rd_atom = dev->mdev.caps.max_ra_res_qp;
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq = 1 << dev->mdev.caps.log_max_srq;
	props->max_srq_wr = dev->mdev.caps.max_srq_wqes - 1;
	props->max_srq_sge = max_rq_sg - 1;
	props->max_fast_reg_page_list_len = (unsigned int)-1;
	props->local_ca_ack_delay = dev->mdev.caps.local_ca_ack_delay;
	props->atomic_cap = dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap = IB_ATOMIC_HCA;
	props->max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));
	props->max_mcast_grp = 1 << dev->mdev.caps.log_max_mcg;
	props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}
320 | |||
/*
 * Implement ib_port attribute query via a PortInfo MAD, with follow-up
 * decoding for extended link speeds (FDR/EDR) and FDR-10.
 *
 * Returns 0 on success, -EINVAL for an out-of-range port number,
 * -ENOMEM if the MAD buffers cannot be allocated, or the error from
 * mlx5_MAD_IFC().
 */
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int err = -ENOMEM;

	/* Ports are numbered 1..num_ports in the verbs API. */
	if (port < 1 || port > dev->mdev.caps.num_ports) {
		mlx5_ib_warn(dev, "invalid port number %d\n", port);
		return -EINVAL;
	}

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof(*props));

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err) {
		mlx5_ib_warn(dev, "err %d\n", err);
		goto out;
	}


	/* Decode the PortInfo response at its fixed byte offsets. */
	props->lid = be16_to_cpup((__be16 *)(out_mad->data + 16));
	props->lmc = out_mad->data[34] & 0x7;
	props->sm_lid = be16_to_cpup((__be16 *)(out_mad->data + 18));
	props->sm_sl = out_mad->data[36] & 0xf;
	props->state = out_mad->data[32] & 0xf;
	props->phys_state = out_mad->data[33] >> 4;
	props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
	props->gid_tbl_len = out_mad->data[50];
	props->max_msg_sz = 1 << to_mdev(ibdev)->mdev.caps.log_max_msg;
	props->pkey_tbl_len = to_mdev(ibdev)->mdev.caps.port[port - 1].pkey_table_len;
	props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
	props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
	props->active_width = out_mad->data[31] & 0xf;
	props->active_speed = out_mad->data[35] >> 4;
	props->max_mtu = out_mad->data[41] & 0xf;
	props->active_mtu = out_mad->data[36] >> 4;
	props->subnet_timeout = out_mad->data[51] & 0x1f;
	props->max_vl_num = out_mad->data[37] >> 4;
	props->init_type_reply = out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = 16; /* FDR */
			break;
		case 2:
			props->active_speed = 32; /* EDR */
			break;
		}
	}

	/* If reported active speed is QDR, check if is FDR-10 */
	if (props->active_speed == 4) {
		if (dev->mdev.caps.ext_port_cap[port - 1] &
		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
			/* Re-use the request buffer for the follow-up query. */
			init_query_mad(in_mad);
			in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
			in_mad->attr_mod = cpu_to_be32(port);

			err = mlx5_MAD_IFC(dev, 1, 1, port,
					   NULL, NULL, in_mad, out_mad);
			if (err)
				goto out;

			/* Checking LinkSpeedActive for FDR-10 */
			if (out_mad->data[15] & 0x1)
				props->active_speed = 8;
		}
	}

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}
412 | |||
413 | static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index, | ||
414 | union ib_gid *gid) | ||
415 | { | ||
416 | struct ib_smp *in_mad = NULL; | ||
417 | struct ib_smp *out_mad = NULL; | ||
418 | int err = -ENOMEM; | ||
419 | |||
420 | in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); | ||
421 | out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); | ||
422 | if (!in_mad || !out_mad) | ||
423 | goto out; | ||
424 | |||
425 | init_query_mad(in_mad); | ||
426 | in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; | ||
427 | in_mad->attr_mod = cpu_to_be32(port); | ||
428 | |||
429 | err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); | ||
430 | if (err) | ||
431 | goto out; | ||
432 | |||
433 | memcpy(gid->raw, out_mad->data + 8, 8); | ||
434 | |||
435 | init_query_mad(in_mad); | ||
436 | in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; | ||
437 | in_mad->attr_mod = cpu_to_be32(index / 8); | ||
438 | |||
439 | err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); | ||
440 | if (err) | ||
441 | goto out; | ||
442 | |||
443 | memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8); | ||
444 | |||
445 | out: | ||
446 | kfree(in_mad); | ||
447 | kfree(out_mad); | ||
448 | return err; | ||
449 | } | ||
450 | |||
451 | static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index, | ||
452 | u16 *pkey) | ||
453 | { | ||
454 | struct ib_smp *in_mad = NULL; | ||
455 | struct ib_smp *out_mad = NULL; | ||
456 | int err = -ENOMEM; | ||
457 | |||
458 | in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); | ||
459 | out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); | ||
460 | if (!in_mad || !out_mad) | ||
461 | goto out; | ||
462 | |||
463 | init_query_mad(in_mad); | ||
464 | in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; | ||
465 | in_mad->attr_mod = cpu_to_be32(index / 32); | ||
466 | |||
467 | err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); | ||
468 | if (err) | ||
469 | goto out; | ||
470 | |||
471 | *pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]); | ||
472 | |||
473 | out: | ||
474 | kfree(in_mad); | ||
475 | kfree(out_mad); | ||
476 | return err; | ||
477 | } | ||
478 | |||
/*
 * Payload of the NODE_DESC access register: the 64-byte node description
 * string exchanged with firmware via mlx5_core_access_reg().
 */
struct mlx5_reg_node_desc {
	u8 desc[64];
};
482 | |||
483 | static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask, | ||
484 | struct ib_device_modify *props) | ||
485 | { | ||
486 | struct mlx5_ib_dev *dev = to_mdev(ibdev); | ||
487 | struct mlx5_reg_node_desc in; | ||
488 | struct mlx5_reg_node_desc out; | ||
489 | int err; | ||
490 | |||
491 | if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) | ||
492 | return -EOPNOTSUPP; | ||
493 | |||
494 | if (!(mask & IB_DEVICE_MODIFY_NODE_DESC)) | ||
495 | return 0; | ||
496 | |||
497 | /* | ||
498 | * If possible, pass node desc to FW, so it can generate | ||
499 | * a 144 trap. If cmd fails, just ignore. | ||
500 | */ | ||
501 | memcpy(&in, props->node_desc, 64); | ||
502 | err = mlx5_core_access_reg(&dev->mdev, &in, sizeof(in), &out, | ||
503 | sizeof(out), MLX5_REG_NODE_DESC, 0, 1); | ||
504 | if (err) | ||
505 | return err; | ||
506 | |||
507 | memcpy(ibdev->node_desc, props->node_desc, 64); | ||
508 | |||
509 | return err; | ||
510 | } | ||
511 | |||
512 | static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask, | ||
513 | struct ib_port_modify *props) | ||
514 | { | ||
515 | struct mlx5_ib_dev *dev = to_mdev(ibdev); | ||
516 | struct ib_port_attr attr; | ||
517 | u32 tmp; | ||
518 | int err; | ||
519 | |||
520 | mutex_lock(&dev->cap_mask_mutex); | ||
521 | |||
522 | err = mlx5_ib_query_port(ibdev, port, &attr); | ||
523 | if (err) | ||
524 | goto out; | ||
525 | |||
526 | tmp = (attr.port_cap_flags | props->set_port_cap_mask) & | ||
527 | ~props->clr_port_cap_mask; | ||
528 | |||
529 | err = mlx5_set_port_caps(&dev->mdev, port, tmp); | ||
530 | |||
531 | out: | ||
532 | mutex_unlock(&dev->cap_mask_mutex); | ||
533 | return err; | ||
534 | } | ||
535 | |||
536 | static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, | ||
537 | struct ib_udata *udata) | ||
538 | { | ||
539 | struct mlx5_ib_dev *dev = to_mdev(ibdev); | ||
540 | struct mlx5_ib_alloc_ucontext_req req; | ||
541 | struct mlx5_ib_alloc_ucontext_resp resp; | ||
542 | struct mlx5_ib_ucontext *context; | ||
543 | struct mlx5_uuar_info *uuari; | ||
544 | struct mlx5_uar *uars; | ||
545 | int num_uars; | ||
546 | int uuarn; | ||
547 | int err; | ||
548 | int i; | ||
549 | |||
550 | if (!dev->ib_active) | ||
551 | return ERR_PTR(-EAGAIN); | ||
552 | |||
553 | err = ib_copy_from_udata(&req, udata, sizeof(req)); | ||
554 | if (err) | ||
555 | return ERR_PTR(err); | ||
556 | |||
557 | if (req.total_num_uuars > MLX5_MAX_UUARS) | ||
558 | return ERR_PTR(-ENOMEM); | ||
559 | |||
560 | if (req.total_num_uuars == 0) | ||
561 | return ERR_PTR(-EINVAL); | ||
562 | |||
563 | req.total_num_uuars = ALIGN(req.total_num_uuars, MLX5_BF_REGS_PER_PAGE); | ||
564 | if (req.num_low_latency_uuars > req.total_num_uuars - 1) | ||
565 | return ERR_PTR(-EINVAL); | ||
566 | |||
567 | num_uars = req.total_num_uuars / MLX5_BF_REGS_PER_PAGE; | ||
568 | resp.qp_tab_size = 1 << dev->mdev.caps.log_max_qp; | ||
569 | resp.bf_reg_size = dev->mdev.caps.bf_reg_size; | ||
570 | resp.cache_line_size = L1_CACHE_BYTES; | ||
571 | resp.max_sq_desc_sz = dev->mdev.caps.max_sq_desc_sz; | ||
572 | resp.max_rq_desc_sz = dev->mdev.caps.max_rq_desc_sz; | ||
573 | resp.max_send_wqebb = dev->mdev.caps.max_wqes; | ||
574 | resp.max_recv_wr = dev->mdev.caps.max_wqes; | ||
575 | resp.max_srq_recv_wr = dev->mdev.caps.max_srq_wqes; | ||
576 | |||
577 | context = kzalloc(sizeof(*context), GFP_KERNEL); | ||
578 | if (!context) | ||
579 | return ERR_PTR(-ENOMEM); | ||
580 | |||
581 | uuari = &context->uuari; | ||
582 | mutex_init(&uuari->lock); | ||
583 | uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL); | ||
584 | if (!uars) { | ||
585 | err = -ENOMEM; | ||
586 | goto out_ctx; | ||
587 | } | ||
588 | |||
589 | uuari->bitmap = kcalloc(BITS_TO_LONGS(req.total_num_uuars), | ||
590 | sizeof(*uuari->bitmap), | ||
591 | GFP_KERNEL); | ||
592 | if (!uuari->bitmap) { | ||
593 | err = -ENOMEM; | ||
594 | goto out_uar_ctx; | ||
595 | } | ||
596 | /* | ||
597 | * clear all fast path uuars | ||
598 | */ | ||
599 | for (i = 0; i < req.total_num_uuars; i++) { | ||
600 | uuarn = i & 3; | ||
601 | if (uuarn == 2 || uuarn == 3) | ||
602 | set_bit(i, uuari->bitmap); | ||
603 | } | ||
604 | |||
605 | uuari->count = kcalloc(req.total_num_uuars, sizeof(*uuari->count), GFP_KERNEL); | ||
606 | if (!uuari->count) { | ||
607 | err = -ENOMEM; | ||
608 | goto out_bitmap; | ||
609 | } | ||
610 | |||
611 | for (i = 0; i < num_uars; i++) { | ||
612 | err = mlx5_cmd_alloc_uar(&dev->mdev, &uars[i].index); | ||
613 | if (err) | ||
614 | goto out_count; | ||
615 | } | ||
616 | |||
617 | INIT_LIST_HEAD(&context->db_page_list); | ||
618 | mutex_init(&context->db_page_mutex); | ||
619 | |||
620 | resp.tot_uuars = req.total_num_uuars; | ||
621 | resp.num_ports = dev->mdev.caps.num_ports; | ||
622 | err = ib_copy_to_udata(udata, &resp, sizeof(resp)); | ||
623 | if (err) | ||
624 | goto out_uars; | ||
625 | |||
626 | uuari->num_low_latency_uuars = req.num_low_latency_uuars; | ||
627 | uuari->uars = uars; | ||
628 | uuari->num_uars = num_uars; | ||
629 | return &context->ibucontext; | ||
630 | |||
631 | out_uars: | ||
632 | for (i--; i >= 0; i--) | ||
633 | mlx5_cmd_free_uar(&dev->mdev, uars[i].index); | ||
634 | out_count: | ||
635 | kfree(uuari->count); | ||
636 | |||
637 | out_bitmap: | ||
638 | kfree(uuari->bitmap); | ||
639 | |||
640 | out_uar_ctx: | ||
641 | kfree(uars); | ||
642 | |||
643 | out_ctx: | ||
644 | kfree(context); | ||
645 | return ERR_PTR(err); | ||
646 | } | ||
647 | |||
648 | static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) | ||
649 | { | ||
650 | struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); | ||
651 | struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); | ||
652 | struct mlx5_uuar_info *uuari = &context->uuari; | ||
653 | int i; | ||
654 | |||
655 | for (i = 0; i < uuari->num_uars; i++) { | ||
656 | if (mlx5_cmd_free_uar(&dev->mdev, uuari->uars[i].index)) | ||
657 | mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index); | ||
658 | } | ||
659 | |||
660 | kfree(uuari->count); | ||
661 | kfree(uuari->bitmap); | ||
662 | kfree(uuari->uars); | ||
663 | kfree(context); | ||
664 | |||
665 | return 0; | ||
666 | } | ||
667 | |||
/* Convert a UAR index into the page-frame number of its page in BAR 0. */
static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
{
	return (pci_resource_start(dev->mdev.pdev, 0) >> PAGE_SHIFT) + index;
}
672 | |||
673 | static int get_command(unsigned long offset) | ||
674 | { | ||
675 | return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK; | ||
676 | } | ||
677 | |||
678 | static int get_arg(unsigned long offset) | ||
679 | { | ||
680 | return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1); | ||
681 | } | ||
682 | |||
/* For MLX5_IB_MMAP_REGULAR_PAGE the mmap argument is a UAR index. */
static int get_index(unsigned long offset)
{
	return get_arg(offset);
}
687 | |||
688 | static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) | ||
689 | { | ||
690 | struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); | ||
691 | struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); | ||
692 | struct mlx5_uuar_info *uuari = &context->uuari; | ||
693 | unsigned long command; | ||
694 | unsigned long idx; | ||
695 | phys_addr_t pfn; | ||
696 | |||
697 | command = get_command(vma->vm_pgoff); | ||
698 | switch (command) { | ||
699 | case MLX5_IB_MMAP_REGULAR_PAGE: | ||
700 | if (vma->vm_end - vma->vm_start != PAGE_SIZE) | ||
701 | return -EINVAL; | ||
702 | |||
703 | idx = get_index(vma->vm_pgoff); | ||
704 | pfn = uar_index2pfn(dev, uuari->uars[idx].index); | ||
705 | mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn 0x%llx\n", idx, | ||
706 | (unsigned long long)pfn); | ||
707 | |||
708 | if (idx >= uuari->num_uars) | ||
709 | return -EINVAL; | ||
710 | |||
711 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); | ||
712 | if (io_remap_pfn_range(vma, vma->vm_start, pfn, | ||
713 | PAGE_SIZE, vma->vm_page_prot)) | ||
714 | return -EAGAIN; | ||
715 | |||
716 | mlx5_ib_dbg(dev, "mapped WC at 0x%lx, PA 0x%llx\n", | ||
717 | vma->vm_start, | ||
718 | (unsigned long long)pfn << PAGE_SHIFT); | ||
719 | break; | ||
720 | |||
721 | case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES: | ||
722 | return -ENOSYS; | ||
723 | |||
724 | default: | ||
725 | return -EINVAL; | ||
726 | } | ||
727 | |||
728 | return 0; | ||
729 | } | ||
730 | |||
731 | static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn) | ||
732 | { | ||
733 | struct mlx5_create_mkey_mbox_in *in; | ||
734 | struct mlx5_mkey_seg *seg; | ||
735 | struct mlx5_core_mr mr; | ||
736 | int err; | ||
737 | |||
738 | in = kzalloc(sizeof(*in), GFP_KERNEL); | ||
739 | if (!in) | ||
740 | return -ENOMEM; | ||
741 | |||
742 | seg = &in->seg; | ||
743 | seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA; | ||
744 | seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64); | ||
745 | seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8); | ||
746 | seg->start_addr = 0; | ||
747 | |||
748 | err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in)); | ||
749 | if (err) { | ||
750 | mlx5_ib_warn(dev, "failed to create mkey, %d\n", err); | ||
751 | goto err_in; | ||
752 | } | ||
753 | |||
754 | kfree(in); | ||
755 | *key = mr.key; | ||
756 | |||
757 | return 0; | ||
758 | |||
759 | err_in: | ||
760 | kfree(in); | ||
761 | |||
762 | return err; | ||
763 | } | ||
764 | |||
765 | static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key) | ||
766 | { | ||
767 | struct mlx5_core_mr mr; | ||
768 | int err; | ||
769 | |||
770 | memset(&mr, 0, sizeof(mr)); | ||
771 | mr.key = key; | ||
772 | err = mlx5_core_destroy_mkey(&dev->mdev, &mr); | ||
773 | if (err) | ||
774 | mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key); | ||
775 | } | ||
776 | |||
777 | static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev, | ||
778 | struct ib_ucontext *context, | ||
779 | struct ib_udata *udata) | ||
780 | { | ||
781 | struct mlx5_ib_alloc_pd_resp resp; | ||
782 | struct mlx5_ib_pd *pd; | ||
783 | int err; | ||
784 | |||
785 | pd = kmalloc(sizeof(*pd), GFP_KERNEL); | ||
786 | if (!pd) | ||
787 | return ERR_PTR(-ENOMEM); | ||
788 | |||
789 | err = mlx5_core_alloc_pd(&to_mdev(ibdev)->mdev, &pd->pdn); | ||
790 | if (err) { | ||
791 | kfree(pd); | ||
792 | return ERR_PTR(err); | ||
793 | } | ||
794 | |||
795 | if (context) { | ||
796 | resp.pdn = pd->pdn; | ||
797 | if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { | ||
798 | mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn); | ||
799 | kfree(pd); | ||
800 | return ERR_PTR(-EFAULT); | ||
801 | } | ||
802 | } else { | ||
803 | err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn); | ||
804 | if (err) { | ||
805 | mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn); | ||
806 | kfree(pd); | ||
807 | return ERR_PTR(err); | ||
808 | } | ||
809 | } | ||
810 | |||
811 | return &pd->ibpd; | ||
812 | } | ||
813 | |||
814 | static int mlx5_ib_dealloc_pd(struct ib_pd *pd) | ||
815 | { | ||
816 | struct mlx5_ib_dev *mdev = to_mdev(pd->device); | ||
817 | struct mlx5_ib_pd *mpd = to_mpd(pd); | ||
818 | |||
819 | if (!pd->uobject) | ||
820 | free_pa_mkey(mdev, mpd->pa_lkey); | ||
821 | |||
822 | mlx5_core_dealloc_pd(&mdev->mdev, mpd->pdn); | ||
823 | kfree(mpd); | ||
824 | |||
825 | return 0; | ||
826 | } | ||
827 | |||
828 | static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) | ||
829 | { | ||
830 | struct mlx5_ib_dev *dev = to_mdev(ibqp->device); | ||
831 | int err; | ||
832 | |||
833 | err = mlx5_core_attach_mcg(&dev->mdev, gid, ibqp->qp_num); | ||
834 | if (err) | ||
835 | mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n", | ||
836 | ibqp->qp_num, gid->raw); | ||
837 | |||
838 | return err; | ||
839 | } | ||
840 | |||
841 | static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) | ||
842 | { | ||
843 | struct mlx5_ib_dev *dev = to_mdev(ibqp->device); | ||
844 | int err; | ||
845 | |||
846 | err = mlx5_core_detach_mcg(&dev->mdev, gid, ibqp->qp_num); | ||
847 | if (err) | ||
848 | mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n", | ||
849 | ibqp->qp_num, gid->raw); | ||
850 | |||
851 | return err; | ||
852 | } | ||
853 | |||
854 | static int init_node_data(struct mlx5_ib_dev *dev) | ||
855 | { | ||
856 | struct ib_smp *in_mad = NULL; | ||
857 | struct ib_smp *out_mad = NULL; | ||
858 | int err = -ENOMEM; | ||
859 | |||
860 | in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL); | ||
861 | out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL); | ||
862 | if (!in_mad || !out_mad) | ||
863 | goto out; | ||
864 | |||
865 | init_query_mad(in_mad); | ||
866 | in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; | ||
867 | |||
868 | err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); | ||
869 | if (err) | ||
870 | goto out; | ||
871 | |||
872 | memcpy(dev->ib_dev.node_desc, out_mad->data, 64); | ||
873 | |||
874 | in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; | ||
875 | |||
876 | err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); | ||
877 | if (err) | ||
878 | goto out; | ||
879 | |||
880 | dev->mdev.rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32)); | ||
881 | memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); | ||
882 | |||
883 | out: | ||
884 | kfree(in_mad); | ||
885 | kfree(out_mad); | ||
886 | return err; | ||
887 | } | ||
888 | |||
889 | static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr, | ||
890 | char *buf) | ||
891 | { | ||
892 | struct mlx5_ib_dev *dev = | ||
893 | container_of(device, struct mlx5_ib_dev, ib_dev.dev); | ||
894 | |||
895 | return sprintf(buf, "%d\n", dev->mdev.priv.fw_pages); | ||
896 | } | ||
897 | |||
898 | static ssize_t show_reg_pages(struct device *device, | ||
899 | struct device_attribute *attr, char *buf) | ||
900 | { | ||
901 | struct mlx5_ib_dev *dev = | ||
902 | container_of(device, struct mlx5_ib_dev, ib_dev.dev); | ||
903 | |||
904 | return sprintf(buf, "%d\n", dev->mdev.priv.reg_pages); | ||
905 | } | ||
906 | |||
907 | static ssize_t show_hca(struct device *device, struct device_attribute *attr, | ||
908 | char *buf) | ||
909 | { | ||
910 | struct mlx5_ib_dev *dev = | ||
911 | container_of(device, struct mlx5_ib_dev, ib_dev.dev); | ||
912 | return sprintf(buf, "MT%d\n", dev->mdev.pdev->device); | ||
913 | } | ||
914 | |||
915 | static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, | ||
916 | char *buf) | ||
917 | { | ||
918 | struct mlx5_ib_dev *dev = | ||
919 | container_of(device, struct mlx5_ib_dev, ib_dev.dev); | ||
920 | return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(&dev->mdev), | ||
921 | fw_rev_min(&dev->mdev), fw_rev_sub(&dev->mdev)); | ||
922 | } | ||
923 | |||
924 | static ssize_t show_rev(struct device *device, struct device_attribute *attr, | ||
925 | char *buf) | ||
926 | { | ||
927 | struct mlx5_ib_dev *dev = | ||
928 | container_of(device, struct mlx5_ib_dev, ib_dev.dev); | ||
929 | return sprintf(buf, "%x\n", dev->mdev.rev_id); | ||
930 | } | ||
931 | |||
932 | static ssize_t show_board(struct device *device, struct device_attribute *attr, | ||
933 | char *buf) | ||
934 | { | ||
935 | struct mlx5_ib_dev *dev = | ||
936 | container_of(device, struct mlx5_ib_dev, ib_dev.dev); | ||
937 | return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN, | ||
938 | dev->mdev.board_id); | ||
939 | } | ||
940 | |||
/* Read-only (S_IRUGO) sysfs attributes backed by the show_* handlers above. */
static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL);
static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);
947 | |||
/* Attribute files created on the IB device during init_one(). */
static struct device_attribute *mlx5_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id,
	&dev_attr_fw_pages,
	&dev_attr_reg_pages,
};
956 | |||
957 | static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, | ||
958 | void *data) | ||
959 | { | ||
960 | struct mlx5_ib_dev *ibdev = container_of(dev, struct mlx5_ib_dev, mdev); | ||
961 | struct ib_event ibev; | ||
962 | u8 port = 0; | ||
963 | |||
964 | switch (event) { | ||
965 | case MLX5_DEV_EVENT_SYS_ERROR: | ||
966 | ibdev->ib_active = false; | ||
967 | ibev.event = IB_EVENT_DEVICE_FATAL; | ||
968 | break; | ||
969 | |||
970 | case MLX5_DEV_EVENT_PORT_UP: | ||
971 | ibev.event = IB_EVENT_PORT_ACTIVE; | ||
972 | port = *(u8 *)data; | ||
973 | break; | ||
974 | |||
975 | case MLX5_DEV_EVENT_PORT_DOWN: | ||
976 | ibev.event = IB_EVENT_PORT_ERR; | ||
977 | port = *(u8 *)data; | ||
978 | break; | ||
979 | |||
980 | case MLX5_DEV_EVENT_PORT_INITIALIZED: | ||
981 | /* not used by ULPs */ | ||
982 | return; | ||
983 | |||
984 | case MLX5_DEV_EVENT_LID_CHANGE: | ||
985 | ibev.event = IB_EVENT_LID_CHANGE; | ||
986 | port = *(u8 *)data; | ||
987 | break; | ||
988 | |||
989 | case MLX5_DEV_EVENT_PKEY_CHANGE: | ||
990 | ibev.event = IB_EVENT_PKEY_CHANGE; | ||
991 | port = *(u8 *)data; | ||
992 | break; | ||
993 | |||
994 | case MLX5_DEV_EVENT_GUID_CHANGE: | ||
995 | ibev.event = IB_EVENT_GID_CHANGE; | ||
996 | port = *(u8 *)data; | ||
997 | break; | ||
998 | |||
999 | case MLX5_DEV_EVENT_CLIENT_REREG: | ||
1000 | ibev.event = IB_EVENT_CLIENT_REREGISTER; | ||
1001 | port = *(u8 *)data; | ||
1002 | break; | ||
1003 | } | ||
1004 | |||
1005 | ibev.device = &ibdev->ib_dev; | ||
1006 | ibev.element.port_num = port; | ||
1007 | |||
1008 | if (ibdev->ib_active) | ||
1009 | ib_dispatch_event(&ibev); | ||
1010 | } | ||
1011 | |||
1012 | static void get_ext_port_caps(struct mlx5_ib_dev *dev) | ||
1013 | { | ||
1014 | int port; | ||
1015 | |||
1016 | for (port = 1; port <= dev->mdev.caps.num_ports; port++) | ||
1017 | mlx5_query_ext_port_caps(dev, port); | ||
1018 | } | ||
1019 | |||
1020 | static int get_port_caps(struct mlx5_ib_dev *dev) | ||
1021 | { | ||
1022 | struct ib_device_attr *dprops = NULL; | ||
1023 | struct ib_port_attr *pprops = NULL; | ||
1024 | int err = 0; | ||
1025 | int port; | ||
1026 | |||
1027 | pprops = kmalloc(sizeof(*pprops), GFP_KERNEL); | ||
1028 | if (!pprops) | ||
1029 | goto out; | ||
1030 | |||
1031 | dprops = kmalloc(sizeof(*dprops), GFP_KERNEL); | ||
1032 | if (!dprops) | ||
1033 | goto out; | ||
1034 | |||
1035 | err = mlx5_ib_query_device(&dev->ib_dev, dprops); | ||
1036 | if (err) { | ||
1037 | mlx5_ib_warn(dev, "query_device failed %d\n", err); | ||
1038 | goto out; | ||
1039 | } | ||
1040 | |||
1041 | for (port = 1; port <= dev->mdev.caps.num_ports; port++) { | ||
1042 | err = mlx5_ib_query_port(&dev->ib_dev, port, pprops); | ||
1043 | if (err) { | ||
1044 | mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err); | ||
1045 | break; | ||
1046 | } | ||
1047 | dev->mdev.caps.port[port - 1].pkey_table_len = dprops->max_pkeys; | ||
1048 | dev->mdev.caps.port[port - 1].gid_table_len = pprops->gid_tbl_len; | ||
1049 | mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n", | ||
1050 | dprops->max_pkeys, pprops->gid_tbl_len); | ||
1051 | } | ||
1052 | |||
1053 | out: | ||
1054 | kfree(pprops); | ||
1055 | kfree(dprops); | ||
1056 | |||
1057 | return err; | ||
1058 | } | ||
1059 | |||
1060 | static void destroy_umrc_res(struct mlx5_ib_dev *dev) | ||
1061 | { | ||
1062 | int err; | ||
1063 | |||
1064 | err = mlx5_mr_cache_cleanup(dev); | ||
1065 | if (err) | ||
1066 | mlx5_ib_warn(dev, "mr cache cleanup failed\n"); | ||
1067 | |||
1068 | mlx5_ib_destroy_qp(dev->umrc.qp); | ||
1069 | ib_destroy_cq(dev->umrc.cq); | ||
1070 | ib_dereg_mr(dev->umrc.mr); | ||
1071 | ib_dealloc_pd(dev->umrc.pd); | ||
1072 | } | ||
1073 | |||
enum {
	/* Max outstanding UMR work requests; also seeds the UMR semaphore. */
	MAX_UMR_WR = 128,
};
1077 | |||
/*
 * Create the resources used for UMR (user memory registration) work:
 * a PD, a DMA MR, a CQ and a special QP of type MLX5_IB_QPT_REG_UMR.
 * The QP is driven through INIT -> RTR -> RTS by hand (it bypasses the
 * normal ib_core QP creation path), after which the MR cache that posts
 * UMR work requests on this QP is initialized.
 *
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired so far are released via the error_* labels, which unwind in
 * reverse order of acquisition.
 */
static int create_umr_res(struct mlx5_ib_dev *dev)
{
	struct ib_qp_init_attr *init_attr = NULL;
	struct ib_qp_attr *attr = NULL;
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	struct ib_mr *mr;
	int ret;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto error_0;
	}

	pd = ib_alloc_pd(&dev->ib_dev);
	if (IS_ERR(pd)) {
		mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
		ret = PTR_ERR(pd);
		goto error_0;
	}

	mr = ib_get_dma_mr(pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(mr)) {
		mlx5_ib_dbg(dev, "Couldn't create DMA MR for sync UMR QP\n");
		ret = PTR_ERR(mr);
		goto error_1;
	}

	cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL, 128,
			  0);
	if (IS_ERR(cq)) {
		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
		ret = PTR_ERR(cq);
		goto error_2;
	}
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	init_attr->send_cq = cq;
	init_attr->recv_cq = cq;
	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->cap.max_send_wr = MAX_UMR_WR;
	init_attr->cap.max_send_sge = 1;
	init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
	init_attr->port_num = 1;
	qp = mlx5_ib_create_qp(pd, init_attr, NULL);
	if (IS_ERR(qp)) {
		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
		ret = PTR_ERR(qp);
		goto error_3;
	}
	/* Kernel-owned QP created below the ib_core layer: fill in the
	 * fields ib_create_qp() would normally have set. */
	qp->device = &dev->ib_dev;
	qp->real_qp = qp;
	qp->uobject = NULL;
	qp->qp_type = MLX5_IB_QPT_REG_UMR;

	/* Transition the QP to INIT (pkey_index left at 0 from kzalloc). */
	attr->qp_state = IB_QPS_INIT;
	attr->port_num = 1;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
				IB_QP_PORT, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
		goto error_4;
	}

	/* INIT -> RTR. */
	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTR;
	attr->path_mtu = IB_MTU_256;

	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
		goto error_4;
	}

	/* RTR -> RTS: the QP can now post sends. */
	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTS;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
		goto error_4;
	}

	dev->umrc.qp = qp;
	dev->umrc.cq = cq;
	dev->umrc.mr = mr;
	dev->umrc.pd = pd;

	/* Semaphore bounds the number of in-flight UMR work requests. */
	sema_init(&dev->umrc.sem, MAX_UMR_WR);
	ret = mlx5_mr_cache_init(dev);
	if (ret) {
		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
		goto error_4;
	}

	kfree(attr);
	kfree(init_attr);

	return 0;

error_4:
	mlx5_ib_destroy_qp(qp);

error_3:
	ib_destroy_cq(cq);

error_2:
	ib_dereg_mr(mr);

error_1:
	ib_dealloc_pd(pd);

error_0:
	kfree(attr);
	kfree(init_attr);
	return ret;
}
1197 | |||
/*
 * Create device-owned verbs resources (PD p0, CQ c0, XRCDs x0/x1 and
 * XRC SRQ s0) used internally by the driver.  The objects are created
 * through the driver's own verbs entry points rather than ib_core, so
 * the bookkeeping fields ib_core would normally set (device, uobject,
 * refcounts, ...) are filled in by hand here.
 *
 * Returns 0 on success or a negative errno; on failure previously
 * created objects are released via the error labels in reverse order.
 */
static int create_dev_resources(struct mlx5_ib_resources *devr)
{
	struct ib_srq_init_attr attr;
	struct mlx5_ib_dev *dev;
	int ret = 0;

	dev = container_of(devr, struct mlx5_ib_dev, devr);

	devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->p0)) {
		ret = PTR_ERR(devr->p0);
		goto error0;
	}
	devr->p0->device = &dev->ib_dev;
	devr->p0->uobject = NULL;
	atomic_set(&devr->p0->usecnt, 0);

	devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, 1, 0, NULL, NULL);
	if (IS_ERR(devr->c0)) {
		ret = PTR_ERR(devr->c0);
		goto error1;
	}
	devr->c0->device = &dev->ib_dev;
	devr->c0->uobject = NULL;
	devr->c0->comp_handler = NULL;
	devr->c0->event_handler = NULL;
	devr->c0->cq_context = NULL;
	atomic_set(&devr->c0->usecnt, 0);

	devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x0)) {
		ret = PTR_ERR(devr->x0);
		goto error2;
	}
	devr->x0->device = &dev->ib_dev;
	devr->x0->inode = NULL;
	atomic_set(&devr->x0->usecnt, 0);
	mutex_init(&devr->x0->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x0->tgt_qp_list);

	devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x1)) {
		ret = PTR_ERR(devr->x1);
		goto error3;
	}
	devr->x1->device = &dev->ib_dev;
	devr->x1->inode = NULL;
	atomic_set(&devr->x1->usecnt, 0);
	mutex_init(&devr->x1->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x1->tgt_qp_list);

	/* Minimal XRC SRQ (one WR, one SGE) bound to c0 and x0. */
	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_XRC;
	attr.ext.xrc.cq = devr->c0;
	attr.ext.xrc.xrcd = devr->x0;

	devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s0)) {
		ret = PTR_ERR(devr->s0);
		goto error4;
	}
	devr->s0->device = &dev->ib_dev;
	devr->s0->pd = devr->p0;
	devr->s0->uobject = NULL;
	devr->s0->event_handler = NULL;
	devr->s0->srq_context = NULL;
	devr->s0->srq_type = IB_SRQT_XRC;
	devr->s0->ext.xrc.xrcd = devr->x0;
	devr->s0->ext.xrc.cq = devr->c0;
	/* s0 holds references on x0, c0 and p0 for its lifetime. */
	atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
	atomic_inc(&devr->s0->ext.xrc.cq->usecnt);
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s0->usecnt, 0);

	return 0;

error4:
	mlx5_ib_dealloc_xrcd(devr->x1);
error3:
	mlx5_ib_dealloc_xrcd(devr->x0);
error2:
	mlx5_ib_destroy_cq(devr->c0);
error1:
	mlx5_ib_dealloc_pd(devr->p0);
error0:
	return ret;
}
1287 | |||
/*
 * Release the resources created by create_dev_resources().  Order
 * matters: s0 holds references on x0, c0 and p0, so it must go first.
 */
static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
	mlx5_ib_destroy_srq(devr->s0);
	mlx5_ib_dealloc_xrcd(devr->x0);
	mlx5_ib_dealloc_xrcd(devr->x1);
	mlx5_ib_destroy_cq(devr->c0);
	mlx5_ib_dealloc_pd(devr->p0);
}
1296 | |||
1297 | static int init_one(struct pci_dev *pdev, | ||
1298 | const struct pci_device_id *id) | ||
1299 | { | ||
1300 | struct mlx5_core_dev *mdev; | ||
1301 | struct mlx5_ib_dev *dev; | ||
1302 | int err; | ||
1303 | int i; | ||
1304 | |||
1305 | printk_once(KERN_INFO "%s", mlx5_version); | ||
1306 | |||
1307 | dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev)); | ||
1308 | if (!dev) | ||
1309 | return -ENOMEM; | ||
1310 | |||
1311 | mdev = &dev->mdev; | ||
1312 | mdev->event = mlx5_ib_event; | ||
1313 | if (prof_sel >= ARRAY_SIZE(profile)) { | ||
1314 | pr_warn("selected pofile out of range, selceting default\n"); | ||
1315 | prof_sel = 0; | ||
1316 | } | ||
1317 | mdev->profile = &profile[prof_sel]; | ||
1318 | err = mlx5_dev_init(mdev, pdev); | ||
1319 | if (err) | ||
1320 | goto err_free; | ||
1321 | |||
1322 | err = get_port_caps(dev); | ||
1323 | if (err) | ||
1324 | goto err_cleanup; | ||
1325 | |||
1326 | get_ext_port_caps(dev); | ||
1327 | |||
1328 | err = alloc_comp_eqs(dev); | ||
1329 | if (err) | ||
1330 | goto err_cleanup; | ||
1331 | |||
1332 | MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock); | ||
1333 | |||
1334 | strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX); | ||
1335 | dev->ib_dev.owner = THIS_MODULE; | ||
1336 | dev->ib_dev.node_type = RDMA_NODE_IB_CA; | ||
1337 | dev->ib_dev.local_dma_lkey = mdev->caps.reserved_lkey; | ||
1338 | dev->num_ports = mdev->caps.num_ports; | ||
1339 | dev->ib_dev.phys_port_cnt = dev->num_ports; | ||
1340 | dev->ib_dev.num_comp_vectors = dev->num_comp_vectors; | ||
1341 | dev->ib_dev.dma_device = &mdev->pdev->dev; | ||
1342 | |||
1343 | dev->ib_dev.uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION; | ||
1344 | dev->ib_dev.uverbs_cmd_mask = | ||
1345 | (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | | ||
1346 | (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | | ||
1347 | (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | | ||
1348 | (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | | ||
1349 | (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | | ||
1350 | (1ull << IB_USER_VERBS_CMD_REG_MR) | | ||
1351 | (1ull << IB_USER_VERBS_CMD_DEREG_MR) | | ||
1352 | (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | | ||
1353 | (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | | ||
1354 | (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | | ||
1355 | (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | | ||
1356 | (1ull << IB_USER_VERBS_CMD_CREATE_QP) | | ||
1357 | (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | | ||
1358 | (1ull << IB_USER_VERBS_CMD_QUERY_QP) | | ||
1359 | (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | | ||
1360 | (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | | ||
1361 | (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | | ||
1362 | (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | | ||
1363 | (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | | ||
1364 | (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | | ||
1365 | (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | | ||
1366 | (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | | ||
1367 | (1ull << IB_USER_VERBS_CMD_OPEN_QP); | ||
1368 | |||
1369 | dev->ib_dev.query_device = mlx5_ib_query_device; | ||
1370 | dev->ib_dev.query_port = mlx5_ib_query_port; | ||
1371 | dev->ib_dev.query_gid = mlx5_ib_query_gid; | ||
1372 | dev->ib_dev.query_pkey = mlx5_ib_query_pkey; | ||
1373 | dev->ib_dev.modify_device = mlx5_ib_modify_device; | ||
1374 | dev->ib_dev.modify_port = mlx5_ib_modify_port; | ||
1375 | dev->ib_dev.alloc_ucontext = mlx5_ib_alloc_ucontext; | ||
1376 | dev->ib_dev.dealloc_ucontext = mlx5_ib_dealloc_ucontext; | ||
1377 | dev->ib_dev.mmap = mlx5_ib_mmap; | ||
1378 | dev->ib_dev.alloc_pd = mlx5_ib_alloc_pd; | ||
1379 | dev->ib_dev.dealloc_pd = mlx5_ib_dealloc_pd; | ||
1380 | dev->ib_dev.create_ah = mlx5_ib_create_ah; | ||
1381 | dev->ib_dev.query_ah = mlx5_ib_query_ah; | ||
1382 | dev->ib_dev.destroy_ah = mlx5_ib_destroy_ah; | ||
1383 | dev->ib_dev.create_srq = mlx5_ib_create_srq; | ||
1384 | dev->ib_dev.modify_srq = mlx5_ib_modify_srq; | ||
1385 | dev->ib_dev.query_srq = mlx5_ib_query_srq; | ||
1386 | dev->ib_dev.destroy_srq = mlx5_ib_destroy_srq; | ||
1387 | dev->ib_dev.post_srq_recv = mlx5_ib_post_srq_recv; | ||
1388 | dev->ib_dev.create_qp = mlx5_ib_create_qp; | ||
1389 | dev->ib_dev.modify_qp = mlx5_ib_modify_qp; | ||
1390 | dev->ib_dev.query_qp = mlx5_ib_query_qp; | ||
1391 | dev->ib_dev.destroy_qp = mlx5_ib_destroy_qp; | ||
1392 | dev->ib_dev.post_send = mlx5_ib_post_send; | ||
1393 | dev->ib_dev.post_recv = mlx5_ib_post_recv; | ||
1394 | dev->ib_dev.create_cq = mlx5_ib_create_cq; | ||
1395 | dev->ib_dev.modify_cq = mlx5_ib_modify_cq; | ||
1396 | dev->ib_dev.resize_cq = mlx5_ib_resize_cq; | ||
1397 | dev->ib_dev.destroy_cq = mlx5_ib_destroy_cq; | ||
1398 | dev->ib_dev.poll_cq = mlx5_ib_poll_cq; | ||
1399 | dev->ib_dev.req_notify_cq = mlx5_ib_arm_cq; | ||
1400 | dev->ib_dev.get_dma_mr = mlx5_ib_get_dma_mr; | ||
1401 | dev->ib_dev.reg_user_mr = mlx5_ib_reg_user_mr; | ||
1402 | dev->ib_dev.dereg_mr = mlx5_ib_dereg_mr; | ||
1403 | dev->ib_dev.attach_mcast = mlx5_ib_mcg_attach; | ||
1404 | dev->ib_dev.detach_mcast = mlx5_ib_mcg_detach; | ||
1405 | dev->ib_dev.process_mad = mlx5_ib_process_mad; | ||
1406 | dev->ib_dev.alloc_fast_reg_mr = mlx5_ib_alloc_fast_reg_mr; | ||
1407 | dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list; | ||
1408 | dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list; | ||
1409 | |||
1410 | if (mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC) { | ||
1411 | dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd; | ||
1412 | dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd; | ||
1413 | dev->ib_dev.uverbs_cmd_mask |= | ||
1414 | (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) | | ||
1415 | (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD); | ||
1416 | } | ||
1417 | |||
1418 | err = init_node_data(dev); | ||
1419 | if (err) | ||
1420 | goto err_eqs; | ||
1421 | |||
1422 | mutex_init(&dev->cap_mask_mutex); | ||
1423 | spin_lock_init(&dev->mr_lock); | ||
1424 | |||
1425 | err = create_dev_resources(&dev->devr); | ||
1426 | if (err) | ||
1427 | goto err_eqs; | ||
1428 | |||
1429 | if (ib_register_device(&dev->ib_dev, NULL)) | ||
1430 | goto err_rsrc; | ||
1431 | |||
1432 | err = create_umr_res(dev); | ||
1433 | if (err) | ||
1434 | goto err_dev; | ||
1435 | |||
1436 | for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) { | ||
1437 | if (device_create_file(&dev->ib_dev.dev, | ||
1438 | mlx5_class_attributes[i])) | ||
1439 | goto err_umrc; | ||
1440 | } | ||
1441 | |||
1442 | dev->ib_active = true; | ||
1443 | |||
1444 | return 0; | ||
1445 | |||
1446 | err_umrc: | ||
1447 | destroy_umrc_res(dev); | ||
1448 | |||
1449 | err_dev: | ||
1450 | ib_unregister_device(&dev->ib_dev); | ||
1451 | |||
1452 | err_rsrc: | ||
1453 | destroy_dev_resources(&dev->devr); | ||
1454 | |||
1455 | err_eqs: | ||
1456 | free_comp_eqs(dev); | ||
1457 | |||
1458 | err_cleanup: | ||
1459 | mlx5_dev_cleanup(mdev); | ||
1460 | |||
1461 | err_free: | ||
1462 | ib_dealloc_device((struct ib_device *)dev); | ||
1463 | |||
1464 | return err; | ||
1465 | } | ||
1466 | |||
/*
 * PCI remove routine: tear down everything init_one() set up, in
 * reverse order of creation.  UMR resources go first, before
 * unregistering from the IB core.
 */
static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_ib_dev *dev = mlx5_pci2ibdev(pdev);

	destroy_umrc_res(dev);
	ib_unregister_device(&dev->ib_dev);
	destroy_dev_resources(&dev->devr);
	free_comp_eqs(dev);
	mlx5_dev_cleanup(&dev->mdev);
	ib_dealloc_device(&dev->ib_dev);
}
1478 | |||
/* PCI IDs served by this driver. */
static DEFINE_PCI_DEVICE_TABLE(mlx5_ib_pci_table) = {
	{ PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_ib_pci_table);
1485 | |||
/* PCI driver glue: probe/remove per Connect-IB function. */
static struct pci_driver mlx5_ib_driver = {
	.name	= DRIVER_NAME,
	.id_table = mlx5_ib_pci_table,
	.probe	= init_one,
	.remove	= remove_one
};
1492 | |||
1493 | static int __init mlx5_ib_init(void) | ||
1494 | { | ||
1495 | return pci_register_driver(&mlx5_ib_driver); | ||
1496 | } | ||
1497 | |||
/* Module exit: unregister the PCI driver; remove_one() runs per device. */
static void __exit mlx5_ib_cleanup(void)
{
	pci_unregister_driver(&mlx5_ib_driver);
}
1502 | |||
/* Module entry/exit hooks. */
module_init(mlx5_ib_init);
module_exit(mlx5_ib_cleanup);