Diffstat (limited to 'drivers/net/mlx4/eq.c')
 drivers/net/mlx4/eq.c | 696 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 696 insertions(+), 0 deletions(-)
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
new file mode 100644
index 000000000000..acf1c801a1b8
--- /dev/null
+++ b/drivers/net/mlx4/eq.c
@@ -0,0 +1,696 @@
/*
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/interrupt.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "fw.h"

enum {
	MLX4_NUM_ASYNC_EQE	= 0x100,
	MLX4_NUM_SPARE_EQE	= 0x80,
	MLX4_EQ_ENTRY_SIZE	= 0x20
};

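/*
 * MLX4_NUM_ASYNC_EQE (256) sizes the async event queue,
 * MLX4_NUM_SPARE_EQE (128) is extra headroom so the consumer index
 * may lag behind the hardware without looking like an overrun, and
 * each EQ entry is MLX4_EQ_ENTRY_SIZE (32) bytes.
 */
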
/*
 * Hardware EQ context layout, handed to the HCA with the SW2HW_EQ
 * firmware command.
 */
struct mlx4_eq_context {
	__be32			flags;
	u16			reserved1[3];
	__be16			page_offset;
	u8			log_eq_size;
	u8			reserved2[4];
	u8			eq_period;
	u8			reserved3;
	u8			eq_max_count;
	u8			reserved4[3];
	u8			intr;
	u8			log_page_size;
	u8			reserved5[2];
	u8			mtt_base_addr_h;
	__be32			mtt_base_addr_l;
	u32			reserved6[2];
	__be32			consumer_index;
	__be32			producer_index;
	u32			reserved7[4];
};

#define MLX4_EQ_STATUS_OK	   ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW	   ( 0 << 24)
#define MLX4_EQ_OWNER_HW	   ( 1 << 24)
#define MLX4_EQ_FLAG_EC		   ( 1 << 18)
#define MLX4_EQ_FLAG_OI		   ( 1 << 17)
#define MLX4_EQ_STATE_ARMED	   ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED	   (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)	    | \
			       (1ull << MLX4_EVENT_TYPE_COMM_EST)	    | \
			       (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)	    | \
			       (1ull << MLX4_EVENT_TYPE_CQ_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)	    | \
			       (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
			       (1ull << MLX4_EVENT_TYPE_CMD))
#define MLX4_CATAS_EVENT_MASK (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR)

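/*
 * Event queue entries are MLX4_EQ_ENTRY_SIZE (32) bytes.  The event
 * type selects which member of the union below is valid, and the
 * hardware toggles the high bit of the final byte (owner) to hand an
 * entry over to software.
 */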
struct mlx4_eqe {
	u8			reserved1;
	u8			type;
	u8			reserved2;
	u8			subtype;
	union {
		u32		raw[6];
		struct {
			__be32	cqn;
		} __attribute__((packed)) comp;
		struct {
			u16	reserved1;
			__be16	token;
			u32	reserved2;
			u8	reserved3[3];
			u8	status;
			__be64	out_param;
		} __attribute__((packed)) cmd;
		struct {
			__be32	qpn;
		} __attribute__((packed)) qp;
		struct {
			__be32	srqn;
		} __attribute__((packed)) srq;
		struct {
			__be32	cqn;
			u32	reserved1;
			u8	reserved2[3];
			u8	syndrome;
		} __attribute__((packed)) cq_err;
		struct {
			u32	reserved1[2];
			__be32	port;
		} __attribute__((packed)) port_change;
	} event;
	u8			reserved3[3];
	u8			owner;
} __attribute__((packed));

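/*
 * Ring the EQ doorbell: the low 24 bits carry the current consumer
 * index, and req_not (bit 31) requests notification (an interrupt)
 * for the next event written to this EQ.
 */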
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
	__raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
					       req_not << 31),
		     eq->doorbell);
	/* We still want ordering, just not swabbing, so add a barrier */
	mb();
}

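/*
 * The EQ buffer is built from individually DMA-mapped pages that need
 * not be contiguous, so turn the entry index into a (page, offset)
 * pair within eq->page_list.
 */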
static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
	return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

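/*
 * An EQE belongs to software when its owner bit matches the pass we
 * are on: the hardware flips the bit each time it wraps around the
 * queue, and (cons_index & nent) tracks the same parity on our side.
 * Return the entry if it is ours, NULL if the queue is empty.
 */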
static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
{
	struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
	return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}

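/*
 * Drain the EQ: dispatch every software-owned entry to the right
 * consumer (CQ completions, QP/SRQ/CQ async events, command
 * completions, port changes), then ring the doorbell with
 * notification requested.  Returns nonzero if any EQEs were found.
 */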
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
	struct mlx4_eqe *eqe;
	int cqn;
	int eqes_found = 0;
	int set_ci = 0;

	while ((eqe = next_eqe_sw(eq))) {
		/*
		 * Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */
		rmb();

		switch (eqe->type) {
		case MLX4_EVENT_TYPE_COMP:
			cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
			mlx4_cq_completion(dev, cqn);
			break;

		case MLX4_EVENT_TYPE_PATH_MIG:
		case MLX4_EVENT_TYPE_COMM_EST:
		case MLX4_EVENT_TYPE_SQ_DRAINED:
		case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
		case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
		case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
		case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
		case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
			mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_SRQ_LIMIT:
		case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
			mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
				       eqe->type);
			break;

		case MLX4_EVENT_TYPE_CMD:
			mlx4_cmd_event(dev,
				       be16_to_cpu(eqe->event.cmd.token),
				       eqe->event.cmd.status,
				       be64_to_cpu(eqe->event.cmd.out_param));
			break;

		case MLX4_EVENT_TYPE_PORT_CHANGE:
			mlx4_dispatch_event(dev, eqe->type, eqe->subtype,
					    be32_to_cpu(eqe->event.port_change.port) >> 28);
			break;

		case MLX4_EVENT_TYPE_CQ_ERROR:
			mlx4_warn(dev, "CQ %s on CQN %06x\n",
				  eqe->event.cq_err.syndrome == 1 ?
				  "overrun" : "access violation",
				  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
			mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
				      eqe->type);
			break;

		case MLX4_EVENT_TYPE_EQ_OVERFLOW:
			mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
			break;

		case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
		case MLX4_EVENT_TYPE_ECC_DETECT:
		default:
			mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
				  eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
			break;
		}

		++eq->cons_index;
		eqes_found = 1;
		++set_ci;

		/*
		 * The HCA will think the queue has overflowed if we
		 * don't tell it we've been processing events.  We
		 * create our EQs with MLX4_NUM_SPARE_EQE extra
		 * entries, so we must update our consumer index at
		 * least that often.
		 */
		if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
			/*
			 * Updating the consumer index here without
			 * requesting notification is cheap, and this
			 * is a rare case, not the fast path.
			 */
			eq_set_ci(eq, 0);
			set_ci = 0;
		}
	}

	eq_set_ci(eq, 1);

	return eqes_found;
}

static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
	struct mlx4_dev *dev = dev_ptr;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int work = 0;
	int i;

	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

	for (i = 0; i < MLX4_EQ_CATAS; ++i)
		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

	return IRQ_RETVAL(work);
}

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct mlx4_eq *eq = eq_ptr;
	struct mlx4_dev *dev = eq->dev;

	mlx4_eq_int(dev, eq);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

static irqreturn_t mlx4_catas_interrupt(int irq, void *dev_ptr)
{
	mlx4_handle_catas_err(dev_ptr);

	/* MSI-X vectors always belong to us */
	return IRQ_HANDLED;
}

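/*
 * Thin wrappers around the MAP_EQ, SW2HW_EQ and HW2SW_EQ firmware
 * commands: MAP_EQ (un)subscribes an EQ to a mask of event types
 * (the unmap flag rides in bit 31), SW2HW_EQ hands an initialized EQ
 * context to the HCA, and HW2SW_EQ takes it back, returning the
 * context in the mailbox.
 */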
static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
		       int eq_num)
{
	return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
			0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
}

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
			MLX4_CMD_TIME_CLASS_A);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 int eq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
			    MLX4_CMD_TIME_CLASS_A);
}

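/*
 * Each UAR page in BAR 2 holds the doorbells for four consecutive
 * EQs, 8 bytes apiece starting at offset 0x800.  Map the page backing
 * this EQ's doorbell on first use and cache the mapping in uar_map.
 */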
static void __devinit __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev,
					       struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int index;

	index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

	if (!priv->eq_table.uar_map[index]) {
		priv->eq_table.uar_map[index] =
			ioremap(pci_resource_start(dev->pdev, 2) +
				((eq->eqn / 4) << PAGE_SHIFT),
				PAGE_SIZE);
		if (!priv->eq_table.uar_map[index]) {
			mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
				 eq->eqn);
			return NULL;
		}
	}

	return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}

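/*
 * Create one EQ: allocate the (possibly multi-page) EQE buffer, pick
 * an EQ number from the bitmap, map its doorbell, describe the buffer
 * with an MTT, and hand the whole context to the firmware with
 * SW2HW_EQ.
 */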
static int __devinit mlx4_create_eq(struct mlx4_dev *dev, int nent,
				    u8 intr, struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_eq_context *eq_context;
	int npages;
	u64 *dma_list = NULL;
	dma_addr_t t;
	u64 mtt_addr;
	int err = -ENOMEM;
	int i;

	eq->dev = dev;
	eq->nent = roundup_pow_of_two(max(nent, 2));
	npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;

	eq->page_list = kmalloc(npages * sizeof *eq->page_list,
				GFP_KERNEL);
	if (!eq->page_list)
		goto err_out;

	for (i = 0; i < npages; ++i)
		eq->page_list[i].buf = NULL;

	dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
	if (!dma_list)
		goto err_out_free;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto err_out_free;
	eq_context = mailbox->buf;

	for (i = 0; i < npages; ++i) {
		eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
							  PAGE_SIZE, &t, GFP_KERNEL);
		if (!eq->page_list[i].buf)
			goto err_out_free_pages;

		dma_list[i] = t;
		eq->page_list[i].map = t;

		memset(eq->page_list[i].buf, 0, PAGE_SIZE);
	}

	eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
	if (eq->eqn == -1)
		goto err_out_free_pages;

	eq->doorbell = mlx4_get_eq_uar(dev, eq);
	if (!eq->doorbell) {
		err = -ENOMEM;
		goto err_out_free_eq;
	}

	err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
	if (err)
		goto err_out_free_eq;

	err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
	if (err)
		goto err_out_free_mtt;

	memset(eq_context, 0, sizeof *eq_context);
	eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK |
					MLX4_EQ_STATE_ARMED);
	eq_context->log_eq_size = ilog2(eq->nent);
	eq_context->intr = intr;
	eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
	eq_context->mtt_base_addr_h = mtt_addr >> 32;
	eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

	err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
	if (err) {
		mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
		goto err_out_free_mtt;
	}

	kfree(dma_list);
	mlx4_free_cmd_mailbox(dev, mailbox);

	eq->cons_index = 0;

	return err;

err_out_free_mtt:
	mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);

err_out_free_pages:
	for (i = 0; i < npages; ++i)
		if (eq->page_list[i].buf)
			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
					  eq->page_list[i].buf,
					  eq->page_list[i].map);

	mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
	kfree(eq->page_list);
	kfree(dma_list);

err_out:
	return err;
}

static void mlx4_free_eq(struct mlx4_dev *dev,
			 struct mlx4_eq *eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
	int i;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return;

	err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
	if (err)
		mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

	if (0) {
		mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
		for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
			if (i % 4 == 0)
				printk("[%02x] ", i * 4);
			printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
			if ((i + 1) % 4 == 0)
				printk("\n");
		}
	}

	mlx4_mtt_cleanup(dev, &eq->mtt);
	for (i = 0; i < npages; ++i)
		pci_free_consistent(dev->pdev, PAGE_SIZE,
				    eq->page_list[i].buf,
				    eq->page_list[i].map);

	kfree(eq->page_list);
	mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
	int i;

	if (eq_table->have_irq)
		free_irq(dev->pdev->irq, dev);
	for (i = 0; i < MLX4_NUM_EQ; ++i)
		if (eq_table->eq[i].have_irq)
			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
}

static int __devinit mlx4_map_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
				 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
	if (!priv->clr_base) {
		mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
		return -ENOMEM;
	}

	return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	iounmap(priv->clr_base);
}

int __devinit mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int ret;

	/*
	 * We assume that mapping one page is enough for the whole EQ
	 * context table.  This is fine with all current HCAs, because
	 * we only use 32 EQs and each EQ uses 64 bytes of context
	 * memory, or 2 KB total.
	 */
	priv->eq_table.icm_virt = icm_virt;
	priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
	if (!priv->eq_table.icm_page)
		return -ENOMEM;
	priv->eq_table.icm_dma = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(priv->eq_table.icm_dma)) {
		__free_page(priv->eq_table.icm_page);
		return -ENOMEM;
	}

	ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
	if (ret) {
		pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);
		__free_page(priv->eq_table.icm_page);
	}

	return ret;
}

void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
	pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
		       PCI_DMA_BIDIRECTIONAL);
	__free_page(priv->eq_table.icm_page);
}

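/*
 * Set up the EQ table at driver load: one completion EQ and one async
 * EQ always, plus a separate catastrophic-error EQ when MSI-X is
 * available; otherwise all EQs share the device's single INTx
 * interrupt.
 */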
int __devinit mlx4_init_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int i;

	err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs);
	if (err)
		return err;

	for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
		priv->eq_table.uar_map[i] = NULL;

	err = mlx4_map_clr_int(dev);
	if (err)
		goto err_out_free;

	priv->eq_table.clr_mask =
		swab32(1 << (priv->eq_table.inta_pin & 31));
	priv->eq_table.clr_int = priv->clr_base +
		(priv->eq_table.inta_pin < 32 ? 4 : 0);

	err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
			     (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_COMP : 0,
			     &priv->eq_table.eq[MLX4_EQ_COMP]);
	if (err)
		goto err_out_unmap;

	err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
			     (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_ASYNC : 0,
			     &priv->eq_table.eq[MLX4_EQ_ASYNC]);
	if (err)
		goto err_out_comp;

	if (dev->flags & MLX4_FLAG_MSI_X) {
		static const char *eq_name[] = {
			[MLX4_EQ_COMP]  = DRV_NAME " (comp)",
			[MLX4_EQ_ASYNC] = DRV_NAME " (async)",
			[MLX4_EQ_CATAS] = DRV_NAME " (catas)"
		};

		err = mlx4_create_eq(dev, 1, MLX4_EQ_CATAS,
				     &priv->eq_table.eq[MLX4_EQ_CATAS]);
		if (err)
			goto err_out_async;

		for (i = 0; i < MLX4_EQ_CATAS; ++i) {
			err = request_irq(priv->eq_table.eq[i].irq,
					  mlx4_msi_x_interrupt,
					  0, eq_name[i], priv->eq_table.eq + i);
			if (err)
				goto err_out_catas;

			priv->eq_table.eq[i].have_irq = 1;
		}

		err = request_irq(priv->eq_table.eq[MLX4_EQ_CATAS].irq,
				  mlx4_catas_interrupt, 0,
				  eq_name[MLX4_EQ_CATAS], dev);
		if (err)
			goto err_out_catas;

		priv->eq_table.eq[MLX4_EQ_CATAS].have_irq = 1;
	} else {
		err = request_irq(dev->pdev->irq, mlx4_interrupt,
				  IRQF_SHARED, DRV_NAME, dev);
		if (err)
			goto err_out_async;

		priv->eq_table.have_irq = 1;
	}

	err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
			  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
	if (err)
		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
			  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);

	for (i = 0; i < MLX4_EQ_CATAS; ++i)
		eq_set_ci(&priv->eq_table.eq[i], 1);

	if (dev->flags & MLX4_FLAG_MSI_X) {
		err = mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 0,
				  priv->eq_table.eq[MLX4_EQ_CATAS].eqn);
		if (err)
			mlx4_warn(dev, "MAP_EQ for catas EQ %d failed (%d)\n",
				  priv->eq_table.eq[MLX4_EQ_CATAS].eqn, err);
	}

	return 0;

err_out_catas:
	mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);

err_out_async:
	mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]);

err_out_comp:
	mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_COMP]);

err_out_unmap:
	mlx4_unmap_clr_int(dev);
	mlx4_free_irqs(dev);

err_out_free:
	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
	return err;
}

void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (dev->flags & MLX4_FLAG_MSI_X)
		mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 1,
			    priv->eq_table.eq[MLX4_EQ_CATAS].eqn);

	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
		    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);

	mlx4_free_irqs(dev);

	for (i = 0; i < MLX4_EQ_CATAS; ++i)
		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
	if (dev->flags & MLX4_FLAG_MSI_X)
		mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);

	mlx4_unmap_clr_int(dev);

	for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
		if (priv->eq_table.uar_map[i])
			iounmap(priv->eq_table.uar_map[i]);

	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
}