Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/mlx4/cq.c        11
-rw-r--r--  drivers/net/mlx4/en_cq.c      9
-rw-r--r--  drivers/net/mlx4/en_main.c    4
-rw-r--r--  drivers/net/mlx4/eq.c       117
-rw-r--r--  drivers/net/mlx4/main.c      53
-rw-r--r--  drivers/net/mlx4/mlx4.h      14
-rw-r--r--  drivers/net/mlx4/profile.c    4
7 files changed, 152 insertions, 60 deletions
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index b7ad2829d67e..ac57b6a42c6e 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -189,7 +189,7 @@ EXPORT_SYMBOL_GPL(mlx4_cq_resize);
 
 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
                   struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
-                  int collapsed)
+                  unsigned vector, int collapsed)
 {
         struct mlx4_priv *priv = mlx4_priv(dev);
         struct mlx4_cq_table *cq_table = &priv->cq_table;
@@ -198,6 +198,11 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
         u64 mtt_addr;
         int err;
 
+        if (vector >= dev->caps.num_comp_vectors)
+                return -EINVAL;
+
+        cq->vector = vector;
+
         cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
         if (cq->cqn == -1)
                 return -ENOMEM;
@@ -227,7 +232,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
 
         cq_context->flags = cpu_to_be32(!!collapsed << 18);
         cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
-        cq_context->comp_eqn = priv->eq_table.eq[MLX4_EQ_COMP].eqn;
+        cq_context->comp_eqn = priv->eq_table.eq[vector].eqn;
         cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
 
         mtt_addr = mlx4_mtt_addr(dev, mtt);
@@ -276,7 +281,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
         if (err)
                 mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
 
-        synchronize_irq(priv->eq_table.eq[MLX4_EQ_COMP].irq);
+        synchronize_irq(priv->eq_table.eq[cq->vector].irq);
 
         spin_lock_irq(&cq_table->lock);
         radix_tree_delete(&cq_table->tree, cq->cqn);
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
index 1368a8010af4..674f836e225b 100644
--- a/drivers/net/mlx4/en_cq.c
+++ b/drivers/net/mlx4/en_cq.c
@@ -51,10 +51,13 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
         int err;
 
         cq->size = entries;
-        if (mode == RX)
+        if (mode == RX) {
                 cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
-        else
+                cq->vector = ring % mdev->dev->caps.num_comp_vectors;
+        } else {
                 cq->buf_size = sizeof(struct mlx4_cqe);
+                cq->vector = 0;
+        }
 
         cq->ring = ring;
         cq->is_tx = mode;
@@ -86,7 +89,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
         memset(cq->buf, 0, cq->buf_size);
 
         err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
-                            cq->wqres.db.dma, &cq->mcq, cq->is_tx);
+                            cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx);
         if (err)
                 return err;
 
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 4b9794e97a79..c1c05852a95e 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -170,9 +170,9 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
                         mlx4_info(mdev, "Using %d tx rings for port:%d\n",
                                   mdev->profile.prof[i].tx_ring_num, i);
                 if (!mdev->profile.prof[i].rx_ring_num) {
-                        mdev->profile.prof[i].rx_ring_num = 1;
+                        mdev->profile.prof[i].rx_ring_num = dev->caps.num_comp_vectors;
                         mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
-                                  1, i);
+                                  mdev->profile.prof[i].rx_ring_num, i);
                 } else
                         mlx4_info(mdev, "Using %d rx rings for port:%d\n",
                                   mdev->profile.prof[i].rx_ring_num, i);
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index de169338cd90..5d867ebe6a4d 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -266,7 +266,7 @@ static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
 
         writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);
 
-        for (i = 0; i < MLX4_NUM_EQ; ++i)
+        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                 work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);
 
         return IRQ_RETVAL(work);
@@ -304,6 +304,17 @@ static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                             MLX4_CMD_TIME_CLASS_A);
 }
 
+static int mlx4_num_eq_uar(struct mlx4_dev *dev)
+{
+        /*
+         * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
+         * we need to map, take the difference of highest index and
+         * the lowest index we'll use and add 1.
+         */
+        return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
+                dev->caps.reserved_eqs / 4 + 1;
+}
+
 static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
 {
         struct mlx4_priv *priv = mlx4_priv(dev);
@@ -483,9 +494,11 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
 
         if (eq_table->have_irq)
                 free_irq(dev->pdev->irq, dev);
-        for (i = 0; i < MLX4_NUM_EQ; ++i)
+        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                 if (eq_table->eq[i].have_irq)
                         free_irq(eq_table->eq[i].irq, eq_table->eq + i);
+
+        kfree(eq_table->irq_names);
 }
 
 static int mlx4_map_clr_int(struct mlx4_dev *dev)
@@ -551,57 +564,93 @@ void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
         __free_page(priv->eq_table.icm_page);
 }
 
+int mlx4_alloc_eq_table(struct mlx4_dev *dev)
+{
+        struct mlx4_priv *priv = mlx4_priv(dev);
+
+        priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
+                                    sizeof *priv->eq_table.eq, GFP_KERNEL);
+        if (!priv->eq_table.eq)
+                return -ENOMEM;
+
+        return 0;
+}
+
+void mlx4_free_eq_table(struct mlx4_dev *dev)
+{
+        kfree(mlx4_priv(dev)->eq_table.eq);
+}
+
 int mlx4_init_eq_table(struct mlx4_dev *dev)
 {
         struct mlx4_priv *priv = mlx4_priv(dev);
         int err;
         int i;
 
+        priv->eq_table.uar_map = kcalloc(sizeof *priv->eq_table.uar_map,
+                                         mlx4_num_eq_uar(dev), GFP_KERNEL);
+        if (!priv->eq_table.uar_map) {
+                err = -ENOMEM;
+                goto err_out_free;
+        }
+
         err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
                                dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
         if (err)
-                return err;
+                goto err_out_free;
 
-        for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
+        for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
                 priv->eq_table.uar_map[i] = NULL;
 
         err = mlx4_map_clr_int(dev);
         if (err)
-                goto err_out_free;
+                goto err_out_bitmap;
 
         priv->eq_table.clr_mask =
                 swab32(1 << (priv->eq_table.inta_pin & 31));
         priv->eq_table.clr_int = priv->clr_base +
                 (priv->eq_table.inta_pin < 32 ? 4 : 0);
 
-        err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
-                             (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_COMP : 0,
-                             &priv->eq_table.eq[MLX4_EQ_COMP]);
-        if (err)
-                goto err_out_unmap;
+        priv->eq_table.irq_names = kmalloc(16 * dev->caps.num_comp_vectors, GFP_KERNEL);
+        if (!priv->eq_table.irq_names) {
+                err = -ENOMEM;
+                goto err_out_bitmap;
+        }
+
+        for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
+                err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
+                                     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
+                                     &priv->eq_table.eq[i]);
+                if (err)
+                        goto err_out_unmap;
+        }
 
         err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
-                             (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_ASYNC : 0,
-                             &priv->eq_table.eq[MLX4_EQ_ASYNC]);
+                             (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
+                             &priv->eq_table.eq[dev->caps.num_comp_vectors]);
         if (err)
                 goto err_out_comp;
 
         if (dev->flags & MLX4_FLAG_MSI_X) {
-                static const char *eq_name[] = {
-                        [MLX4_EQ_COMP]  = DRV_NAME " (comp)",
-                        [MLX4_EQ_ASYNC] = DRV_NAME " (async)"
-                };
+                static const char async_eq_name[] = "mlx4-async";
+                const char *eq_name;
+
+                for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
+                        if (i < dev->caps.num_comp_vectors) {
+                                snprintf(priv->eq_table.irq_names + i * 16, 16,
+                                         "mlx4-comp-%d", i);
+                                eq_name = priv->eq_table.irq_names + i * 16;
+                        } else
+                                eq_name = async_eq_name;
 
-                for (i = 0; i < MLX4_NUM_EQ; ++i) {
                         err = request_irq(priv->eq_table.eq[i].irq,
-                                          mlx4_msi_x_interrupt,
-                                          0, eq_name[i], priv->eq_table.eq + i);
+                                          mlx4_msi_x_interrupt, 0, eq_name,
+                                          priv->eq_table.eq + i);
                         if (err)
                                 goto err_out_async;
 
                         priv->eq_table.eq[i].have_irq = 1;
                 }
-
         } else {
                 err = request_irq(dev->pdev->irq, mlx4_interrupt,
                                   IRQF_SHARED, DRV_NAME, dev);
@@ -612,28 +661,36 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
         }
 
         err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
-                          priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
+                          priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
         if (err)
                 mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
-                           priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);
+                           priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);
 
-        for (i = 0; i < MLX4_NUM_EQ; ++i)
+        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                 eq_set_ci(&priv->eq_table.eq[i], 1);
 
         return 0;
 
 err_out_async:
-        mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
+        mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);
 
 err_out_comp:
-        mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_COMP]);
+        i = dev->caps.num_comp_vectors - 1;
 
 err_out_unmap:
+        while (i >= 0) {
+                mlx4_free_eq(dev, &priv->eq_table.eq[i]);
+                --i;
+        }
         mlx4_unmap_clr_int(dev);
         mlx4_free_irqs(dev);
 
-err_out_free:
+err_out_bitmap:
         mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
+
+err_out_free:
+        kfree(priv->eq_table.uar_map);
+
         return err;
 }
 
@@ -643,18 +700,20 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
         int i;
 
         mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
-                    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
+                    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
 
         mlx4_free_irqs(dev);
 
-        for (i = 0; i < MLX4_NUM_EQ; ++i)
+        for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
                 mlx4_free_eq(dev, &priv->eq_table.eq[i]);
 
         mlx4_unmap_clr_int(dev);
 
-        for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
+        for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
                 if (priv->eq_table.uar_map[i])
                         iounmap(priv->eq_table.uar_map[i]);
 
         mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
+
+        kfree(priv->eq_table.uar_map);
 }
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 90a0281d15ea..710c79e7a2db 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -421,9 +421,7 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
                                   ((u64) (MLX4_CMPT_TYPE_EQ *
                                           cmpt_entry_sz) << MLX4_CMPT_SHIFT),
                                   cmpt_entry_sz,
-                                  roundup_pow_of_two(MLX4_NUM_EQ +
-                                                     dev->caps.reserved_eqs),
-                                  MLX4_NUM_EQ + dev->caps.reserved_eqs, 0, 0);
+                                  dev->caps.num_eqs, dev->caps.num_eqs, 0, 0);
         if (err)
                 goto err_cq;
 
@@ -810,12 +808,12 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
                 if (dev->flags & MLX4_FLAG_MSI_X) {
                         mlx4_warn(dev, "NOP command failed to generate MSI-X "
                                   "interrupt IRQ %d).\n",
-                                  priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
+                                  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
                         mlx4_warn(dev, "Trying again without MSI-X.\n");
                 } else {
                         mlx4_err(dev, "NOP command failed to generate interrupt "
                                  "(IRQ %d), aborting.\n",
-                                 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
+                                 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
                         mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
                 }
 
@@ -908,31 +906,50 @@ err_uar_table_free:
 static void mlx4_enable_msi_x(struct mlx4_dev *dev)
 {
         struct mlx4_priv *priv = mlx4_priv(dev);
-        struct msix_entry entries[MLX4_NUM_EQ];
+        struct msix_entry *entries;
+        int nreq;
         int err;
         int i;
 
         if (msi_x) {
-                for (i = 0; i < MLX4_NUM_EQ; ++i)
+                nreq = min(dev->caps.num_eqs - dev->caps.reserved_eqs,
+                           num_possible_cpus() + 1);
+                entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
+                if (!entries)
+                        goto no_msi;
+
+                for (i = 0; i < nreq; ++i)
                         entries[i].entry = i;
 
-                err = pci_enable_msix(dev->pdev, entries, ARRAY_SIZE(entries));
+        retry:
+                err = pci_enable_msix(dev->pdev, entries, nreq);
                 if (err) {
-                        if (err > 0)
-                                mlx4_info(dev, "Only %d MSI-X vectors available, "
-                                          "not using MSI-X\n", err);
+                        /* Try again if at least 2 vectors are available */
+                        if (err > 1) {
+                                mlx4_info(dev, "Requested %d vectors, "
+                                          "but only %d MSI-X vectors available, "
+                                          "trying again\n", nreq, err);
+                                nreq = err;
+                                goto retry;
+                        }
+
                         goto no_msi;
                 }
 
-                for (i = 0; i < MLX4_NUM_EQ; ++i)
+                dev->caps.num_comp_vectors = nreq - 1;
+                for (i = 0; i < nreq; ++i)
                         priv->eq_table.eq[i].irq = entries[i].vector;
 
                 dev->flags |= MLX4_FLAG_MSI_X;
+
+                kfree(entries);
                 return;
         }
 
 no_msi:
-        for (i = 0; i < MLX4_NUM_EQ; ++i)
+        dev->caps.num_comp_vectors = 1;
+
+        for (i = 0; i < 2; ++i)
                 priv->eq_table.eq[i].irq = dev->pdev->irq;
 }
 
@@ -1074,6 +1091,10 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
         if (err)
                 goto err_cmd;
 
+        err = mlx4_alloc_eq_table(dev);
+        if (err)
+                goto err_close;
+
         mlx4_enable_msi_x(dev);
 
         err = mlx4_setup_hca(dev);
@@ -1084,7 +1105,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
         }
 
         if (err)
-                goto err_close;
+                goto err_free_eq;
 
         for (port = 1; port <= dev->caps.num_ports; port++) {
                 err = mlx4_init_port_info(dev, port);
@@ -1114,6 +1135,9 @@ err_port:
         mlx4_cleanup_pd_table(dev);
         mlx4_cleanup_uar_table(dev);
 
+err_free_eq:
+        mlx4_free_eq_table(dev);
+
 err_close:
         if (dev->flags & MLX4_FLAG_MSI_X)
                 pci_disable_msix(pdev);
@@ -1177,6 +1201,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
         iounmap(priv->kar);
         mlx4_uar_free(dev, &priv->driver_uar);
         mlx4_cleanup_uar_table(dev);
+        mlx4_free_eq_table(dev);
         mlx4_close_hca(dev);
         mlx4_cmd_cleanup(dev);
 
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 34c909deaff3..e0213bad61c7 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -63,12 +63,6 @@ enum {
 };
 
 enum {
-        MLX4_EQ_ASYNC,
-        MLX4_EQ_COMP,
-        MLX4_NUM_EQ
-};
-
-enum {
         MLX4_NUM_PDS = 1 << 15
 };
 
@@ -205,10 +199,11 @@ struct mlx4_cq_table {
 
 struct mlx4_eq_table {
         struct mlx4_bitmap      bitmap;
+        char                   *irq_names;
         void __iomem           *clr_int;
-        void __iomem           *uar_map[(MLX4_NUM_EQ + 6) / 4];
+        void __iomem          **uar_map;
         u32                     clr_mask;
-        struct mlx4_eq          eq[MLX4_NUM_EQ];
+        struct mlx4_eq         *eq;
         u64                     icm_virt;
         struct page            *icm_page;
         dma_addr_t              icm_dma;
@@ -328,6 +323,9 @@ void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
 
 int mlx4_reset(struct mlx4_dev *dev);
 
+int mlx4_alloc_eq_table(struct mlx4_dev *dev);
+void mlx4_free_eq_table(struct mlx4_dev *dev);
+
 int mlx4_init_pd_table(struct mlx4_dev *dev);
 int mlx4_init_uar_table(struct mlx4_dev *dev);
 int mlx4_init_mr_table(struct mlx4_dev *dev);
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index 9ca42b213d54..919fb9eb1b62 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -107,7 +107,9 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
         profile[MLX4_RES_AUXC].num = request->num_qp;
         profile[MLX4_RES_SRQ].num  = request->num_srq;
         profile[MLX4_RES_CQ].num   = request->num_cq;
-        profile[MLX4_RES_EQ].num   = MLX4_NUM_EQ + dev_cap->reserved_eqs;
+        profile[MLX4_RES_EQ].num   = min(dev_cap->max_eqs,
+                                         dev_cap->reserved_eqs +
+                                         num_possible_cpus() + 1);
         profile[MLX4_RES_DMPT].num = request->num_mpt;
         profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
         profile[MLX4_RES_MTT].num  = request->num_mtt;