path: root/drivers/net/mlx4/eq.c
author	Yevgeny Petrilin <yevgenyp@mellanox.co.il>	2008-12-22 10:15:03 -0500
committer	Roland Dreier <rolandd@cisco.com>	2008-12-22 10:15:03 -0500
commit	b8dd786f9417e5885929bfe33a235c76a9c1c569 (patch)
tree	16b38c672980d142ffa0ac0ccdeb4af19c20cc31 /drivers/net/mlx4/eq.c
parent	061e41fdb5047b1fb161e89664057835935ca1d2 (diff)
mlx4_core: Add support for multiple completion event vectors
When using MSI-X mode, create a completion event queue for each CPU.
Report the number of completion EQs in a new struct mlx4_caps member,
num_comp_vectors, and extend the mlx4_cq_alloc() interface with a
vector parameter so that consumers can specify which completion EQ
should be used to report events for the CQ being created.

Signed-off-by: Yevgeny Petrilin <yevgenyp@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
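For context, a minimal consumer-side sketch (not part of this patch) of how the extended interface can be used: spread CQs round-robin over the completion vectors the device reports. The my_create_cq() helper and its nent/mtt/uar/db_rec parameters are illustrative assumptions; only num_comp_vectors and the vector argument to mlx4_cq_alloc() come from this change.

#include <linux/mlx4/device.h>
#include <linux/mlx4/cq.h>

/*
 * Illustrative sketch only: pick a completion vector for each new CQ
 * round-robin.  my_create_cq() and its nent/mtt/uar/db_rec parameters
 * are hypothetical; num_comp_vectors and the vector argument to
 * mlx4_cq_alloc() are what this patch adds.
 */
static int my_create_cq(struct mlx4_dev *dev, int nent,
			struct mlx4_mtt *mtt, struct mlx4_uar *uar,
			u64 db_rec, struct mlx4_cq *cq, int cq_index)
{
	/* Completion EQs are 0..num_comp_vectors-1; the async EQ follows. */
	unsigned vector = cq_index % dev->caps.num_comp_vectors;

	return mlx4_cq_alloc(dev, nent, mtt, uar, db_rec, cq,
			     vector, 0 /* not collapsed */);
}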
Diffstat (limited to 'drivers/net/mlx4/eq.c')
-rw-r--r--	drivers/net/mlx4/eq.c	117
1 file changed, 88 insertions(+), 29 deletions(-)
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index de169338cd90..5d867ebe6a4d 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -266,7 +266,7 @@ static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
 
 	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);
 
-	for (i = 0; i < MLX4_NUM_EQ; ++i)
+	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
 		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);
 
 	return IRQ_RETVAL(work);
@@ -304,6 +304,17 @@ static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
 			    MLX4_CMD_TIME_CLASS_A);
 }
 
+static int mlx4_num_eq_uar(struct mlx4_dev *dev)
+{
+	/*
+	 * Each UAR holds 4 EQ doorbells.  To figure out how many UARs
+	 * we need to map, take the difference of highest index and
+	 * the lowest index we'll use and add 1.
+	 */
+	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
+		dev->caps.reserved_eqs / 4 + 1;
+}
+
 static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -483,9 +494,11 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
 
 	if (eq_table->have_irq)
 		free_irq(dev->pdev->irq, dev);
-	for (i = 0; i < MLX4_NUM_EQ; ++i)
+	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
 		if (eq_table->eq[i].have_irq)
 			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
+
+	kfree(eq_table->irq_names);
 }
 
 static int mlx4_map_clr_int(struct mlx4_dev *dev)
@@ -551,57 +564,93 @@ void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
 	__free_page(priv->eq_table.icm_page);
 }
 
+int mlx4_alloc_eq_table(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
+				    sizeof *priv->eq_table.eq, GFP_KERNEL);
+	if (!priv->eq_table.eq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void mlx4_free_eq_table(struct mlx4_dev *dev)
+{
+	kfree(mlx4_priv(dev)->eq_table.eq);
+}
+
 int mlx4_init_eq_table(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int err;
 	int i;
 
+	priv->eq_table.uar_map = kcalloc(sizeof *priv->eq_table.uar_map,
+					 mlx4_num_eq_uar(dev), GFP_KERNEL);
+	if (!priv->eq_table.uar_map) {
+		err = -ENOMEM;
+		goto err_out_free;
+	}
+
 	err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
 			       dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
 	if (err)
-		return err;
+		goto err_out_free;
 
-	for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
+	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
 		priv->eq_table.uar_map[i] = NULL;
 
 	err = mlx4_map_clr_int(dev);
 	if (err)
-		goto err_out_free;
+		goto err_out_bitmap;
 
 	priv->eq_table.clr_mask =
 		swab32(1 << (priv->eq_table.inta_pin & 31));
 	priv->eq_table.clr_int  = priv->clr_base +
 		(priv->eq_table.inta_pin < 32 ? 4 : 0);
 
-	err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
-			     (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_COMP : 0,
-			     &priv->eq_table.eq[MLX4_EQ_COMP]);
-	if (err)
-		goto err_out_unmap;
+	priv->eq_table.irq_names = kmalloc(16 * dev->caps.num_comp_vectors, GFP_KERNEL);
+	if (!priv->eq_table.irq_names) {
+		err = -ENOMEM;
+		goto err_out_bitmap;
+	}
+
+	for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
+		err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
+				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
+				     &priv->eq_table.eq[i]);
+		if (err)
+			goto err_out_unmap;
+	}
 
 	err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
-			     (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_ASYNC : 0,
-			     &priv->eq_table.eq[MLX4_EQ_ASYNC]);
+			     (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
+			     &priv->eq_table.eq[dev->caps.num_comp_vectors]);
 	if (err)
 		goto err_out_comp;
 
 	if (dev->flags & MLX4_FLAG_MSI_X) {
-		static const char *eq_name[] = {
-			[MLX4_EQ_COMP]  = DRV_NAME " (comp)",
-			[MLX4_EQ_ASYNC] = DRV_NAME " (async)"
-		};
+		static const char async_eq_name[] = "mlx4-async";
+		const char *eq_name;
+
+		for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
+			if (i < dev->caps.num_comp_vectors) {
+				snprintf(priv->eq_table.irq_names + i * 16, 16,
+					 "mlx4-comp-%d", i);
+				eq_name = priv->eq_table.irq_names + i * 16;
+			} else
+				eq_name = async_eq_name;
 
-		for (i = 0; i < MLX4_NUM_EQ; ++i) {
 			err = request_irq(priv->eq_table.eq[i].irq,
-					  mlx4_msi_x_interrupt,
-					  0, eq_name[i], priv->eq_table.eq + i);
+					  mlx4_msi_x_interrupt, 0, eq_name,
+					  priv->eq_table.eq + i);
 			if (err)
 				goto err_out_async;
 
 			priv->eq_table.eq[i].have_irq = 1;
 		}
-
 	} else {
 		err = request_irq(dev->pdev->irq, mlx4_interrupt,
 				  IRQF_SHARED, DRV_NAME, dev);
@@ -612,28 +661,36 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 	}
 
 	err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
-			  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
+			  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
 	if (err)
 		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
-			   priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);
+			   priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);
 
-	for (i = 0; i < MLX4_NUM_EQ; ++i)
+	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
 		eq_set_ci(&priv->eq_table.eq[i], 1);
 
 	return 0;
 
 err_out_async:
-	mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
+	mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);
 
 err_out_comp:
-	mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_COMP]);
+	i = dev->caps.num_comp_vectors - 1;
 
 err_out_unmap:
+	while (i >= 0) {
+		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
+		--i;
+	}
 	mlx4_unmap_clr_int(dev);
 	mlx4_free_irqs(dev);
 
-err_out_free:
+err_out_bitmap:
 	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
+
+err_out_free:
+	kfree(priv->eq_table.uar_map);
+
 	return err;
 }
 
@@ -643,18 +700,20 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
 	int i;
 
 	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
-		    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
+		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
 
 	mlx4_free_irqs(dev);
 
-	for (i = 0; i < MLX4_NUM_EQ; ++i)
+	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
 		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
 
 	mlx4_unmap_clr_int(dev);
 
-	for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
+	for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
 		if (priv->eq_table.uar_map[i])
 			iounmap(priv->eq_table.uar_map[i]);
 
 	mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
+
+	kfree(priv->eq_table.uar_map);
 }
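As a sanity check on the mlx4_num_eq_uar() arithmetic introduced above, a worked example with made-up capability values (actual reserved_eqs and num_comp_vectors depend on the device):

/*
 * Hypothetical numbers: reserved_eqs = 8, num_comp_vectors = 4.
 * The driver then uses EQ indices 8..12 (four completion EQs plus
 * one async EQ).  Each UAR page holds 4 EQ doorbells, so those EQs
 * fall in UAR pages 8/4 = 2 through 12/4 = 3, i.e. two pages:
 *
 *   (4 + 1 + 8) / 4  -  8 / 4  +  1  =  3 - 2 + 1  =  2
 */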