Diffstat (limited to 'drivers/net/mlx4/eq.c')
-rw-r--r--   drivers/net/mlx4/eq.c   | 155
1 file changed, 148 insertions(+), 7 deletions(-)
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 6d7b2bf210ce..1ad1f6029af8 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -42,7 +42,7 @@
 #include "fw.h"
 
 enum {
-	MLX4_IRQNAME_SIZE = 64
+	MLX4_IRQNAME_SIZE = 32
 };
 
 enum {
@@ -317,8 +317,8 @@ static int mlx4_num_eq_uar(struct mlx4_dev *dev)
 	 * we need to map, take the difference of highest index and
 	 * the lowest index we'll use and add 1.
 	 */
-	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
-		dev->caps.reserved_eqs / 4 + 1;
+	return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
+		dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
 }
 
 static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
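
Note on the reworked mlx4_num_eq_uar() arithmetic: it extends the old formula so the pooled completion vectors are also covered when counting EQ doorbell UAR pages (each UAR page holds the doorbells of four EQs). As a worked example with hypothetical values that are not taken from the patch (num_comp_vectors = 4, comp_pool = 60, reserved_eqs = 8), the function returns (4 + 1 + 8 + 60)/4 - 8/4 + 1 = 18 - 2 + 1 = 17 UAR pages to map.
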
@@ -496,16 +496,32 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
 static void mlx4_free_irqs(struct mlx4_dev *dev)
 {
 	struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
-	int i;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int i, vec;
 
 	if (eq_table->have_irq)
 		free_irq(dev->pdev->irq, dev);
+
 	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
 		if (eq_table->eq[i].have_irq) {
 			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
 			eq_table->eq[i].have_irq = 0;
 		}
 
+	for (i = 0; i < dev->caps.comp_pool; i++) {
+		/*
+		 * Free the IRQs assigned from the pool. All bits should
+		 * already be clear here, but validate before freeing.
+		 */
+		if (priv->msix_ctl.pool_bm & 1ULL << i) {
+			/* no locking needed here */
+			vec = dev->caps.num_comp_vectors + 1 + i;
+			free_irq(priv->eq_table.eq[vec].irq,
+				 &priv->eq_table.eq[vec]);
+		}
+	}
+
+
 	kfree(eq_table->irq_names);
 }
 
@@ -578,7 +594,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 		(priv->eq_table.inta_pin < 32 ? 4 : 0);
 
 	priv->eq_table.irq_names =
-		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
+		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
+					     dev->caps.comp_pool),
 			GFP_KERNEL);
 	if (!priv->eq_table.irq_names) {
 		err = -ENOMEM;
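
The irq_names buffer allocated above is a single flat array with one MLX4_IRQNAME_SIZE-byte slot per interrupt vector (the legacy completion vectors and the async vector first, then the pooled ones); the name for vector v starts at offset v * MLX4_IRQNAME_SIZE, which is how both the existing MSI-X setup code and the new mlx4_assign_eq() below index it. A minimal sketch of that indexing, assuming the eq_table layout used in this file (the helper itself is illustrative and not part of the patch):

static inline char *mlx4_irq_name_slot(struct mlx4_eq_table *eq_table, int vec)
{
	/* one MLX4_IRQNAME_SIZE-byte name slot per vector, laid out back to back */
	return eq_table->irq_names + vec * MLX4_IRQNAME_SIZE;
}
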
@@ -586,7 +603,9 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 	}
 
 	for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
-		err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
+		err = mlx4_create_eq(dev, dev->caps.num_cqs -
+					  dev->caps.reserved_cqs +
+					  MLX4_NUM_SPARE_EQE,
 				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
 				     &priv->eq_table.eq[i]);
 		if (err) {
@@ -601,6 +620,22 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 	if (err)
 		goto err_out_comp;
 
+	/* if the additional completion vector pool size is 0, this loop will not run */
+	for (i = dev->caps.num_comp_vectors + 1;
+	      i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
+
+		err = mlx4_create_eq(dev, dev->caps.num_cqs -
+					  dev->caps.reserved_cqs +
+					  MLX4_NUM_SPARE_EQE,
+				     (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
+				     &priv->eq_table.eq[i]);
+		if (err) {
+			--i;
+			goto err_out_unmap;
+		}
+	}
+
+
 	if (dev->flags & MLX4_FLAG_MSI_X) {
 		const char *eq_name;
 
@@ -686,7 +721,7 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
 
 	mlx4_free_irqs(dev);
 
-	for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
+	for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
 		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
 
 	mlx4_unmap_clr_int(dev);
@@ -699,3 +734,109 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
 
 	kfree(priv->eq_table.uar_map);
 }
+
+/* A test that verifies that we can accept interrupts on all
+ * the irq vectors of the device.
+ * Interrupts are checked using the NOP command.
+ */
+int mlx4_test_interrupts(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int i;
+	int err;
+
+	err = mlx4_NOP(dev);
+	/* When not in MSI_X, there is only one irq to check */
+	if (!(dev->flags & MLX4_FLAG_MSI_X))
+		return err;
+
+	/* Loop over all completion vectors; for each vector, check that it
+	 * works by mapping command completions to it and then issuing a
+	 * NOP command.
+	 */
+	for (i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
+		/* Temporarily use polling for command completions */
+		mlx4_cmd_use_polling(dev);
+
+		/* Map the new eq to handle all asynchronous events */
+		err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+				  priv->eq_table.eq[i].eqn);
+		if (err) {
+			mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
+			mlx4_cmd_use_events(dev);
+			break;
+		}
+
+		/* Go back to using events */
+		mlx4_cmd_use_events(dev);
+		err = mlx4_NOP(dev);
+	}
+
+	/* Return to default */
+	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
+		    priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+	return err;
+}
+EXPORT_SYMBOL(mlx4_test_interrupts);
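
mlx4_test_interrupts() is exported so that a higher-level driver can trigger it on demand, for example from an ethtool self-test path. A hypothetical caller might look like the sketch below; the wrapper name and the decision to only log the failure are illustrative and not part of the patch:

static int example_irq_selftest(struct mlx4_dev *dev)
{
	int err;

	/* exercises every completion vector with a NOP command */
	err = mlx4_test_interrupts(dev);
	if (err)
		mlx4_warn(dev, "interrupt self-test failed: %d\n", err);
	return err;
}
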
+
+int mlx4_assign_eq(struct mlx4_dev *dev, char *name, int *vector)
+{
+
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int vec = 0, err = 0, i;
+
+	spin_lock(&priv->msix_ctl.pool_lock);
+	for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
+		if (~priv->msix_ctl.pool_bm & 1ULL << i) {
+			priv->msix_ctl.pool_bm |= 1ULL << i;
+			vec = dev->caps.num_comp_vectors + 1 + i;
+			snprintf(priv->eq_table.irq_names +
+					vec * MLX4_IRQNAME_SIZE,
+					MLX4_IRQNAME_SIZE, "%s", name);
+			err = request_irq(priv->eq_table.eq[vec].irq,
+					  mlx4_msi_x_interrupt, 0,
+					  &priv->eq_table.irq_names[vec * MLX4_IRQNAME_SIZE],
+					  priv->eq_table.eq + vec);
+			if (err) {
+				/* clear the bit we just set */
+				priv->msix_ctl.pool_bm ^= 1ULL << i;
+				vec = 0;
+				continue;
+				/* do not break here; try the next free bit */
+			}
+			eq_set_ci(&priv->eq_table.eq[vec], 1);
+		}
+	}
+	spin_unlock(&priv->msix_ctl.pool_lock);
+
+	if (vec) {
+		*vector = vec;
+	} else {
+		*vector = 0;
+		err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
+	}
+	return err;
+}
+EXPORT_SYMBOL(mlx4_assign_eq);
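
A consumer (for instance a ring or port driver built on top of mlx4_core) would typically request a dedicated vector per ring and fall back to the shared round-robin completion vectors when the pool is exhausted (mlx4_assign_eq() returns -ENOSPC and sets *vector to 0 in that case). The sketch below shows that pattern; the function and ring naming are hypothetical:

static void example_pick_vector(struct mlx4_dev *dev, int ring, int *vector)
{
	char name[32];	/* sized to match MLX4_IRQNAME_SIZE used by the core */
	int err;

	snprintf(name, sizeof(name), "example-ring-%d", ring);
	err = mlx4_assign_eq(dev, name, vector);
	if (err) {
		/* pool empty (or request_irq failed): use a shared vector */
		*vector = ring % dev->caps.num_comp_vectors;
	}
}
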
+
+void mlx4_release_eq(struct mlx4_dev *dev, int vec)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	/* bitmap index */
+	int i = vec - dev->caps.num_comp_vectors - 1;
+
+	if (likely(i >= 0)) {
+		/* sanity check: make sure we are not trying to free an IRQ
+		 * belonging to a legacy EQ */
+		spin_lock(&priv->msix_ctl.pool_lock);
+		if (priv->msix_ctl.pool_bm & 1ULL << i) {
+			free_irq(priv->eq_table.eq[vec].irq,
+				 &priv->eq_table.eq[vec]);
+			priv->msix_ctl.pool_bm &= ~(1ULL << i);
+		}
+		spin_unlock(&priv->msix_ctl.pool_lock);
+	}
+
+}
+EXPORT_SYMBOL(mlx4_release_eq);
+
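
The bookkeeping in mlx4_release_eq() mirrors mlx4_assign_eq(): pool vectors occupy indices num_comp_vectors + 1 .. num_comp_vectors + comp_pool, so bit i in msix_ctl.pool_bm corresponds to vector num_comp_vectors + 1 + i (with a hypothetical num_comp_vectors of 4, vector 5 maps to bit 0, vector 6 to bit 1, and so on). Because negative indices are rejected and unset bits are ignored, passing a legacy vector number is a no-op, so a consumer can release unconditionally during teardown, roughly as in this illustrative sketch:

static void example_put_vector(struct mlx4_dev *dev, int vector)
{
	/* a no-op for legacy/shared vectors; frees the IRQ and clears the
	 * pool bit only for vectors previously handed out by mlx4_assign_eq()
	 */
	mlx4_release_eq(dev, vector);
}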