Diffstat (limited to 'drivers/net/mlx4/eq.c')
-rw-r--r--	drivers/net/mlx4/eq.c	77
1 file changed, 26 insertions(+), 51 deletions(-)
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index b9ceddde46c0..bffb7995cb70 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -31,7 +31,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
@@ -42,6 +41,10 @@
 #include "fw.h"
 
 enum {
+	MLX4_IRQNAME_SIZE	= 64
+};
+
+enum {
 	MLX4_NUM_ASYNC_EQE	= 0x100,
 	MLX4_NUM_SPARE_EQE	= 0x80,
 	MLX4_EQ_ENTRY_SIZE	= 0x20
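The new MLX4_IRQNAME_SIZE constant turns the IRQ-name buffer into a table of fixed 64-byte slots, indexed as irq_names + i * MLX4_IRQNAME_SIZE in the hunks that follow. A minimal standalone sketch of that offset arithmetic (the slot count here is hypothetical):

/*
 * Fixed-width slot table: the name for vector i begins at byte
 * offset i * MLX4_IRQNAME_SIZE.  Standalone illustration only.
 */
#include <stdio.h>

enum {
	MLX4_IRQNAME_SIZE = 64
};

int main(void)
{
	int i;

	for (i = 0; i < 3; ++i)	/* hypothetical three slots */
		printf("slot %d at byte offset %d\n", i, i * MLX4_IRQNAME_SIZE);
	return 0;
}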
@@ -526,48 +529,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
 	iounmap(priv->clr_base);
 }
 
-int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
-{
-	struct mlx4_priv *priv = mlx4_priv(dev);
-	int ret;
-
-	/*
-	 * We assume that mapping one page is enough for the whole EQ
-	 * context table.  This is fine with all current HCAs, because
-	 * we only use 32 EQs and each EQ uses 64 bytes of context
-	 * memory, or 1 KB total.
-	 */
-	priv->eq_table.icm_virt = icm_virt;
-	priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
-	if (!priv->eq_table.icm_page)
-		return -ENOMEM;
-	priv->eq_table.icm_dma  = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
-					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
-		__free_page(priv->eq_table.icm_page);
-		return -ENOMEM;
-	}
-
-	ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
-	if (ret) {
-		pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
-			       PCI_DMA_BIDIRECTIONAL);
-		__free_page(priv->eq_table.icm_page);
-	}
-
-	return ret;
-}
-
-void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
-{
-	struct mlx4_priv *priv = mlx4_priv(dev);
-
-	mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
-	pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
-		       PCI_DMA_BIDIRECTIONAL);
-	__free_page(priv->eq_table.icm_page);
-}
-
 int mlx4_alloc_eq_table(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
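The removed mlx4_map_eq_icm() rested on the reasoning in its comment: a single page covers the whole EQ context table. A quick standalone check of that arithmetic, using the comment's own figures and an assumed 4 KB page (32 x 64 bytes is in fact 2 KB rather than the 1 KB the comment claimed, but the one-page conclusion still holds):

/*
 * Sanity check of the one-page assumption in the removed
 * mlx4_map_eq_icm().  EQ count and per-EQ context size are taken
 * from the removed comment; the page size is an assumption.
 */
#include <assert.h>
#include <stdio.h>

#define NUM_EQS			32
#define EQ_CONTEXT_BYTES	64
#define ASSUMED_PAGE_SIZE	4096

int main(void)
{
	unsigned int total = NUM_EQS * EQ_CONTEXT_BYTES;

	printf("EQ context table: %u bytes\n", total);	/* prints 2048 */
	assert(total <= ASSUMED_PAGE_SIZE);		/* fits in one page */
	return 0;
}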
@@ -615,7 +576,9 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 	priv->eq_table.clr_int  = priv->clr_base +
 		(priv->eq_table.inta_pin < 32 ? 4 : 0);
 
-	priv->eq_table.irq_names = kmalloc(16 * dev->caps.num_comp_vectors, GFP_KERNEL);
+	priv->eq_table.irq_names =
+		kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
+			GFP_KERNEL);
 	if (!priv->eq_table.irq_names) {
 		err = -ENOMEM;
 		goto err_out_bitmap;
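The old allocation reserved only 16 bytes per name and no slot at all for the async EQ, whose name lived in a function-local static string; the new sizing reserves a full MLX4_IRQNAME_SIZE slot for every completion vector plus one for the async EQ, which the longer "...@pci:..." names below require. A small sketch of the two sizes, with a hypothetical vector count:

/*
 * Before/after buffer sizing for the IRQ-name table; the vector
 * count is hypothetical.  A name such as "mlx4-comp-3@pci:0000:07:00.0"
 * is about 28 characters, so the old 16-byte slots could not hold it.
 */
#include <stdio.h>

#define MLX4_IRQNAME_SIZE 64

int main(void)
{
	int num_comp_vectors = 4;	/* hypothetical */

	printf("old: %d bytes, no async slot\n", 16 * num_comp_vectors);
	printf("new: %d bytes, one slot per vector plus async\n",
	       MLX4_IRQNAME_SIZE * (num_comp_vectors + 1));
	return 0;
}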
@@ -638,17 +601,25 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 		goto err_out_comp;
 
 	if (dev->flags & MLX4_FLAG_MSI_X) {
-		static const char async_eq_name[] = "mlx4-async";
 		const char *eq_name;
 
 		for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
 			if (i < dev->caps.num_comp_vectors) {
-				snprintf(priv->eq_table.irq_names + i * 16, 16,
-					 "mlx4-comp-%d", i);
-				eq_name = priv->eq_table.irq_names + i * 16;
-			} else
-				eq_name = async_eq_name;
+				snprintf(priv->eq_table.irq_names +
+					 i * MLX4_IRQNAME_SIZE,
+					 MLX4_IRQNAME_SIZE,
+					 "mlx4-comp-%d@pci:%s", i,
+					 pci_name(dev->pdev));
+			} else {
+				snprintf(priv->eq_table.irq_names +
+					 i * MLX4_IRQNAME_SIZE,
+					 MLX4_IRQNAME_SIZE,
+					 "mlx4-async@pci:%s",
+					 pci_name(dev->pdev));
+			}
 
+			eq_name = priv->eq_table.irq_names +
+				  i * MLX4_IRQNAME_SIZE;
 			err = request_irq(priv->eq_table.eq[i].irq,
 					  mlx4_msi_x_interrupt, 0, eq_name,
 					  priv->eq_table.eq + i);
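With MSI-X, every vector's name, the async EQ's included, is now formatted into the kmalloc'ed table, and the PCI bus address is appended so multiple adapters can be told apart in /proc/interrupts. A userspace sketch of the naming scheme, stubbing the kernel's pci_name() with a made-up address:

/*
 * Userspace sketch of the MSI-X naming loop above.  pci_name()
 * returns the device's bus address in the kernel; here it is
 * stubbed with an illustrative value, and the vector count is
 * hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

#define MLX4_IRQNAME_SIZE 64

int main(void)
{
	const char *pci = "0000:07:00.0";	/* stand-in for pci_name() */
	int num_comp_vectors = 2;		/* hypothetical */
	char *irq_names;
	int i;

	irq_names = malloc(MLX4_IRQNAME_SIZE * (num_comp_vectors + 1));
	if (!irq_names)
		return 1;

	for (i = 0; i < num_comp_vectors + 1; ++i) {
		if (i < num_comp_vectors)
			snprintf(irq_names + i * MLX4_IRQNAME_SIZE,
				 MLX4_IRQNAME_SIZE,
				 "mlx4-comp-%d@pci:%s", i, pci);
		else
			snprintf(irq_names + i * MLX4_IRQNAME_SIZE,
				 MLX4_IRQNAME_SIZE,
				 "mlx4-async@pci:%s", pci);
		puts(irq_names + i * MLX4_IRQNAME_SIZE);
	}
	free(irq_names);
	return 0;
}

Hoisting the eq_name assignment out of the if/else also removes the old special case in which the async EQ's name pointed at a function-local static string; request_irq() keeps the name pointer for as long as the IRQ stays requested, so holding every name in the long-lived table is the safer arrangement.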
@@ -658,8 +629,12 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
 			priv->eq_table.eq[i].have_irq = 1;
 		}
 	} else {
+		snprintf(priv->eq_table.irq_names,
+			 MLX4_IRQNAME_SIZE,
+			 DRV_NAME "@pci:%s",
+			 pci_name(dev->pdev));
 		err = request_irq(dev->pdev->irq, mlx4_interrupt,
-				  IRQF_SHARED, DRV_NAME, dev);
+				  IRQF_SHARED, priv->eq_table.irq_names, dev);
 		if (err)
 			goto err_out_async;
 
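The INTx fallback gets the same treatment: the name is formatted once into the first slot of irq_names and passed to request_irq() in place of the bare DRV_NAME literal, so even a shared legacy interrupt line identifies its adapter. A sketch of the resulting string, assuming DRV_NAME expands to "mlx4_core" as elsewhere in the driver, with an illustrative PCI address:

/*
 * Sketch of the INTx fallback name.  DRV_NAME is assumed to be
 * "mlx4_core"; the PCI address is illustrative.  Adjacent string
 * literals paste, so DRV_NAME "@pci:%s" becomes "mlx4_core@pci:%s".
 */
#include <stdio.h>

#define DRV_NAME "mlx4_core"
#define MLX4_IRQNAME_SIZE 64

int main(void)
{
	char irq_names[MLX4_IRQNAME_SIZE];

	snprintf(irq_names, sizeof(irq_names),
		 DRV_NAME "@pci:%s", "0000:07:00.0");
	puts(irq_names);	/* mlx4_core@pci:0000:07:00.0 */
	return 0;
}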