author     Roland Dreier <rolandd@cisco.com>    2009-09-05 23:24:49 -0400
committer  Roland Dreier <rolandd@cisco.com>    2009-09-05 23:24:49 -0400
commit     fa0681d2129732027355d6b7083dd8932b9b799d (patch)
tree       0730a4ccab5f7c5b4da772b76e6e709839ffe643 /drivers/net/mlx4
parent     338a8fad27908f64a0d249cc9f5c7d4ddb7e5684 (diff)
mlx4_core: Allocate and map sufficient ICM memory for EQ context
The current implementation allocates a single host page for EQ context
memory, which was OK when we only allocated a few EQs. However, since
we now allocate an EQ for each CPU core, this patch removes the
hard-coded limit (which we exceed with 4 KB pages and 128-byte EQ
context entries with 32 CPUs) and uses the same ICM table code as all
other context tables, which ends up simplifying the code quite a bit
while fixing the problem.
This problem was actually hit in practice on a dual-socket Nehalem box
with 16 real hardware threads and sufficiently odd ACPI tables that it
shows on boot

    SMP: Allowing 32 CPUs, 16 hotplug CPUs
so num_possible_cpus() ends up 32, and mlx4 ends up creating 33 MSI-X
interrupts and 33 EQs. This mlx4 bug means that mlx4 can't even
initialize at all on this quite mainstream system.
Cc: <stable@kernel.org>
Reported-by: Eli Cohen <eli@mellanox.co.il>
Tested-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
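Concretely, here is the arithmetic that breaks the one-page assumption, as a
minimal userspace C sketch. The constants are the ones quoted above; treating
the 33rd EQ as an asynchronous-event EQ on top of one completion EQ per
possible CPU is an assumption drawn from the "33 EQs" figure, not read from
the driver source.

    #include <stdio.h>

    int main(void)
    {
            unsigned int page_size     = 4096; /* one 4 KB host page of EQ context memory */
            unsigned int eqc_entry_sz  = 128;  /* EQ context entry size quoted above */
            unsigned int possible_cpus = 32;   /* num_possible_cpus() on the Nehalem box */
            unsigned int num_eqs = possible_cpus + 1; /* 33 EQs (assumed: +1 async EQ) */

            /* 33 * 128 = 4224 bytes, which no longer fits in a single 4096-byte page */
            printf("EQ context needs %u bytes, one page holds %u\n",
                   num_eqs * eqc_entry_sz, page_size);
            return 0;
    }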
Diffstat (limited to 'drivers/net/mlx4')
 drivers/net/mlx4/eq.c   | 42 ------------------------------------------
 drivers/net/mlx4/main.c |  9 ++++++---
 drivers/net/mlx4/mlx4.h |  7 +------
 3 files changed, 7 insertions(+), 51 deletions(-)
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index c11a0525c40e..d7974a60b961 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -525,48 +525,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
         iounmap(priv->clr_base);
 }
 
-int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
-{
-        struct mlx4_priv *priv = mlx4_priv(dev);
-        int ret;
-
-        /*
-         * We assume that mapping one page is enough for the whole EQ
-         * context table.  This is fine with all current HCAs, because
-         * we only use 32 EQs and each EQ uses 64 bytes of context
-         * memory, or 1 KB total.
-         */
-        priv->eq_table.icm_virt = icm_virt;
-        priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
-        if (!priv->eq_table.icm_page)
-                return -ENOMEM;
-        priv->eq_table.icm_dma  = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
-                                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-        if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
-                __free_page(priv->eq_table.icm_page);
-                return -ENOMEM;
-        }
-
-        ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
-        if (ret) {
-                pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
-                               PCI_DMA_BIDIRECTIONAL);
-                __free_page(priv->eq_table.icm_page);
-        }
-
-        return ret;
-}
-
-void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
-{
-        struct mlx4_priv *priv = mlx4_priv(dev);
-
-        mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
-        pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
-                       PCI_DMA_BIDIRECTIONAL);
-        __free_page(priv->eq_table.icm_page);
-}
-
 int mlx4_alloc_eq_table(struct mlx4_dev *dev)
 {
         struct mlx4_priv *priv = mlx4_priv(dev);
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 5c1afe0d73e8..528f89b2cde3 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -525,7 +525,10 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                 goto err_unmap_aux;
         }
 
-        err = mlx4_map_eq_icm(dev, init_hca->eqc_base);
+        err = mlx4_init_icm_table(dev, &priv->eq_table.table,
+                                  init_hca->eqc_base, dev_cap->eqc_entry_sz,
+                                  dev->caps.num_eqs, dev->caps.num_eqs,
+                                  0, 0);
         if (err) {
                 mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
                 goto err_unmap_cmpt;
@@ -668,7 +671,7 @@ err_unmap_mtt:
         mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
 
 err_unmap_eq:
-        mlx4_unmap_eq_icm(dev);
+        mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
 
 err_unmap_cmpt:
         mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
@@ -698,11 +701,11 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
         mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
         mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
         mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
+        mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
         mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
         mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
         mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
         mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
-        mlx4_unmap_eq_icm(dev);
 
         mlx4_UNMAP_ICM_AUX(dev);
         mlx4_free_icm(dev, priv->fw.aux_icm, 0);
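For readers unfamiliar with the ICM table helpers, the new call in
mlx4_init_icm() decomposes roughly as follows. The argument roles are
inferred from this call site and the other context tables set up around it,
not stated in the patch, so treat the annotations as assumptions:

    err = mlx4_init_icm_table(dev, &priv->eq_table.table,
                              init_hca->eqc_base,     /* ICM virtual address of the EQ context table */
                              dev_cap->eqc_entry_sz,  /* bytes of context per EQ */
                              dev->caps.num_eqs,      /* number of entries to cover */
                              dev->caps.num_eqs,      /* all entries reserved, i.e. mapped up front */
                              0, 0);                  /* no low-memory or coherent mapping requested */

Passing dev->caps.num_eqs as both the table size and the reserved count keeps
the old eager-mapping behavior (every EQ context is mapped at init time), but
the table is now sized from eqc_entry_sz and num_eqs rather than from a
single hard-coded page.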
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 5bd79c2b184f..bc72d6e4919b 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -205,9 +205,7 @@ struct mlx4_eq_table {
         void __iomem **uar_map;
         u32 clr_mask;
         struct mlx4_eq *eq;
-        u64 icm_virt;
-        struct page *icm_page;
-        dma_addr_t icm_dma;
+        struct mlx4_icm_table table;
         struct mlx4_icm_table cmpt_table;
         int have_irq;
         u8 inta_pin;
@@ -373,9 +371,6 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
                       struct mlx4_dev_cap *dev_cap,
                       struct mlx4_init_hca_param *init_hca);
 
-int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt);
-void mlx4_unmap_eq_icm(struct mlx4_dev *dev);
-
 int mlx4_cmd_init(struct mlx4_dev *dev);
 void mlx4_cmd_cleanup(struct mlx4_dev *dev);
 void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);