author		Roland Dreier <rolandd@cisco.com>	2009-09-05 23:24:49 -0400
committer	Roland Dreier <rolandd@cisco.com>	2009-09-05 23:24:49 -0400
commit		fa0681d2129732027355d6b7083dd8932b9b799d
tree		0730a4ccab5f7c5b4da772b76e6e709839ffe643 /drivers/net/mlx4/eq.c
parent		338a8fad27908f64a0d249cc9f5c7d4ddb7e5684
mlx4_core: Allocate and map sufficient ICM memory for EQ context
The current implementation allocates a single host page for EQ context memory, which was OK when we only allocated a few EQs.  However, since we now allocate an EQ for each CPU core, this patch removes the hard-coded limit (which we exceed with 4 KB pages and 128-byte EQ context entries with 32 CPUs) and uses the same ICM table code as all other context tables, which ends up simplifying the code quite a bit while fixing the problem.

This problem was actually hit in practice on a dual-socket Nehalem box with 16 real hardware threads and sufficiently odd ACPI tables that it shows on boot

    SMP: Allowing 32 CPUs, 16 hotplug CPUs

so num_possible_cpus() ends up 32, and mlx4 ends up creating 33 MSI-X interrupts and 33 EQs.  This mlx4 bug means that mlx4 can't even initialize at all on this quite mainstream system.

Cc: <stable@kernel.org>
Reported-by: Eli Cohen <eli@mellanox.co.il>
Tested-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
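As a quick sanity check of the numbers above, here is a minimal stand-alone sketch (not mlx4 driver code); the constants are the 4 KB page size, 128-byte EQ context entry size, and 33 EQs quoted in the message:

/*
 * Sketch of the overflow described in the commit message: the EQ
 * context entries for 33 EQs no longer fit in a single 4 KB host
 * page of ICM.  Numbers are taken from the text above.
 */
#include <stdio.h>

int main(void)
{
	const unsigned int page_size    = 4096;	/* one 4 KB host page of ICM   */
	const unsigned int eqc_entry_sz = 128;	/* bytes of context per EQ     */
	const unsigned int num_eqs      = 33;	/* EQs created on the reported
						   system (num_possible_cpus()
						   == 32, plus one more EQ)    */

	unsigned int needed = num_eqs * eqc_entry_sz;	/* 4224 bytes */

	printf("EQ context needs %u bytes, one page provides %u bytes -> %s\n",
	       needed, page_size,
	       needed > page_size ? "single-page mapping is too small"
				  : "fits in one page");
	return 0;
}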
Diffstat (limited to 'drivers/net/mlx4/eq.c')
-rw-r--r--	drivers/net/mlx4/eq.c | 42 ------------------------------------------
1 file changed, 0 insertions(+), 42 deletions(-)
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index c11a0525c40e..d7974a60b961 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -525,48 +525,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
 	iounmap(priv->clr_base);
 }
 
-int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
-{
-	struct mlx4_priv *priv = mlx4_priv(dev);
-	int ret;
-
-	/*
-	 * We assume that mapping one page is enough for the whole EQ
-	 * context table.  This is fine with all current HCAs, because
-	 * we only use 32 EQs and each EQ uses 64 bytes of context
-	 * memory, or 1 KB total.
-	 */
-	priv->eq_table.icm_virt = icm_virt;
-	priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
-	if (!priv->eq_table.icm_page)
-		return -ENOMEM;
-	priv->eq_table.icm_dma  = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
-					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-	if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
-		__free_page(priv->eq_table.icm_page);
-		return -ENOMEM;
-	}
-
-	ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
-	if (ret) {
-		pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
-			       PCI_DMA_BIDIRECTIONAL);
-		__free_page(priv->eq_table.icm_page);
-	}
-
-	return ret;
-}
-
-void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
-{
-	struct mlx4_priv *priv = mlx4_priv(dev);
-
-	mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
-	pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
-		       PCI_DMA_BIDIRECTIONAL);
-	__free_page(priv->eq_table.icm_page);
-}
-
 int mlx4_alloc_eq_table(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);