-rw-r--r--   drivers/net/mlx4/catas.c | 106
-rw-r--r--   drivers/net/mlx4/eq.c    |  56
-rw-r--r--   drivers/net/mlx4/intf.c  |   2
-rw-r--r--   drivers/net/mlx4/main.c  |  24
-rw-r--r--   drivers/net/mlx4/mlx4.h  |  13
5 files changed, 129 insertions, 72 deletions
diff --git a/drivers/net/mlx4/catas.c b/drivers/net/mlx4/catas.c
index 1bb088aeaf71..6b32ec94b3a8 100644
--- a/drivers/net/mlx4/catas.c
+++ b/drivers/net/mlx4/catas.c
@@ -30,41 +30,133 @@
  * SOFTWARE.
  */
 
+#include <linux/workqueue.h>
+
 #include "mlx4.h"
 
-void mlx4_handle_catas_err(struct mlx4_dev *dev)
+enum {
+	MLX4_CATAS_POLL_INTERVAL	= 5 * HZ,
+};
+
+static DEFINE_SPINLOCK(catas_lock);
+
+static LIST_HEAD(catas_list);
+static struct workqueue_struct *catas_wq;
+static struct work_struct catas_work;
+
+static int internal_err_reset = 1;
+module_param(internal_err_reset, int, 0644);
+MODULE_PARM_DESC(internal_err_reset,
+		 "Reset device on internal errors if non-zero (default 1)");
+
+static void dump_err_buf(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
 	int i;
 
-	mlx4_err(dev, "Catastrophic error detected:\n");
+	mlx4_err(dev, "Internal error detected:\n");
 	for (i = 0; i < priv->fw.catas_size; ++i)
 		mlx4_err(dev, "  buf[%02x]: %08x\n",
 			 i, swab32(readl(priv->catas_err.map + i)));
+}
 
-	mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0);
+static void poll_catas(unsigned long dev_ptr)
+{
+	struct mlx4_dev *dev = (struct mlx4_dev *) dev_ptr;
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	if (readl(priv->catas_err.map)) {
+		dump_err_buf(dev);
+
+		mlx4_dispatch_event(dev, MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR, 0, 0);
+
+		if (internal_err_reset) {
+			spin_lock(&catas_lock);
+			list_add(&priv->catas_err.list, &catas_list);
+			spin_unlock(&catas_lock);
+
+			queue_work(catas_wq, &catas_work);
+		}
+	} else
+		mod_timer(&priv->catas_err.timer,
+			  round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL));
 }
 
-void mlx4_map_catas_buf(struct mlx4_dev *dev)
+static void catas_reset(struct work_struct *work)
+{
+	struct mlx4_priv *priv, *tmppriv;
+	struct mlx4_dev *dev;
+
+	LIST_HEAD(tlist);
+	int ret;
+
+	spin_lock_irq(&catas_lock);
+	list_splice_init(&catas_list, &tlist);
+	spin_unlock_irq(&catas_lock);
+
+	list_for_each_entry_safe(priv, tmppriv, &tlist, catas_err.list) {
+		ret = mlx4_restart_one(priv->dev.pdev);
+		dev = &priv->dev;
+		if (ret)
+			mlx4_err(dev, "Reset failed (%d)\n", ret);
+		else
+			mlx4_dbg(dev, "Reset succeeded\n");
+	}
+}
+
+void mlx4_start_catas_poll(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	unsigned long addr;
 
+	INIT_LIST_HEAD(&priv->catas_err.list);
+	init_timer(&priv->catas_err.timer);
+	priv->catas_err.map = NULL;
+
 	addr = pci_resource_start(dev->pdev, priv->fw.catas_bar) +
 		priv->fw.catas_offset;
 
 	priv->catas_err.map = ioremap(addr, priv->fw.catas_size * 4);
-	if (!priv->catas_err.map)
-		mlx4_warn(dev, "Failed to map catastrophic error buffer at 0x%lx\n",
+	if (!priv->catas_err.map) {
+		mlx4_warn(dev, "Failed to map internal error buffer at 0x%lx\n",
 			  addr);
+		return;
+	}
 
+	priv->catas_err.timer.data     = (unsigned long) dev;
+	priv->catas_err.timer.function = poll_catas;
+	priv->catas_err.timer.expires  =
+		round_jiffies(jiffies + MLX4_CATAS_POLL_INTERVAL);
+	add_timer(&priv->catas_err.timer);
 }
 
-void mlx4_unmap_catas_buf(struct mlx4_dev *dev)
+void mlx4_stop_catas_poll(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
 
+	del_timer_sync(&priv->catas_err.timer);
+
 	if (priv->catas_err.map)
 		iounmap(priv->catas_err.map);
+
+	spin_lock_irq(&catas_lock);
+	list_del(&priv->catas_err.list);
+	spin_unlock_irq(&catas_lock);
+}
+
+int __init mlx4_catas_init(void)
+{
+	INIT_WORK(&catas_work, catas_reset);
+
+	catas_wq = create_singlethread_workqueue("mlx4_err");
+	if (!catas_wq)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void mlx4_catas_cleanup(void)
+{
+	destroy_workqueue(catas_wq);
 }
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 27a82cecd693..2095c843fa15 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -89,14 +89,12 @@ struct mlx4_eq_context {
 			       (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
 			       (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
 			       (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
-			       (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
 			       (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)	    | \
 			       (1ull << MLX4_EVENT_TYPE_ECC_DETECT)	    | \
 			       (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
 			       (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
 			       (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)	    | \
 			       (1ull << MLX4_EVENT_TYPE_CMD))
-#define MLX4_CATAS_EVENT_MASK  (1ull << MLX4_EVENT_TYPE_LOCAL_CATAS_ERROR)
 
 struct mlx4_eqe {
 	u8			reserved1;
@@ -264,7 +262,7 @@ static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
 
 	writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);
 
-	for (i = 0; i < MLX4_EQ_CATAS; ++i)
+	for (i = 0; i < MLX4_NUM_EQ; ++i)
 		work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);
 
 	return IRQ_RETVAL(work);
@@ -281,14 +279,6 @@ static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t mlx4_catas_interrupt(int irq, void *dev_ptr)
-{
-	mlx4_handle_catas_err(dev_ptr);
-
-	/* MSI-X vectors always belong to us */
-	return IRQ_HANDLED;
-}
-
 static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
 			int eq_num)
 {
@@ -490,11 +480,9 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
 
 	if (eq_table->have_irq)
 		free_irq(dev->pdev->irq, dev);
-	for (i = 0; i < MLX4_EQ_CATAS; ++i)
+	for (i = 0; i < MLX4_NUM_EQ; ++i)
 		if (eq_table->eq[i].have_irq)
 			free_irq(eq_table->eq[i].irq, eq_table->eq + i);
-	if (eq_table->eq[MLX4_EQ_CATAS].have_irq)
-		free_irq(eq_table->eq[MLX4_EQ_CATAS].irq, dev);
 }
 
 static int __devinit mlx4_map_clr_int(struct mlx4_dev *dev)
@@ -598,32 +586,19 @@ int __devinit mlx4_init_eq_table(struct mlx4_dev *dev)
 	if (dev->flags & MLX4_FLAG_MSI_X) {
 		static const char *eq_name[] = {
 			[MLX4_EQ_COMP]  = DRV_NAME " (comp)",
-			[MLX4_EQ_ASYNC] = DRV_NAME " (async)",
-			[MLX4_EQ_CATAS] = DRV_NAME " (catas)"
+			[MLX4_EQ_ASYNC] = DRV_NAME " (async)"
 		};
 
-		err = mlx4_create_eq(dev, 1, MLX4_EQ_CATAS,
-				     &priv->eq_table.eq[MLX4_EQ_CATAS]);
-		if (err)
-			goto err_out_async;
-
-		for (i = 0; i < MLX4_EQ_CATAS; ++i) {
+		for (i = 0; i < MLX4_NUM_EQ; ++i) {
 			err = request_irq(priv->eq_table.eq[i].irq,
 					  mlx4_msi_x_interrupt,
 					  0, eq_name[i], priv->eq_table.eq + i);
 			if (err)
-				goto err_out_catas;
+				goto err_out_async;
 
 			priv->eq_table.eq[i].have_irq = 1;
 		}
 
-		err = request_irq(priv->eq_table.eq[MLX4_EQ_CATAS].irq,
-				  mlx4_catas_interrupt, 0,
-				  eq_name[MLX4_EQ_CATAS], dev);
-		if (err)
-			goto err_out_catas;
-
-		priv->eq_table.eq[MLX4_EQ_CATAS].have_irq = 1;
 	} else {
 		err = request_irq(dev->pdev->irq, mlx4_interrupt,
 				  IRQF_SHARED, DRV_NAME, dev);
@@ -639,22 +614,11 @@ int __devinit mlx4_init_eq_table(struct mlx4_dev *dev)
 		mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
 			  priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);
 
-	for (i = 0; i < MLX4_EQ_CATAS; ++i)
+	for (i = 0; i < MLX4_NUM_EQ; ++i)
 		eq_set_ci(&priv->eq_table.eq[i], 1);
 
-	if (dev->flags & MLX4_FLAG_MSI_X) {
-		err = mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 0,
-				  priv->eq_table.eq[MLX4_EQ_CATAS].eqn);
-		if (err)
-			mlx4_warn(dev, "MAP_EQ for catas EQ %d failed (%d)\n",
-				  priv->eq_table.eq[MLX4_EQ_CATAS].eqn, err);
-	}
-
 	return 0;
 
-err_out_catas:
-	mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);
-
 err_out_async:
 	mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
 
@@ -675,19 +639,13 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int i;
 
-	if (dev->flags & MLX4_FLAG_MSI_X)
-		mlx4_MAP_EQ(dev, MLX4_CATAS_EVENT_MASK, 1,
-			    priv->eq_table.eq[MLX4_EQ_CATAS].eqn);
-
 	mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
 		    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
 
 	mlx4_free_irqs(dev);
 
-	for (i = 0; i < MLX4_EQ_CATAS; ++i)
+	for (i = 0; i < MLX4_NUM_EQ; ++i)
 		mlx4_free_eq(dev, &priv->eq_table.eq[i]);
-	if (dev->flags & MLX4_FLAG_MSI_X)
-		mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_CATAS]);
 
 	mlx4_unmap_clr_int(dev);
 
diff --git a/drivers/net/mlx4/intf.c b/drivers/net/mlx4/intf.c
index 9ae951bf6aa6..be5d9e90ccf2 100644
--- a/drivers/net/mlx4/intf.c
+++ b/drivers/net/mlx4/intf.c
@@ -142,6 +142,7 @@ int mlx4_register_device(struct mlx4_dev *dev)
 		mlx4_add_device(intf, priv);
 
 	mutex_unlock(&intf_mutex);
+	mlx4_start_catas_poll(dev);
 
 	return 0;
 }
@@ -151,6 +152,7 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	struct mlx4_interface *intf;
 
+	mlx4_stop_catas_poll(dev);
 	mutex_lock(&intf_mutex);
 
 	list_for_each_entry(intf, &intf_list, list)
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index a4f2e0475a71..e8f45e6aa95b 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -583,13 +583,11 @@ static int __devinit mlx4_setup_hca(struct mlx4_dev *dev)
 		goto err_pd_table_free;
 	}
 
-	mlx4_map_catas_buf(dev);
-
 	err = mlx4_init_eq_table(dev);
 	if (err) {
 		mlx4_err(dev, "Failed to initialize "
 			 "event queue table, aborting.\n");
-		goto err_catas_buf;
+		goto err_mr_table_free;
 	}
 
 	err = mlx4_cmd_use_events(dev);
@@ -659,8 +657,7 @@ err_cmd_poll:
 err_eq_table_free:
 	mlx4_cleanup_eq_table(dev);
 
-err_catas_buf:
-	mlx4_unmap_catas_buf(dev);
+err_mr_table_free:
 	mlx4_cleanup_mr_table(dev);
 
 err_pd_table_free:
@@ -836,9 +833,6 @@ err_cleanup:
 	mlx4_cleanup_cq_table(dev);
 	mlx4_cmd_use_polling(dev);
 	mlx4_cleanup_eq_table(dev);
-
-	mlx4_unmap_catas_buf(dev);
-
 	mlx4_cleanup_mr_table(dev);
 	mlx4_cleanup_pd_table(dev);
 	mlx4_cleanup_uar_table(dev);
@@ -885,9 +879,6 @@ static void __devexit mlx4_remove_one(struct pci_dev *pdev)
 		mlx4_cleanup_cq_table(dev);
 		mlx4_cmd_use_polling(dev);
 		mlx4_cleanup_eq_table(dev);
-
-		mlx4_unmap_catas_buf(dev);
-
 		mlx4_cleanup_mr_table(dev);
 		mlx4_cleanup_pd_table(dev);
 
@@ -908,6 +899,12 @@ static void __devexit mlx4_remove_one(struct pci_dev *pdev)
 	}
 }
 
+int mlx4_restart_one(struct pci_dev *pdev)
+{
+	mlx4_remove_one(pdev);
+	return mlx4_init_one(pdev, NULL);
+}
+
 static struct pci_device_id mlx4_pci_table[] = {
 	{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
 	{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
@@ -930,6 +927,10 @@ static int __init mlx4_init(void)
 {
 	int ret;
 
+	ret = mlx4_catas_init();
+	if (ret)
+		return ret;
+
 	ret = pci_register_driver(&mlx4_driver);
 	return ret < 0 ? ret : 0;
 }
@@ -937,6 +938,7 @@ static int __init mlx4_init(void)
 static void __exit mlx4_cleanup(void)
 {
 	pci_unregister_driver(&mlx4_driver);
+	mlx4_catas_cleanup();
 }
 
 module_init(mlx4_init);
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index d9c91a71fc87..be304a7c2c91 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -39,6 +39,7 @@
 
 #include <linux/mutex.h>
 #include <linux/radix-tree.h>
+#include <linux/timer.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>
@@ -67,7 +68,6 @@ enum {
 enum {
 	MLX4_EQ_ASYNC,
 	MLX4_EQ_COMP,
-	MLX4_EQ_CATAS,
 	MLX4_NUM_EQ
 };
 
@@ -248,7 +248,8 @@ struct mlx4_mcg_table {
 
 struct mlx4_catas_err {
 	u32 __iomem	       *map;
-	int			size;
+	struct timer_list	timer;
+	struct list_head	list;
 };
 
 struct mlx4_priv {
@@ -311,9 +312,11 @@ void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
 void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
 void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
 
-void mlx4_map_catas_buf(struct mlx4_dev *dev);
-void mlx4_unmap_catas_buf(struct mlx4_dev *dev);
-
+void mlx4_start_catas_poll(struct mlx4_dev *dev);
+void mlx4_stop_catas_poll(struct mlx4_dev *dev);
+int mlx4_catas_init(void);
+void mlx4_catas_cleanup(void);
+int mlx4_restart_one(struct pci_dev *pdev);
 int mlx4_register_device(struct mlx4_dev *dev);
 void mlx4_unregister_device(struct mlx4_dev *dev);
 void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_event type,
