author     Ayaz Abdulla <aabdulla@nvidia.com>       2009-04-25 05:17:56 -0400
committer  David S. Miller <davem@davemloft.net>    2009-04-27 05:40:51 -0400
commit     8f955d7f042e4ac44891a400d5000928f8db9f58
tree       f4f4e85a014c23eebb5b33c28dc42b5cf441f9fb  /drivers/net
parent     c759a6b4e1cae6aff71f58c9c85404ebcd81b6e0
forcedeth: tx timeout fix
This patch fixes nv_tx_timeout() so that it properly cleans up the tx ring.
It also sets the tx put pointer back to the correct position so that the
driver stays in sync with the HW.
Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
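
For readability, here is the recovery sequence in nv_tx_timeout() with the hunks of this patch applied, consolidated into one listing. It is only a sketch of the new code path: the np fields and nv_*() helpers are forcedeth.c's own, the two declarations come from the second hunk, and the surrounding locking, status read and diagnostics are omitted, so it is not meant to build on its own.

        union ring_type put_tx;   /* declared at the top of nv_tx_timeout() */
        int saved_tx_limit;

        /* 1) stop tx engine */
        nv_stop_tx(dev);

        /* 2) complete any outstanding tx and do not give HW any limited tx pkts */
        saved_tx_limit = np->tx_limit;
        np->tx_limit = 0;  /* prevent giving HW any limited pkts */
        np->tx_stop = 0;   /* prevent waking tx queue */
        if (!nv_optimized(np))
                nv_tx_done(dev, np->tx_ring_size);
        else
                nv_tx_done_optimized(dev, np->tx_ring_size);

        /* save current HW position */
        if (np->tx_change_owner)
                put_tx.ex = np->tx_change_owner->first_tx_desc;
        else
                put_tx = np->put_tx;

        /* 3) clear all tx state */
        nv_drain_tx(dev);
        nv_init_tx(dev);

        /* 4) restore state to current HW position */
        np->get_tx = np->put_tx = put_tx;
        np->tx_limit = saved_tx_limit;

        /* 5) restart tx engine, then wake the queue */
        nv_start_tx(dev);
        netif_wake_queue(dev);

The differences from the old path are visible in the diff below: the tx ring is now always drained and re-initialized instead of only when dead entries were detected, tx_limit and tx_stop are temporarily cleared so no limited packets are handed to the HW while the ring is being cleaned, and the put/get pointers are restored to the saved HW position before the engine is restarted and the queue is woken.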
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/forcedeth.c | 31
1 file changed, 21 insertions(+), 10 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 11d5db16ed9c..f9a846b1b92f 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1880,6 +1880,7 @@ static void nv_init_tx(struct net_device *dev)
         np->tx_pkts_in_progress = 0;
         np->tx_change_owner = NULL;
         np->tx_end_flip = NULL;
+        np->tx_stop = 0;
 
         for (i = 0; i < np->tx_ring_size; i++) {
                 if (!nv_optimized(np)) {
@@ -2530,6 +2531,8 @@ static void nv_tx_timeout(struct net_device *dev)
         struct fe_priv *np = netdev_priv(dev);
         u8 __iomem *base = get_hwbase(dev);
         u32 status;
+        union ring_type put_tx;
+        int saved_tx_limit;
 
         if (np->msi_flags & NV_MSI_X_ENABLED)
                 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
@@ -2589,24 +2592,32 @@ static void nv_tx_timeout(struct net_device *dev)
         /* 1) stop tx engine */
         nv_stop_tx(dev);
 
-        /* 2) check that the packets were not sent already: */
+        /* 2) complete any outstanding tx and do not give HW any limited tx pkts */
+        saved_tx_limit = np->tx_limit;
+        np->tx_limit = 0; /* prevent giving HW any limited pkts */
+        np->tx_stop = 0;  /* prevent waking tx queue */
         if (!nv_optimized(np))
                 nv_tx_done(dev, np->tx_ring_size);
         else
                 nv_tx_done_optimized(dev, np->tx_ring_size);
 
-        /* 3) if there are dead entries: clear everything */
-        if (np->get_tx_ctx != np->put_tx_ctx) {
-                printk(KERN_DEBUG "%s: tx_timeout: dead entries!\n", dev->name);
-                nv_drain_tx(dev);
-                nv_init_tx(dev);
-                setup_hw_rings(dev, NV_SETUP_TX_RING);
-        }
+        /* save current HW position */
+        if (np->tx_change_owner)
+                put_tx.ex = np->tx_change_owner->first_tx_desc;
+        else
+                put_tx = np->put_tx;
 
-        netif_wake_queue(dev);
+        /* 3) clear all tx state */
+        nv_drain_tx(dev);
+        nv_init_tx(dev);
+
+        /* 4) restore state to current HW position */
+        np->get_tx = np->put_tx = put_tx;
+        np->tx_limit = saved_tx_limit;
 
-        /* 4) restart tx engine */
+        /* 5) restart tx engine */
         nv_start_tx(dev);
+        netif_wake_queue(dev);
         spin_unlock_irq(&np->lock);
 }
 