author		Lennert Buytenhek <buytenh@wantstofly.org>	2008-08-27 23:39:13 -0400
committer	Lennert Buytenhek <buytenh@marvell.com>	2008-09-05 00:33:59 -0400
commit		4fdeca3f4e7e35f6c19e87617c23fbb59dfb1a63 (patch)
tree		6c02b3edeadf5f9ed842102604e692b2e7d5abd3 /drivers/net/mv643xx_eth.c
parent		ac840605f3b1d9b99e1e6629a54994f8e003ff91 (diff)
mv643xx_eth: get rid of netif_{stop,wake}_queue() calls on link down/up
There is no need to call netif_{stop,wake}_queue() when the link goes
down/up, as the networking core already takes care of this internally.
Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
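
For illustration only (not part of this patch; the function below is a hypothetical sketch, not mv643xx_eth code): once a driver reports link changes with netif_carrier_off()/netif_carrier_on(), the core link-watch machinery deactivates and reactivates the device's transmit path on its own, so a link handler does not need to pair these calls with netif_stop_queue()/netif_wake_queue().

#include <linux/netdevice.h>

/*
 * Hypothetical sketch, not taken from mv643xx_eth.c: after this patch,
 * reporting a link change only requires toggling the carrier state.
 * The core's link-watch code stops the transmit path when the carrier
 * goes off and resumes it when the carrier comes back, so no explicit
 * netif_stop_queue()/netif_wake_queue() calls are needed here.
 */
static void example_report_link(struct net_device *dev, int link_up)
{
	if (!link_up) {
		netif_carrier_off(dev);		/* core stops scheduling TX */
		return;
	}

	if (!netif_carrier_ok(dev))
		netif_carrier_on(dev);		/* core resumes the TX path */
}

A driver still manages the queue itself when its own TX ring fills up or drains; mv643xx_eth continues to do that via __txq_maybe_wake() and, in tx_timeout_task() below, an explicit netif_stop_queue()/netif_wake_queue() pair around the port reset.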
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r--	drivers/net/mv643xx_eth.c	27
1 file changed, 8 insertions, 19 deletions
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index b4850cf2a8ce..bd3ca470916b 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -631,11 +631,9 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 		for (i = 0; i < mp->txq_count; i++)
 			txq_reclaim(mp->txq + i, 0);
 
-		if (netif_carrier_ok(mp->dev)) {
-			spin_lock_irq(&mp->lock);
-			__txq_maybe_wake(mp->txq);
-			spin_unlock_irq(&mp->lock);
-		}
+		spin_lock_irq(&mp->lock);
+		__txq_maybe_wake(mp->txq);
+		spin_unlock_irq(&mp->lock);
 	}
 #endif
 
@@ -1765,7 +1763,6 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
 		printk(KERN_INFO "%s: link down\n", dev->name);
 
 		netif_carrier_off(dev);
-		netif_stop_queue(dev);
 
 		for (i = 0; i < mp->txq_count; i++) {
 			struct tx_queue *txq = mp->txq + i;
@@ -1799,10 +1796,8 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
 	       speed, duplex ? "full" : "half",
 	       fc ? "en" : "dis");
 
-	if (!netif_carrier_ok(dev)) {
+	if (!netif_carrier_ok(dev))
 		netif_carrier_on(dev);
-		netif_wake_queue(dev);
-	}
 }
 
 static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
@@ -1851,11 +1846,9 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 		 * Enough space again in the primary TX queue for a
 		 * full packet?
 		 */
-		if (netif_carrier_ok(dev)) {
-			spin_lock(&mp->lock);
-			__txq_maybe_wake(mp->txq);
-			spin_unlock(&mp->lock);
-		}
+		spin_lock(&mp->lock);
+		__txq_maybe_wake(mp->txq);
+		spin_unlock(&mp->lock);
 	}
 
 	/*
@@ -2060,7 +2053,6 @@ static int mv643xx_eth_open(struct net_device *dev)
 	}
 
 	netif_carrier_off(dev);
-	netif_stop_queue(dev);
 
 	port_start(mp);
 
@@ -2123,7 +2115,6 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	del_timer_sync(&mp->rx_oom);
 
 	netif_carrier_off(dev);
-	netif_stop_queue(dev);
 
 	free_irq(dev->irq, dev);
 
@@ -2184,11 +2175,9 @@ static void tx_timeout_task(struct work_struct *ugly)
 	mp = container_of(ugly, struct mv643xx_eth_private, tx_timeout_task);
 	if (netif_running(mp->dev)) {
 		netif_stop_queue(mp->dev);
-
 		port_reset(mp);
 		port_start(mp);
-
-		__txq_maybe_wake(mp->txq);
+		netif_wake_queue(mp->dev);
 	}
 }
 