| author    | David S. Miller <davem@davemloft.net> | 2008-01-16 01:43:24 -0500 |
|-----------|---------------------------------------|---------------------------|
| committer | David S. Miller <davem@davemloft.net> | 2008-01-17 04:49:29 -0500 |
| commit    | d2c7ddd6261eb885091cf6ddbcfae01f4216fb8e (patch) | |
| tree      | fefe1f6e6162e019be7a3f27788a395b1d121688 /drivers/net/e1000 | |
| parent    | d8c89eb3a12f0da96d049bd515c7fa3702e511c5 (diff) | |
[NET]: Fix TX timeout regression in Intel drivers.
This fixes a regression added by changeset
53e52c729cc169db82a6105fac7a166e10c2ec36 ("[NET]: Make ->poll()
breakout consistent in Intel ethernet drivers.")
As pointed out by Jesse Brandeburg, for three of the drivers edited
above there is breakout logic in the *_clean_tx_irq() code to prevent
running TX reclaim forever. If this occurs, we have to elide NAPI
poll completion or else those TX events will never be serviced.
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
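The pattern the fix restores is the standard NAPI contract: if the TX reclaim pass could not finish its work, the driver must report the full budget back from ->poll() so the core keeps polling instead of completing NAPI and re-enabling interrupts. The sketch below is a minimal userspace model of that contract, not the e1000 code itself: the fake_* helpers and the TX_CLEAN_LIMIT cap are stand-ins, and the "TX work still pending" condition is a simplification of the driver's actual tx_cleaned return value.

```c
/*
 * Userspace sketch of the NAPI-poll pattern this commit restores.
 * Not kernel code: the structures and fake_* helpers are simplified
 * stand-ins for the driver-specific cleanup routines.
 */
#include <stdbool.h>
#include <stdio.h>

#define TX_CLEAN_LIMIT 64   /* per-poll TX reclaim cap (assumed value) */

struct fake_ring {
	int pending;        /* descriptors still waiting to be processed */
};

/* Reclaim at most TX_CLEAN_LIMIT descriptors; return true if TX work
 * is still pending afterwards (i.e. the pass had to break out early). */
static bool fake_clean_tx(struct fake_ring *tx)
{
	int cleaned = tx->pending < TX_CLEAN_LIMIT ? tx->pending : TX_CLEAN_LIMIT;

	tx->pending -= cleaned;
	return tx->pending > 0;
}

/* Process up to `budget` RX packets and report how many were handled. */
static int fake_clean_rx(struct fake_ring *rx, int budget)
{
	int done = rx->pending < budget ? rx->pending : budget;

	rx->pending -= done;
	return done;
}

/* The poll routine: if TX reclaim left work behind, claim the full
 * budget so the core keeps polling instead of completing NAPI. */
static int fake_poll(struct fake_ring *tx, struct fake_ring *rx, int budget)
{
	bool tx_work_left = fake_clean_tx(tx);
	int work_done = fake_clean_rx(rx, budget);

	if (tx_work_left)
		work_done = budget;

	if (work_done < budget)
		printf("poll complete: would exit NAPI mode and re-enable IRQs\n");
	else
		printf("budget consumed: core keeps calling ->poll()\n");

	return work_done;
}

int main(void)
{
	struct fake_ring tx = { .pending = 200 };  /* more TX work than one pass reclaims */
	struct fake_ring rx = { .pending = 3 };

	/* Even though RX work is light, the partial TX clean keeps us polling. */
	fake_poll(&tx, &rx, 64);
	return 0;
}
```

Reporting the full budget here is deliberately pessimistic: it costs at most one extra poll pass, whereas completing NAPI with TX work outstanding can leave those descriptors unserviced, which is the TX timeout the commit message describes.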
Diffstat (limited to 'drivers/net/e1000')
-rw-r--r-- | drivers/net/e1000/e1000_main.c | 9 |
1 file changed, 6 insertions, 3 deletions
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 13d57b0a88fa..0c9a6f7104d2 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3919,7 +3919,7 @@ e1000_clean(struct napi_struct *napi, int budget)
 {
         struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
         struct net_device *poll_dev = adapter->netdev;
-        int work_done = 0;
+        int tx_cleaned = 0, work_done = 0;
 
         /* Must NOT use netdev_priv macro here. */
         adapter = poll_dev->priv;
@@ -3929,14 +3929,17 @@ e1000_clean(struct napi_struct *napi, int budget)
          * simultaneously. A failure obtaining the lock means
          * tx_ring[0] is currently being cleaned anyway. */
         if (spin_trylock(&adapter->tx_queue_lock)) {
-                e1000_clean_tx_irq(adapter,
-                                   &adapter->tx_ring[0]);
+                tx_cleaned = e1000_clean_tx_irq(adapter,
+                                                &adapter->tx_ring[0]);
                 spin_unlock(&adapter->tx_queue_lock);
         }
 
         adapter->clean_rx(adapter, &adapter->rx_ring[0],
                           &work_done, budget);
 
+        if (tx_cleaned)
+                work_done = budget;
+
         /* If budget not fully consumed, exit the polling mode */
         if (work_done < budget) {
                 if (likely(adapter->itr_setting & 3))
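For context, the "breakout logic in the *_clean_tx_irq() code" that the commit message mentions is a per-call cap on how many TX descriptors get reclaimed, so that one pass cannot run forever. A generic illustration of that pattern follows; it is not the actual e1000_clean_tx_irq() body, and the demo_* names and the 64-descriptor weight are assumptions made only for the sketch.

```c
/*
 * Illustration of a bounded TX-reclaim loop (not the e1000 code).
 * The names, structures and the 64-descriptor weight are assumptions.
 */
#define TX_RECLAIM_WEIGHT 64

struct demo_tx_desc {
	int completed;              /* set by "hardware" when the send is done */
};

struct demo_tx_queue {
	struct demo_tx_desc *ring;
	unsigned int next_to_clean;
	unsigned int size;
};

/*
 * Reclaim completed descriptors, but never more than TX_RECLAIM_WEIGHT in
 * one call, so a busy queue cannot keep the poll routine spinning forever.
 * Returns nonzero if anything was reclaimed; the caller treats that as
 * "TX work may still be pending" and reports the full budget upward.
 */
int demo_bounded_tx_clean(struct demo_tx_queue *q)
{
	unsigned int count = 0;
	int cleaned = 0;

	while (q->ring[q->next_to_clean].completed) {
		q->ring[q->next_to_clean].completed = 0;
		q->next_to_clean = (q->next_to_clean + 1) % q->size;
		cleaned = 1;

		/* Breakout: bound the work done in a single pass. */
		if (++count == TX_RECLAIM_WEIGHT)
			break;
	}

	return cleaned;
}
```

It is this cap that makes the fix above necessary: once the reclaim loop can stop early, the poll routine can no longer infer from the RX work_done count alone whether TX work remains.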