author		Ayaz Abdulla <aabdulla@nvidia.com>	2009-01-09 06:03:44 -0500
committer	David S. Miller <davem@davemloft.net>	2009-01-11 03:07:32 -0500
commit		001eb84bbf7205f8cc541a75364a6a0892b5d0a2
tree		bcd1265262417bd61b6f4b37fada2fca80c13b48
parent		52255bbe3551e481b7af423406ca229a13990b1c
forcedeth: xmit lock fix
This patch fixes a potential race condition between the xmit thread and
the xmit completion thread. The calculation of empty tx descriptors was
not performed under the lock, so the xmit thread could decide to set the
stop flag just as the completion thread finished reaping all outstanding
tx descriptors. The result is a tx queue left in the stopped state with
nothing left to wake it up.

Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
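To make the race concrete, here is a minimal sketch of the bad interleaving
and the corrected ordering. The interleaving diagram is illustrative only;
the code at the end mirrors the nv_start_xmit() path as changed by the
patch below, with unrelated details omitted.

/*
 * Illustrative interleaving (not literal driver code); mirrors the
 * nv_start_xmit() path in the patch below.
 *
 *   xmit thread                            completion thread
 *   -----------                            -----------------
 *   empty_slots = nv_get_empty_tx_slots(np);
 *       (sees no free slots)
 *                                          takes np->lock, reaps every
 *                                          outstanding tx descriptor;
 *                                          the queue is not stopped yet,
 *                                          so it never calls
 *                                          netif_wake_queue(); drops lock
 *   takes np->lock
 *   netif_stop_queue(dev)
 *   np->tx_stop = 1
 *       (queue now stopped with no tx left to complete and wake it)
 *   drops np->lock
 *
 * The fix takes np->lock before sampling the free-slot count, so the
 * check and the stop are atomic with respect to the completion thread:
 */
spin_lock_irqsave(&np->lock, flags);
empty_slots = nv_get_empty_tx_slots(np);
if (unlikely(empty_slots <= entries)) {
	netif_stop_queue(dev);
	np->tx_stop = 1;
	spin_unlock_irqrestore(&np->lock, flags);
	return NETDEV_TX_BUSY;
}
spin_unlock_irqrestore(&np->lock, flags);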
-rw-r--r--	drivers/net/forcedeth.c	6

1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 5b68dc20168d..6905ec9467df 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -2096,14 +2096,15 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 	}
 
+	spin_lock_irqsave(&np->lock, flags);
 	empty_slots = nv_get_empty_tx_slots(np);
 	if (unlikely(empty_slots <= entries)) {
-		spin_lock_irqsave(&np->lock, flags);
 		netif_stop_queue(dev);
 		np->tx_stop = 1;
 		spin_unlock_irqrestore(&np->lock, flags);
 		return NETDEV_TX_BUSY;
 	}
+	spin_unlock_irqrestore(&np->lock, flags);
 
 	start_tx = put_tx = np->put_tx.orig;
 
@@ -2214,14 +2215,15 @@ static int nv_start_xmit_optimized(struct sk_buff *skb, struct net_device *dev)
 			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 	}
 
+	spin_lock_irqsave(&np->lock, flags);
 	empty_slots = nv_get_empty_tx_slots(np);
 	if (unlikely(empty_slots <= entries)) {
-		spin_lock_irqsave(&np->lock, flags);
 		netif_stop_queue(dev);
 		np->tx_stop = 1;
 		spin_unlock_irqrestore(&np->lock, flags);
 		return NETDEV_TX_BUSY;
 	}
+	spin_unlock_irqrestore(&np->lock, flags);
 
 	start_tx = put_tx = np->put_tx.ex;
 	start_tx_ctx = np->put_tx_ctx;
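For context, a hedged sketch of the completion-side counterpart that the
new locking synchronizes against. This is not the driver's literal
nv_tx_done() body; the wake-up condition shown is simplified and
descriptors_were_reaped is a hypothetical stand-in for the driver's
actual progress check. The assumption is that this path already runs
with np->lock held (it is invoked from the interrupt path under the
lock), so after the fix the free-slot check in the xmit paths and this
wake-up can no longer interleave.

/*
 * Simplified completion-side sketch (assumes np->lock is held here;
 * descriptors_were_reaped is illustrative, not a real driver symbol).
 */
if (np->tx_stop && descriptors_were_reaped) {
	np->tx_stop = 0;
	netif_wake_queue(dev);
}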