author     Anton Vorontsov <avorontsov@ru.mvista.com>   2009-11-10 09:11:10 -0500
committer  David S. Miller <davem@davemloft.net>        2009-11-11 22:03:36 -0500
commit     a3bc1f11e9b867a4f49505ecac486a33af248b2e
tree       026512ce0362e0f13f0c35d2fc6bc01d2498a2ed /drivers/net/gianfar.c
parent     836cf7faf8c75743477ed6ed341cce491f3183fb
gianfar: Revive SKB recycling
Before calling gfar_clean_tx_ring() the driver grabs an irqsave
spinlock, and then tries to recycle skbs. But since
skb_recycle_check() returns 0 with IRQs disabled, we'll never
recycle any skbs.
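That bail-out sits right at the top of that era's skb_recycle_check() in
net/core/skbuff.c. Trimmed and paraphrased (the comments and the summarized
checks below are ours, not the exact source):

int skb_recycle_check(struct sk_buff *skb, int skb_size)
{
        if (irqs_disabled())
                return 0;       /* the old gianfar cleanup path always hit this */

        /* ... remaining checks (paraphrased): skb is linear, not cloned
         * or shared, and large enough to be reused as an RX buffer ... */
        return 1;
}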
It appears that gfar_clean_tx_ring() and gfar_start_xmit() are
mostly independent and can work in parallel, except when they
modify num_txbdfree.
So we can drop the lock from most sections and thus fix the skb
recycling.
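To illustrate, here is a minimal userspace model of the resulting scheme
(a pthread mutex standing in for the irqsave spinlock; RING_SIZE, num_free,
xmit() and clean() are our names, not the driver's). It leans on the same
assumption as the patch: per queue there is a single xmit caller, so the
free count can only grow underneath it:

#include <pthread.h>
#include <stdio.h>

#define RING_SIZE 64                    /* illustrative BD ring size */

static pthread_mutex_t txlock = PTHREAD_MUTEX_INITIALIZER;
static int num_free = RING_SIZE;        /* models num_txbdfree */

/* Producer side, analogous to gfar_start_xmit() after this patch. */
static int xmit(int bds_needed)
{
        if (bds_needed > num_free)      /* lockless read: value can only grow */
                return -1;              /* models NETDEV_TX_BUSY */

        /* ... build the buffer descriptors, no lock held ... */

        pthread_mutex_lock(&txlock);    /* lock only the shared update */
        num_free -= bds_needed;
        pthread_mutex_unlock(&txlock);
        return 0;
}

/* Consumer side, analogous to gfar_clean_tx_ring() after this patch. */
static void clean(int bds_done)
{
        /* ... free or recycle the skbs with the lock dropped ... */

        pthread_mutex_lock(&txlock);
        num_free += bds_done;
        pthread_mutex_unlock(&txlock);
}

int main(void)
{
        if (xmit(3) == 0)
                clean(3);
        printf("free BDs: %d\n", num_free);
        return 0;
}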
Signed-off-by: Anton Vorontsov <avorontsov@ru.mvista.com>
Acked-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/gianfar.c')
-rw-r--r--  drivers/net/gianfar.c | 31 +++++++++++++++++++------------
1 file changed, 19 insertions(+), 12 deletions(-)
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index fde430a0b84b..16def131c390 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1928,14 +1928,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
         /* total number of fragments in the SKB */
         nr_frags = skb_shinfo(skb)->nr_frags;
 
-        spin_lock_irqsave(&tx_queue->txlock, flags);
-
         /* check if there is space to queue this packet */
         if ((nr_frags+1) > tx_queue->num_txbdfree) {
                 /* no space, stop the queue */
                 netif_tx_stop_queue(txq);
                 dev->stats.tx_fifo_errors++;
-                spin_unlock_irqrestore(&tx_queue->txlock, flags);
                 return NETDEV_TX_BUSY;
         }
 
@@ -1999,6 +1996,20 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
         lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
 
         /*
+         * We can work in parallel with gfar_clean_tx_ring(), except
+         * when modifying num_txbdfree. Note that we didn't grab the lock
+         * when we were reading the num_txbdfree and checking for available
+         * space, that's because outside of this function it can only grow,
+         * and once we've got needed space, it cannot suddenly disappear.
+         *
+         * The lock also protects us from gfar_error(), which can modify
+         * regs->tstat and thus retrigger the transfers, which is why we
+         * also must grab the lock before setting ready bit for the first
+         * to be transmitted BD.
+         */
+        spin_lock_irqsave(&tx_queue->txlock, flags);
+
+        /*
          * The powerpc-specific eieio() is used, as wmb() has too strong
          * semantics (it requires synchronization between cacheable and
          * uncacheable mappings, which eieio doesn't provide and which we
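As an aside on the barrier discussed in the comment kept above: the pattern
is "fill the descriptor, order the stores, set READY last", so neither the
device nor a tstat-triggered restart can observe a half-built BD. A
compilable sketch with an illustrative descriptor layout (not gianfar's
real struct) and a plain compiler barrier standing in for eieio()/wmb():

#include <stdint.h>
#include <stdio.h>

struct txbd {                   /* illustrative layout, not the real BD */
        uint32_t lstatus;       /* status + length; READY handed over last */
        uint32_t bufptr;        /* buffer address the device will DMA from */
};

#define TXBD_READY 0x8000000u   /* illustrative bit value */

/* Stand-in for the kernel's eieio()/wmb(); a compiler barrier suffices
 * in this single-threaded model. */
#define order_stores() __asm__ __volatile__("" ::: "memory")

static void publish_bd(struct txbd *bd, uint32_t buf, uint32_t lstatus)
{
        bd->bufptr = buf;       /* fully initialize the descriptor first */
        order_stores();         /* make the fill visible before READY */
        bd->lstatus = lstatus | TXBD_READY;
}

int main(void)
{
        struct txbd bd = { 0, 0 };

        publish_bd(&bd, 0x1000, 64);
        printf("lstatus=%#x bufptr=%#x\n",
               (unsigned)bd.lstatus, (unsigned)bd.bufptr);
        return 0;
}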
@@ -2225,6 +2236,8 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
         skb_dirtytx = tx_queue->skb_dirtytx;
 
         while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
+                unsigned long flags;
+
                 frags = skb_shinfo(skb)->nr_frags;
                 lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
 
@@ -2269,7 +2282,9 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
                                 TX_RING_MOD_MASK(tx_ring_size);
 
                 howmany++;
+                spin_lock_irqsave(&tx_queue->txlock, flags);
                 tx_queue->num_txbdfree += frags + 1;
+                spin_unlock_irqrestore(&tx_queue->txlock, flags);
         }
 
         /* If we freed a buffer, we can restart transmission, if necessary */
@@ -2548,7 +2563,6 @@ static int gfar_poll(struct napi_struct *napi, int budget)
         int tx_cleaned = 0, i, left_over_budget = budget;
         unsigned long serviced_queues = 0;
         int num_queues = 0;
-        unsigned long flags;
 
         num_queues = gfargrp->num_rx_queues;
         budget_per_queue = budget/num_queues;
@@ -2568,14 +2582,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
                 rx_queue = priv->rx_queue[i];
                 tx_queue = priv->tx_queue[rx_queue->qindex];
 
-                /* If we fail to get the lock,
-                 * don't bother with the TX BDs */
-                if (spin_trylock_irqsave(&tx_queue->txlock, flags)) {
-                        tx_cleaned += gfar_clean_tx_ring(tx_queue);
-                        spin_unlock_irqrestore(&tx_queue->txlock,
-                                                flags);
-                }
-
+                tx_cleaned += gfar_clean_tx_ring(tx_queue);
                 rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
                                                 budget_per_queue);
                 rx_cleaned += rx_cleaned_per_queue;