author    Eric Dumazet <edumazet@google.com>    2012-10-05 02:23:55 -0400
committer David S. Miller <davem@davemloft.net>    2012-10-07 00:40:54 -0400
commit    acb600def2110b1310466c0e485c0d26299898ae (patch)
tree      21036c7d0518601aba70dde0246ac229cd8dfc0c /drivers/net/ethernet/marvell
parent    809d5fc9bf6589276a12bd4fd611e4c7ff9940c3 (diff)
net: remove skb recycling
Over time, the skb recycling infrastructure got little interest and many bugs. Generic rx path skb allocation now uses page fragments for efficient GRO / TCP coalescing, and recycling a tx skb for the rx path is not worth the pain.

The last identified bug is that fat skbs can be recycled and end up using high order pages after a few iterations. Maxime Bizon helped by pointing out that commit 87151b8689d (net: allow pskb_expand_head() to get maximum tailroom) introduced this regression for recycled skbs.

Instead of fixing this bug, let's remove skb recycling. Drivers wanting really hot skbs should use build_skb() anyway, to allocate/populate the sk_buff right before netif_receive_skb().

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Maxime Bizon <mbizon@freebox.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
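For reference, the build_skb() pattern the changelog recommends looks roughly like the sketch below. This is an illustration only, not part of this patch or of mv643xx_eth.c: build_skb(), netdev_alloc_frag(), skb_reserve(), skb_put(), eth_type_trans() and netif_receive_skb() are real kernel APIs of this era, while the function names, the hw_buf/buf_len/pkt_len parameters and the surrounding driver structure are hypothetical.

/*
 * Illustrative sketch only -- not taken from mv643xx_eth.c.  Shows the
 * build_skb() pattern: the rx buffer is a page fragment owned by the
 * driver, and the sk_buff is built just before netif_receive_skb().
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mm.h>

/* Size the fragment so build_skb() has room for skb_shared_info. */
static unsigned int example_frag_size(unsigned int buf_len)
{
        return SKB_DATA_ALIGN(NET_SKB_PAD + buf_len) +
               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

/* Called from the rx poll path once the hardware has filled hw_buf;
 * hw_buf was obtained earlier with netdev_alloc_frag(example_frag_size(buf_len)). */
static void example_rx_one(struct net_device *dev, void *hw_buf,
                           unsigned int buf_len, unsigned int pkt_len)
{
        struct sk_buff *skb;

        skb = build_skb(hw_buf, example_frag_size(buf_len));
        if (unlikely(!skb)) {
                put_page(virt_to_head_page(hw_buf));
                return;
        }

        skb_reserve(skb, NET_SKB_PAD);          /* headroom */
        skb_put(skb, pkt_len);                  /* length from rx descriptor */
        skb->protocol = eth_type_trans(skb, dev);

        netif_receive_skb(skb);                 /* deliver to the stack */
}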
Diffstat (limited to 'drivers/net/ethernet/marvell')
-rw-r--r--    drivers/net/ethernet/marvell/mv643xx_eth.c    18
1 file changed, 2 insertions(+), 16 deletions(-)
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 087b9e0669f1..84c13263c514 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -412,7 +412,6 @@ struct mv643xx_eth_private {
         u8 work_rx_refill;
 
         int skb_size;
-        struct sk_buff_head rx_recycle;
 
         /*
          * RX state.
@@ -673,9 +672,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
                 struct rx_desc *rx_desc;
                 int size;
 
-                skb = __skb_dequeue(&mp->rx_recycle);
-                if (skb == NULL)
-                        skb = netdev_alloc_skb(mp->dev, mp->skb_size);
+                skb = netdev_alloc_skb(mp->dev, mp->skb_size);
 
                 if (skb == NULL) {
                         mp->oom = 1;
@@ -989,14 +986,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
                                        desc->byte_cnt, DMA_TO_DEVICE);
                 }
 
-                if (skb != NULL) {
-                        if (skb_queue_len(&mp->rx_recycle) <
-                                        mp->rx_ring_size &&
-                            skb_recycle_check(skb, mp->skb_size))
-                                __skb_queue_head(&mp->rx_recycle, skb);
-                        else
-                                dev_kfree_skb(skb);
-                }
+                dev_kfree_skb(skb);
         }
 
         __netif_tx_unlock(nq);
@@ -2349,8 +2339,6 @@ static int mv643xx_eth_open(struct net_device *dev)
 
         napi_enable(&mp->napi);
 
-        skb_queue_head_init(&mp->rx_recycle);
-
         mp->int_mask = INT_EXT;
 
         for (i = 0; i < mp->rxq_count; i++) {
@@ -2445,8 +2433,6 @@ static int mv643xx_eth_stop(struct net_device *dev)
         mib_counters_update(mp);
         del_timer_sync(&mp->mib_counters_timer);
 
-        skb_queue_purge(&mp->rx_recycle);
-
         for (i = 0; i < mp->rxq_count; i++)
                 rxq_deinit(mp->rxq + i);
         for (i = 0; i < mp->txq_count; i++)