aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ethernet/stmicro/stmmac
diff options
context:
space:
mode:
authorEric Dumazet <edumazet@google.com>2012-10-05 02:23:55 -0400
committerDavid S. Miller <davem@davemloft.net>2012-10-07 00:40:54 -0400
commitacb600def2110b1310466c0e485c0d26299898ae (patch)
tree21036c7d0518601aba70dde0246ac229cd8dfc0c /drivers/net/ethernet/stmicro/stmmac
parent809d5fc9bf6589276a12bd4fd611e4c7ff9940c3 (diff)
net: remove skb recycling
Over time, skb recycling infrastructure got little interest and many bugs. Generic rx path skb allocation is now using page fragments for efficient GRO / TCP coalescing, and recycling a tx skb for rx path is not worth the pain. Last identified bug is that fat skbs can be recycled and it can end up using high order pages after few iterations. With help from Maxime Bizon, who pointed out that commit 87151b8689d (net: allow pskb_expand_head() to get maximum tailroom) introduced this regression for recycled skbs. Instead of fixing this bug, lets remove skb recycling. Drivers wanting really hot skbs should use build_skb() anyway, to allocate/populate sk_buff right before netif_receive_skb() Signed-off-by: Eric Dumazet <edumazet@google.com> Cc: Maxime Bizon <mbizon@freebox.fr> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/ethernet/stmicro/stmmac')
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c20
2 files changed, 2 insertions, 19 deletions
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index e872e1da3137..7d51a65ab099 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -50,7 +50,6 @@ struct stmmac_priv {
50 unsigned int dirty_rx; 50 unsigned int dirty_rx;
51 struct sk_buff **rx_skbuff; 51 struct sk_buff **rx_skbuff;
52 dma_addr_t *rx_skbuff_dma; 52 dma_addr_t *rx_skbuff_dma;
53 struct sk_buff_head rx_recycle;
54 53
55 struct net_device *dev; 54 struct net_device *dev;
56 dma_addr_t dma_rx_phy; 55 dma_addr_t dma_rx_phy;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 3be88331d17a..c6cdbc4eb05e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -747,18 +747,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
747 priv->hw->ring->clean_desc3(p); 747 priv->hw->ring->clean_desc3(p);
748 748
749 if (likely(skb != NULL)) { 749 if (likely(skb != NULL)) {
750 /* 750 dev_kfree_skb(skb);
751 * If there's room in the queue (limit it to size)
752 * we add this skb back into the pool,
753 * if it's the right size.
754 */
755 if ((skb_queue_len(&priv->rx_recycle) <
756 priv->dma_rx_size) &&
757 skb_recycle_check(skb, priv->dma_buf_sz))
758 __skb_queue_head(&priv->rx_recycle, skb);
759 else
760 dev_kfree_skb(skb);
761
762 priv->tx_skbuff[entry] = NULL; 751 priv->tx_skbuff[entry] = NULL;
763 } 752 }
764 753
@@ -1169,7 +1158,6 @@ static int stmmac_open(struct net_device *dev)
1169 priv->eee_enabled = stmmac_eee_init(priv); 1158 priv->eee_enabled = stmmac_eee_init(priv);
1170 1159
1171 napi_enable(&priv->napi); 1160 napi_enable(&priv->napi);
1172 skb_queue_head_init(&priv->rx_recycle);
1173 netif_start_queue(dev); 1161 netif_start_queue(dev);
1174 1162
1175 return 0; 1163 return 0;
@@ -1222,7 +1210,6 @@ static int stmmac_release(struct net_device *dev)
1222 kfree(priv->tm); 1210 kfree(priv->tm);
1223#endif 1211#endif
1224 napi_disable(&priv->napi); 1212 napi_disable(&priv->napi);
1225 skb_queue_purge(&priv->rx_recycle);
1226 1213
1227 /* Free the IRQ lines */ 1214 /* Free the IRQ lines */
1228 free_irq(dev->irq, dev); 1215 free_irq(dev->irq, dev);
@@ -1388,10 +1375,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
1388 if (likely(priv->rx_skbuff[entry] == NULL)) { 1375 if (likely(priv->rx_skbuff[entry] == NULL)) {
1389 struct sk_buff *skb; 1376 struct sk_buff *skb;
1390 1377
1391 skb = __skb_dequeue(&priv->rx_recycle); 1378 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
1392 if (skb == NULL)
1393 skb = netdev_alloc_skb_ip_align(priv->dev,
1394 bfsize);
1395 1379
1396 if (unlikely(skb == NULL)) 1380 if (unlikely(skb == NULL))
1397 break; 1381 break;