author     Eric Dumazet <edumazet@google.com>      2012-10-05 02:23:55 -0400
committer  David S. Miller <davem@davemloft.net>   2012-10-07 00:40:54 -0400
commit     acb600def2110b1310466c0e485c0d26299898ae (patch)
tree       21036c7d0518601aba70dde0246ac229cd8dfc0c /drivers/net/ethernet/calxeda/xgmac.c
parent     809d5fc9bf6589276a12bd4fd611e4c7ff9940c3 (diff)
net: remove skb recycling
Over time, the skb recycling infrastructure got little interest and many
bugs. Generic rx path skb allocation now uses page fragments for
efficient GRO / TCP coalescing, and recycling a tx skb for the rx path is
not worth the pain.

The last identified bug is that fat skbs can be recycled and end up
using high order pages after a few iterations.

With help from Maxime Bizon, who pointed out that commit 87151b8689d
(net: allow pskb_expand_head() to get maximum tailroom) introduced this
regression for recycled skbs.

Instead of fixing this bug, let's remove skb recycling.

Drivers wanting really hot skbs should use build_skb() anyway, to
allocate/populate the sk_buff right before netif_receive_skb().

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Maxime Bizon <mbizon@freebox.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
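The build_skb() pattern the commit message points drivers to can be sketched
roughly as below. This is an illustrative sketch only, not part of this patch
or of xgmac.c: the function my_rx_one and its buf/frag_size/pkt_len parameters
are assumed names, and the caller is assumed to have allocated buf with room
for struct skb_shared_info at its tail.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper: wrap an already DMA-filled receive buffer in an
 * sk_buff right before handing it to the stack, instead of keeping a
 * recycle queue of old tx skbs. */
static int my_rx_one(struct net_device *dev, void *buf,
		     unsigned int frag_size, unsigned int pkt_len)
{
	struct sk_buff *skb;

	/* frag_size must cover the data area plus the trailing
	 * struct skb_shared_info; build_skb() does no copy. */
	skb = build_skb(buf, frag_size);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, pkt_len);
	skb->protocol = eth_type_trans(skb, dev);
	return netif_receive_skb(skb);
}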
Diffstat (limited to 'drivers/net/ethernet/calxeda/xgmac.c')
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c  19
1 file changed, 2 insertions(+), 17 deletions(-)
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 2b4b4f529ab4..16814b34d4b6 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -375,7 +375,6 @@ struct xgmac_priv {
 	unsigned int tx_tail;
 
 	void __iomem *base;
-	struct sk_buff_head rx_recycle;
 	unsigned int dma_buf_sz;
 	dma_addr_t dma_rx_phy;
 	dma_addr_t dma_tx_phy;
@@ -672,9 +671,7 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
 		p = priv->dma_rx + entry;
 
 		if (priv->rx_skbuff[entry] == NULL) {
-			skb = __skb_dequeue(&priv->rx_recycle);
-			if (skb == NULL)
-				skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
+			skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
 			if (unlikely(skb == NULL))
 				break;
 
@@ -887,17 +884,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
 				       desc_get_buf_len(p), DMA_TO_DEVICE);
 		}
 
-		/*
-		 * If there's room in the queue (limit it to size)
-		 * we add this skb back into the pool,
-		 * if it's the right size.
-		 */
-		if ((skb_queue_len(&priv->rx_recycle) <
-			DMA_RX_RING_SZ) &&
-		     skb_recycle_check(skb, priv->dma_buf_sz))
-			__skb_queue_head(&priv->rx_recycle, skb);
-		else
-			dev_kfree_skb(skb);
+		dev_kfree_skb(skb);
 	}
 
 	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
@@ -1016,7 +1003,6 @@ static int xgmac_open(struct net_device *dev)
 					    dev->dev_addr);
 	}
 
-	skb_queue_head_init(&priv->rx_recycle);
 	memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));
 
 	/* Initialize the XGMAC and descriptors */
@@ -1053,7 +1039,6 @@ static int xgmac_stop(struct net_device *dev)
 	napi_disable(&priv->napi);
 
 	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
-	skb_queue_purge(&priv->rx_recycle);
 
 	/* Disable the MAC core */
 	xgmac_mac_disable(priv->base);