author		Eric Dumazet <edumazet@google.com>	2012-10-05 02:23:55 -0400
committer	David S. Miller <davem@davemloft.net>	2012-10-07 00:40:54 -0400
commit		acb600def2110b1310466c0e485c0d26299898ae (patch)
tree		21036c7d0518601aba70dde0246ac229cd8dfc0c /drivers
parent		809d5fc9bf6589276a12bd4fd611e4c7ff9940c3 (diff)
net: remove skb recycling
Over time, the skb recycling infrastructure got little interest and many bugs. Generic rx path skb allocation now uses page fragments for efficient GRO / TCP coalescing, and recycling a tx skb for the rx path is not worth the pain.

The last identified bug is that fat skbs can be recycled, and after a few iterations this can end up using high-order pages.

With help from Maxime Bizon, who pointed out that commit 87151b8689d ("net: allow pskb_expand_head() to get maximum tailroom") introduced this regression for recycled skbs.

Instead of fixing this bug, let's remove skb recycling. Drivers wanting really hot skbs should use build_skb() anyway, to allocate/populate the sk_buff right before netif_receive_skb().

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Maxime Bizon <mbizon@freebox.fr>
Signed-off-by: David S. Miller <davem@davemloft.net>
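For reference, the build_skb() pattern the message points drivers at looks roughly like this. This is a minimal sketch only, assuming a driver-owned receive buffer laid out with NET_SKB_PAD headroom and skb_shared_info tailroom; example_rx_one() and its parameters are hypothetical and not part of this patch.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical rx handler: assumes the hardware has already DMA'd a
 * 'len'-byte frame at 'data' + NET_SKB_PAD, and that 'frag_size' covers
 * the whole buffer, including the
 * SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) tailroom that
 * build_skb() requires.
 */
static void example_rx_one(struct net_device *dev, void *data,
			   unsigned int frag_size, unsigned int len)
{
	struct sk_buff *skb;

	/* Wrap the already-filled buffer in an sk_buff without copying,
	 * right before handing it to the stack.
	 */
	skb = build_skb(data, frag_size);
	if (unlikely(!skb))
		return;	/* a real driver would free or reuse 'data' here */

	skb_reserve(skb, NET_SKB_PAD);	/* skip the reserved headroom */
	skb_put(skb, len);		/* frame length from the rx descriptor */
	skb->protocol = eth_type_trans(skb, dev);

	netif_receive_skb(skb);
}

Compared with recycling tx skbs, this populates the sk_buff metadata at the moment the stack consumes it, which is the "really hot skbs" case the message refers to.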
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/ethernet/calxeda/xgmac.c			19
-rw-r--r--	drivers/net/ethernet/freescale/gianfar.c		27
-rw-r--r--	drivers/net/ethernet/freescale/gianfar.h		 2
-rw-r--r--	drivers/net/ethernet/freescale/ucc_geth.c		29
-rw-r--r--	drivers/net/ethernet/freescale/ucc_geth.h		 2
-rw-r--r--	drivers/net/ethernet/marvell/mv643xx_eth.c		18
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/stmmac.h		 1
-rw-r--r--	drivers/net/ethernet/stmicro/stmmac/stmmac_main.c	20
8 files changed, 16 insertions, 102 deletions
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 2b4b4f529ab4..16814b34d4b6 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -375,7 +375,6 @@ struct xgmac_priv {
 	unsigned int tx_tail;
 
 	void __iomem *base;
-	struct sk_buff_head rx_recycle;
 	unsigned int dma_buf_sz;
 	dma_addr_t dma_rx_phy;
 	dma_addr_t dma_tx_phy;
@@ -672,9 +671,7 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
 		p = priv->dma_rx + entry;
 
 		if (priv->rx_skbuff[entry] == NULL) {
-			skb = __skb_dequeue(&priv->rx_recycle);
-			if (skb == NULL)
-				skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
+			skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
 			if (unlikely(skb == NULL))
 				break;
 
@@ -887,17 +884,7 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
 				       desc_get_buf_len(p), DMA_TO_DEVICE);
 		}
 
-		/*
-		 * If there's room in the queue (limit it to size)
-		 * we add this skb back into the pool,
-		 * if it's the right size.
-		 */
-		if ((skb_queue_len(&priv->rx_recycle) <
-			DMA_RX_RING_SZ) &&
-		     skb_recycle_check(skb, priv->dma_buf_sz))
-			__skb_queue_head(&priv->rx_recycle, skb);
-		else
-			dev_kfree_skb(skb);
+		dev_kfree_skb(skb);
 	}
 
 	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
@@ -1016,7 +1003,6 @@ static int xgmac_open(struct net_device *dev)
 			dev->dev_addr);
 	}
 
-	skb_queue_head_init(&priv->rx_recycle);
 	memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));
 
 	/* Initialize the XGMAC and descriptors */
@@ -1053,7 +1039,6 @@ static int xgmac_stop(struct net_device *dev)
 	napi_disable(&priv->napi);
 
 	writel(0, priv->base + XGMAC_DMA_INTR_ENA);
-	skb_queue_purge(&priv->rx_recycle);
 
 	/* Disable the MAC core */
 	xgmac_mac_disable(priv->base);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index a1b52ec3b930..1d03dcdd5e56 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1765,7 +1765,6 @@ static void free_skb_resources(struct gfar_private *priv)
 			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
 			  priv->tx_queue[0]->tx_bd_base,
 			  priv->tx_queue[0]->tx_bd_dma_base);
-	skb_queue_purge(&priv->rx_recycle);
 }
 
 void gfar_start(struct net_device *dev)
@@ -1943,8 +1942,6 @@ static int gfar_enet_open(struct net_device *dev)
 
 	enable_napi(priv);
 
-	skb_queue_head_init(&priv->rx_recycle);
-
 	/* Initialize a bunch of registers */
 	init_registers(dev);
 
@@ -2533,16 +2530,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
 
 		bytes_sent += skb->len;
 
-		/* If there's room in the queue (limit it to rx_buffer_size)
-		 * we add this skb back into the pool, if it's the right size
-		 */
-		if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
-		    skb_recycle_check(skb, priv->rx_buffer_size +
-				      RXBUF_ALIGNMENT)) {
-			gfar_align_skb(skb);
-			skb_queue_head(&priv->rx_recycle, skb);
-		} else
-			dev_kfree_skb_any(skb);
+		dev_kfree_skb_any(skb);
 
 		tx_queue->tx_skbuff[skb_dirtytx] = NULL;
 
@@ -2608,7 +2596,7 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
 static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
 {
 	struct gfar_private *priv = netdev_priv(dev);
-	struct sk_buff *skb = NULL;
+	struct sk_buff *skb;
 
 	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
 	if (!skb)
@@ -2621,14 +2609,7 @@ static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
 
 struct sk_buff *gfar_new_skb(struct net_device *dev)
 {
-	struct gfar_private *priv = netdev_priv(dev);
-	struct sk_buff *skb = NULL;
-
-	skb = skb_dequeue(&priv->rx_recycle);
-	if (!skb)
-		skb = gfar_alloc_skb(dev);
-
-	return skb;
+	return gfar_alloc_skb(dev);
 }
 
 static inline void count_errors(unsigned short status, struct net_device *dev)
@@ -2787,7 +2768,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
 			if (unlikely(!newskb))
 				newskb = skb;
 			else if (skb)
-				skb_queue_head(&priv->rx_recycle, skb);
+				dev_kfree_skb(skb);
 		} else {
 			/* Increment the number of packets */
 			rx_queue->stats.rx_packets++;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 4141ef2ddafc..22eabc13ca99 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1080,8 +1080,6 @@ struct gfar_private {
 
 	u32 cur_filer_idx;
 
-	struct sk_buff_head rx_recycle;
-
 	/* RX queue filer rule set*/
 	struct ethtool_rx_list rx_list;
 	struct mutex rx_queue_access;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 164288439220..dfa0aaaab009 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -209,14 +209,12 @@ static struct list_head *dequeue(struct list_head *lh)
 static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
 				   u8 __iomem *bd)
 {
-	struct sk_buff *skb = NULL;
+	struct sk_buff *skb;
 
-	skb = __skb_dequeue(&ugeth->rx_recycle);
+	skb = netdev_alloc_skb(ugeth->ndev,
+			       ugeth->ug_info->uf_info.max_rx_buf_length +
+			       UCC_GETH_RX_DATA_BUF_ALIGNMENT);
 	if (!skb)
-		skb = netdev_alloc_skb(ugeth->ndev,
-				       ugeth->ug_info->uf_info.max_rx_buf_length +
-				       UCC_GETH_RX_DATA_BUF_ALIGNMENT);
-	if (skb == NULL)
 		return NULL;
 
 	/* We need the data buffer to be aligned properly.  We will reserve
@@ -2020,8 +2018,6 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
 		iounmap(ugeth->ug_regs);
 		ugeth->ug_regs = NULL;
 	}
-
-	skb_queue_purge(&ugeth->rx_recycle);
 }
 
 static void ucc_geth_set_multi(struct net_device *dev)
@@ -2230,8 +2226,6 @@ static int ucc_struct_init(struct ucc_geth_private *ugeth)
 		return -ENOMEM;
 	}
 
-	skb_queue_head_init(&ugeth->rx_recycle);
-
 	return 0;
 }
 
@@ -3274,12 +3268,7 @@ static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit
 			if (netif_msg_rx_err(ugeth))
 				ugeth_err("%s, %d: ERROR!!! skb - 0x%08x",
 					  __func__, __LINE__, (u32) skb);
-			if (skb) {
-				skb->data = skb->head + NET_SKB_PAD;
-				skb->len = 0;
-				skb_reset_tail_pointer(skb);
-				__skb_queue_head(&ugeth->rx_recycle, skb);
-			}
+			dev_kfree_skb(skb);
 
 			ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
 			dev->stats.rx_dropped++;
@@ -3349,13 +3338,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
 
 		dev->stats.tx_packets++;
 
-		if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN &&
-		    skb_recycle_check(skb,
-				    ugeth->ug_info->uf_info.max_rx_buf_length +
-				    UCC_GETH_RX_DATA_BUF_ALIGNMENT))
-			__skb_queue_head(&ugeth->rx_recycle, skb);
-		else
-			dev_kfree_skb(skb);
+		dev_kfree_skb(skb);
 
 		ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
 		ugeth->skb_dirtytx[txQ] =
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index f71b3e7b12de..75f337163ce3 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -1214,8 +1214,6 @@ struct ucc_geth_private {
 	/* index of the first skb which hasn't been transmitted yet. */
 	u16 skb_dirtytx[NUM_TX_QUEUES];
 
-	struct sk_buff_head rx_recycle;
-
 	struct ugeth_mii_info *mii_info;
 	struct phy_device *phydev;
 	phy_interface_t phy_interface;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 087b9e0669f1..84c13263c514 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -412,7 +412,6 @@ struct mv643xx_eth_private {
 	u8 work_rx_refill;
 
 	int skb_size;
-	struct sk_buff_head rx_recycle;
 
 	/*
 	 * RX state.
@@ -673,9 +672,7 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
 		struct rx_desc *rx_desc;
 		int size;
 
-		skb = __skb_dequeue(&mp->rx_recycle);
-		if (skb == NULL)
-			skb = netdev_alloc_skb(mp->dev, mp->skb_size);
+		skb = netdev_alloc_skb(mp->dev, mp->skb_size);
 
 		if (skb == NULL) {
 			mp->oom = 1;
@@ -989,14 +986,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 					 desc->byte_cnt, DMA_TO_DEVICE);
 		}
 
-		if (skb != NULL) {
-			if (skb_queue_len(&mp->rx_recycle) <
-					mp->rx_ring_size &&
-			    skb_recycle_check(skb, mp->skb_size))
-				__skb_queue_head(&mp->rx_recycle, skb);
-			else
-				dev_kfree_skb(skb);
-		}
+		dev_kfree_skb(skb);
 	}
 
 	__netif_tx_unlock(nq);
@@ -2349,8 +2339,6 @@ static int mv643xx_eth_open(struct net_device *dev)
 
 	napi_enable(&mp->napi);
 
-	skb_queue_head_init(&mp->rx_recycle);
-
 	mp->int_mask = INT_EXT;
 
 	for (i = 0; i < mp->rxq_count; i++) {
@@ -2445,8 +2433,6 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	mib_counters_update(mp);
 	del_timer_sync(&mp->mib_counters_timer);
 
-	skb_queue_purge(&mp->rx_recycle);
-
 	for (i = 0; i < mp->rxq_count; i++)
 		rxq_deinit(mp->rxq + i);
 	for (i = 0; i < mp->txq_count; i++)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index e872e1da3137..7d51a65ab099 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -50,7 +50,6 @@ struct stmmac_priv {
 	unsigned int dirty_rx;
 	struct sk_buff **rx_skbuff;
 	dma_addr_t *rx_skbuff_dma;
-	struct sk_buff_head rx_recycle;
 
 	struct net_device *dev;
 	dma_addr_t dma_rx_phy;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 3be88331d17a..c6cdbc4eb05e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -747,18 +747,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
 			priv->hw->ring->clean_desc3(p);
 
 		if (likely(skb != NULL)) {
-			/*
-			 * If there's room in the queue (limit it to size)
-			 * we add this skb back into the pool,
-			 * if it's the right size.
-			 */
-			if ((skb_queue_len(&priv->rx_recycle) <
-				priv->dma_rx_size) &&
-			    skb_recycle_check(skb, priv->dma_buf_sz))
-				__skb_queue_head(&priv->rx_recycle, skb);
-			else
-				dev_kfree_skb(skb);
-
+			dev_kfree_skb(skb);
 			priv->tx_skbuff[entry] = NULL;
 		}
 
@@ -1169,7 +1158,6 @@ static int stmmac_open(struct net_device *dev)
 	priv->eee_enabled = stmmac_eee_init(priv);
 
 	napi_enable(&priv->napi);
-	skb_queue_head_init(&priv->rx_recycle);
 	netif_start_queue(dev);
 
 	return 0;
@@ -1222,7 +1210,6 @@ static int stmmac_release(struct net_device *dev)
 	kfree(priv->tm);
 #endif
 	napi_disable(&priv->napi);
-	skb_queue_purge(&priv->rx_recycle);
 
 	/* Free the IRQ lines */
 	free_irq(dev->irq, dev);
@@ -1388,10 +1375,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 		if (likely(priv->rx_skbuff[entry] == NULL)) {
 			struct sk_buff *skb;
 
-			skb = __skb_dequeue(&priv->rx_recycle);
-			if (skb == NULL)
-				skb = netdev_alloc_skb_ip_align(priv->dev,
-								bfsize);
+			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
 
 			if (unlikely(skb == NULL))
 				break;