author		Andy Fleming <afleming@freescale.com>	2009-02-04 19:43:16 -0500
committer	David S. Miller <davem@davemloft.net>	2009-02-04 19:43:16 -0500
commit		0fd56bb5be6455d0d42241e65aed057244665e5e (patch)
tree		45c37c6aa44d8719797a9ad5a4f2d6018a4305da /drivers/net/gianfar.c
parent		1577ecef766650a57fceb171acee2b13cbfaf1d3 (diff)
gianfar: Add support for skb recycling
Signed-off-by: Andy Fleming <afleming@freescale.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/gianfar.c')
-rw-r--r--	drivers/net/gianfar.c	23
1 file changed, 19 insertions(+), 4 deletions(-)
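
In outline, the patch gives the driver a small per-device pool of skbs (priv->rx_recycle): the TX completion path parks freed skbs there when they are large enough to serve as RX buffers, and the RX refill path pulls from that pool before falling back to the allocator. The sketch below condenses that pattern from the hunks that follow; the two wrapper functions are illustrative only (in the driver the logic sits inline in gfar_clean_tx_ring() and gfar_new_skb()), and the snippet assumes the gianfar driver's own types and macros (struct gfar_private, RXBUF_ALIGNMENT), so it is not compilable on its own.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* TX completion: park the skb in the recycle pool when there is room
 * (capped at rx_ring_size) and skb_recycle_check() confirms it can be
 * reset to a full-size RX buffer; otherwise free it as before. */
static void gfar_recycle_or_free(struct gfar_private *priv,
				 struct sk_buff *skb)
{
	if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size &&
	    skb_recycle_check(skb, priv->rx_buffer_size + RXBUF_ALIGNMENT))
		__skb_queue_head(&priv->rx_recycle, skb);
	else
		dev_kfree_skb_any(skb);
}

/* RX refill: prefer a recycled skb, fall back to netdev_alloc_skb(). */
static struct sk_buff *gfar_get_rx_skb(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct sk_buff *skb;

	skb = __skb_dequeue(&priv->rx_recycle);
	if (!skb)
		skb = netdev_alloc_skb(dev,
				priv->rx_buffer_size + RXBUF_ALIGNMENT);
	return skb;
}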
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index bd21b6d5f13c..33de25602b32 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1181,6 +1181,8 @@ static int gfar_enet_open(struct net_device *dev)
 
 	napi_enable(&priv->napi);
 
+	skb_queue_head_init(&priv->rx_recycle);
+
 	/* Initialize a bunch of registers */
 	init_registers(dev);
 
@@ -1399,6 +1401,7 @@ static int gfar_close(struct net_device *dev)
 
 	napi_disable(&priv->napi);
 
+	skb_queue_purge(&priv->rx_recycle);
 	cancel_work_sync(&priv->reset_task);
 	stop_gfar(dev);
 
@@ -1595,7 +1598,17 @@ static int gfar_clean_tx_ring(struct net_device *dev)
 			bdp = next_txbd(bdp, base, tx_ring_size);
 		}
 
-		dev_kfree_skb_any(skb);
+		/*
+		 * If there's room in the queue (limit it to rx_buffer_size)
+		 * we add this skb back into the pool, if it's the right size
+		 */
+		if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size &&
+				skb_recycle_check(skb, priv->rx_buffer_size +
+					RXBUF_ALIGNMENT))
+			__skb_queue_head(&priv->rx_recycle, skb);
+		else
+			dev_kfree_skb_any(skb);
+
 		priv->tx_skbuff[skb_dirtytx] = NULL;
 
 		skb_dirtytx = (skb_dirtytx + 1) &
@@ -1668,8 +1681,10 @@ struct sk_buff * gfar_new_skb(struct net_device *dev)
 	struct gfar_private *priv = netdev_priv(dev);
 	struct sk_buff *skb = NULL;
 
-	/* We have to allocate the skb, so keep trying till we succeed */
-	skb = netdev_alloc_skb(dev, priv->rx_buffer_size + RXBUF_ALIGNMENT);
+	skb = __skb_dequeue(&priv->rx_recycle);
+	if (!skb)
+		skb = netdev_alloc_skb(dev,
+				priv->rx_buffer_size + RXBUF_ALIGNMENT);
 
 	if (!skb)
 		return NULL;
@@ -1817,7 +1832,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
 			if (unlikely(!newskb))
 				newskb = skb;
 			else if (skb)
-				dev_kfree_skb_any(skb);
+				__skb_queue_head(&priv->rx_recycle, skb);
 		} else {
 			/* Increment the number of packets */
 			dev->stats.rx_packets++;
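
Two points about the shape of the change are worth noting. The pool is bounded by rx_ring_size, so recycling never holds more skbs than the RX ring could consume, and skb_recycle_check() is what decides whether a TX skb's size and state allow it to be reused as an RX buffer. The RX clean hunk gives discarded frames the same treatment: when a replacement buffer has already been allocated, the skb carrying the dropped frame is pushed back onto rx_recycle instead of being freed.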