author     Stephen Hemminger <shemminger@vyatta.com>	2009-06-17 03:30:38 -0400
committer  David S. Miller <davem@davemloft.net>	2009-06-17 21:46:54 -0400
commit     bd1c6869f14f88aa82587ff51303e72dc7eec30e (patch)
tree       73b0f1bb457cdf15065b77df847c9af68f49df60 /drivers/net
parent     e9c1be80a7403fb817ec6229ec20a39e377cc4ce (diff)
sky2: skb recycling
This patch implements skb recycling. It reclaims transmitted skb's
for use in the receive ring.
Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
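
For readers unfamiliar with the mechanism: this is the generic sk_buff recycling idiom of this kernel generation. At transmit-completion time a buffer that passes skb_recycle_check() is parked on a per-port queue instead of being freed, and the receive-refill path tries that queue before falling back to netdev_alloc_skb(). The sketch below only illustrates that idiom, it is not the sky2 code itself; the names example_port, example_rx_get_skb, example_tx_done_skb and rx_buf_size are made up for the example.

/* Illustrative sketch of the recycling idiom only -- not the sky2 code. */
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct example_port {
	struct net_device   *netdev;
	struct sk_buff_head  rx_recycle;   /* tx skbs saved for rx reuse */
	unsigned int         rx_buf_size;  /* size every rx buffer must provide */
	u16                  rx_pending;   /* cap on the recycle queue length */
};

/* rx refill: prefer a recycled skb, fall back to a fresh allocation */
static struct sk_buff *example_rx_get_skb(struct example_port *p)
{
	struct sk_buff *skb = __skb_dequeue(&p->rx_recycle);

	if (!skb)
		skb = netdev_alloc_skb(p->netdev, p->rx_buf_size);
	return skb;
}

/* tx completion: keep the skb for rx if it is clean and big enough */
static void example_tx_done_skb(struct example_port *p, struct sk_buff *skb)
{
	if (skb_queue_len(&p->rx_recycle) < p->rx_pending &&
	    skb_recycle_check(skb, p->rx_buf_size))
		__skb_queue_head(&p->rx_recycle, skb);
	else
		dev_kfree_skb_any(skb);
}

As in the patch, the queue is initialized with skb_queue_head_init() before the rx ring is filled and drained with skb_queue_purge() when the ring is torn down. The lock-free __skb_queue_head()/__skb_dequeue() variants are sufficient only as long as the enqueue (tx completion) and dequeue (rx refill) paths cannot run concurrently; in sky2 both are driven from the NAPI poll loop.
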
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/sky2.c  38
-rw-r--r--  drivers/net/sky2.h   1
2 files changed, 28 insertions(+), 11 deletions(-)
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 6e95d2f54106..4f2afc770f8f 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1176,6 +1176,7 @@ static void sky2_rx_clean(struct sky2_port *sky2)
 			re->skb = NULL;
 		}
 	}
+	skb_queue_purge(&sky2->rx_recycle);
 }
 
 /* Basic MII support */
@@ -1252,6 +1253,12 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
 }
 #endif
 
+/* Amount of required worst case padding in rx buffer */
+static inline unsigned sky2_rx_pad(const struct sky2_hw *hw)
+{
+	return (hw->flags & SKY2_HW_RAM_BUFFER) ? 8 : 2;
+}
+
 /*
  * Allocate an skb for receiving. If the MTU is large enough
  * make the skb non-linear with a fragment list of pages.
@@ -1261,6 +1268,13 @@ static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2)
 	struct sk_buff *skb;
 	int i;
 
+	skb = __skb_dequeue(&sky2->rx_recycle);
+	if (!skb)
+		skb = netdev_alloc_skb(sky2->netdev, sky2->rx_data_size
+				       + sky2_rx_pad(sky2->hw));
+	if (!skb)
+		goto nomem;
+
 	if (sky2->hw->flags & SKY2_HW_RAM_BUFFER) {
 		unsigned char *start;
 		/*
@@ -1269,18 +1283,10 @@ static struct sk_buff *sky2_rx_alloc(struct sky2_port *sky2)
 		 * The buffer returned from netdev_alloc_skb is
 		 * aligned except if slab debugging is enabled.
 		 */
-		skb = netdev_alloc_skb(sky2->netdev, sky2->rx_data_size + 8);
-		if (!skb)
-			goto nomem;
 		start = PTR_ALIGN(skb->data, 8);
 		skb_reserve(skb, start - skb->data);
-	} else {
-		skb = netdev_alloc_skb(sky2->netdev,
-				       sky2->rx_data_size + NET_IP_ALIGN);
-		if (!skb)
-			goto nomem;
+	} else
 		skb_reserve(skb, NET_IP_ALIGN);
-	}
 
 	for (i = 0; i < sky2->rx_nfrags; i++) {
 		struct page *page = alloc_page(GFP_ATOMIC);
@@ -1357,6 +1363,8 @@ static int sky2_rx_start(struct sky2_port *sky2)
 
 	sky2->rx_data_size = size;
 
+	skb_queue_head_init(&sky2->rx_recycle);
+
 	/* Fill Rx ring */
 	for (i = 0; i < sky2->rx_pending; i++) {
 		re = sky2->rx_ring + i;
@@ -1764,14 +1772,22 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
 		}
 
 		if (le->ctrl & EOP) {
+			struct sk_buff *skb = re->skb;
+
 			if (unlikely(netif_msg_tx_done(sky2)))
 				printk(KERN_DEBUG "%s: tx done %u\n",
 				       dev->name, idx);
 
 			dev->stats.tx_packets++;
-			dev->stats.tx_bytes += re->skb->len;
+			dev->stats.tx_bytes += skb->len;
+
+			if (skb_queue_len(&sky2->rx_recycle) < sky2->rx_pending
+			    && skb_recycle_check(skb, sky2->rx_data_size
+						 + sky2_rx_pad(sky2->hw)))
+				__skb_queue_head(&sky2->rx_recycle, skb);
+			else
+				dev_kfree_skb_any(skb);
 
-			dev_kfree_skb_any(re->skb);
 			sky2->tx_next = RING_NEXT(idx, TX_RING_SIZE);
 		}
 	}
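
A detail worth noting before the sky2.h hunk: the same size expression, sky2->rx_data_size + sky2_rx_pad(sky2->hw), is used both for the fallback allocation in sky2_rx_alloc() and for the skb_recycle_check() call in sky2_tx_complete(). skb_recycle_check() only accepts a buffer whose data area can hold that size, so any skb parked on rx_recycle is guaranteed to fit the rx payload plus the worst-case alignment padding (8 bytes when the RAM-buffer alignment workaround applies, 2 bytes for the usual NET_IP_ALIGN reserve otherwise). If the two call sites ever drifted apart, recycled buffers could come up short; a shared helper along these lines (purely hypothetical, not part of the patch) would make that invariant explicit:

/* Hypothetical helper, not in the patch: the one buffer size that the rx
 * allocation fallback and the tx-side skb_recycle_check() must agree on. */
static inline unsigned sky2_rx_alloc_size(const struct sky2_port *sky2)
{
	return sky2->rx_data_size + sky2_rx_pad(sky2->hw);
}
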
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 92fb24b27d45..b5549c9e5107 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2028,6 +2028,7 @@ struct sky2_port {
 	u16		     rx_pending;
 	u16		     rx_data_size;
 	u16		     rx_nfrags;
+	struct sk_buff_head  rx_recycle;
 
 #ifdef SKY2_VLAN_TAG_USED
 	u16		     rx_tag;