author		Lennert Buytenhek <buytenh@marvell.com>	2008-10-01 05:33:57 -0400
committer	David S. Miller <davem@davemloft.net>	2008-10-01 05:33:57 -0400
commit		2bcb4b0f111053d0e8fb4366f0708395d997e93a (patch)
tree		835a4b8d4ed245ba05a76812ce0cf7cba83daf87 /drivers/net/mv643xx_eth.c
parent		04a4bb55bcf35b63d40fd2725e58599ff8310dd7 (diff)
mv643xx_eth: hook up skb recycling
This gives a nice increase in the maximum loss-free packet forwarding
rate in routing workloads.

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
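Before the diff, the idea in isolation: keep a bounded free list of
fixed-size buffers in front of the allocator, so that RX refill takes
from the list first and TX completion feeds suitable buffers back
instead of freeing them. A standalone C model of that shape follows;
pool_get/pool_put, POOL_MAX, and BUF_SIZE are illustrative names, not
driver code:

	#include <stdlib.h>

	#define POOL_MAX 128	/* cap, like the RX ring size bound in the patch */
	#define BUF_SIZE 1536	/* fixed buffer size, like mp->skb_size */

	struct buf {
		struct buf *next;
		size_t size;
		unsigned char data[BUF_SIZE];
	};

	static struct buf *pool_head;
	static int pool_len;

	/* RX refill path: recycle first, hit the allocator only on a miss. */
	static struct buf *pool_get(void)
	{
		struct buf *b = pool_head;

		if (b != NULL) {
			pool_head = b->next;
			pool_len--;
			return b;
		}
		b = malloc(sizeof(*b));
		if (b != NULL)
			b->size = BUF_SIZE;
		return b;
	}

	/* TX completion path: keep the buffer if it fits and the list is short. */
	static void pool_put(struct buf *b)
	{
		if (pool_len < POOL_MAX && b->size == BUF_SIZE) {
			b->next = pool_head;
			pool_head = b;
			pool_len++;
		} else {
			free(b);
		}
	}

In the driver below, the list is mp->rx_recycle, the cap is the default
RX ring size, and the suitability test is skb_recycle_check().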
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r--	drivers/net/mv643xx_eth.c	66
1 file changed, 45 insertions(+), 21 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 55aa8ba7e0f2..372811ade9f5 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -370,6 +370,9 @@ struct mv643xx_eth_private {
 	u8 work_rx_refill;
 	u8 work_rx_oom;
 
+	int skb_size;
+	struct sk_buff_head rx_recycle;
+
 	/*
 	 * RX state.
 	 */
@@ -566,31 +569,19 @@ static int rxq_process(struct rx_queue *rxq, int budget)
 static int rxq_refill(struct rx_queue *rxq, int budget)
 {
 	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
-	int skb_size;
 	int refilled;
 
-	/*
-	 * Reserve 2+14 bytes for an ethernet header (the hardware
-	 * automatically prepends 2 bytes of dummy data to each
-	 * received packet), 16 bytes for up to four VLAN tags, and
-	 * 4 bytes for the trailing FCS -- 36 bytes total.
-	 */
-	skb_size = rxq_to_mp(rxq)->dev->mtu + 36;
-
-	/*
-	 * Make sure that the skb size is a multiple of 8 bytes, as
-	 * the lower three bits of the receive descriptor's buffer
-	 * size field are ignored by the hardware.
-	 */
-	skb_size = (skb_size + 7) & ~7;
-
 	refilled = 0;
 	while (refilled < budget && rxq->rx_desc_count < rxq->rx_ring_size) {
 		struct sk_buff *skb;
 		int unaligned;
 		int rx;
 
-		skb = dev_alloc_skb(skb_size + dma_get_cache_alignment() - 1);
+		skb = __skb_dequeue(&mp->rx_recycle);
+		if (skb == NULL)
+			skb = dev_alloc_skb(mp->skb_size +
+					    dma_get_cache_alignment() - 1);
+
 		if (skb == NULL) {
 			mp->work_rx_oom |= 1 << rxq->index;
 			goto oom;
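When the recycle list misses, the fallback dev_alloc_skb() above
over-allocates by dma_get_cache_alignment() - 1 bytes, and the
unaligned local declared in this hunk is used further down in
rxq_refill(), outside the hunk, to push skb->data up to a cache-line
boundary before the buffer is mapped for DMA. A sketch of that fix-up,
assuming a power-of-two alignment (illustrative, not the exact driver
lines):

	/*
	 * Consume part of the extra headroom so skb->data lands on a
	 * cache-line boundary; skb_reserve() advances data and tail.
	 */
	unaligned = (unsigned long)skb->data &
				(dma_get_cache_alignment() - 1);
	if (unaligned)
		skb_reserve(skb, dma_get_cache_alignment() - unaligned);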
@@ -608,8 +599,8 @@ static int rxq_refill(struct rx_queue *rxq, int budget)
 		rxq->rx_used_desc = 0;
 
 		rxq->rx_desc_area[rx].buf_ptr = dma_map_single(NULL, skb->data,
-						skb_size, DMA_FROM_DEVICE);
-		rxq->rx_desc_area[rx].buf_size = skb_size;
+						mp->skb_size, DMA_FROM_DEVICE);
+		rxq->rx_desc_area[rx].buf_size = mp->skb_size;
 		rxq->rx_skb[rx] = skb;
 		wmb();
 		rxq->rx_desc_area[rx].cmd_sts = BUFFER_OWNED_BY_DMA |
@@ -904,8 +895,14 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
 				       desc->byte_cnt, DMA_TO_DEVICE);
 		}
 
-		if (skb)
-			dev_kfree_skb(skb);
+		if (skb != NULL) {
+			if (skb_queue_len(&mp->rx_recycle) <
+					mp->default_rx_ring_size &&
+			    skb_recycle_check(skb, mp->skb_size))
+				__skb_queue_head(&mp->rx_recycle, skb);
+			else
+				dev_kfree_skb(skb);
+		}
 	}
 
 	__netif_tx_unlock(nq);
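The recycle gate above combines a depth bound with skb_recycle_check(),
the helper introduced by the parent commit (04a4bb55): it rejects skbs
that are shared, cloned, or too small for mp->skb_size, and resets the
ones it accepts so they can go straight back onto an RX ring. The same
condition, rewritten as a helper for readability (hypothetical name;
the driver open-codes it):

	/*
	 * Hypothetical helper; txq_reclaim() open-codes this. Returns
	 * nonzero if the just-transmitted skb may be reused as an RX
	 * buffer.
	 */
	static int txq_skb_recyclable(struct mv643xx_eth_private *mp,
				      struct sk_buff *skb)
	{
		/* Never hold more than one RX ring's worth of spares. */
		if (skb_queue_len(&mp->rx_recycle) >= mp->default_rx_ring_size)
			return 0;

		/* Checks shareability and size; resets the skb on success. */
		return skb_recycle_check(skb, mp->skb_size);
	}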
@@ -2042,6 +2039,26 @@ static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int delay)
 	wrl(mp, TX_FIFO_URGENT_THRESHOLD(mp->port_num), (coal & 0x3fff) << 4);
 }
 
+static void mv643xx_eth_recalc_skb_size(struct mv643xx_eth_private *mp)
+{
+	int skb_size;
+
+	/*
+	 * Reserve 2+14 bytes for an ethernet header (the hardware
+	 * automatically prepends 2 bytes of dummy data to each
+	 * received packet), 16 bytes for up to four VLAN tags, and
+	 * 4 bytes for the trailing FCS -- 36 bytes total.
+	 */
+	skb_size = mp->dev->mtu + 36;
+
+	/*
+	 * Make sure that the skb size is a multiple of 8 bytes, as
+	 * the lower three bits of the receive descriptor's buffer
+	 * size field are ignored by the hardware.
+	 */
+	mp->skb_size = (skb_size + 7) & ~7;
+}
+
 static int mv643xx_eth_open(struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
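Worked through for a few representative MTUs: 1500 + 36 = 1536 is
already a multiple of 8; 1518 + 36 = 1554 rounds up to 1560; a jumbo
9000 + 36 = 9036 rounds up to 9040. As standalone C:

	#include <stdio.h>

	int main(void)
	{
		static const int mtus[] = { 1500, 1518, 9000 };
		size_t i;

		for (i = 0; i < sizeof(mtus) / sizeof(mtus[0]); i++) {
			/* 36 = 2 (hw pad) + 14 (ethernet) + 16 (VLAN) + 4 (FCS) */
			int skb_size = (mtus[i] + 36 + 7) & ~7;

			printf("mtu %4d -> skb_size %4d\n", mtus[i], skb_size);
		}
		return 0;
	}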
@@ -2061,8 +2078,12 @@ static int mv643xx_eth_open(struct net_device *dev)
 
 	init_mac_tables(mp);
 
+	mv643xx_eth_recalc_skb_size(mp);
+
 	napi_enable(&mp->napi);
 
+	skb_queue_head_init(&mp->rx_recycle);
+
 	for (i = 0; i < mp->rxq_count; i++) {
 		err = rxq_init(mp, i);
 		if (err) {
@@ -2158,6 +2179,8 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	mv643xx_eth_get_stats(dev);
 	mib_counters_update(mp);
 
+	skb_queue_purge(&mp->rx_recycle);
+
 	for (i = 0; i < mp->rxq_count; i++)
 		rxq_deinit(mp->rxq + i);
 	for (i = 0; i < mp->txq_count; i++)
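Lifecycle note: the recycle list only holds buffers while the interface
is up. The skb_queue_head_init() added to mv643xx_eth_open() above
pairs with this skb_queue_purge(), which frees every skb still queued,
so nothing on rx_recycle survives an up/down cycle.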
@@ -2184,6 +2207,7 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
 		return -EINVAL;
 
 	dev->mtu = new_mtu;
+	mv643xx_eth_recalc_skb_size(mp);
 	tx_set_rate(mp, 1000000000, 16777216);
 
 	if (!netif_running(dev))
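One subtlety: rxq_refill() does not re-check the size of skbs it takes
off rx_recycle, so the list must never hold buffers smaller than the
current mp->skb_size. That appears to hold here because an MTU change
on a running interface is handled further down in this function,
outside the hunk, by stopping and re-opening the port, and
mv643xx_eth_stop() now purges the recycle list; on a downed interface
the list is already empty.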