author    Lennert Buytenhek <buytenh@wantstofly.org>  2008-08-26 04:23:22 -0400
committer Lennert Buytenhek <buytenh@marvell.com>     2008-09-05 00:33:58 -0400
commit    f7981c1c67b53abb4a7d8a501e68585b9826179a (patch)
tree      3f94af4b599aaf693da7e03763a6c5efdcb86f8e /drivers
parent    17cd0a59f9c34164c4f3bfe404894f5285bac112 (diff)
mv643xx_eth: require contiguous receive and transmit queue numbering

Simplify receive and transmit queue handling by requiring the set of
queue numbers to be contiguous, starting from zero.

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Diffstat (limited to 'drivers')
 drivers/net/mv643xx_eth.c | 123 ++++++++++++----------------------
 1 file changed, 44 insertions(+), 79 deletions(-)
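Note: the heart of the change is visible in every loop below. Previously a
sparse set of queues could be enabled via a bitmask (a queue set like
{0, 2, 5} was legal), so every per-queue loop had to walk all eight slots
and test the mask; with contiguous numbering, a plain count suffices. The
two patterns, taken verbatim from the diff:

	/* before: sparse queue set described by a bitmask */
	for (i = 0; i < 8; i++)
		if (mp->txq_mask & (1 << i))
			txq_reclaim(mp->txq + i, 0);

	/* after: queues 0..txq_count-1 are guaranteed to exist */
	for (i = 0; i < mp->txq_count; i++)
		txq_reclaim(mp->txq + i, 0);

A side effect: the "primary" queue, previously computed as
fls(mp->txq_mask) - 1 (the highest-numbered enabled queue), is now by
definition queue 0, so the rxq_primary/txq_primary fields disappear.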
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 53cfd01b405d..c41541d8710f 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -360,8 +360,7 @@ struct mv643xx_eth_private {
 	int default_rx_ring_size;
 	unsigned long rx_desc_sram_addr;
 	int rx_desc_sram_size;
-	u8 rxq_mask;
-	int rxq_primary;
+	int rxq_count;
 	struct napi_struct napi;
 	struct timer_list rx_oom;
 	struct rx_queue rxq[8];
@@ -372,8 +371,7 @@ struct mv643xx_eth_private {
 	int default_tx_ring_size;
 	unsigned long tx_desc_sram_addr;
 	int tx_desc_sram_size;
-	u8 txq_mask;
-	int txq_primary;
+	int txq_count;
 	struct tx_queue txq[8];
 #ifdef MV643XX_ETH_TX_FAST_REFILL
 	int tx_clean_threshold;
@@ -455,7 +453,7 @@ static void __txq_maybe_wake(struct tx_queue *txq)
 	 * netif_{stop,wake}_queue() flow control only applies to
 	 * the primary queue.
 	 */
-	BUG_ON(txq->index != mp->txq_primary);
+	BUG_ON(txq->index != 0);
 
 	if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
 		netif_wake_queue(mp->dev);
@@ -626,13 +624,12 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 #ifdef MV643XX_ETH_TX_FAST_REFILL
 	if (++mp->tx_clean_threshold > 5) {
 		mp->tx_clean_threshold = 0;
-		for (i = 0; i < 8; i++)
-			if (mp->txq_mask & (1 << i))
-				txq_reclaim(mp->txq + i, 0);
+		for (i = 0; i < mp->txq_count; i++)
+			txq_reclaim(mp->txq + i, 0);
 
 		if (netif_carrier_ok(mp->dev)) {
 			spin_lock_irq(&mp->lock);
-			__txq_maybe_wake(mp->txq + mp->txq_primary);
+			__txq_maybe_wake(mp->txq);
 			spin_unlock_irq(&mp->lock);
 		}
 	}
@@ -640,13 +637,11 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 
 	work_done = 0;
 	oom = 0;
-	for (i = 7; work_done < budget && i >= 0; i--) {
-		if (mp->rxq_mask & (1 << i)) {
-			struct rx_queue *rxq = mp->rxq + i;
+	for (i = mp->rxq_count - 1; work_done < budget && i >= 0; i--) {
+		struct rx_queue *rxq = mp->rxq + i;
 
-			work_done += rxq_process(rxq, budget - work_done);
-			work_done += rxq_refill(rxq, budget - work_done, &oom);
-		}
+		work_done += rxq_process(rxq, budget - work_done);
+		work_done += rxq_refill(rxq, budget - work_done, &oom);
 	}
 
 	if (work_done < budget) {
@@ -846,11 +841,11 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	spin_lock_irqsave(&mp->lock, flags);
 
-	txq = mp->txq + mp->txq_primary;
+	txq = mp->txq;
 
 	if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
 		spin_unlock_irqrestore(&mp->lock, flags);
-		if (txq->index == mp->txq_primary && net_ratelimit())
+		if (txq->index == 0 && net_ratelimit())
 			dev_printk(KERN_ERR, &dev->dev,
 				   "primary tx queue full?!\n");
 		kfree_skb(skb);
@@ -862,7 +857,7 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 	stats->tx_packets++;
 	dev->trans_start = jiffies;
 
-	if (txq->index == mp->txq_primary) {
+	if (txq->index == 0) {
 		int entries_left;
 
 		entries_left = txq->tx_ring_size - txq->tx_desc_count;
@@ -1517,7 +1512,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
 
 	size = rxq->rx_ring_size * sizeof(struct rx_desc);
 
-	if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size) {
+	if (index == 0 && size <= mp->rx_desc_sram_size) {
 		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
 					    mp->rx_desc_sram_size);
 		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
@@ -1559,7 +1554,7 @@ static int rxq_init(struct mv643xx_eth_private *mp, int index)
 
 
 out_free:
-	if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size)
+	if (index == 0 && size <= mp->rx_desc_sram_size)
 		iounmap(rxq->rx_desc_area);
 	else
 		dma_free_coherent(NULL, size,
@@ -1590,7 +1585,7 @@ static void rxq_deinit(struct rx_queue *rxq)
 			   rxq->rx_desc_count);
 	}
 
-	if (rxq->index == mp->rxq_primary &&
+	if (rxq->index == 0 &&
 	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
 		iounmap(rxq->rx_desc_area);
 	else
@@ -1617,7 +1612,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
 
 	size = txq->tx_ring_size * sizeof(struct tx_desc);
 
-	if (index == mp->txq_primary && size <= mp->tx_desc_sram_size) {
+	if (index == 0 && size <= mp->tx_desc_sram_size) {
 		txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
 					    mp->tx_desc_sram_size);
 		txq->tx_desc_dma = mp->tx_desc_sram_addr;
@@ -1661,7 +1656,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
 
 
 out_free:
-	if (index == mp->txq_primary && size <= mp->tx_desc_sram_size)
+	if (index == 0 && size <= mp->tx_desc_sram_size)
 		iounmap(txq->tx_desc_area);
 	else
 		dma_free_coherent(NULL, size,
@@ -1738,7 +1733,7 @@ static void txq_deinit(struct tx_queue *txq)
 
 	BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
 
-	if (txq->index == mp->txq_primary &&
+	if (txq->index == 0 &&
 	    txq->tx_desc_area_size <= mp->tx_desc_sram_size)
 		iounmap(txq->tx_desc_area);
 	else
@@ -1768,13 +1763,11 @@ static void handle_link_event(struct mv643xx_eth_private *mp)
 			netif_carrier_off(dev);
 			netif_stop_queue(dev);
 
-			for (i = 0; i < 8; i++) {
+			for (i = 0; i < mp->txq_count; i++) {
 				struct tx_queue *txq = mp->txq + i;
 
-				if (mp->txq_mask & (1 << i)) {
-					txq_reclaim(txq, 1);
-					txq_reset_hw_ptr(txq);
-				}
+				txq_reclaim(txq, 1);
+				txq_reset_hw_ptr(txq);
 			}
 		}
 		return;
@@ -1847,9 +1840,8 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 	if (int_cause_ext & INT_EXT_TX) {
 		int i;
 
-		for (i = 0; i < 8; i++)
-			if (mp->txq_mask & (1 << i))
-				txq_reclaim(mp->txq + i, 0);
+		for (i = 0; i < mp->txq_count; i++)
+			txq_reclaim(mp->txq + i, 0);
 
 		/*
 		 * Enough space again in the primary TX queue for a
@@ -1857,7 +1849,7 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 		 */
 		if (netif_carrier_ok(dev)) {
 			spin_lock(&mp->lock);
-			__txq_maybe_wake(mp->txq + mp->txq_primary);
+			__txq_maybe_wake(mp->txq);
 			spin_unlock(&mp->lock);
 		}
 	}
@@ -1945,12 +1937,9 @@ static void port_start(struct mv643xx_eth_private *mp)
 	 * Configure TX path and queues.
 	 */
 	tx_set_rate(mp, 1000000000, 16777216);
-	for (i = 0; i < 8; i++) {
+	for (i = 0; i < mp->txq_count; i++) {
 		struct tx_queue *txq = mp->txq + i;
 
-		if ((mp->txq_mask & (1 << i)) == 0)
-			continue;
-
 		txq_reset_hw_ptr(txq);
 		txq_set_rate(txq, 1000000000, 16777216);
 		txq_set_fixed_prio_mode(txq);
@@ -1975,14 +1964,11 @@ static void port_start(struct mv643xx_eth_private *mp)
 	/*
 	 * Enable the receive queues.
 	 */
-	for (i = 0; i < 8; i++) {
+	for (i = 0; i < mp->rxq_count; i++) {
 		struct rx_queue *rxq = mp->rxq + i;
 		int off = RXQ_CURRENT_DESC_PTR(mp->port_num, i);
 		u32 addr;
 
-		if ((mp->rxq_mask & (1 << i)) == 0)
-			continue;
-
 		addr = (u32)rxq->rx_desc_dma;
 		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
 		wrl(mp, off, addr);
@@ -2044,15 +2030,11 @@ static int mv643xx_eth_open(struct net_device *dev)
 	napi_enable(&mp->napi);
 
 	oom = 0;
-	for (i = 0; i < 8; i++) {
-		if ((mp->rxq_mask & (1 << i)) == 0)
-			continue;
-
+	for (i = 0; i < mp->rxq_count; i++) {
 		err = rxq_init(mp, i);
 		if (err) {
 			while (--i >= 0)
-				if (mp->rxq_mask & (1 << i))
-					rxq_deinit(mp->rxq + i);
+				rxq_deinit(mp->rxq + i);
 			goto out;
 		}
 
@@ -2064,15 +2046,11 @@ static int mv643xx_eth_open(struct net_device *dev)
 		add_timer(&mp->rx_oom);
 	}
 
-	for (i = 0; i < 8; i++) {
-		if ((mp->txq_mask & (1 << i)) == 0)
-			continue;
-
+	for (i = 0; i < mp->txq_count; i++) {
 		err = txq_init(mp, i);
 		if (err) {
 			while (--i >= 0)
-				if (mp->txq_mask & (1 << i))
-					txq_deinit(mp->txq + i);
+				txq_deinit(mp->txq + i);
 			goto out_free;
 		}
 	}
@@ -2094,9 +2072,8 @@ static int mv643xx_eth_open(struct net_device *dev)
 
 
 out_free:
-	for (i = 0; i < 8; i++)
-		if (mp->rxq_mask & (1 << i))
-			rxq_deinit(mp->rxq + i);
+	for (i = 0; i < mp->rxq_count; i++)
+		rxq_deinit(mp->rxq + i);
 out:
 	free_irq(dev->irq, dev);
 
@@ -2108,12 +2085,10 @@ static void port_reset(struct mv643xx_eth_private *mp)
 	unsigned int data;
 	int i;
 
-	for (i = 0; i < 8; i++) {
-		if (mp->rxq_mask & (1 << i))
-			rxq_disable(mp->rxq + i);
-		if (mp->txq_mask & (1 << i))
-			txq_disable(mp->txq + i);
-	}
+	for (i = 0; i < mp->rxq_count; i++)
+		rxq_disable(mp->rxq + i);
+	for (i = 0; i < mp->txq_count; i++)
+		txq_disable(mp->txq + i);
 
 	while (1) {
 		u32 ps = rdl(mp, PORT_STATUS(mp->port_num));
@@ -2151,12 +2126,10 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	port_reset(mp);
 	mib_counters_update(mp);
 
-	for (i = 0; i < 8; i++) {
-		if (mp->rxq_mask & (1 << i))
-			rxq_deinit(mp->rxq + i);
-		if (mp->txq_mask & (1 << i))
-			txq_deinit(mp->txq + i);
-	}
+	for (i = 0; i < mp->rxq_count; i++)
+		rxq_deinit(mp->rxq + i);
+	for (i = 0; i < mp->txq_count; i++)
+		txq_deinit(mp->txq + i);
 
 	return 0;
 }
@@ -2211,7 +2184,7 @@ static void tx_timeout_task(struct work_struct *ugly)
 		port_reset(mp);
 		port_start(mp);
 
-		__txq_maybe_wake(mp->txq + mp->txq_primary);
+		__txq_maybe_wake(mp->txq);
 	}
 }
 
@@ -2453,11 +2426,7 @@ static void set_params(struct mv643xx_eth_private *mp,
 	mp->rx_desc_sram_addr = pd->rx_sram_addr;
 	mp->rx_desc_sram_size = pd->rx_sram_size;
 
-	if (pd->rx_queue_mask)
-		mp->rxq_mask = pd->rx_queue_mask;
-	else
-		mp->rxq_mask = 0x01;
-	mp->rxq_primary = fls(mp->rxq_mask) - 1;
+	mp->rxq_count = pd->rx_queue_count ? : 1;
 
 	mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
 	if (pd->tx_queue_size)
@@ -2465,11 +2434,7 @@ static void set_params(struct mv643xx_eth_private *mp,
 	mp->tx_desc_sram_addr = pd->tx_sram_addr;
 	mp->tx_desc_sram_size = pd->tx_sram_size;
 
-	if (pd->tx_queue_mask)
-		mp->txq_mask = pd->tx_queue_mask;
-	else
-		mp->txq_mask = 0x01;
-	mp->txq_primary = fls(mp->txq_mask) - 1;
+	mp->txq_count = pd->tx_queue_count ? : 1;
 }
 
 static int phy_detect(struct mv643xx_eth_private *mp)
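Note the `? :` in set_params(): `pd->rx_queue_count ? : 1` is GNU C's
conditional with omitted middle operand, so a platform that leaves the
count at zero gets one queue by default. Board code that previously
filled in rx_queue_mask/tx_queue_mask would now pass counts instead; a
minimal sketch with hypothetical values (only the *_queue_count fields
are taken from this patch):

	static struct mv643xx_eth_platform_data my_board_eth_data = {
		/* hypothetical board configuration */
		.rx_queue_count	= 2,	/* rx queues 0 and 1 */
		.tx_queue_count	= 1,	/* tx queue 0 only */
	};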