about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/net/mv643xx_eth.c
diff options
context:
space:
mode:
authorLennert Buytenhek <buytenh@wantstofly.org>2008-06-01 19:01:26 -0400
committerLennert Buytenhek <buytenh@wantstofly.org>2008-06-12 02:40:36 -0400
commit64da80a29c7455321a7df7b47e27d639e3944c1a (patch)
tree056eb75e01ba9af095fb6420ccd7cda4a3c2a958 /drivers/net/mv643xx_eth.c
parent89df5fdc5290681e17b8755675c59ed9607a487a (diff)
mv643xx_eth: allow multiple RX queues
Allow the platform code to specify that we are running on hardware that is capable of supporting multiple RX queues. If this option is used, initialise all of the given RX queues instead of just RX queue zero. Signed-off-by: Lennert Buytenhek <buytenh@marvell.com> Acked-by: Dale Farnsworth <dale@farnsworth.org>
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r--  drivers/net/mv643xx_eth.c  |  99
1 file changed, 74 insertions(+), 25 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 9ce7be09e295..3c8591853999 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -96,7 +96,7 @@ static char mv643xx_eth_driver_version[] = "1.0";
 #define TX_BW_MTU(p) (0x0458 + ((p) << 10))
 #define TX_BW_BURST(p) (0x045c + ((p) << 10))
 #define INT_CAUSE(p) (0x0460 + ((p) << 10))
-#define INT_RX 0x00000804
+#define INT_RX 0x0007fbfc
 #define INT_EXT 0x00000002
 #define INT_CAUSE_EXT(p) (0x0464 + ((p) << 10))
 #define INT_EXT_LINK 0x00100000
@@ -107,7 +107,7 @@ static char mv643xx_eth_driver_version[] = "1.0";
 #define INT_MASK(p) (0x0468 + ((p) << 10))
 #define INT_MASK_EXT(p) (0x046c + ((p) << 10))
 #define TX_FIFO_URGENT_THRESHOLD(p) (0x0474 + ((p) << 10))
-#define RXQ_CURRENT_DESC_PTR(p) (0x060c + ((p) << 10))
+#define RXQ_CURRENT_DESC_PTR(p, q) (0x060c + ((p) << 10) + ((q) << 4))
 #define RXQ_COMMAND(p) (0x0680 + ((p) << 10))
 #define TXQ_CURRENT_DESC_PTR(p) (0x06c0 + ((p) << 10))
 #define TXQ_BW_TOKENS(p) (0x0700 + ((p) << 10))
@@ -286,6 +286,8 @@ struct mib_counters {
 };
 
 struct rx_queue {
+	int index;
+
 	int rx_ring_size;
 
 	int rx_desc_count;
@@ -334,8 +336,10 @@ struct mv643xx_eth_private {
 	int default_rx_ring_size;
 	unsigned long rx_desc_sram_addr;
 	int rx_desc_sram_size;
+	u8 rxq_mask;
+	int rxq_primary;
 	struct napi_struct napi;
-	struct rx_queue rxq[1];
+	struct rx_queue rxq[8];
 
 	/*
 	 * TX state.
@@ -365,7 +369,7 @@ static inline void wrl(struct mv643xx_eth_private *mp, int offset, u32 data)
 /* rxq/txq helper functions *************************************************/
 static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
 {
-	return container_of(rxq, struct mv643xx_eth_private, rxq[0]);
+	return container_of(rxq, struct mv643xx_eth_private, rxq[rxq->index]);
 }
 
 static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
@@ -376,13 +380,13 @@ static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
 static void rxq_enable(struct rx_queue *rxq)
 {
 	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
-	wrl(mp, RXQ_COMMAND(mp->port_num), 1);
+	wrl(mp, RXQ_COMMAND(mp->port_num), 1 << rxq->index);
 }
 
 static void rxq_disable(struct rx_queue *rxq)
 {
 	struct mv643xx_eth_private *mp = rxq_to_mp(rxq);
-	u8 mask = 1;
+	u8 mask = 1 << rxq->index;
 
 	wrl(mp, RXQ_COMMAND(mp->port_num), mask << 8);
 	while (rdl(mp, RXQ_COMMAND(mp->port_num)) & mask)
@@ -583,6 +587,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 {
 	struct mv643xx_eth_private *mp;
 	int rx;
+	int i;
 
 	mp = container_of(napi, struct mv643xx_eth_private, napi);
 
@@ -593,7 +598,10 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 	}
 #endif
 
-	rx = rxq_process(mp->rxq, budget);
+	rx = 0;
+	for (i = 7; rx < budget && i >= 0; i--)
+		if (mp->rxq_mask & (1 << i))
+			rx += rxq_process(mp->rxq + i, budget - rx);
 
 	if (rx < budget) {
 		netif_rx_complete(mp->dev, napi);
@@ -1306,13 +1314,15 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
 
 
 /* rx/tx queue initialisation ***********************************************/
-static int rxq_init(struct mv643xx_eth_private *mp)
+static int rxq_init(struct mv643xx_eth_private *mp, int index)
 {
-	struct rx_queue *rxq = mp->rxq;
+	struct rx_queue *rxq = mp->rxq + index;
 	struct rx_desc *rx_desc;
 	int size;
 	int i;
 
+	rxq->index = index;
+
 	rxq->rx_ring_size = mp->default_rx_ring_size;
 
 	rxq->rx_desc_count = 0;
@@ -1321,7 +1331,7 @@ static int rxq_init(struct mv643xx_eth_private *mp)
 
 	size = rxq->rx_ring_size * sizeof(struct rx_desc);
 
-	if (size <= mp->rx_desc_sram_size) {
+	if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size) {
 		rxq->rx_desc_area = ioremap(mp->rx_desc_sram_addr,
 					    mp->rx_desc_sram_size);
 		rxq->rx_desc_dma = mp->rx_desc_sram_addr;
@@ -1362,7 +1372,7 @@ static int rxq_init(struct mv643xx_eth_private *mp)
 
 
 out_free:
-	if (size <= mp->rx_desc_sram_size)
+	if (index == mp->rxq_primary && size <= mp->rx_desc_sram_size)
 		iounmap(rxq->rx_desc_area);
 	else
 		dma_free_coherent(NULL, size,
@@ -1395,7 +1405,8 @@ static void rxq_deinit(struct rx_queue *rxq)
 						rxq->rx_desc_count);
 	}
 
-	if (rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
+	if (rxq->index == mp->rxq_primary &&
+	    rxq->rx_desc_area_size <= mp->rx_desc_sram_size)
 		iounmap(rxq->rx_desc_area);
 	else
 		dma_free_coherent(NULL, rxq->rx_desc_area_size,
@@ -1612,6 +1623,9 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 		}
 	}
 
+	/*
+	 * RxBuffer or RxError set for any of the 8 queues?
+	 */
 #ifdef MV643XX_ETH_NAPI
 	if (int_cause & INT_RX) {
 		wrl(mp, INT_MASK(mp->port_num), 0x00000000);
@@ -1620,8 +1634,13 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 		netif_rx_schedule(dev, &mp->napi);
 	}
 #else
-	if (int_cause & INT_RX)
-		rxq_process(mp->rxq, INT_MAX);
+	if (int_cause & INT_RX) {
+		int i;
+
+		for (i = 7; i >= 0; i--)
+			if (mp->rxq_mask & (1 << i))
+				rxq_process(mp->rxq + i, INT_MAX);
+	}
 #endif
 
 	if (int_cause_ext & INT_EXT_TX) {
@@ -1707,13 +1726,16 @@ static void port_start(struct mv643xx_eth_private *mp)
 	wrl(mp, PORT_CONFIG_EXT(mp->port_num), 0x00000000);
 
 	/*
-	 * Enable the receive queue.
+	 * Enable the receive queues.
 	 */
-	for (i = 0; i < 1; i++) {
-		struct rx_queue *rxq = mp->rxq;
-		int off = RXQ_CURRENT_DESC_PTR(mp->port_num);
+	for (i = 0; i < 8; i++) {
+		struct rx_queue *rxq = mp->rxq + i;
+		int off = RXQ_CURRENT_DESC_PTR(mp->port_num, i);
 		u32 addr;
 
+		if ((mp->rxq_mask & (1 << i)) == 0)
+			continue;
+
 		addr = (u32)rxq->rx_desc_dma;
 		addr += rxq->rx_curr_desc * sizeof(struct rx_desc);
 		wrl(mp, off, addr);
@@ -1748,6 +1770,7 @@ static int mv643xx_eth_open(struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	int err;
+	int i;
 
 	wrl(mp, INT_CAUSE(mp->port_num), 0);
 	wrl(mp, INT_CAUSE_EXT(mp->port_num), 0);
@@ -1763,10 +1786,20 @@ static int mv643xx_eth_open(struct net_device *dev)
 
 	init_mac_tables(mp);
 
-	err = rxq_init(mp);
-	if (err)
-		goto out;
-	rxq_refill(mp->rxq);
+	for (i = 0; i < 8; i++) {
+		if ((mp->rxq_mask & (1 << i)) == 0)
+			continue;
+
+		err = rxq_init(mp, i);
+		if (err) {
+			while (--i >= 0)
+				if (mp->rxq_mask & (1 << i))
+					rxq_deinit(mp->rxq + i);
+			goto out;
+		}
+
+		rxq_refill(mp->rxq + i);
+	}
 
 	err = txq_init(mp);
 	if (err)
@@ -1790,7 +1823,9 @@ static int mv643xx_eth_open(struct net_device *dev)
 
 
 out_free:
-	rxq_deinit(mp->rxq);
+	for (i = 0; i < 8; i++)
+		if (mp->rxq_mask & (1 << i))
+			rxq_deinit(mp->rxq + i);
 out:
 	free_irq(dev->irq, dev);
 
@@ -1800,9 +1835,13 @@ out:
 static void port_reset(struct mv643xx_eth_private *mp)
 {
 	unsigned int data;
+	int i;
 
+	for (i = 0; i < 8; i++) {
+		if (mp->rxq_mask & (1 << i))
+			rxq_disable(mp->rxq + i);
+	}
 	txq_disable(mp->txq);
-	rxq_disable(mp->rxq);
 	while (!(rdl(mp, PORT_STATUS(mp->port_num)) & TX_FIFO_EMPTY))
 		udelay(10);
 
@@ -1817,6 +1856,7 @@ static void port_reset(struct mv643xx_eth_private *mp)
 static int mv643xx_eth_stop(struct net_device *dev)
 {
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
+	int i;
 
 	wrl(mp, INT_MASK(mp->port_num), 0x00000000);
 	rdl(mp, INT_MASK(mp->port_num));
@@ -1832,8 +1872,11 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	port_reset(mp);
 	mib_counters_update(mp);
 
+	for (i = 0; i < 8; i++) {
+		if (mp->rxq_mask & (1 << i))
+			rxq_deinit(mp->rxq + i);
+	}
 	txq_deinit(mp->txq);
-	rxq_deinit(mp->rxq);
 
 	return 0;
 }
@@ -2085,6 +2128,12 @@ static void set_params(struct mv643xx_eth_private *mp,
 	mp->rx_desc_sram_addr = pd->rx_sram_addr;
 	mp->rx_desc_sram_size = pd->rx_sram_size;
 
+	if (pd->rx_queue_mask)
+		mp->rxq_mask = pd->rx_queue_mask;
+	else
+		mp->rxq_mask = 0x01;
+	mp->rxq_primary = fls(mp->rxq_mask) - 1;
+
 	mp->default_tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
 	if (pd->tx_queue_size)
 		mp->default_tx_ring_size = pd->tx_queue_size;