Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/mv643xx_eth.c | 146
1 file changed, 103 insertions(+), 43 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 3c8591853999..287155ea9ce1 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -103,16 +103,16 @@ static char mv643xx_eth_driver_version[] = "1.0";
 #define INT_EXT_PHY                     0x00010000
 #define INT_EXT_TX_ERROR_0              0x00000100
 #define INT_EXT_TX_0                    0x00000001
-#define INT_EXT_TX                      0x00000101
+#define INT_EXT_TX                      0x0000ffff
 #define INT_MASK(p)                     (0x0468 + ((p) << 10))
 #define INT_MASK_EXT(p)                 (0x046c + ((p) << 10))
 #define TX_FIFO_URGENT_THRESHOLD(p)     (0x0474 + ((p) << 10))
 #define RXQ_CURRENT_DESC_PTR(p, q)      (0x060c + ((p) << 10) + ((q) << 4))
 #define RXQ_COMMAND(p)                  (0x0680 + ((p) << 10))
-#define TXQ_CURRENT_DESC_PTR(p)         (0x06c0 + ((p) << 10))
-#define TXQ_BW_TOKENS(p)                (0x0700 + ((p) << 10))
-#define TXQ_BW_CONF(p)                  (0x0704 + ((p) << 10))
-#define TXQ_BW_WRR_CONF(p)              (0x0708 + ((p) << 10))
+#define TXQ_CURRENT_DESC_PTR(p, q)      (0x06c0 + ((p) << 10) + ((q) << 2))
+#define TXQ_BW_TOKENS(p, q)             (0x0700 + ((p) << 10) + ((q) << 4))
+#define TXQ_BW_CONF(p, q)               (0x0704 + ((p) << 10) + ((q) << 4))
+#define TXQ_BW_WRR_CONF(p, q)           (0x0708 + ((p) << 10) + ((q) << 4))
 #define MIB_COUNTERS(p)                 (0x1000 + ((p) << 7))
 #define SPECIAL_MCAST_TABLE(p)          (0x1400 + ((p) << 10))
 #define OTHER_MCAST_TABLE(p)            (0x1500 + ((p) << 10))
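[Annotation, not part of the patch] The TXQ_* macros now take a queue argument, and each offset decomposes as base + (port << 10) + (queue << stride). A minimal userspace sketch to sanity-check the arithmetic, using the macro definitions from the hunk above:

    #include <assert.h>

    #define TXQ_CURRENT_DESC_PTR(p, q)  (0x06c0 + ((p) << 10) + ((q) << 2))
    #define TXQ_BW_TOKENS(p, q)         (0x0700 + ((p) << 10) + ((q) << 4))

    int main(void)
    {
            /* Port 1, queue 2: 0x06c0 + 0x400 + 0x8 = 0x0ac8. */
            assert(TXQ_CURRENT_DESC_PTR(1, 2) == 0x0ac8);

            /* Consecutive ports are 0x400 apart; consecutive
             * TXQ_BW_TOKENS queue slots are 16 bytes apart. */
            assert(TXQ_BW_TOKENS(1, 0) - TXQ_BW_TOKENS(0, 0) == 0x400);
            assert(TXQ_BW_TOKENS(0, 1) - TXQ_BW_TOKENS(0, 0) == 0x10);
            return 0;
    }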
@@ -303,6 +303,8 @@ struct rx_queue {
 };
 
 struct tx_queue {
+        int index;
+
         int tx_ring_size;
 
         int tx_desc_count;
@@ -347,7 +349,9 @@ struct mv643xx_eth_private {
         int default_tx_ring_size;
         unsigned long tx_desc_sram_addr;
         int tx_desc_sram_size;
-        struct tx_queue txq[1];
+        u8 txq_mask;
+        int txq_primary;
+        struct tx_queue txq[8];
 #ifdef MV643XX_ETH_TX_FAST_REFILL
         int tx_clean_threshold;
 #endif
@@ -374,7 +378,7 @@ static struct mv643xx_eth_private *rxq_to_mp(struct rx_queue *rxq)
 
 static struct mv643xx_eth_private *txq_to_mp(struct tx_queue *txq)
 {
-        return container_of(txq, struct mv643xx_eth_private, txq[0]);
+        return container_of(txq, struct mv643xx_eth_private, txq[txq->index]);
 }
 
 static void rxq_enable(struct rx_queue *rxq)
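[Annotation] txq_to_mp() can no longer hard-code txq[0]; container_of(txq, ..., txq[txq->index]) subtracts the offset of the array element the pointer actually refers to, so the math works for any queue. A standalone sketch of the equivalent pointer arithmetic (txq_to_priv and struct priv are hypothetical names used only for this illustration):

    #include <assert.h>
    #include <stddef.h>

    struct tx_queue { int index; };
    struct priv { long other_state; struct tx_queue txq[8]; };

    /* Same result as container_of(txq, struct priv, txq[txq->index]):
     * step back to txq[0], then back to the start of the struct. */
    static struct priv *txq_to_priv(struct tx_queue *txq)
    {
            return (struct priv *)((char *)(txq - txq->index) -
                                   offsetof(struct priv, txq));
    }

    int main(void)
    {
            struct priv p;

            p.txq[5].index = 5;
            assert(txq_to_priv(&p.txq[5]) == &p);
            return 0;
    }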
@@ -396,13 +400,13 @@ static void rxq_disable(struct rx_queue *rxq)
 static void txq_enable(struct tx_queue *txq)
 {
         struct mv643xx_eth_private *mp = txq_to_mp(txq);
-        wrl(mp, TXQ_COMMAND(mp->port_num), 1);
+        wrl(mp, TXQ_COMMAND(mp->port_num), 1 << txq->index);
 }
 
 static void txq_disable(struct tx_queue *txq)
 {
         struct mv643xx_eth_private *mp = txq_to_mp(txq);
-        u8 mask = 1;
+        u8 mask = 1 << txq->index;
 
         wrl(mp, TXQ_COMMAND(mp->port_num), mask << 8);
         while (rdl(mp, TXQ_COMMAND(mp->port_num)) & mask)
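[Annotation] txq_disable() writes the queue's bit into the upper command byte and then polls the lower byte, which the driver's usage implies the hardware clears once the queue has drained. A condensed sketch of that handshake (txq_disable_wait is a hypothetical name; the register semantics are inferred from the driver, not confirmed from the datasheet):

    /* DISQ request bits in 8..15, ENQ status bits in 0..7. */
    static void txq_disable_wait(volatile unsigned int *txq_cmd, int index)
    {
            unsigned int mask = 1u << index;

            *txq_cmd = mask << 8;           /* request disable for queue */
            while (*txq_cmd & mask)         /* spin until enable bit drops */
                    ;
    }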
@@ -413,6 +417,12 @@ static void __txq_maybe_wake(struct tx_queue *txq)
 {
         struct mv643xx_eth_private *mp = txq_to_mp(txq);
 
+        /*
+         * netif_{stop,wake}_queue() flow control only applies to
+         * the primary queue.
+         */
+        BUG_ON(txq->index != mp->txq_primary);
+
         if (txq->tx_ring_size - txq->tx_desc_count >= MAX_DESCS_PER_SKB)
                 netif_wake_queue(mp->dev);
 }
@@ -593,8 +603,10 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
 
 #ifdef MV643XX_ETH_TX_FAST_REFILL
         if (++mp->tx_clean_threshold > 5) {
-                txq_reclaim(mp->txq, 0);
                 mp->tx_clean_threshold = 0;
+                for (i = 0; i < 8; i++)
+                        if (mp->txq_mask & (1 << i))
+                                txq_reclaim(mp->txq + i, 0);
         }
 #endif
 
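[Annotation] This "walk slots 0..7, skip the bits absent from txq_mask" loop is the shape the patch uses everywhere a per-queue operation fans out. A self-contained sketch (for_each_set_queue is a hypothetical helper, not driver code):

    #include <stdio.h>

    static void for_each_set_queue(unsigned char mask, void (*op)(int))
    {
            int i;

            for (i = 0; i < 8; i++)
                    if (mask & (1 << i))
                            op(i);
    }

    static void reclaim(int index)
    {
            printf("reclaim queue %d\n", index);
    }

    int main(void)
    {
            for_each_set_queue(0x05, reclaim);      /* queues 0 and 2 */
            return 0;
    }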
@@ -754,8 +766,6 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
         struct tx_queue *txq;
         unsigned long flags;
 
-        BUG_ON(netif_queue_stopped(dev));
-
         if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
                 stats->tx_dropped++;
                 dev_printk(KERN_DEBUG, &dev->dev,
@@ -766,13 +776,15 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
 
         spin_lock_irqsave(&mp->lock, flags);
 
-        txq = mp->txq;
+        txq = mp->txq + mp->txq_primary;
 
         if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB) {
-                printk(KERN_ERR "%s: transmit with queue full\n", dev->name);
-                netif_stop_queue(dev);
                 spin_unlock_irqrestore(&mp->lock, flags);
-                return NETDEV_TX_BUSY;
+                if (txq->index == mp->txq_primary && net_ratelimit())
+                        dev_printk(KERN_ERR, &dev->dev,
+                                   "primary tx queue full?!\n");
+                kfree_skb(skb);
+                return NETDEV_TX_OK;
         }
 
         txq_submit_skb(txq, skb);
@@ -780,8 +792,13 @@ static int mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
         stats->tx_packets++;
         dev->trans_start = jiffies;
 
-        if (txq->tx_ring_size - txq->tx_desc_count < MAX_DESCS_PER_SKB)
-                netif_stop_queue(dev);
+        if (txq->index == mp->txq_primary) {
+                int entries_left;
+
+                entries_left = txq->tx_ring_size - txq->tx_desc_count;
+                if (entries_left < MAX_DESCS_PER_SKB)
+                        netif_stop_queue(dev);
+        }
 
         spin_unlock_irqrestore(&mp->lock, flags);
 
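[Annotation] Only the primary queue participates in netif_{stop,wake}_queue(), because this pre-multiqueue net_device has a single flow-control handle. The stop condition itself is unchanged: halt the queue while it cannot guarantee room for a maximally fragmented skb. A sketch of that predicate (the constant here is illustrative; the driver derives its own MAX_DESCS_PER_SKB):

    #include <assert.h>

    #define MAX_DESCS_PER_SKB 18    /* illustrative, not the driver's value */

    static int must_stop(int ring_size, int desc_count)
    {
            return ring_size - desc_count < MAX_DESCS_PER_SKB;
    }

    int main(void)
    {
            assert(!must_stop(128, 100));   /* 28 entries left: keep going */
            assert(must_stop(128, 115));    /* 13 left: one skb may not fit */
            return 0;
    }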
@@ -831,8 +848,8 @@ static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
         if (bucket_size > 65535)
                 bucket_size = 65535;
 
-        wrl(mp, TXQ_BW_TOKENS(mp->port_num), token_rate << 14);
-        wrl(mp, TXQ_BW_CONF(mp->port_num),
+        wrl(mp, TXQ_BW_TOKENS(mp->port_num, txq->index), token_rate << 14);
+        wrl(mp, TXQ_BW_CONF(mp->port_num, txq->index),
                 (bucket_size << 10) | token_rate);
 }
 
@@ -848,7 +865,7 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq)
         off = TXQ_FIX_PRIO_CONF(mp->port_num);
 
         val = rdl(mp, off);
-        val |= 1;
+        val |= 1 << txq->index;
         wrl(mp, off, val);
 }
 
@@ -864,13 +881,13 @@ static void txq_set_wrr(struct tx_queue *txq, int weight)
         off = TXQ_FIX_PRIO_CONF(mp->port_num);
 
         val = rdl(mp, off);
-        val &= ~1;
+        val &= ~(1 << txq->index);
         wrl(mp, off, val);
 
         /*
          * Configure WRR weight for this queue.
          */
-        off = TXQ_BW_WRR_CONF(mp->port_num);
+        off = TXQ_BW_WRR_CONF(mp->port_num, txq->index);
 
         val = rdl(mp, off);
         val = (val & ~0xff) | (weight & 0xff);
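[Annotation] Fixed-priority vs. WRR arbitration is one bit per queue in the same config word, so both helpers now read-modify-write only their own bit instead of bit 0. A minimal sketch of the bit handling (helper names are hypothetical):

    #include <assert.h>

    static unsigned int set_fixed_prio(unsigned int val, int index)
    {
            return val | (1u << index);     /* 1 = fixed priority */
    }

    static unsigned int set_wrr(unsigned int val, int index)
    {
            return val & ~(1u << index);    /* 0 = weighted round robin */
    }

    int main(void)
    {
            unsigned int val = 0x01;        /* queue 0 fixed-prio */

            val = set_fixed_prio(val, 3);
            assert(val == 0x09);
            val = set_wrr(val, 0);
            assert(val == 0x08);
            return 0;
    }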
@@ -1415,13 +1432,15 @@ static void rxq_deinit(struct rx_queue *rxq)
         kfree(rxq->rx_skb);
 }
 
-static int txq_init(struct mv643xx_eth_private *mp)
+static int txq_init(struct mv643xx_eth_private *mp, int index)
 {
-        struct tx_queue *txq = mp->txq;
+        struct tx_queue *txq = mp->txq + index;
         struct tx_desc *tx_desc;
         int size;
         int i;
 
+        txq->index = index;
+
         txq->tx_ring_size = mp->default_tx_ring_size;
 
         txq->tx_desc_count = 0;
@@ -1430,7 +1449,7 @@ static int txq_init(struct mv643xx_eth_private *mp)
 
         size = txq->tx_ring_size * sizeof(struct tx_desc);
 
-        if (size <= mp->tx_desc_sram_size) {
+        if (index == mp->txq_primary && size <= mp->tx_desc_sram_size) {
                 txq->tx_desc_area = ioremap(mp->tx_desc_sram_addr,
                                             mp->tx_desc_sram_size);
                 txq->tx_desc_dma = mp->tx_desc_sram_addr;
@@ -1467,7 +1486,7 @@ static int txq_init(struct mv643xx_eth_private *mp)
 
 
 out_free:
-        if (size <= mp->tx_desc_sram_size)
+        if (index == mp->txq_primary && size <= mp->tx_desc_sram_size)
                 iounmap(txq->tx_desc_area);
         else
                 dma_free_coherent(NULL, size,
@@ -1539,7 +1558,8 @@ static void txq_deinit(struct tx_queue *txq)
 
         BUG_ON(txq->tx_used_desc != txq->tx_curr_desc);
 
-        if (txq->tx_desc_area_size <= mp->tx_desc_sram_size)
+        if (txq->index == mp->txq_primary &&
+            txq->tx_desc_area_size <= mp->tx_desc_sram_size)
                 iounmap(txq->tx_desc_area);
         else
                 dma_free_coherent(NULL, txq->tx_desc_area_size,
@@ -1578,12 +1598,20 @@ static void update_pscr(struct mv643xx_eth_private *mp, int speed, int duplex)
                 if ((pscr_o & SERIAL_PORT_ENABLE) == 0)
                         wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
                 else {
-                        txq_disable(mp->txq);
+                        int i;
+
+                        for (i = 0; i < 8; i++)
+                                if (mp->txq_mask & (1 << i))
+                                        txq_disable(mp->txq + i);
+
                         pscr_o &= ~SERIAL_PORT_ENABLE;
                         wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_o);
                         wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
                         wrl(mp, PORT_SERIAL_CONTROL(mp->port_num), pscr_n);
-                        txq_enable(mp->txq);
+
+                        for (i = 0; i < 8; i++)
+                                if (mp->txq_mask & (1 << i))
+                                        txq_enable(mp->txq + i);
                 }
         }
 }
@@ -1609,13 +1637,17 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
         if (int_cause_ext & (INT_EXT_PHY | INT_EXT_LINK)) {
                 if (mii_link_ok(&mp->mii)) {
                         struct ethtool_cmd cmd;
+                        int i;
 
                         mii_ethtool_gset(&mp->mii, &cmd);
                         update_pscr(mp, cmd.speed, cmd.duplex);
-                        txq_enable(mp->txq);
+                        for (i = 0; i < 8; i++)
+                                if (mp->txq_mask & (1 << i))
+                                        txq_enable(mp->txq + i);
+
                         if (!netif_carrier_ok(dev)) {
                                 netif_carrier_on(dev);
-                                __txq_maybe_wake(mp->txq);
+                                __txq_maybe_wake(mp->txq + mp->txq_primary);
                         }
                 } else if (netif_carrier_ok(dev)) {
                         netif_stop_queue(dev);
@@ -1643,9 +1675,17 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
         }
 #endif
 
+        /*
+         * TxBuffer or TxError set for any of the 8 queues?
+         */
         if (int_cause_ext & INT_EXT_TX) {
-                txq_reclaim(mp->txq, 0);
-                __txq_maybe_wake(mp->txq);
+                int i;
+
+                for (i = 0; i < 8; i++)
+                        if (mp->txq_mask & (1 << i))
+                                txq_reclaim(mp->txq + i, 0);
+
+                __txq_maybe_wake(mp->txq + mp->txq_primary);
         }
 
         return IRQ_HANDLED;
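[Annotation] This hunk relies on the INT_EXT_TX widening at the top of the patch: 0x0000ffff covers the per-queue TxBuffer bits (low byte) and TxError bits (next byte) for all 8 queues, where the old 0x00000101 matched queue 0 only. A quick check using the values from the first hunk:

    #include <assert.h>

    #define INT_EXT_TX_ERROR_0      0x00000100
    #define INT_EXT_TX_0            0x00000001
    #define INT_EXT_TX              0x0000ffff

    int main(void)
    {
            int q;

            for (q = 0; q < 8; q++) {
                    assert(INT_EXT_TX & (INT_EXT_TX_0 << q));
                    assert(INT_EXT_TX & (INT_EXT_TX_ERROR_0 << q));
            }
            return 0;
    }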
@@ -1696,11 +1736,14 @@ static void port_start(struct mv643xx_eth_private *mp)
          * Configure TX path and queues.
          */
         tx_set_rate(mp, 1000000000, 16777216);
-        for (i = 0; i < 1; i++) {
-                struct tx_queue *txq = mp->txq;
-                int off = TXQ_CURRENT_DESC_PTR(mp->port_num);
+        for (i = 0; i < 8; i++) {
+                struct tx_queue *txq = mp->txq + i;
+                int off = TXQ_CURRENT_DESC_PTR(mp->port_num, i);
                 u32 addr;
 
+                if ((mp->txq_mask & (1 << i)) == 0)
+                        continue;
+
                 addr = (u32)txq->tx_desc_dma;
                 addr += txq->tx_curr_desc * sizeof(struct tx_desc);
                 wrl(mp, off, addr);
@@ -1801,9 +1844,18 @@ static int mv643xx_eth_open(struct net_device *dev)
                 rxq_refill(mp->rxq + i);
         }
 
-        err = txq_init(mp);
-        if (err)
-                goto out_free;
+        for (i = 0; i < 8; i++) {
+                if ((mp->txq_mask & (1 << i)) == 0)
+                        continue;
+
+                err = txq_init(mp, i);
+                if (err) {
+                        while (--i >= 0)
+                                if (mp->txq_mask & (1 << i))
+                                        txq_deinit(mp->txq + i);
+                        goto out_free;
+                }
+        }
 
 #ifdef MV643XX_ETH_NAPI
         napi_enable(&mp->napi);
@@ -1840,8 +1892,9 @@ static void port_reset(struct mv643xx_eth_private *mp)
         for (i = 0; i < 8; i++) {
                 if (mp->rxq_mask & (1 << i))
                         rxq_disable(mp->rxq + i);
+                if (mp->txq_mask & (1 << i))
+                        txq_disable(mp->txq + i);
         }
-        txq_disable(mp->txq);
         while (!(rdl(mp, PORT_STATUS(mp->port_num)) & TX_FIFO_EMPTY))
                 udelay(10);
 
@@ -1875,8 +1928,9 @@ static int mv643xx_eth_stop(struct net_device *dev)
         for (i = 0; i < 8; i++) {
                 if (mp->rxq_mask & (1 << i))
                         rxq_deinit(mp->rxq + i);
+                if (mp->txq_mask & (1 << i))
+                        txq_deinit(mp->txq + i);
         }
-        txq_deinit(mp->txq);
 
         return 0;
 }
@@ -1928,7 +1982,7 @@ static void tx_timeout_task(struct work_struct *ugly)
                 port_reset(mp);
                 port_start(mp);
 
-                __txq_maybe_wake(mp->txq);
+                __txq_maybe_wake(mp->txq + mp->txq_primary);
         }
 }
 
@@ -2139,6 +2193,12 @@ static void set_params(struct mv643xx_eth_private *mp,
         mp->default_tx_ring_size = pd->tx_queue_size;
         mp->tx_desc_sram_addr = pd->tx_sram_addr;
         mp->tx_desc_sram_size = pd->tx_sram_size;
+
+        if (pd->tx_queue_mask)
+                mp->txq_mask = pd->tx_queue_mask;
+        else
+                mp->txq_mask = 0x01;
+        mp->txq_primary = fls(mp->txq_mask) - 1;
 }
 
 static int phy_detect(struct mv643xx_eth_private *mp)
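[Annotation] set_params() picks the highest-numbered enabled queue as the primary one: fls() returns the 1-based position of the highest set bit, so fls(mask) - 1 is that queue's index. A portable sketch of the same computation (fls_sketch stands in for the kernel's fls()):

    #include <assert.h>

    static int fls_sketch(unsigned int x)
    {
            int r = 0;

            while (x) {
                    x >>= 1;
                    r++;
            }
            return r;
    }

    int main(void)
    {
            /* Mask 0x05 enables queues 0 and 2; queue 2 becomes primary. */
            assert(fls_sketch(0x05) - 1 == 2);
            /* Default mask 0x01 leaves queue 0 as the only (primary) queue. */
            assert(fls_sketch(0x01) - 1 == 0);
            return 0;
    }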