author		Lennert Buytenhek <buytenh@wantstofly.org>	2008-07-11 13:38:34 -0400
committer	Lennert Buytenhek <buytenh@marvell.com>	2008-07-24 00:22:50 -0400
commit		6b368f6859c80343e5d7c6e2a7c49df0a8a273c1
tree		a94206bb0a36cdfb19e08907dcc4031b51e9348b /drivers/net/mv643xx_eth.c
parent		8fa89bf5de066b11190ac804903021700c2b1185
mv643xx_eth: prevent breakage when link goes down during transmit
When the ethernet link goes down while mv643xx_eth is transmitting
data, transmit DMA can stop before all queued transmit descriptors
have been processed. But even the descriptors that _have_ been
processed might not be properly marked as done before the transmit
DMA unit shuts down.
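(For reference: "marked as done" here means the per-descriptor ownership bit. The driver sets BUFFER_OWNED_BY_DMA in a descriptor's cmd_sts word when handing it to the hardware, and the hardware is supposed to clear that bit once the packet has gone out. A rough sketch of the descriptor, following the driver's little-endian layout; the driver actually carries two field orderings behind a byte-order #if:)

struct tx_desc {
	u16 byte_cnt;		/* bytes in this buffer */
	u16 l4i_chk;		/* CPU-computed TCP checksum seed */
	u32 cmd_sts;		/* command/status; BUFFER_OWNED_BY_DMA lives here */
	u32 next_desc_ptr;	/* bus address of the next descriptor */
	u32 buf_ptr;		/* bus address of the packet buffer */
};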
Then when the link comes up again, the hardware transmit pointer
might have advanced while not all previous packet descriptors have
been marked as transmitted, causing software transmit reclaim to
hang waiting for the hardware to finish transmitting a descriptor
that it has already skipped.
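Condensed, the pre-patch reclaim loop looked roughly like this (a sketch, not the verbatim txq_reclaim(), which also handles locking and skb unmapping); once the hardware has skipped past a descriptor without clearing its ownership bit, the break fires on every call and the ring never drains:

while (txq->tx_desc_count > 0) {
	int tx_index = txq->tx_used_desc;
	struct tx_desc *desc = &txq->tx_desc_area[tx_index];

	/*
	 * A stale BUFFER_OWNED_BY_DMA bit on a descriptor the
	 * hardware skipped means we stop here forever.
	 */
	if (!force && (desc->cmd_sts & BUFFER_OWNED_BY_DMA))
		break;

	txq->tx_used_desc = (tx_index + 1) % txq->tx_ring_size;
	txq->tx_desc_count--;

	/* ... unmap the buffer and free the attached skb ... */
}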
This patch forcibly reclaims all packets on the transmit ring on a
link down interrupt, and then resyncs the hardware transmit pointer to
the software's idea of the first free descriptor. Also, we
need to prevent re-waking the transmit queue if we get a 'transmit
done' interrupt at the same time as a 'link down' interrupt, which
this patch does as well.
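In outline, the link-down path now does this for each of the (up to eight) transmit queues enabled in txq_mask, condensed from the interrupt-handler hunk below; txq_reset_hw_ptr() is the helper this patch introduces:

netif_stop_queue(dev);
netif_carrier_off(dev);

for (i = 0; i < 8; i++) {
	struct tx_queue *txq = mp->txq + i;

	if (mp->txq_mask & (1 << i)) {
		/* Reclaim every descriptor, clearing stale ownership bits. */
		txq_reclaim(txq, 1);
		/* Rewrite the hardware's current-descriptor register from
		 * the software index (tx_curr_desc). */
		txq_reset_hw_ptr(txq);
	}
}

The matching guard on the 'transmit done' path simply skips the wakeup when netif_carrier_ok(dev) is false, so a queue stopped by link-down is not immediately re-woken.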
Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r--	drivers/net/mv643xx_eth.c	57
1 files changed, 38 insertions, 19 deletions
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 910920e21259..d7620c50efb1 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -405,6 +405,17 @@ static void rxq_disable(struct rx_queue *rxq)
 	udelay(10);
 }
 
+static void txq_reset_hw_ptr(struct tx_queue *txq)
+{
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
+	int off = TXQ_CURRENT_DESC_PTR(mp->port_num, txq->index);
+	u32 addr;
+
+	addr = (u32)txq->tx_desc_dma;
+	addr += txq->tx_curr_desc * sizeof(struct tx_desc);
+	wrl(mp, off, addr);
+}
+
 static void txq_enable(struct tx_queue *txq)
 {
 	struct mv643xx_eth_private *mp = txq_to_mp(txq);
@@ -1545,8 +1556,11 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
 
 	tx_desc = (struct tx_desc *)txq->tx_desc_area;
 	for (i = 0; i < txq->tx_ring_size; i++) {
+		struct tx_desc *txd = tx_desc + i;
 		int nexti = (i + 1) % txq->tx_ring_size;
-		tx_desc[i].next_desc_ptr = txq->tx_desc_dma +
+
+		txd->cmd_sts = 0;
+		txd->next_desc_ptr = txq->tx_desc_dma +
 			nexti * sizeof(struct tx_desc);
 	}
 
@@ -1583,8 +1597,11 @@ static void txq_reclaim(struct tx_queue *txq, int force)
 		desc = &txq->tx_desc_area[tx_index];
 		cmd_sts = desc->cmd_sts;
 
-		if (!force && (cmd_sts & BUFFER_OWNED_BY_DMA))
-			break;
+		if (cmd_sts & BUFFER_OWNED_BY_DMA) {
+			if (!force)
+				break;
+			desc->cmd_sts = cmd_sts & ~BUFFER_OWNED_BY_DMA;
+		}
 
 		txq->tx_used_desc = (tx_index + 1) % txq->tx_ring_size;
 		txq->tx_desc_count--;
@@ -1705,8 +1722,6 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 
 	if (int_cause_ext & (INT_EXT_PHY | INT_EXT_LINK)) {
 		if (mp->phy_addr == -1 || mii_link_ok(&mp->mii)) {
-			int i;
-
 			if (mp->phy_addr != -1) {
 				struct ethtool_cmd cmd;
 
@@ -1714,17 +1729,24 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 				update_pscr(mp, cmd.speed, cmd.duplex);
 			}
 
-			for (i = 0; i < 8; i++)
-				if (mp->txq_mask & (1 << i))
-					txq_enable(mp->txq + i);
-
 			if (!netif_carrier_ok(dev)) {
 				netif_carrier_on(dev);
-				__txq_maybe_wake(mp->txq + mp->txq_primary);
+				netif_wake_queue(dev);
 			}
 		} else if (netif_carrier_ok(dev)) {
+			int i;
+
 			netif_stop_queue(dev);
 			netif_carrier_off(dev);
+
+			for (i = 0; i < 8; i++) {
+				struct tx_queue *txq = mp->txq + i;
+
+				if (mp->txq_mask & (1 << i)) {
+					txq_reclaim(txq, 1);
+					txq_reset_hw_ptr(txq);
+				}
+			}
 		}
 	}
 
@@ -1762,9 +1784,11 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 		 * Enough space again in the primary TX queue for a
 		 * full packet?
 		 */
-		spin_lock(&mp->lock);
-		__txq_maybe_wake(mp->txq + mp->txq_primary);
-		spin_unlock(&mp->lock);
+		if (netif_carrier_ok(dev)) {
+			spin_lock(&mp->lock);
+			__txq_maybe_wake(mp->txq + mp->txq_primary);
+			spin_unlock(&mp->lock);
+		}
 	}
 
 	/*
@@ -1851,16 +1875,11 @@ static void port_start(struct mv643xx_eth_private *mp)
 	tx_set_rate(mp, 1000000000, 16777216);
 	for (i = 0; i < 8; i++) {
 		struct tx_queue *txq = mp->txq + i;
-		int off = TXQ_CURRENT_DESC_PTR(mp->port_num, i);
-		u32 addr;
 
 		if ((mp->txq_mask & (1 << i)) == 0)
 			continue;
 
-		addr = (u32)txq->tx_desc_dma;
-		addr += txq->tx_curr_desc * sizeof(struct tx_desc);
-		wrl(mp, off, addr);
-
+		txq_reset_hw_ptr(txq);
 		txq_set_rate(txq, 1000000000, 16777216);
 		txq_set_fixed_prio_mode(txq);
 	}