author		Lennert Buytenhek <buytenh@wantstofly.org>	2008-07-14 16:56:55 -0400
committer	Lennert Buytenhek <buytenh@marvell.com>	2008-07-24 00:22:42 -0400
commit		8fa89bf5de066b11190ac804903021700c2b1185 (patch)
tree		78bbb31e5aaddf9c9ad214705bfbb08b85576f36 /drivers
parent		c010b2f76c3032e48097a6eef291d8593d5d79a6 (diff)
mv643xx_eth: fix TX hang erratum workaround
The previously merged TX hang erratum workaround ("mv643xx_eth: work around
TX hang hardware issue") assumes that TX_END interrupts are delivered
simultaneously with or after their corresponding TX interrupts, but this is
not always true in practice.

In particular, it appears that TX_END interrupts are issued as soon as the
descriptor fetch returns an invalid descriptor, which may happen before
earlier descriptors have been fully transmitted and written back to memory
as being done.

This hardware behavior can lead to a situation where the driver mistakenly
concludes that the MAC has given up transmitting before noticing the packets
it is in fact still working on, and re-kicks the transmit queue. That only
makes the MAC re-fetch the invalid head descriptor and generate another
TX_END interrupt, and so on, until the packets in the pipe finally finish
transmitting and have their descriptors written back to memory, which then
breaks the loop.

Fix this by having the erratum workaround not check the number of unfinished
descriptors, but instead compare the software's idea of what the head
descriptor pointer should be with the hardware's head descriptor pointer
(which is updated under the same conditions as the TX_END interrupt is
generated, i.e. possibly before all previous descriptors have been
transmitted and written back).

Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
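The core of the check described above can be condensed into a short sketch:
re-enable a TX queue only when the hardware's current descriptor pointer has
not caught up with the bus address of the descriptor software expects to be
current. This is a minimal, hedged illustration using names taken from the
patch below (rdl, TXQ_CURRENT_DESC_PTR, txq_enable, tx_desc_dma,
tx_curr_desc); the standalone helper and its name txq_kick_if_stuck are
hypothetical, added only to keep the example self-contained.

	/* Illustrative sketch: re-kick TX queue i only if the hardware's
	 * descriptor pointer differs from where software expects it to be. */
	static void txq_kick_if_stuck(struct mv643xx_eth_private *mp, int i)
	{
		struct tx_queue *txq = mp->txq + i;
		u32 hw_desc_ptr;
		u32 expected_ptr;

		/* hardware's current TX descriptor pointer for queue i */
		hw_desc_ptr = rdl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, i));

		/* bus address of the descriptor software considers current */
		expected_ptr = (u32)txq->tx_desc_dma +
				txq->tx_curr_desc * sizeof(struct tx_desc);

		/* the MAC stopped short of the queue tail: re-enable it */
		if (hw_desc_ptr != expected_ptr)
			txq_enable(txq);
	}

In the actual patch this logic sits inline in mv643xx_eth_irq(), under
mp->lock and gated on the per-queue TX_END cause bit (INT_TX_END_0 << i),
rather than in a separate helper.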
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/net/mv643xx_eth.c	41
1 file changed, 29 insertions(+), 12 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 8a97a0066a88..910920e21259 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -96,6 +96,7 @@ static char mv643xx_eth_driver_version[] = "1.1";
 #define TX_BW_MTU(p)		(0x0458 + ((p) << 10))
 #define TX_BW_BURST(p)		(0x045c + ((p) << 10))
 #define INT_CAUSE(p)		(0x0460 + ((p) << 10))
+#define INT_TX_END_0		0x00080000
 #define INT_TX_END		0x07f80000
 #define INT_RX			0x0007fbfc
 #define INT_EXT		0x00000002
@@ -706,6 +707,7 @@ static inline __be16 sum16_as_be(__sum16 sum)
 
 static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
 {
+	struct mv643xx_eth_private *mp = txq_to_mp(txq);
 	int nr_frags = skb_shinfo(skb)->nr_frags;
 	int tx_index;
 	struct tx_desc *desc;
@@ -759,6 +761,10 @@ static void txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
 	wmb();
 	desc->cmd_sts = cmd_sts;
 
+	/* clear TX_END interrupt status */
+	wrl(mp, INT_CAUSE(mp->port_num), ~(INT_TX_END_0 << txq->index));
+	rdl(mp, INT_CAUSE(mp->port_num));
+
 	/* ensure all descriptors are written before poking hardware */
 	wmb();
 	txq_enable(txq);
@@ -1684,7 +1690,6 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	u32 int_cause;
 	u32 int_cause_ext;
-	u32 txq_active;
 
 	int_cause = rdl(mp, INT_CAUSE(mp->port_num)) &
 			(INT_TX_END | INT_RX | INT_EXT);
@@ -1743,8 +1748,6 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 	}
 #endif
 
-	txq_active = rdl(mp, TXQ_COMMAND(mp->port_num));
-
 	/*
 	 * TxBuffer or TxError set for any of the 8 queues?
 	 */
@@ -1754,6 +1757,14 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 		for (i = 0; i < 8; i++)
 			if (mp->txq_mask & (1 << i))
 				txq_reclaim(mp->txq + i, 0);
+
+		/*
+		 * Enough space again in the primary TX queue for a
+		 * full packet?
+		 */
+		spin_lock(&mp->lock);
+		__txq_maybe_wake(mp->txq + mp->txq_primary);
+		spin_unlock(&mp->lock);
 	}
 
 	/*
@@ -1763,19 +1774,25 @@ static irqreturn_t mv643xx_eth_irq(int irq, void *dev_id)
 		int i;
 
 		wrl(mp, INT_CAUSE(mp->port_num), ~(int_cause & INT_TX_END));
+
+		spin_lock(&mp->lock);
 		for (i = 0; i < 8; i++) {
 			struct tx_queue *txq = mp->txq + i;
-			if (txq->tx_desc_count && !((txq_active >> i) & 1))
+			u32 hw_desc_ptr;
+			u32 expected_ptr;
+
+			if ((int_cause & (INT_TX_END_0 << i)) == 0)
+				continue;
+
+			hw_desc_ptr =
+				rdl(mp, TXQ_CURRENT_DESC_PTR(mp->port_num, i));
+			expected_ptr = (u32)txq->tx_desc_dma +
+				txq->tx_curr_desc * sizeof(struct tx_desc);
+
+			if (hw_desc_ptr != expected_ptr)
 				txq_enable(txq);
 		}
-	}
-
-	/*
-	 * Enough space again in the primary TX queue for a full packet?
-	 */
-	if (int_cause_ext & INT_EXT_TX) {
-		struct tx_queue *txq = mp->txq + mp->txq_primary;
-		__txq_maybe_wake(txq);
+		spin_unlock(&mp->lock);
 	}
 
 	return IRQ_HANDLED;