author		Bruno Randolf <br1@einfach.org>	2010-09-16 22:37:07 -0400
committer	John W. Linville <linville@tuxdriver.com>	2010-09-21 11:05:15 -0400
commit		234132960dcf8ebd9d424d92a4dfb0e57fa63a17 (patch)
tree		8cb08ec25b181921f9912f72cde3762549a3c520 /drivers/net/wireless/ath/ath5k
parent		923e5b3d3d773b9956b943ac64f782d5a127bdea (diff)
ath5k: Keep last descriptor in queue
If we return a TX descriptor to the pool of available descriptors while a
queue's TXDP still points to it, we could potentially run into all sorts of
trouble.
It has been suggested that there is hardware which can set the descriptor's
done bit before it reads ds_link and moves on to the next descriptor. While the
documentation says this is not true for newer chipsets (the descriptor contents
are copied to some internal memory), we don't know about older hardware.
To be safe, we always keep the last descriptor in the queue and avoid dangling
TXDP pointers. Unfortunately this does not fully resolve the problem - queues
still get stuck!
This is similar to what ath9k does.
Signed-off-by: Bruno Randolf <br1@einfach.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/ath/ath5k')
-rw-r--r--	drivers/net/wireless/ath/ath5k/base.c	64
1 file changed, 32 insertions(+), 32 deletions(-)
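The core of the change, condensed from the first hunk below: frame completion
and descriptor recycling are split apart, and a descriptor is handed back to
the free pool only once the queue's TXDP register no longer points at it, so
the newest descriptor always stays on the queue. A minimal sketch of the
reworked completion loop (identifiers as in the patch; the skb completion
path inside the bf->skb check is elided):

	spin_lock(&txq->lock);
	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
		txq->txq_poll_mark = false;

		if (bf->skb != NULL) {
			/* process the descriptor; on success, unmap and
			 * complete the frame and set bf->skb = NULL so
			 * this entry is skipped on the next pass */
		}

		/* recycle the descriptor only after TXDP has moved past
		 * it - the last descriptor thus remains queued and TXDP
		 * can never point at a buffer we have already given back */
		if (ath5k_hw_get_txdp(sc->ah, txq->qnum) != bf->daddr) {
			spin_lock(&sc->txbuflock);
			list_move_tail(&bf->list, &sc->txbuf);
			sc->txbuf_len++;
			txq->txq_len--;
			spin_unlock(&sc->txbuflock);
		}
	}
	spin_unlock(&txq->lock);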
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index cae9fe4676e7..3e5ed6c0c808 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1586,44 +1586,44 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
 
 	spin_lock(&txq->lock);
 	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
-		ds = bf->desc;
+
+		txq->txq_poll_mark = false;
+
+		/* skb might already have been processed last time. */
+		if (bf->skb != NULL) {
+			ds = bf->desc;
+
+			ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
+			if (unlikely(ret == -EINPROGRESS))
+				break;
+			else if (unlikely(ret)) {
+				ATH5K_ERR(sc,
+					"error %d while processing "
+					"queue %u\n", ret, txq->qnum);
+				break;
+			}
+
+			skb = bf->skb;
+			bf->skb = NULL;
+			pci_unmap_single(sc->pdev, bf->skbaddr, skb->len,
+					PCI_DMA_TODEVICE);
+			ath5k_tx_frame_completed(sc, skb, &ts);
+		}
 
 		/*
 		 * It's possible that the hardware can say the buffer is
 		 * completed when it hasn't yet loaded the ds_link from
-		 * host memory and moved on. If there are more TX
-		 * descriptors in the queue, wait for TXDP to change
-		 * before processing this one.
+		 * host memory and moved on.
+		 * Always keep the last descriptor to avoid HW races...
 		 */
-		if (ath5k_hw_get_txdp(sc->ah, txq->qnum) == bf->daddr &&
-		    !list_is_last(&bf->list, &txq->q))
-			break;
-		ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
-		if (unlikely(ret == -EINPROGRESS))
-			break;
-		else if (unlikely(ret)) {
-			ATH5K_ERR(sc, "error %d while processing queue %u\n",
-				ret, txq->qnum);
-			break;
+		if (ath5k_hw_get_txdp(sc->ah, txq->qnum) != bf->daddr) {
+			spin_lock(&sc->txbuflock);
+			list_move_tail(&bf->list, &sc->txbuf);
+			sc->txbuf_len++;
+			txq->txq_len--;
+			spin_unlock(&sc->txbuflock);
 		}
-
-		skb = bf->skb;
-		bf->skb = NULL;
-		pci_unmap_single(sc->pdev, bf->skbaddr, skb->len,
-				PCI_DMA_TODEVICE);
-
-		ath5k_tx_frame_completed(sc, skb, &ts);
-
-		spin_lock(&sc->txbuflock);
-		list_move_tail(&bf->list, &sc->txbuf);
-		sc->txbuf_len++;
-		txq->txq_len--;
-		spin_unlock(&sc->txbuflock);
-
-		txq->txq_poll_mark = false;
 	}
-	if (likely(list_empty(&txq->q)))
-		txq->link = NULL;
 	spin_unlock(&txq->lock);
 	if (txq->txq_len < ATH5K_TXQ_LEN_LOW)
 		ieee80211_wake_queue(sc->hw, txq->qnum);
@@ -2188,7 +2188,7 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
 		if (sc->txqs[i].setup) {
 			txq = &sc->txqs[i];
 			spin_lock_bh(&txq->lock);
-			if (txq->txq_len > 0) {
+			if (txq->txq_len > 1) {
 				if (txq->txq_poll_mark) {
 					ATH5K_DBG(sc, ATH5K_DEBUG_XMIT,
 						"TX queue stuck %d\n",
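A follow-on consequence appears in the second hunk: since one completed
descriptor is now deliberately left on every queue, a queue of length one is
idle rather than stuck, so the stuck-queue watchdog only fires above that.
A sketch of the adjusted check - condensed from the hunk above, with two
labeled assumptions: the queue-number argument to ATH5K_DBG (the hunk is cut
off before it) and the marking pass that arms txq_poll_mark between polls:

	spin_lock_bh(&txq->lock);
	if (txq->txq_len > 1) {		/* was: txq->txq_len > 0 */
		if (txq->txq_poll_mark) {
			/* the mark survived a full poll interval, so the
			 * completion loop made no progress: queue is stuck */
			ATH5K_DBG(sc, ATH5K_DEBUG_XMIT,
				"TX queue stuck %d\n",
				txq->qnum);	/* assumed argument */
		} else {
			/* assumed marking pass: cleared again by
			 * ath5k_tx_processq() whenever it runs */
			txq->txq_poll_mark = true;
		}
	}
	spin_unlock_bh(&txq->lock);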