author	Felix Fietkau <nbd@openwrt.org>	2010-10-26 20:15:05 -0400
committer	John W. Linville <linville@tuxdriver.com>	2010-10-27 15:46:50 -0400
commit	e609e2ea2cdb3448e7849703179cd792a28dcc55 (patch)
tree	d28901d24ff6ff619aaa0191e99b6bee18294cac
parent	5e848f789d60000d39d9a5f26ab02dbdd963f6cd (diff)
ath9k: fix tx aggregation flush on AR9003
Completing aggregate frames can lead to new buffers being pushed into the
tid queues due to software retransmission. When the tx queues are being
drained, all pending aggregates must be completed before the tid queues get
drained; otherwise buffers might be leaked.

Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Cc: stable@kernel.org
Signed-off-by: John W. Linville <linville@tuxdriver.com>
-rw-r--r--	drivers/net/wireless/ath/ath9k/xmit.c	| 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
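
Why the ordering matters: completing an aggregate can software-retransmit some of its subframes, which requeues buffers onto the per-tid pending queue; if that queue is drained before the completions run, the requeued buffers are never freed. Below is a minimal standalone sketch of that ordering problem; none of it is ath9k code, and all names in it (struct queues, complete_hw_frames, drain_tid_pending) are illustrative stand-ins only.

#include <stdio.h>

/*
 * Sketch only: 'hw_fifo' counts frames still owned by the hardware queue,
 * 'tid_pending' counts buffers sitting on the per-tid software queue.
 */
struct queues {
	int hw_fifo;
	int tid_pending;
};

/* Completing aggregates may software-retransmit subframes, which pushes
 * new buffers back onto the tid's pending queue. */
static void complete_hw_frames(struct queues *q)
{
	int retried = q->hw_fifo / 2;	/* pretend half the subframes get retried */

	q->tid_pending += retried;
	q->hw_fifo = 0;
}

/* Frees whatever is on the tid's pending queue at this moment. */
static void drain_tid_pending(struct queues *q)
{
	q->tid_pending = 0;
}

int main(void)
{
	struct queues old_order = { 4, 2 };
	struct queues new_order = { 4, 2 };

	/* Pre-patch order: the tid queue is drained first, then completing
	 * the hardware frames refills it, and those buffers are leaked. */
	drain_tid_pending(&old_order);
	complete_hw_frames(&old_order);

	/* Post-patch order: complete all pending aggregates first, then
	 * drain the tid queue, so nothing is left behind. */
	complete_hw_frames(&new_order);
	drain_tid_pending(&new_order);

	printf("leaked with old order: %d\n", old_order.tid_pending);	/* 2 */
	printf("leaked with new order: %d\n", new_order.tid_pending);	/* 0 */
	return 0;
}

The patch below applies that ordering to the real driver: the SC_OP_TXAGGR flush block is moved from before the EDMA txq_fifo_pending completion loop to after it.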
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 30ef2dfc1ed2..f2ade2402ce2 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1089,15 +1089,6 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
 	txq->axq_tx_inprogress = false;
 	spin_unlock_bh(&txq->axq_lock);
 
-	/* flush any pending frames if aggregation is enabled */
-	if (sc->sc_flags & SC_OP_TXAGGR) {
-		if (!retry_tx) {
-			spin_lock_bh(&txq->axq_lock);
-			ath_txq_drain_pending_buffers(sc, txq);
-			spin_unlock_bh(&txq->axq_lock);
-		}
-	}
-
 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
 		spin_lock_bh(&txq->axq_lock);
 		while (!list_empty(&txq->txq_fifo_pending)) {
@@ -1118,6 +1109,15 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
 		}
 		spin_unlock_bh(&txq->axq_lock);
 	}
+
+	/* flush any pending frames if aggregation is enabled */
+	if (sc->sc_flags & SC_OP_TXAGGR) {
+		if (!retry_tx) {
+			spin_lock_bh(&txq->axq_lock);
+			ath_txq_drain_pending_buffers(sc, txq);
+			spin_unlock_bh(&txq->axq_lock);
+		}
+	}
 }
 
 void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)