aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/wireless
diff options
context:
space:
mode:
author    Felix Fietkau <nbd@openwrt.org>        2011-03-11 15:38:19 -0500
committer John W. Linville <linville@tuxdriver.com>  2011-03-14 14:46:58 -0400
commit86271e460a66003dc1f4cbfd845adafb790b7587 (patch)
treeaac8bd62df92a0a8975d0d1604cd9bb62a7c5f5a /drivers/net/wireless
parent0d51cccc2436fa4d978efc3764552779e163d840 (diff)
ath9k: fix the .flush driver op implementation
This patch simplifies the flush op and reuses ath_drain_all_txq for flushing out pending frames if necessary. It also uses a global timeout of 200ms instead of the per-queue 60ms timeout.

Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless')
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c56
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c28
3 files changed, 34 insertions(+), 51 deletions(-)
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index c718ab512a97..099bd4183ad0 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -189,7 +189,6 @@ struct ath_txq {
189 u32 axq_ampdu_depth; 189 u32 axq_ampdu_depth;
190 bool stopped; 190 bool stopped;
191 bool axq_tx_inprogress; 191 bool axq_tx_inprogress;
192 bool txq_flush_inprogress;
193 struct list_head axq_acq; 192 struct list_head axq_acq;
194 struct list_head txq_fifo[ATH_TXFIFO_DEPTH]; 193 struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
195 struct list_head txq_fifo_pending; 194 struct list_head txq_fifo_pending;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 2e228aada1a9..115f162c617a 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -2128,56 +2128,42 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
2128 2128
2129static void ath9k_flush(struct ieee80211_hw *hw, bool drop) 2129static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
2130{ 2130{
2131#define ATH_FLUSH_TIMEOUT 60 /* ms */
2132 struct ath_softc *sc = hw->priv; 2131 struct ath_softc *sc = hw->priv;
2133 struct ath_txq *txq = NULL; 2132 int timeout = 200; /* ms */
2134 struct ath_hw *ah = sc->sc_ah; 2133 int i, j;
2135 struct ath_common *common = ath9k_hw_common(ah);
2136 int i, j, npend = 0;
2137 2134
2135 ath9k_ps_wakeup(sc);
2138 mutex_lock(&sc->mutex); 2136 mutex_lock(&sc->mutex);
2139 2137
2140 cancel_delayed_work_sync(&sc->tx_complete_work); 2138 cancel_delayed_work_sync(&sc->tx_complete_work);
2141 2139
2142 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 2140 if (drop)
2143 if (!ATH_TXQ_SETUP(sc, i)) 2141 timeout = 1;
2144 continue;
2145 txq = &sc->tx.txq[i];
2146 2142
2147 if (!drop) { 2143 for (j = 0; j < timeout; j++) {
2148 for (j = 0; j < ATH_FLUSH_TIMEOUT; j++) { 2144 int npend = 0;
2149 if (!ath9k_has_pending_frames(sc, txq)) 2145
2150 break; 2146 if (j)
2151 usleep_range(1000, 2000); 2147 usleep_range(1000, 2000);
2152 }
2153 }
2154 2148
2155 if (drop || ath9k_has_pending_frames(sc, txq)) { 2149 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
2156 ath_dbg(common, ATH_DBG_QUEUE, "Drop frames from hw queue:%d\n", 2150 if (!ATH_TXQ_SETUP(sc, i))
2157 txq->axq_qnum); 2151 continue;
2158 spin_lock_bh(&txq->axq_lock); 2152
2159 txq->txq_flush_inprogress = true; 2153 npend += ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
2160 spin_unlock_bh(&txq->axq_lock);
2161
2162 ath9k_ps_wakeup(sc);
2163 ath9k_hw_stoptxdma(ah, txq->axq_qnum);
2164 npend = ath9k_hw_numtxpending(ah, txq->axq_qnum);
2165 ath9k_ps_restore(sc);
2166 if (npend)
2167 break;
2168
2169 ath_draintxq(sc, txq, false);
2170 txq->txq_flush_inprogress = false;
2171 } 2154 }
2155
2156 if (!npend)
2157 goto out;
2172 } 2158 }
2173 2159
2174 if (npend) { 2160 if (!ath_drain_all_txq(sc, false))
2175 ath_reset(sc, false); 2161 ath_reset(sc, false);
2176 txq->txq_flush_inprogress = false;
2177 }
2178 2162
2163out:
2179 ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0); 2164 ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
2180 mutex_unlock(&sc->mutex); 2165 mutex_unlock(&sc->mutex);
2166 ath9k_ps_restore(sc);
2181} 2167}
2182 2168
2183struct ieee80211_ops ath9k_ops = { 2169struct ieee80211_ops ath9k_ops = {
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index bb1d29e90eb1..f977f804c68a 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -2012,8 +2012,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2012 spin_lock_bh(&txq->axq_lock); 2012 spin_lock_bh(&txq->axq_lock);
2013 if (list_empty(&txq->axq_q)) { 2013 if (list_empty(&txq->axq_q)) {
2014 txq->axq_link = NULL; 2014 txq->axq_link = NULL;
2015 if (sc->sc_flags & SC_OP_TXAGGR && 2015 if (sc->sc_flags & SC_OP_TXAGGR)
2016 !txq->txq_flush_inprogress)
2017 ath_txq_schedule(sc, txq); 2016 ath_txq_schedule(sc, txq);
2018 spin_unlock_bh(&txq->axq_lock); 2017 spin_unlock_bh(&txq->axq_lock);
2019 break; 2018 break;
@@ -2094,7 +2093,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2094 2093
2095 spin_lock_bh(&txq->axq_lock); 2094 spin_lock_bh(&txq->axq_lock);
2096 2095
2097 if (sc->sc_flags & SC_OP_TXAGGR && !txq->txq_flush_inprogress) 2096 if (sc->sc_flags & SC_OP_TXAGGR)
2098 ath_txq_schedule(sc, txq); 2097 ath_txq_schedule(sc, txq);
2099 spin_unlock_bh(&txq->axq_lock); 2098 spin_unlock_bh(&txq->axq_lock);
2100 } 2099 }
@@ -2265,18 +2264,17 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2265 2264
2266 spin_lock_bh(&txq->axq_lock); 2265 spin_lock_bh(&txq->axq_lock);
2267 2266
2268 if (!txq->txq_flush_inprogress) { 2267 if (!list_empty(&txq->txq_fifo_pending)) {
2269 if (!list_empty(&txq->txq_fifo_pending)) { 2268 INIT_LIST_HEAD(&bf_head);
2270 INIT_LIST_HEAD(&bf_head); 2269 bf = list_first_entry(&txq->txq_fifo_pending,
2271 bf = list_first_entry(&txq->txq_fifo_pending, 2270 struct ath_buf, list);
2272 struct ath_buf, list); 2271 list_cut_position(&bf_head,
2273 list_cut_position(&bf_head, 2272 &txq->txq_fifo_pending,
2274 &txq->txq_fifo_pending, 2273 &bf->bf_lastbf->list);
2275 &bf->bf_lastbf->list); 2274 ath_tx_txqaddbuf(sc, txq, &bf_head);
2276 ath_tx_txqaddbuf(sc, txq, &bf_head); 2275 } else if (sc->sc_flags & SC_OP_TXAGGR)
2277 } else if (sc->sc_flags & SC_OP_TXAGGR) 2276 ath_txq_schedule(sc, txq);
2278 ath_txq_schedule(sc, txq); 2277
2279 }
2280 spin_unlock_bh(&txq->axq_lock); 2278 spin_unlock_bh(&txq->axq_lock);
2281 } 2279 }
2282} 2280}