author     Senthil Balasubramanian <senthilkumar@atheros.com>    2009-07-14 20:17:09 -0400
committer  John W. Linville <linville@tuxdriver.com>             2009-07-24 15:05:18 -0400
commit     164ace38536849966ffa377b1b1132993a5a375d (patch)
tree       91819800dcfaab603a7d44c148ccf32c13640fd8  /drivers/net/wireless/ath/ath9k/xmit.c
parent     dd8b15b027d96f7097ae9dbaebd822a114a03c34 (diff)
ath9k: Fix TX hang issue with Atheros chipsets
The hardware does not generate TX interrupts in some cases, so work around this by monitoring the TX status periodically and resetting the chip when required.

This behavior of the hardware not generating TX interrupts can be observed through the ath9k debugfs interrupt statistics when heavy traffic is sent from the STA to the AP, and is easiest to reproduce when the STA is transmitting at higher rates: the interrupt statistics then show only RX interrupts being generated while TX is stuck.

TX is therefore monitored through a timer, and the chip is reset only when frames have been queued to the hardware but no TX interrupt has been generated for them even after one second.

Also, do not remove the holding descriptor from the AC queue if it happens to be the only descriptor, and schedule TX aggregation regardless of queue depth, as this improves the scheduling of AMPDUs from the software queue to the hardware queue.

Signed-off-by: Vasanthakumar Thiagarajan <vasanth@atheros.com>
Signed-off-by: Senthil Balasubramanian <senthilkumar@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
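As an illustration of the watchdog pattern described above, here is a minimal, hypothetical sketch of flag-based TX hang detection using a delayed work item. It is not the patch itself (the real changes follow in the diff below) and it assumes a single TX queue; the names my_dev, my_txq, my_dev_init, my_dev_reset, my_tx_completion, tx_poll_work and TX_POLL_INT_MS are placeholders for this sketch only, not ath9k identifiers.

/*
 * Hypothetical sketch only: my_dev, my_txq and friends are placeholder
 * names used to show the hang-detection pattern, not part of ath9k.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define TX_POLL_INT_MS  1000    /* poll once per second */

struct my_txq {
        spinlock_t lock;
        int depth;              /* frames currently queued to the hardware */
        bool tx_inprogress;     /* armed by the poller, cleared on completion */
};

struct my_dev {
        struct my_txq txq;
        struct delayed_work tx_poll_work;
};

static void my_dev_reset(struct my_dev *dev)
{
        /* chip reset elided in this sketch */
}

/*
 * Runs every TX_POLL_INT_MS. If the queue still holds frames and the flag
 * armed on the previous pass was never cleared by the completion path,
 * no TX interrupt was seen for a full interval: treat the queue as hung.
 */
static void tx_poll_work(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev,
                                          tx_poll_work.work);
        bool hung = false;

        spin_lock_bh(&dev->txq.lock);
        if (dev->txq.depth) {
                if (dev->txq.tx_inprogress)
                        hung = true;                    /* flag survived a full interval */
                else
                        dev->txq.tx_inprogress = true;  /* arm for the next pass */
        }
        spin_unlock_bh(&dev->txq.lock);

        if (hung)
                my_dev_reset(dev);

        schedule_delayed_work(&dev->tx_poll_work,
                              msecs_to_jiffies(TX_POLL_INT_MS));
}

/* One-time setup: initialize the queue state and kick off the poller. */
static void my_dev_init(struct my_dev *dev)
{
        spin_lock_init(&dev->txq.lock);
        dev->txq.depth = 0;
        dev->txq.tx_inprogress = false;
        INIT_DELAYED_WORK(&dev->tx_poll_work, tx_poll_work);
        schedule_delayed_work(&dev->tx_poll_work,
                              msecs_to_jiffies(TX_POLL_INT_MS));
}

/* The normal TX completion path clears the flag, proving forward progress. */
static void my_tx_completion(struct my_dev *dev)
{
        spin_lock_bh(&dev->txq.lock);
        dev->txq.depth--;
        dev->txq.tx_inprogress = false;
        spin_unlock_bh(&dev->txq.lock);
}

The patch below wires up the same idea with ath9k's real structures: ath_tx_init() initializes sc->tx_complete_work, ath_tx_complete_poll_work() arms and checks axq_tx_inprogress per hardware queue, and the TX completion and drain paths clear the flag.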
Diffstat (limited to 'drivers/net/wireless/ath/ath9k/xmit.c')
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c  57
1 file changed, 43 insertions(+), 14 deletions(-)
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 5de9878d2c12..a3bc4310a67c 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -857,6 +857,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
                 txq->axq_aggr_depth = 0;
                 txq->axq_totalqueued = 0;
                 txq->axq_linkbuf = NULL;
+                txq->axq_tx_inprogress = false;
                 sc->tx.txqsetup |= 1<<qnum;
         }
         return &sc->tx.txq[qnum];
@@ -1023,6 +1024,10 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
                 ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
         }
 
+        spin_lock_bh(&txq->axq_lock);
+        txq->axq_tx_inprogress = false;
+        spin_unlock_bh(&txq->axq_lock);
+
         /* flush any pending frames if aggregation is enabled */
         if (sc->sc_flags & SC_OP_TXAGGR) {
                 if (!retry_tx) {
@@ -1103,8 +1108,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
                 if (tid->paused)
                         continue;
 
-                if ((txq->axq_depth % 2) == 0)
-                        ath_tx_sched_aggr(sc, txq, tid);
+                ath_tx_sched_aggr(sc, txq, tid);
 
                 /*
                  * add tid to round-robin queue if more frames
@@ -1947,19 +1951,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
                 if (bf->bf_stale) {
                         bf_held = bf;
                         if (list_is_last(&bf_held->list, &txq->axq_q)) {
-                                txq->axq_link = NULL;
-                                txq->axq_linkbuf = NULL;
                                 spin_unlock_bh(&txq->axq_lock);
-
-                                /*
-                                 * The holding descriptor is the last
-                                 * descriptor in queue. It's safe to remove
-                                 * the last holding descriptor in BH context.
-                                 */
-                                spin_lock_bh(&sc->tx.txbuflock);
-                                list_move_tail(&bf_held->list, &sc->tx.txbuf);
-                                spin_unlock_bh(&sc->tx.txbuflock);
-
                                 break;
                         } else {
                                 bf = list_entry(bf_held->list.next,
@@ -1996,6 +1988,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
                         txq->axq_aggr_depth--;
 
                 txok = (ds->ds_txstat.ts_status == 0);
+                txq->axq_tx_inprogress = false;
                 spin_unlock_bh(&txq->axq_lock);
 
                 if (bf_held) {
@@ -2029,6 +2022,40 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
         }
 }
 
+void ath_tx_complete_poll_work(struct work_struct *work)
+{
+        struct ath_softc *sc = container_of(work, struct ath_softc,
+                        tx_complete_work.work);
+        struct ath_txq *txq;
+        int i;
+        bool needreset = false;
+
+        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+                if (ATH_TXQ_SETUP(sc, i)) {
+                        txq = &sc->tx.txq[i];
+                        spin_lock_bh(&txq->axq_lock);
+                        if (txq->axq_depth) {
+                                if (txq->axq_tx_inprogress) {
+                                        needreset = true;
+                                        spin_unlock_bh(&txq->axq_lock);
+                                        break;
+                                } else {
+                                        txq->axq_tx_inprogress = true;
+                                }
+                        }
+                        spin_unlock_bh(&txq->axq_lock);
+                }
+
+        if (needreset) {
+                DPRINTF(sc, ATH_DBG_RESET, "tx hung, resetting the chip\n");
+                ath_reset(sc, false);
+        }
+
+        queue_delayed_work(sc->hw->workqueue, &sc->tx_complete_work,
+                        msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
+}
+
+
 
 void ath_tx_tasklet(struct ath_softc *sc)
 {
@@ -2069,6 +2096,8 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
                 goto err;
         }
 
+        INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
+
 err:
         if (error != 0)
                 ath_tx_cleanup(sc);
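Note that this xmit.c-only view shows the work item being initialized in ath_tx_init() and rescheduled from the poll handler itself, but not where the first poll is queued or where the work is cancelled on driver stop; those pieces, like the declarations of tx_complete_work, axq_tx_inprogress and ATH_TX_COMPLETE_POLL_INT, necessarily live outside this file. A teardown would typically look like the following sketch (hypothetical placement, not part of this diff):

        /* On driver stop, outside xmit.c: stop the TX hang poller. */
        cancel_delayed_work_sync(&sc->tx_complete_work);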