Diffstat (limited to 'drivers/net/wireless/ath/ath9k/xmit.c')
-rw-r--r--	drivers/net/wireless/ath/ath9k/xmit.c	102
1 file changed, 45 insertions(+), 57 deletions(-)
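
The recurring theme in the hunks below is replacing index-based queue lookups with direct pointers: each access category keeps a struct ath_txq pointer instead of a qnum index into sc->tx.hwq_map[], and the per-device sc->tx.pending_frames[] array becomes a pending_frames counter on the queue itself. A rough before/after sketch of the affected fields — abbreviated, renamed stand-ins, since the real definitions live in ath9k.h, which this diff does not touch:

/* Sketch only: renamed stand-ins for the ath9k structures this patch
 * reshapes; the real definitions are in ath9k.h. */

struct sketch_txq {
	unsigned int axq_qnum;
	int pending_frames;	/* was sc->tx.pending_frames[q] */
	int stopped;
	/* ... */
};

/* Before: the AC stored an index, resolved through sc->tx.hwq_map[]. */
struct sketch_atx_ac_before {
	int qnum;
	/* ... */
};

/* After: the AC carries the queue pointer directly; it is filled in
 * once per station, in ath_tx_node_init(), from sc->tx.txq_map[acno]. */
struct sketch_atx_ac_after {
	struct sketch_txq *txq;
	/* ... */
};

The payoff is visible in functions such as ath_tx_node_cleanup() below, which no longer needs the ATH_TXQ_SETUP() index-validity check before dereferencing the queue.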
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index f2ade2402ce2..6380bbd82d49 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -124,7 +124,7 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
 
 static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
+	struct ath_txq *txq = tid->ac->txq;
 
 	WARN_ON(!tid->paused);
 
@@ -142,7 +142,7 @@ unlock:
 
 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
+	struct ath_txq *txq = tid->ac->txq;
 	struct ath_buf *bf;
 	struct list_head bf_head;
 	struct ath_tx_status ts;
@@ -817,7 +817,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
 {
 	struct ath_node *an = (struct ath_node *)sta->drv_priv;
 	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
-	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
+	struct ath_txq *txq = txtid->ac->txq;
 
 	if (txtid->state & AGGR_CLEANUP)
 		return;
@@ -888,10 +888,16 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath9k_tx_queue_info qi;
+	static const int subtype_txq_to_hwq[] = {
+		[WME_AC_BE] = ATH_TXQ_AC_BE,
+		[WME_AC_BK] = ATH_TXQ_AC_BK,
+		[WME_AC_VI] = ATH_TXQ_AC_VI,
+		[WME_AC_VO] = ATH_TXQ_AC_VO,
+	};
 	int qnum, i;
 
 	memset(&qi, 0, sizeof(qi));
-	qi.tqi_subtype = subtype;
+	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
 	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
 	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
 	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
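
The subtype_txq_to_hwq[] table added above maps mac80211 access categories to hardware queue subtypes with C99 designated initializers, so the mapping stays correct no matter how the WME_AC_* enumerators are ordered. A minimal, self-contained sketch of the same idiom — the enumerator values here are stand-ins, not the real ath9k/mac80211 definitions:

/* Stand-in enumerators: the real WME_AC_* and ATH_TXQ_AC_* values live
 * in the ath9k headers and need not share an order -- which is exactly
 * what the designated initializers make safe. */
#include <stdio.h>

enum wme_ac { WME_AC_VO, WME_AC_VI, WME_AC_BE, WME_AC_BK, WME_NUM_AC };
enum ath_txq_ac { ATH_TXQ_AC_BE, ATH_TXQ_AC_BK, ATH_TXQ_AC_VI, ATH_TXQ_AC_VO };

static const int subtype_txq_to_hwq[WME_NUM_AC] = {
	[WME_AC_BE] = ATH_TXQ_AC_BE,
	[WME_AC_BK] = ATH_TXQ_AC_BK,
	[WME_AC_VI] = ATH_TXQ_AC_VI,
	[WME_AC_VO] = ATH_TXQ_AC_VO,
};

int main(void)
{
	/* mac80211 access category in, hardware queue subtype out */
	for (int ac = 0; ac < WME_NUM_AC; ac++)
		printf("AC %d -> HW queue subtype %d\n", ac, subtype_txq_to_hwq[ac]);
	return 0;
}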
@@ -940,7 +946,6 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
 	if (!ATH_TXQ_SETUP(sc, qnum)) {
 		struct ath_txq *txq = &sc->tx.txq[qnum];
 
-		txq->axq_class = subtype;
 		txq->axq_qnum = qnum;
 		txq->axq_link = NULL;
 		INIT_LIST_HEAD(&txq->axq_q);
@@ -1148,13 +1153,11 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
 		ath_print(common, ATH_DBG_FATAL,
 			  "Failed to stop TX DMA. Resetting hardware!\n");
 
-		spin_lock_bh(&sc->sc_resetlock);
 		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
 		if (r)
 			ath_print(common, ATH_DBG_FATAL,
 				  "Unable to reset hardware; reset status %d\n",
 				  r);
-		spin_unlock_bh(&sc->sc_resetlock);
 	}
 
 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
@@ -1212,24 +1215,6 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 	}
 }
 
-int ath_tx_setup(struct ath_softc *sc, int haltype)
-{
-	struct ath_txq *txq;
-
-	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
-		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
-			  "HAL AC %u out of range, max %zu!\n",
-			  haltype, ARRAY_SIZE(sc->tx.hwq_map));
-		return 0;
-	}
-	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
-	if (txq != NULL) {
-		sc->tx.hwq_map[haltype] = txq->axq_qnum;
-		return 1;
-	} else
-		return 0;
-}
-
 /***********/
 /* TX, DMA */
 /***********/
@@ -1710,6 +1695,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
 		goto tx_done;
 	}
 
+	WARN_ON(tid->ac->txq != txctl->txq);
 	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
 		/*
 		 * Try aggregation if it's a unicast data frame
@@ -1749,6 +1735,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 		return -1;
 	}
 
+	q = skb_get_queue_mapping(skb);
 	r = ath_tx_setup_buffer(hw, bf, skb, txctl);
 	if (unlikely(r)) {
 		ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
@@ -1758,8 +1745,9 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 	 * we will at least have to run TX completionon one buffer
 	 * on the queue */
 	spin_lock_bh(&txq->axq_lock);
-	if (!txq->stopped && txq->axq_depth > 1) {
-		ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
+	if (txq == sc->tx.txq_map[q] && !txq->stopped &&
+	    txq->axq_depth > 1) {
+		ath_mac80211_stop_queue(sc, q);
 		txq->stopped = 1;
 	}
 	spin_unlock_bh(&txq->axq_lock);
@@ -1769,13 +1757,10 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
 		return r;
 	}
 
-	q = skb_get_queue_mapping(skb);
-	if (q >= 4)
-		q = 0;
-
 	spin_lock_bh(&txq->axq_lock);
-	if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
-		ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
+	if (txq == sc->tx.txq_map[q] &&
+	    ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
+		ath_mac80211_stop_queue(sc, q);
 		txq->stopped = 1;
 	}
 	spin_unlock_bh(&txq->axq_lock);
@@ -1843,7 +1828,8 @@ exit:
 /*****************/
 
 static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
-			    struct ath_wiphy *aphy, int tx_flags)
+			    struct ath_wiphy *aphy, int tx_flags,
+			    struct ath_txq *txq)
 {
 	struct ieee80211_hw *hw = sc->hw;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -1890,11 +1876,12 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
 		ath9k_tx_status(hw, skb);
 	else {
 		q = skb_get_queue_mapping(skb);
-		if (q >= 4)
-			q = 0;
-
-		if (--sc->tx.pending_frames[q] < 0)
-			sc->tx.pending_frames[q] = 0;
+		if (txq == sc->tx.txq_map[q]) {
+			spin_lock_bh(&txq->axq_lock);
+			if (WARN_ON(--txq->pending_frames < 0))
+				txq->pending_frames = 0;
+			spin_unlock_bh(&txq->axq_lock);
+		}
 
 		ieee80211_tx_status(hw, skb);
 	}
@@ -1929,8 +1916,8 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
 		else
 			complete(&sc->paprd_complete);
 	} else {
-		ath_debug_stat_tx(sc, txq, bf, ts);
-		ath_tx_complete(sc, skb, bf->aphy, tx_flags);
+		ath_debug_stat_tx(sc, bf, ts);
+		ath_tx_complete(sc, skb, bf->aphy, tx_flags, txq);
 	}
 	/* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
 	 * accidentally reference it later.
@@ -2020,16 +2007,13 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
 	tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
 }
 
-static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
+static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum)
 {
-	int qnum;
-
-	qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
-	if (qnum == -1)
-		return;
+	struct ath_txq *txq;
 
+	txq = sc->tx.txq_map[qnum];
 	spin_lock_bh(&txq->axq_lock);
-	if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
+	if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
 		if (ath_mac80211_start_queue(sc, qnum))
 			txq->stopped = 0;
 	}
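
The stop/wake pairing above is simple per-queue backpressure: ath_tx_start() stops the matching mac80211 queue once txq->pending_frames crosses ATH_MAX_QDEPTH, the completion path drops the counter and wakes the queue once it drains below the threshold, and the txq == sc->tx.txq_map[q] checks make sure only the queue that owns the mac80211 mapping participates. A minimal userspace model of that counter-and-threshold pattern, with a mutex standing in for the axq_lock spinlock (all names here are illustrative, not ath9k symbols):

/* Illustrative model of the pending_frames backpressure in this patch;
 * the mutex and all identifiers are stand-ins. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_QDEPTH 128		/* plays the role of ATH_MAX_QDEPTH */

struct sketch_txq {
	pthread_mutex_t lock;	/* plays the role of axq_lock */
	int pending_frames;
	bool stopped;
};

/* Enqueue path: stop the software queue when it backs up. */
static void sketch_tx_start(struct sketch_txq *txq)
{
	pthread_mutex_lock(&txq->lock);
	if (++txq->pending_frames > MAX_QDEPTH && !txq->stopped) {
		printf("queue stopped at depth %d\n", txq->pending_frames);
		txq->stopped = true;
	}
	pthread_mutex_unlock(&txq->lock);
}

/* Completion path: drop the counter, then wake below the threshold. */
static void sketch_tx_complete(struct sketch_txq *txq)
{
	pthread_mutex_lock(&txq->lock);
	if (--txq->pending_frames < 0)	/* the WARN_ON() case in the patch */
		txq->pending_frames = 0;
	if (txq->stopped && txq->pending_frames < MAX_QDEPTH) {
		printf("queue woken at depth %d\n", txq->pending_frames);
		txq->stopped = false;
	}
	pthread_mutex_unlock(&txq->lock);
}

int main(void)
{
	struct sketch_txq txq = { .lock = PTHREAD_MUTEX_INITIALIZER };

	for (int i = 0; i < 130; i++)	/* overfill: stops at depth 129 */
		sketch_tx_start(&txq);
	for (int i = 0; i < 130; i++)	/* drain: wakes at depth 127 */
		sketch_tx_complete(&txq);
	return 0;
}

In the patch itself the two halves live in different functions (ath_tx_start() versus ath_wake_mac80211_queue(), called from the completion tasklets below); this sketch only merges them for brevity.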
@@ -2046,6 +2030,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 	struct ath_tx_status ts;
 	int txok;
 	int status;
+	int qnum;
 
 	ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
 		  txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
@@ -2121,12 +2106,15 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 		ath_tx_rc_status(bf, &ts, txok ? 0 : 1, txok, true);
 	}
 
+	qnum = skb_get_queue_mapping(bf->bf_mpdu);
+
 	if (bf_isampdu(bf))
 		ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
 	else
 		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
 
-	ath_wake_mac80211_queue(sc, txq);
+	if (txq == sc->tx.txq_map[qnum])
+		ath_wake_mac80211_queue(sc, qnum);
 
 	spin_lock_bh(&txq->axq_lock);
 	if (sc->sc_flags & SC_OP_TXAGGR)
@@ -2196,6 +2184,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
 	struct list_head bf_head;
 	int status;
 	int txok;
+	int qnum;
 
 	for (;;) {
 		status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
@@ -2239,13 +2228,16 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
 		ath_tx_rc_status(bf, &txs, txok ? 0 : 1, txok, true);
 	}
 
+	qnum = skb_get_queue_mapping(bf->bf_mpdu);
+
 	if (bf_isampdu(bf))
 		ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
 	else
 		ath_tx_complete_buf(sc, bf, txq, &bf_head,
 				    &txs, txok, 0);
 
-	ath_wake_mac80211_queue(sc, txq);
+	if (txq == sc->tx.txq_map[qnum])
+		ath_wake_mac80211_queue(sc, qnum);
 
 	spin_lock_bh(&txq->axq_lock);
 	if (!list_empty(&txq->txq_fifo_pending)) {
@@ -2377,7 +2369,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
 	for (acno = 0, ac = &an->ac[acno];
 	     acno < WME_NUM_AC; acno++, ac++) {
 		ac->sched = false;
-		ac->qnum = sc->tx.hwq_map[acno];
+		ac->txq = sc->tx.txq_map[acno];
 		INIT_LIST_HEAD(&ac->tid_q);
 	}
 }
@@ -2387,17 +2379,13 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
 	struct ath_atx_ac *ac;
 	struct ath_atx_tid *tid;
 	struct ath_txq *txq;
-	int i, tidno;
+	int tidno;
 
 	for (tidno = 0, tid = &an->tid[tidno];
 	     tidno < WME_NUM_TID; tidno++, tid++) {
-		i = tid->ac->qnum;
-
-		if (!ATH_TXQ_SETUP(sc, i))
-			continue;
 
-		txq = &sc->tx.txq[i];
 		ac = tid->ac;
+		txq = ac->txq;
 
 		spin_lock_bh(&txq->axq_lock);
 