author		Sujith <Sujith.Manoharan@atheros.com>	2008-12-07 11:14:03 -0500
committer	John W. Linville <linville@tuxdriver.com>	2008-12-12 14:02:17 -0500
commit		b77f483fcf0579de28873828897f53371a33a0ea (patch)
tree		a08d0f942d4b5d0cd8a7893753f9b7554ebc89e4 /drivers/net/wireless/ath9k/xmit.c
parent		59651e89187293e88863891b821c7379391ef75c (diff)
ath9k: Refactor struct ath_softc
Split ath_softc into smaller structures for rx, tx and beacon handling.

Signed-off-by: Sujith <Sujith.Manoharan@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
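The renames in this patch imply roughly the following grouping of the former
ath_softc members. This is a sketch inferred only from the accesses visible in
xmit.c below; the real definitions live in the driver's core header, carry more
members (including the rx group this file never touches), and the types and
array sizes shown here are assumptions, not the driver's actual declarations.

/*
 * Sketch only: field grouping inferred from the renames in this diff.
 * Types and array sizes are illustrative assumptions.
 */
struct ath_tx {
	u16 seq_no;				/* was sc->seq_no */
	u32 txqsetup;				/* was sc->sc_txqsetup */
	int hwq_map[ATH9K_NUM_TX_QUEUES];	/* was sc->sc_haltype2q; size assumed */
	spinlock_t txbuflock;			/* was sc->sc_txbuflock */
	struct list_head txbuf;			/* was sc->sc_txbuf */
	struct ath_txq txq[ATH9K_NUM_TX_QUEUES];	/* was sc->sc_txq */
	struct ath_descdma txdma;		/* was sc->sc_txdma */
};

struct ath_beacon {
	u32 beaconq;				/* was sc->sc_bhalq */
	struct ath9k_tx_queue_info beacon_qi;	/* was sc->sc_beacon_qi */
	struct ath_descdma bdma;		/* was sc->sc_bdma */
	struct ath_txq *cabq;			/* was sc->sc_cabq */
	struct list_head bbuf;			/* was sc->sc_bbuf */
};

ath_softc then embeds these as sc->tx and sc->beacon, so every call site
changes mechanically, e.g. sc->sc_txbuflock becomes sc->tx.txbuflock, as the
hunks below show.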
Diffstat (limited to 'drivers/net/wireless/ath9k/xmit.c')
-rw-r--r--	drivers/net/wireless/ath9k/xmit.c	126
1 file changed, 63 insertions(+), 63 deletions(-)
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index e2e847db0891..f9c309ed3a2d 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -286,17 +286,17 @@ static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
 {
 	struct ath_buf *bf = NULL;
 
-	spin_lock_bh(&sc->sc_txbuflock);
+	spin_lock_bh(&sc->tx.txbuflock);
 
-	if (unlikely(list_empty(&sc->sc_txbuf))) {
-		spin_unlock_bh(&sc->sc_txbuflock);
+	if (unlikely(list_empty(&sc->tx.txbuf))) {
+		spin_unlock_bh(&sc->tx.txbuflock);
 		return NULL;
 	}
 
-	bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
+	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
 	list_del(&bf->list);
 
-	spin_unlock_bh(&sc->sc_txbuflock);
+	spin_unlock_bh(&sc->tx.txbuflock);
 
 	return bf;
 }
@@ -341,9 +341,9 @@ static void ath_tx_complete_buf(struct ath_softc *sc,
 	/*
 	 * Return the list of ath_buf of this mpdu to free queue
 	 */
-	spin_lock_irqsave(&sc->sc_txbuflock, flags);
-	list_splice_tail_init(bf_q, &sc->sc_txbuf);
-	spin_unlock_irqrestore(&sc->sc_txbuflock, flags);
+	spin_lock_irqsave(&sc->tx.txbuflock, flags);
+	list_splice_tail_init(bf_q, &sc->tx.txbuf);
+	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
 }
 
 /*
@@ -384,7 +384,7 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
 
 static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
+	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
 
 	spin_lock_bh(&txq->axq_lock);
 
@@ -397,7 +397,7 @@ static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 
 void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
+	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
 
 	ASSERT(tid->paused > 0);
 	spin_lock_bh(&txq->axq_lock);
@@ -686,7 +686,7 @@ static int ath_tx_send_normal(struct ath_softc *sc,
 
 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
+	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
 	struct ath_buf *bf;
 	struct list_head bf_head;
 	INIT_LIST_HEAD(&bf_head);
@@ -861,12 +861,12 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
 			struct ath_buf *tbf;
 
 			/* allocate new descriptor */
-			spin_lock_bh(&sc->sc_txbuflock);
-			ASSERT(!list_empty((&sc->sc_txbuf)));
-			tbf = list_first_entry(&sc->sc_txbuf,
+			spin_lock_bh(&sc->tx.txbuflock);
+			ASSERT(!list_empty((&sc->tx.txbuf)));
+			tbf = list_first_entry(&sc->tx.txbuf,
 					       struct ath_buf, list);
 			list_del(&tbf->list);
-			spin_unlock_bh(&sc->sc_txbuflock);
+			spin_unlock_bh(&sc->tx.txbuflock);
 
 			ATH_TXBUF_RESET(tbf);
 
@@ -1058,9 +1058,9 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 
 		if (bf_held) {
 			list_del(&bf_held->list);
-			spin_lock_bh(&sc->sc_txbuflock);
-			list_add_tail(&bf_held->list, &sc->sc_txbuf);
-			spin_unlock_bh(&sc->sc_txbuflock);
+			spin_lock_bh(&sc->tx.txbuflock);
+			list_add_tail(&bf_held->list, &sc->tx.txbuf);
+			spin_unlock_bh(&sc->tx.txbuflock);
 		}
 
 		if (!bf_isampdu(bf)) {
@@ -1129,11 +1129,11 @@ static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
 	if (!(sc->sc_flags & SC_OP_INVALID)) {
 		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
 			if (ATH_TXQ_SETUP(sc, i)) {
-				ath_tx_stopdma(sc, &sc->sc_txq[i]);
+				ath_tx_stopdma(sc, &sc->tx.txq[i]);
 				/* The TxDMA may not really be stopped.
 				 * Double check the hal tx pending count */
 				npend += ath9k_hw_numtxpending(ah,
-						sc->sc_txq[i].axq_qnum);
+						sc->tx.txq[i].axq_qnum);
 			}
 		}
 	}
@@ -1158,7 +1158,7 @@ static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
 
 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
 		if (ATH_TXQ_SETUP(sc, i))
-			ath_tx_draintxq(sc, &sc->sc_txq[i], retry_tx);
+			ath_tx_draintxq(sc, &sc->tx.txq[i], retry_tx);
 	}
 }
 
@@ -1820,9 +1820,9 @@ int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
 		}
 		spin_unlock_bh(&txq->axq_lock);
 
-		spin_lock_bh(&sc->sc_txbuflock);
-		list_add_tail(&bf->list, &sc->sc_txbuf);
-		spin_unlock_bh(&sc->sc_txbuflock);
+		spin_lock_bh(&sc->tx.txbuflock);
+		list_add_tail(&bf->list, &sc->tx.txbuf);
+		spin_unlock_bh(&sc->tx.txbuflock);
 
 		return r;
 	}
@@ -1839,10 +1839,10 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
 	int error = 0;
 
 	do {
-		spin_lock_init(&sc->sc_txbuflock);
+		spin_lock_init(&sc->tx.txbuflock);
 
 		/* Setup tx descriptors */
-		error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
+		error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
 					  "tx", nbufs, 1);
 		if (error != 0) {
 			DPRINTF(sc, ATH_DBG_FATAL,
@@ -1852,7 +1852,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
 		}
 
 		/* XXX allocate beacon state together with vap */
-		error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
+		error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
 					  "beacon", ATH_BCBUF, 1);
 		if (error != 0) {
 			DPRINTF(sc, ATH_DBG_FATAL,
@@ -1874,12 +1874,12 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
 int ath_tx_cleanup(struct ath_softc *sc)
 {
 	/* cleanup beacon descriptors */
-	if (sc->sc_bdma.dd_desc_len != 0)
-		ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
+	if (sc->beacon.bdma.dd_desc_len != 0)
+		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
 
 	/* cleanup tx descriptors */
-	if (sc->sc_txdma.dd_desc_len != 0)
-		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
+	if (sc->tx.txdma.dd_desc_len != 0)
+		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
 
 	return 0;
 }
@@ -1927,15 +1927,15 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
 		 */
 		return NULL;
 	}
-	if (qnum >= ARRAY_SIZE(sc->sc_txq)) {
+	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
 		DPRINTF(sc, ATH_DBG_FATAL,
 			"qnum %u out of range, max %u!\n",
-			qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq));
+			qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
 		ath9k_hw_releasetxqueue(ah, qnum);
 		return NULL;
 	}
 	if (!ATH_TXQ_SETUP(sc, qnum)) {
-		struct ath_txq *txq = &sc->sc_txq[qnum];
+		struct ath_txq *txq = &sc->tx.txq[qnum];
 
 		txq->axq_qnum = qnum;
 		txq->axq_link = NULL;
@@ -1946,9 +1946,9 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
 		txq->axq_aggr_depth = 0;
 		txq->axq_totalqueued = 0;
 		txq->axq_linkbuf = NULL;
-		sc->sc_txqsetup |= 1<<qnum;
+		sc->tx.txqsetup |= 1<<qnum;
 	}
-	return &sc->sc_txq[qnum];
+	return &sc->tx.txq[qnum];
 }
 
 /* Reclaim resources for a setup queue */
@@ -1956,7 +1956,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
 void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
 {
 	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
-	sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
+	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
 }
 
 /*
@@ -1973,15 +1973,15 @@ int ath_tx_setup(struct ath_softc *sc, int haltype)
 {
 	struct ath_txq *txq;
 
-	if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
+	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
 		DPRINTF(sc, ATH_DBG_FATAL,
 			"HAL AC %u out of range, max %zu!\n",
-			haltype, ARRAY_SIZE(sc->sc_haltype2q));
+			haltype, ARRAY_SIZE(sc->tx.hwq_map));
 		return 0;
 	}
 	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
 	if (txq != NULL) {
-		sc->sc_haltype2q[haltype] = txq->axq_qnum;
+		sc->tx.hwq_map[haltype] = txq->axq_qnum;
 		return 1;
 	} else
 		return 0;
@@ -1993,19 +1993,19 @@ int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
 
 	switch (qtype) {
 	case ATH9K_TX_QUEUE_DATA:
-		if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
+		if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
 			DPRINTF(sc, ATH_DBG_FATAL,
 				"HAL AC %u out of range, max %zu!\n",
-				haltype, ARRAY_SIZE(sc->sc_haltype2q));
+				haltype, ARRAY_SIZE(sc->tx.hwq_map));
 			return -1;
 		}
-		qnum = sc->sc_haltype2q[haltype];
+		qnum = sc->tx.hwq_map[haltype];
 		break;
 	case ATH9K_TX_QUEUE_BEACON:
-		qnum = sc->sc_bhalq;
+		qnum = sc->beacon.beaconq;
 		break;
 	case ATH9K_TX_QUEUE_CAB:
-		qnum = sc->sc_cabq->axq_qnum;
+		qnum = sc->beacon.cabq->axq_qnum;
 		break;
 	default:
 		qnum = -1;
@@ -2021,7 +2021,7 @@ struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
 	int qnum;
 
 	qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
-	txq = &sc->sc_txq[qnum];
+	txq = &sc->tx.txq[qnum];
 
 	spin_lock_bh(&txq->axq_lock);
 
@@ -2050,17 +2050,17 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
 	int error = 0;
 	struct ath9k_tx_queue_info qi;
 
-	if (qnum == sc->sc_bhalq) {
+	if (qnum == sc->beacon.beaconq) {
 		/*
 		 * XXX: for beacon queue, we just save the parameter.
 		 * It will be picked up by ath_beaconq_config when
 		 * it's necessary.
 		 */
-		sc->sc_beacon_qi = *qinfo;
+		sc->beacon.beacon_qi = *qinfo;
 		return 0;
 	}
 
-	ASSERT(sc->sc_txq[qnum].axq_qnum == qnum);
+	ASSERT(sc->tx.txq[qnum].axq_qnum == qnum);
 
 	ath9k_hw_get_txq_props(ah, qnum, &qi);
 	qi.tqi_aifs = qinfo->tqi_aifs;
@@ -2083,7 +2083,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
 int ath_cabq_update(struct ath_softc *sc)
 {
 	struct ath9k_tx_queue_info qi;
-	int qnum = sc->sc_cabq->axq_qnum;
+	int qnum = sc->beacon.cabq->axq_qnum;
 	struct ath_beacon_config conf;
 
 	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
@@ -2117,7 +2117,7 @@ void ath_tx_tasklet(struct ath_softc *sc)
 	 */
 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
 		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
-			ath_tx_processq(sc, &sc->sc_txq[i]);
+			ath_tx_processq(sc, &sc->tx.txq[i]);
 	}
 }
 
@@ -2149,9 +2149,9 @@ void ath_tx_draintxq(struct ath_softc *sc,
 			list_del(&bf->list);
 			spin_unlock_bh(&txq->axq_lock);
 
-			spin_lock_bh(&sc->sc_txbuflock);
-			list_add_tail(&bf->list, &sc->sc_txbuf);
-			spin_unlock_bh(&sc->sc_txbuflock);
+			spin_lock_bh(&sc->tx.txbuflock);
+			list_add_tail(&bf->list, &sc->tx.txbuf);
+			spin_unlock_bh(&sc->tx.txbuflock);
 			continue;
 		}
 
@@ -2189,9 +2189,9 @@ void ath_draintxq(struct ath_softc *sc, bool retry_tx)
 	/* stop beacon queue. The beacon will be freed when
 	 * we go to INIT state */
 	if (!(sc->sc_flags & SC_OP_INVALID)) {
-		(void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
+		(void) ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
 		DPRINTF(sc, ATH_DBG_XMIT, "beacon queue %x\n",
-			ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq));
+			ath9k_hw_gettxbuf(sc->sc_ah, sc->beacon.beaconq));
 	}
 
 	ath_drain_txdataq(sc, retry_tx);
@@ -2199,12 +2199,12 @@ void ath_draintxq(struct ath_softc *sc, bool retry_tx)
 
 u32 ath_txq_depth(struct ath_softc *sc, int qnum)
 {
-	return sc->sc_txq[qnum].axq_depth;
+	return sc->tx.txq[qnum].axq_depth;
 }
 
 u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum)
 {
-	return sc->sc_txq[qnum].axq_aggr_depth;
+	return sc->tx.txq[qnum].axq_aggr_depth;
 }
 
 bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
@@ -2285,7 +2285,7 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid
 void ath_tx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tid)
 {
 	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
-	struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum];
+	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
 	struct ath_buf *bf;
 	struct list_head bf_head;
 	INIT_LIST_HEAD(&bf_head);
@@ -2467,7 +2467,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
 	struct ath_txq *txq;
 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
 		if (ATH_TXQ_SETUP(sc, i)) {
-			txq = &sc->sc_txq[i];
+			txq = &sc->tx.txq[i];
 
 			spin_lock(&txq->axq_lock);
 
@@ -2512,9 +2512,9 @@ void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
 	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
 		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
-			sc->seq_no += 0x10;
+			sc->tx.seq_no += 0x10;
 		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
-		hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
+		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
 	}
 
 	/* Add the padding after the header if this is not already done */
@@ -2530,7 +2530,7 @@ void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
 		memmove(skb->data, skb->data + padsize, hdrlen);
 	}
 
-	txctl.txq = sc->sc_cabq;
+	txctl.txq = sc->beacon.cabq;
 
 	DPRINTF(sc, ATH_DBG_XMIT, "transmitting CABQ packet, skb: %p\n", skb);
 