about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/net/wireless/ath9k/xmit.c
diff options
context:
space:
mode:
authorSujith <Sujith.Manoharan@atheros.com>2008-08-11 04:35:08 -0400
committerJohn W. Linville <linville@tuxdriver.com>2008-08-29 16:23:59 -0400
commit672840ac04f79f499b60b9f0eb41799c837db4eb (patch)
treeabea21f851556acbf16d217f2849577399610654 /drivers/net/wireless/ath9k/xmit.c
parentb4696c8b9233bea812b972a5e79c5db7ecf13867 (diff)
ath9k: Use bitfields for sc operations
Signed-off-by: Sujith Manoharan <Sujith.Manoharan@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/ath9k/xmit.c')
-rw-r--r--drivers/net/wireless/ath9k/xmit.c31
1 file changed, 16 insertions, 15 deletions
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index 0b5b2dd92562..75a84924b4dc 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -392,7 +392,7 @@ static int ath_tx_prepare(struct ath_softc *sc,
392 * incremented by the fragmentation routine. 392 * incremented by the fragmentation routine.
393 */ 393 */
394 if (likely(!(txctl->flags & ATH9K_TXDESC_FRAG_IS_ON)) && 394 if (likely(!(txctl->flags & ATH9K_TXDESC_FRAG_IS_ON)) &&
395 txctl->ht && sc->sc_txaggr) { 395 txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
396 struct ath_atx_tid *tid; 396 struct ath_atx_tid *tid;
397 397
398 tid = ATH_AN_2_TID(txctl->an, txctl->tidno); 398 tid = ATH_AN_2_TID(txctl->an, txctl->tidno);
@@ -422,7 +422,7 @@ static int ath_tx_prepare(struct ath_softc *sc,
422 /* 422 /*
423 * XXX not right with fragmentation. 423 * XXX not right with fragmentation.
424 */ 424 */
425 if (sc->sc_flags & ATH_PREAMBLE_SHORT) 425 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
426 dur = rt->info[rix].spAckDuration; 426 dur = rt->info[rix].spAckDuration;
427 else 427 else
428 dur = rt->info[rix].lpAckDuration; 428 dur = rt->info[rix].lpAckDuration;
@@ -438,8 +438,9 @@ static int ath_tx_prepare(struct ath_softc *sc,
438 ** Add time for next fragment. 438 ** Add time for next fragment.
439 */ 439 */
440 dur += ath9k_hw_computetxtime(sc->sc_ah, rt, 440 dur += ath9k_hw_computetxtime(sc->sc_ah, rt,
441 txctl->nextfraglen, 441 txctl->nextfraglen,
442 rix, sc->sc_flags & ATH_PREAMBLE_SHORT); 442 rix,
443 (sc->sc_flags & SC_OP_PREAMBLE_SHORT));
443 } 444 }
444 445
445 if (ieee80211_has_morefrags(fc) || 446 if (ieee80211_has_morefrags(fc) ||
@@ -1406,7 +1407,7 @@ static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1406 /* 1407 /*
1407 * schedule any pending packets if aggregation is enabled 1408 * schedule any pending packets if aggregation is enabled
1408 */ 1409 */
1409 if (sc->sc_txaggr) 1410 if (sc->sc_flags & SC_OP_TXAGGR)
1410 ath_txq_schedule(sc, txq); 1411 ath_txq_schedule(sc, txq);
1411 spin_unlock_bh(&txq->axq_lock); 1412 spin_unlock_bh(&txq->axq_lock);
1412 } 1413 }
@@ -1433,7 +1434,7 @@ static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
1433 enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc); 1434 enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
1434 1435
1435 /* XXX return value */ 1436 /* XXX return value */
1436 if (!sc->sc_invalid) { 1437 if (!(sc->sc_flags & SC_OP_INVALID)) {
1437 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1438 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1438 if (ATH_TXQ_SETUP(sc, i)) { 1439 if (ATH_TXQ_SETUP(sc, i)) {
1439 ath_tx_stopdma(sc, &sc->sc_txq[i]); 1440 ath_tx_stopdma(sc, &sc->sc_txq[i]);
@@ -2024,7 +2025,7 @@ static int ath_tx_start_dma(struct ath_softc *sc,
2024 ieee80211_is_pspoll(fc) ? 2025 ieee80211_is_pspoll(fc) ?
2025 (bf->bf_state.bf_type |= BUF_PSPOLL) : 2026 (bf->bf_state.bf_type |= BUF_PSPOLL) :
2026 (bf->bf_state.bf_type &= ~BUF_PSPOLL); 2027 (bf->bf_state.bf_type &= ~BUF_PSPOLL);
2027 (sc->sc_flags & ATH_PREAMBLE_SHORT) ? 2028 (sc->sc_flags & SC_OP_PREAMBLE_SHORT) ?
2028 (bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) : 2029 (bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
2029 (bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE); 2030 (bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
2030 2031
@@ -2076,7 +2077,7 @@ static int ath_tx_start_dma(struct ath_softc *sc,
2076 2077
2077 spin_lock_bh(&txq->axq_lock); 2078 spin_lock_bh(&txq->axq_lock);
2078 2079
2079 if (txctl->ht && sc->sc_txaggr) { 2080 if (txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
2080 struct ath_atx_tid *tid = ATH_AN_2_TID(an, txctl->tidno); 2081 struct ath_atx_tid *tid = ATH_AN_2_TID(an, txctl->tidno);
2081 if (ath_aggr_query(sc, an, txctl->tidno)) { 2082 if (ath_aggr_query(sc, an, txctl->tidno)) {
2082 /* 2083 /*
@@ -2153,7 +2154,7 @@ static void xmit_map_sg(struct ath_softc *sc,
2153 tx_status.retries = 0; 2154 tx_status.retries = 0;
2154 tx_status.flags = ATH_TX_ERROR; 2155 tx_status.flags = ATH_TX_ERROR;
2155 2156
2156 if (txctl->ht && sc->sc_txaggr) { 2157 if (txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
2157 /* Reclaim the seqno. */ 2158 /* Reclaim the seqno. */
2158 tid = ATH_AN_2_TID((struct ath_node *) 2159 tid = ATH_AN_2_TID((struct ath_node *)
2159 txctl->an, txctl->tidno); 2160 txctl->an, txctl->tidno);
@@ -2505,7 +2506,7 @@ void ath_tx_draintxq(struct ath_softc *sc,
2505 } 2506 }
2506 2507
2507 /* flush any pending frames if aggregation is enabled */ 2508 /* flush any pending frames if aggregation is enabled */
2508 if (sc->sc_txaggr) { 2509 if (sc->sc_flags & SC_OP_TXAGGR) {
2509 if (!retry_tx) { 2510 if (!retry_tx) {
2510 spin_lock_bh(&txq->axq_lock); 2511 spin_lock_bh(&txq->axq_lock);
2511 ath_txq_drain_pending_buffers(sc, txq, 2512 ath_txq_drain_pending_buffers(sc, txq,
@@ -2521,7 +2522,7 @@ void ath_draintxq(struct ath_softc *sc, bool retry_tx)
2521{ 2522{
2522 /* stop beacon queue. The beacon will be freed when 2523 /* stop beacon queue. The beacon will be freed when
2523 * we go to INIT state */ 2524 * we go to INIT state */
2524 if (!sc->sc_invalid) { 2525 if (!(sc->sc_flags & SC_OP_INVALID)) {
2525 (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq); 2526 (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
2526 DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__, 2527 DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__,
2527 ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq)); 2528 ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq));
@@ -2548,7 +2549,7 @@ enum ATH_AGGR_CHECK ath_tx_aggr_check(struct ath_softc *sc,
2548 struct ath_atx_tid *txtid; 2549 struct ath_atx_tid *txtid;
2549 DECLARE_MAC_BUF(mac); 2550 DECLARE_MAC_BUF(mac);
2550 2551
2551 if (!sc->sc_txaggr) 2552 if (!(sc->sc_flags & SC_OP_TXAGGR))
2552 return AGGR_NOT_REQUIRED; 2553 return AGGR_NOT_REQUIRED;
2553 2554
2554 /* ADDBA exchange must be completed before sending aggregates */ 2555 /* ADDBA exchange must be completed before sending aggregates */
@@ -2595,7 +2596,7 @@ int ath_tx_aggr_start(struct ath_softc *sc,
2595 return -1; 2596 return -1;
2596 } 2597 }
2597 2598
2598 if (sc->sc_txaggr) { 2599 if (sc->sc_flags & SC_OP_TXAGGR) {
2599 txtid = ATH_AN_2_TID(an, tid); 2600 txtid = ATH_AN_2_TID(an, tid);
2600 txtid->addba_exchangeinprogress = 1; 2601 txtid->addba_exchangeinprogress = 1;
2601 ath_tx_pause_tid(sc, txtid); 2602 ath_tx_pause_tid(sc, txtid);
@@ -2755,7 +2756,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
2755 2756
2756void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) 2757void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2757{ 2758{
2758 if (sc->sc_txaggr) { 2759 if (sc->sc_flags & SC_OP_TXAGGR) {
2759 struct ath_atx_tid *tid; 2760 struct ath_atx_tid *tid;
2760 struct ath_atx_ac *ac; 2761 struct ath_atx_ac *ac;
2761 int tidno, acno; 2762 int tidno, acno;
@@ -2867,7 +2868,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc,
2867 2868
2868void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an) 2869void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an)
2869{ 2870{
2870 if (sc->sc_txaggr) { 2871 if (sc->sc_flags & SC_OP_TXAGGR) {
2871 struct ath_atx_tid *tid; 2872 struct ath_atx_tid *tid;
2872 int tidno, i; 2873 int tidno, i;
2873 2874