Diffstat (limited to 'drivers/net/wireless/ath9k/xmit.c')
 -rw-r--r--  drivers/net/wireless/ath9k/xmit.c | 418
 1 file changed, 177 insertions(+), 241 deletions(-)
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index 550129f717e2..3a4757942b3f 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -60,79 +60,6 @@ static u32 bits_per_symbol[][2] = {
 #define IS_HT_RATE(_rate)	((_rate) & 0x80)
 
 /*
- * Insert a chain of ath_buf (descriptors) on a multicast txq
- * but do NOT start tx DMA on this queue.
- * NB: must be called with txq lock held
- */
-
-static void ath_tx_mcastqaddbuf(struct ath_softc *sc,
-				struct ath_txq *txq,
-				struct list_head *head)
-{
-	struct ath_hal *ah = sc->sc_ah;
-	struct ath_buf *bf;
-
-	if (list_empty(head))
-		return;
-
-	/*
-	 * Insert the frame on the outbound list and
-	 * pass it on to the hardware.
-	 */
-	bf = list_first_entry(head, struct ath_buf, list);
-
-	/*
-	 * The CAB queue is started from the SWBA handler since
-	 * frames only go out on DTIM and to avoid possible races.
-	 */
-	ath9k_hw_set_interrupts(ah, 0);
-
-	/*
-	 * If there is anything in the mcastq, we want to set
-	 * the "more data" bit in the last item in the queue to
-	 * indicate that there is "more data". It makes sense to add
-	 * it here since you are *always* going to have
-	 * more data when adding to this queue, no matter where
-	 * you call from.
-	 */
-
-	if (txq->axq_depth) {
-		struct ath_buf *lbf;
-		struct ieee80211_hdr *hdr;
-
-		/*
-		 * Add the "more data flag" to the last frame
-		 */
-
-		lbf = list_entry(txq->axq_q.prev, struct ath_buf, list);
-		hdr = (struct ieee80211_hdr *)
-			((struct sk_buff *)(lbf->bf_mpdu))->data;
-		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
-	}
-
-	/*
-	 * Now, concat the frame onto the queue
-	 */
-	list_splice_tail_init(head, &txq->axq_q);
-	txq->axq_depth++;
-	txq->axq_totalqueued++;
-	txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
-
-	DPRINTF(sc, ATH_DBG_QUEUE,
-		"%s: txq depth = %d\n", __func__, txq->axq_depth);
-	if (txq->axq_link != NULL) {
-		*txq->axq_link = bf->bf_daddr;
-		DPRINTF(sc, ATH_DBG_XMIT,
-			"%s: link[%u](%p)=%llx (%p)\n",
-			__func__,
-			txq->axq_qnum, txq->axq_link,
-			ito64(bf->bf_daddr), bf->bf_desc);
-	}
-	txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
-	ath9k_hw_set_interrupts(ah, sc->sc_imask);
-}
-
-/*
  * Insert a chain of ath_buf (descriptors) on a txq and
  * assume the descriptors are already chained together by caller.
  * NB: must be called with txq lock held
@@ -277,8 +204,6 @@ static int ath_tx_prepare(struct ath_softc *sc,
 	__le16 fc;
 	u8 *qc;
 
-	memset(txctl, 0, sizeof(struct ath_tx_control));
-
 	txctl->dev = sc;
 	hdr = (struct ieee80211_hdr *)skb->data;
 	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
@@ -302,7 +227,6 @@ static int ath_tx_prepare(struct ath_softc *sc,
 	}
 
 	txctl->if_id = 0;
-	txctl->nextfraglen = 0;
 	txctl->frmlen = skb->len + FCS_LEN - (hdrlen & 3);
 	txctl->txpower = MAX_RATE_POWER; /* FIXME */
 
@@ -313,13 +237,13 @@ static int ath_tx_prepare(struct ath_softc *sc,
 
 	if (tx_info->control.hw_key) {
 		txctl->keyix = tx_info->control.hw_key->hw_key_idx;
-		txctl->frmlen += tx_info->control.icv_len;
+		txctl->frmlen += tx_info->control.hw_key->icv_len;
 
-		if (sc->sc_keytype == ATH9K_CIPHER_WEP)
+		if (tx_info->control.hw_key->alg == ALG_WEP)
 			txctl->keytype = ATH9K_KEY_TYPE_WEP;
-		else if (sc->sc_keytype == ATH9K_CIPHER_TKIP)
+		else if (tx_info->control.hw_key->alg == ALG_TKIP)
 			txctl->keytype = ATH9K_KEY_TYPE_TKIP;
-		else if (sc->sc_keytype == ATH9K_CIPHER_AES_CCM)
+		else if (tx_info->control.hw_key->alg == ALG_CCMP)
 			txctl->keytype = ATH9K_KEY_TYPE_AES;
 	}
 
@@ -329,12 +253,18 @@ static int ath_tx_prepare(struct ath_softc *sc,
 
 	/* Fill qnum */
 
-	txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
-	txq = &sc->sc_txq[txctl->qnum];
+	if (unlikely(txctl->flags & ATH9K_TXDESC_CAB)) {
+		txctl->qnum = 0;
+		txq = sc->sc_cabq;
+	} else {
+		txctl->qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
+		txq = &sc->sc_txq[txctl->qnum];
+	}
 	spin_lock_bh(&txq->axq_lock);
 
 	/* Try to avoid running out of descriptors */
-	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
+	if (txq->axq_depth >= (ATH_TXBUF - 20) &&
+	    !(txctl->flags & ATH9K_TXDESC_CAB)) {
 		DPRINTF(sc, ATH_DBG_FATAL,
 			"%s: TX queue: %d is full, depth: %d\n",
 			__func__,
@@ -354,7 +284,7 @@ static int ath_tx_prepare(struct ath_softc *sc,
 
 	/* Fill flags */
 
-	txctl->flags = ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
+	txctl->flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
 
 	if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
 		txctl->flags |= ATH9K_TXDESC_NOACK;
@@ -392,7 +322,7 @@ static int ath_tx_prepare(struct ath_softc *sc,
 	 * incremented by the fragmentation routine.
 	 */
 	if (likely(!(txctl->flags & ATH9K_TXDESC_FRAG_IS_ON)) &&
-	    txctl->ht && sc->sc_txaggr) {
+	    txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
 		struct ath_atx_tid *tid;
 
 		tid = ATH_AN_2_TID(txctl->an, txctl->tidno);
@@ -413,50 +343,18 @@ static int ath_tx_prepare(struct ath_softc *sc,
 	}
 	rix = rcs[0].rix;
 
-	/*
-	 * Calculate duration. This logically belongs in the 802.11
-	 * layer but it lacks sufficient information to calculate it.
-	 */
-	if ((txctl->flags & ATH9K_TXDESC_NOACK) == 0 && !ieee80211_is_ctl(fc)) {
-		u16 dur;
-		/*
-		 * XXX not right with fragmentation.
-		 */
-		if (sc->sc_flags & ATH_PREAMBLE_SHORT)
-			dur = rt->info[rix].spAckDuration;
-		else
-			dur = rt->info[rix].lpAckDuration;
-
-		if (le16_to_cpu(hdr->frame_control) &
-		    IEEE80211_FCTL_MOREFRAGS) {
-			dur += dur; /* Add additional 'SIFS + ACK' */
-
-			/*
-			** Compute size of next fragment in order to compute
-			** durations needed to update NAV.
-			** The last fragment uses the ACK duration only.
-			** Add time for next fragment.
-			*/
-			dur += ath9k_hw_computetxtime(sc->sc_ah, rt,
-				txctl->nextfraglen,
-				rix, sc->sc_flags & ATH_PREAMBLE_SHORT);
-		}
-
-		if (ieee80211_has_morefrags(fc) ||
-		    (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
-			/*
-			**  Force hardware to use computed duration for next
-			**  fragment by disabling multi-rate retry, which
-			**  updates duration based on the multi-rate
-			**  duration table.
-			*/
-			rcs[1].tries = rcs[2].tries = rcs[3].tries = 0;
-			rcs[1].rix = rcs[2].rix = rcs[3].rix = 0;
-			/* reset tries but keep rate index */
-			rcs[0].tries = ATH_TXMAXTRY;
-		}
-
-		hdr->duration_id = cpu_to_le16(dur);
+	if (ieee80211_has_morefrags(fc) ||
+	    (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
+		/*
+		**  Force hardware to use computed duration for next
+		**  fragment by disabling multi-rate retry, which
+		**  updates duration based on the multi-rate
+		**  duration table.
+		*/
+		rcs[1].tries = rcs[2].tries = rcs[3].tries = 0;
+		rcs[1].rix = rcs[2].rix = rcs[3].rix = 0;
+		/* reset tries but keep rate index */
+		rcs[0].tries = ATH_TXMAXTRY;
 	}
 
 	/*
@@ -484,12 +382,8 @@ static int ath_tx_prepare(struct ath_softc *sc,
 	if (is_multicast_ether_addr(hdr->addr1)) {
 		antenna = sc->sc_mcastantenna + 1;
 		sc->sc_mcastantenna = (sc->sc_mcastantenna + 1) & 0x1;
-	} else
-		antenna = sc->sc_txantenna;
+	}
 
-#ifdef USE_LEGACY_HAL
-	txctl->antenna = antenna;
-#endif
 	return 0;
 }
 
@@ -502,7 +396,6 @@ static void ath_tx_complete_buf(struct ath_softc *sc,
 {
 	struct sk_buff *skb = bf->bf_mpdu;
 	struct ath_xmit_status tx_status;
-	dma_addr_t *pa;
 
 	/*
 	 * Set retry information.
@@ -518,13 +411,12 @@ static void ath_tx_complete_buf(struct ath_softc *sc,
 	if (!txok) {
 		tx_status.flags |= ATH_TX_ERROR;
 
-		if (bf->bf_isxretried)
+		if (bf_isxretried(bf))
 			tx_status.flags |= ATH_TX_XRETRY;
 	}
 	/* Unmap this frame */
-	pa = get_dma_mem_context(bf, bf_dmacontext);
 	pci_unmap_single(sc->pdev,
-			 *pa,
+			 bf->bf_dmacontext,
 			 skb->len,
 			 PCI_DMA_TODEVICE);
 	/* complete this frame */
@@ -629,7 +521,7 @@ static int ath_tx_num_badfrms(struct ath_softc *sc,
 	if (isnodegone || ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
 		return 0;
 
-	isaggr = bf->bf_isaggr;
+	isaggr = bf_isaggr(bf);
 	if (isaggr) {
 		seq_st = ATH_DS_BA_SEQ(ds);
 		memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
@@ -651,7 +543,7 @@ static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
 	struct sk_buff *skb;
 	struct ieee80211_hdr *hdr;
 
-	bf->bf_isretried = 1;
+	bf->bf_state.bf_type |= BUF_RETRY;
 	bf->bf_retries++;
 
 	skb = bf->bf_mpdu;
@@ -698,7 +590,7 @@ static u32 ath_pkt_duration(struct ath_softc *sc,
 	u8 rc;
 	int streams, pktlen;
 
-	pktlen = bf->bf_isaggr ? bf->bf_al : bf->bf_frmlen;
+	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
 	rc = rt->info[rix].rateCode;
 
 	/*
@@ -742,7 +634,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 	int i, flags, rtsctsena = 0, dynamic_mimops = 0;
 	u32 ctsduration = 0;
 	u8 rix = 0, cix, ctsrate = 0;
-	u32 aggr_limit_with_rts = sc->sc_rtsaggrlimit;
+	u32 aggr_limit_with_rts = ah->ah_caps.rts_aggr_limit;
 	struct ath_node *an = (struct ath_node *) bf->bf_node;
 
 	/*
@@ -781,7 +673,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 	 * let rate series flags determine which rates will actually
 	 * use RTS.
 	 */
-	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf->bf_isdata) {
+	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
 		BUG_ON(!an);
 		/*
 		 * 802.11g protection not needed, use our default behavior
@@ -793,7 +685,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 		 * and the second aggregate should have any protection at all.
 		 */
 		if (an->an_smmode == ATH_SM_PWRSAV_DYNAMIC) {
-			if (!bf->bf_aggrburst) {
+			if (!bf_isaggrburst(bf)) {
 				flags = ATH9K_TXDESC_RTSENA;
 				dynamic_mimops = 1;
 			} else {
@@ -806,7 +698,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 	 * Set protection if aggregate protection on
 	 */
 	if (sc->sc_config.ath_aggr_prot &&
-	    (!bf->bf_isaggr || (bf->bf_isaggr && bf->bf_al < 8192))) {
+	    (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
 		flags = ATH9K_TXDESC_RTSENA;
 		cix = rt->info[sc->sc_protrix].controlRate;
 		rtsctsena = 1;
@@ -815,7 +707,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 	/*
 	 * For AR5416 - RTS cannot be followed by a frame larger than 8K.
 	 */
-	if (bf->bf_isaggr && (bf->bf_al > aggr_limit_with_rts)) {
+	if (bf_isaggr(bf) && (bf->bf_al > aggr_limit_with_rts)) {
 		/*
 		 * Ensure that in the case of SM Dynamic power save
 		 * while we are bursting the second aggregate the
@@ -832,12 +724,12 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 	/* NB: cix is set above where RTS/CTS is enabled */
 	BUG_ON(cix == 0xff);
 	ctsrate = rt->info[cix].rateCode |
-		(bf->bf_shpreamble ? rt->info[cix].shortPreamble : 0);
+		(bf_isshpreamble(bf) ? rt->info[cix].shortPreamble : 0);
 
 	/*
 	 * Setup HAL rate series
 	 */
-	memzero(series, sizeof(struct ath9k_11n_rate_series) * 4);
+	memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
 
 	for (i = 0; i < 4; i++) {
 		if (!bf->bf_rcs[i].tries)
@@ -846,7 +738,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 		rix = bf->bf_rcs[i].rix;
 
 		series[i].Rate = rt->info[rix].rateCode |
-			(bf->bf_shpreamble ? rt->info[rix].shortPreamble : 0);
+			(bf_isshpreamble(bf) ? rt->info[rix].shortPreamble : 0);
 
 		series[i].Tries = bf->bf_rcs[i].tries;
 
@@ -862,7 +754,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 				sc, rix, bf,
 				(bf->bf_rcs[i].flags & ATH_RC_CW40_FLAG) != 0,
 				(bf->bf_rcs[i].flags & ATH_RC_SGI_FLAG),
-				bf->bf_shpreamble);
+				bf_isshpreamble(bf));
 
 		if ((an->an_smmode == ATH_SM_PWRSAV_STATIC) &&
 		    (bf->bf_rcs[i].flags & ATH_RC_DS_FLAG) == 0) {
@@ -875,7 +767,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 			 */
 			series[i].ChSel = sc->sc_tx_chainmask;
 		} else {
-			if (bf->bf_ht)
+			if (bf_isht(bf))
 				series[i].ChSel =
 					ath_chainmask_sel_logic(sc, an);
 			else
@@ -908,7 +800,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 	 * use the precalculated ACK durations.
 	 */
 	if (flags & ATH9K_TXDESC_RTSENA) { /* SIFS + CTS */
-		ctsduration += bf->bf_shpreamble ?
+		ctsduration += bf_isshpreamble(bf) ?
 			rt->info[cix].spAckDuration :
 			rt->info[cix].lpAckDuration;
 	}
@@ -916,7 +808,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 	ctsduration += series[0].PktDuration;
 
 	if ((bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { /* SIFS + ACK */
-		ctsduration += bf->bf_shpreamble ?
+		ctsduration += bf_isshpreamble(bf) ?
 			rt->info[rix].spAckDuration :
 			rt->info[rix].lpAckDuration;
 	}
@@ -925,17 +817,17 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
 		 * Disable multi-rate retry when using RTS/CTS by clearing
 		 * series 1, 2 and 3.
 		 */
-		memzero(&series[1], sizeof(struct ath9k_11n_rate_series) * 3);
+		memset(&series[1], 0, sizeof(struct ath9k_11n_rate_series) * 3);
 	}
 
 	/*
 	 * set dur_update_en for l-sig computation except for PS-Poll frames
 	 */
 	ath9k_hw_set11n_ratescenario(ah, ds, lastds,
-				     !bf->bf_ispspoll,
+				     !bf_ispspoll(bf),
 				     ctsrate,
 				     ctsduration,
 				     series, 4, flags);
 	if (sc->sc_config.ath_aggr_prot && flags)
 		ath9k_hw_set11n_burstduration(ah, ds, 8192);
 }
@@ -958,7 +850,7 @@ static int ath_tx_send_normal(struct ath_softc *sc,
 	BUG_ON(list_empty(bf_head));
 
 	bf = list_first_entry(bf_head, struct ath_buf, list);
-	bf->bf_isampdu = 0; /* regular HT frame */
+	bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */
 
 	skb = (struct sk_buff *)bf->bf_mpdu;
 	tx_info = IEEE80211_SKB_CB(skb);
@@ -998,7 +890,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 
 	while (!list_empty(&tid->buf_q)) {
 		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
-		ASSERT(!bf->bf_isretried);
+		ASSERT(!bf_isretried(bf));
 		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
 		ath_tx_send_normal(sc, txq, tid, &bf_head);
 	}
@@ -1025,7 +917,7 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
 	int isaggr, txfail, txpending, sendbar = 0, needreset = 0;
 	int isnodegone = (an->an_flags & ATH_NODE_CLEAN);
 
-	isaggr = bf->bf_isaggr;
+	isaggr = bf_isaggr(bf);
 	if (isaggr) {
 		if (txok) {
 			if (ATH_DS_TX_BA(ds)) {
@@ -1038,7 +930,7 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
 					ATH_DS_BA_BITMAP(ds),
 					WME_BA_BMP_SIZE >> 3);
 			} else {
-				memzero(ba, WME_BA_BMP_SIZE >> 3);
+				memset(ba, 0, WME_BA_BMP_SIZE >> 3);
 
 				/*
 				 * AR5416 can become deaf/mute when BA
@@ -1047,11 +939,11 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
 				 * when perform internal reset in this routine.
 				 * Only enable reset in STA mode for now.
 				 */
-				if (sc->sc_opmode == ATH9K_M_STA)
+				if (sc->sc_ah->ah_opmode == ATH9K_M_STA)
 					needreset = 1;
 			}
 		} else {
-			memzero(ba, WME_BA_BMP_SIZE >> 3);
+			memset(ba, 0, WME_BA_BMP_SIZE >> 3);
 		}
 	}
 
@@ -1075,7 +967,7 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
 			ath_tx_set_retry(sc, bf);
 			txpending = 1;
 		} else {
-			bf->bf_isxretried = 1;
+			bf->bf_state.bf_type |= BUF_XRETRY;
 			txfail = 1;
 			sendbar = 1;
 		}
@@ -1175,11 +1067,8 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
 						tbf->bf_lastfrm->bf_desc);
 
 				/* copy the DMA context */
-				copy_dma_mem_context(
-					get_dma_mem_context(tbf,
-						bf_dmacontext),
-					get_dma_mem_context(bf_last,
-						bf_dmacontext));
+				tbf->bf_dmacontext =
+					bf_last->bf_dmacontext;
 			}
 			list_add_tail(&tbf->list, &bf_head);
 		} else {
@@ -1188,7 +1077,7 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
 				 * software retry
 				 */
 				ath9k_hw_cleartxdesc(sc->sc_ah,
-					bf->bf_lastfrm->bf_desc);
+						     bf->bf_lastfrm->bf_desc);
 			}
 
 			/*
@@ -1242,7 +1131,7 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
 	}
 
 	if (needreset)
-		ath_internal_reset(sc);
+		ath_reset(sc, false);
 
 	return;
 }
@@ -1331,7 +1220,7 @@ static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 
 		txq->axq_depth--;
 
-		if (bf->bf_isaggr)
+		if (bf_isaggr(bf))
 			txq->axq_aggr_depth--;
 
 		txok = (ds->ds_txstat.ts_status == 0);
@@ -1345,14 +1234,14 @@ static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 			spin_unlock_bh(&sc->sc_txbuflock);
 		}
 
-		if (!bf->bf_isampdu) {
+		if (!bf_isampdu(bf)) {
 			/*
 			 * This frame is sent out as a single frame.
 			 * Use hardware retry status for this frame.
 			 */
 			bf->bf_retries = ds->ds_txstat.ts_longretry;
 			if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
-				bf->bf_isxretried = 1;
+				bf->bf_state.bf_type |= BUF_XRETRY;
 			nbad = 0;
 		} else {
 			nbad = ath_tx_num_badfrms(sc, bf, txok);
@@ -1368,7 +1257,7 @@ static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 		if (ds->ds_txstat.ts_status == 0)
 			nacked++;
 
-		if (bf->bf_isdata) {
+		if (bf_isdata(bf)) {
 			if (isrifs)
 				tmp_ds = bf->bf_rifslast->bf_desc;
 			else
@@ -1384,7 +1273,7 @@ static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 		/*
 		 * Complete this transmit unit
 		 */
-		if (bf->bf_isampdu)
+		if (bf_isampdu(bf))
 			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok);
 		else
 			ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);
@@ -1406,7 +1295,7 @@ static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 		/*
 		 * schedule any pending packets if aggregation is enabled
 		 */
-		if (sc->sc_txaggr)
+		if (sc->sc_flags & SC_OP_TXAGGR)
 			ath_txq_schedule(sc, txq);
 		spin_unlock_bh(&txq->axq_lock);
 	}
@@ -1430,10 +1319,9 @@ static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
 	struct ath_hal *ah = sc->sc_ah;
 	int i;
 	int npend = 0;
-	enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
 
 	/* XXX return value */
-	if (!sc->sc_invalid) {
+	if (!(sc->sc_flags & SC_OP_INVALID)) {
 		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
 			if (ATH_TXQ_SETUP(sc, i)) {
 				ath_tx_stopdma(sc, &sc->sc_txq[i]);
@@ -1454,10 +1342,11 @@ static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
 			"%s: Unable to stop TxDMA. Reset HAL!\n", __func__);
 
 		spin_lock_bh(&sc->sc_resetlock);
-		if (!ath9k_hw_reset(ah, sc->sc_opmode,
-			&sc->sc_curchan, ht_macmode,
-			sc->sc_tx_chainmask, sc->sc_rx_chainmask,
-			sc->sc_ht_extprotspacing, true, &status)) {
+		if (!ath9k_hw_reset(ah,
+				    sc->sc_ah->ah_curchan,
+				    sc->sc_ht_info.tx_chan_width,
+				    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
+				    sc->sc_ht_extprotspacing, true, &status)) {
 
 			DPRINTF(sc, ATH_DBG_FATAL,
 				"%s: unable to reset hardware; hal status %u\n",
@@ -1481,7 +1370,7 @@ static void ath_tx_addto_baw(struct ath_softc *sc,
 {
 	int index, cindex;
 
-	if (bf->bf_isretried)
+	if (bf_isretried(bf))
 		return;
 
 	index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
@@ -1516,7 +1405,7 @@ static int ath_tx_send_ampdu(struct ath_softc *sc,
 	BUG_ON(list_empty(bf_head));
 
 	bf = list_first_entry(bf_head, struct ath_buf, list);
-	bf->bf_isampdu = 1;
+	bf->bf_state.bf_type |= BUF_AMPDU;
 	bf->bf_seqno = txctl->seqno; /* save seqno and tidno in buffer */
 	bf->bf_tidno = txctl->tidno;
 
@@ -1860,7 +1749,7 @@ static void ath_tx_sched_aggr(struct ath_softc *sc,
 		if (bf->bf_nframes == 1) {
 			ASSERT(bf->bf_lastfrm == bf_last);
 
-			bf->bf_isaggr = 0;
+			bf->bf_state.bf_type &= ~BUF_AGGR;
 			/*
 			 * clear aggr bits for every descriptor
 			 * XXX TODO: is there a way to optimize it?
@@ -1877,7 +1766,7 @@ static void ath_tx_sched_aggr(struct ath_softc *sc,
 		/*
 		 * setup first desc with rate and aggr info
 		 */
-		bf->bf_isaggr = 1;
+		bf->bf_state.bf_type |= BUF_AGGR;
 		ath_buf_set_rate(sc, bf);
 		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);
 
@@ -1925,7 +1814,7 @@ static void ath_tid_drain(struct ath_softc *sc,
 		list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list);
 
 		/* update baw for software retried frame */
-		if (bf->bf_isretried)
+		if (bf_isretried(bf))
 			ath_tx_update_baw(sc, tid, bf->bf_seqno);
 
 		/*
@@ -1990,13 +1879,18 @@ static int ath_tx_start_dma(struct ath_softc *sc,
 	struct list_head bf_head;
 	struct ath_desc *ds;
 	struct ath_hal *ah = sc->sc_ah;
-	struct ath_txq *txq = &sc->sc_txq[txctl->qnum];
+	struct ath_txq *txq;
 	struct ath_tx_info_priv *tx_info_priv;
 	struct ath_rc_series *rcs;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
 	__le16 fc = hdr->frame_control;
 
+	if (unlikely(txctl->flags & ATH9K_TXDESC_CAB))
+		txq = sc->sc_cabq;
+	else
+		txq = &sc->sc_txq[txctl->qnum];
+
 	/* For each sglist entry, allocate an ath_buf for DMA */
 	INIT_LIST_HEAD(&bf_head);
 	spin_lock_bh(&sc->sc_txbuflock);
@@ -2014,11 +1908,21 @@ static int ath_tx_start_dma(struct ath_softc *sc,
 	/* set up this buffer */
 	ATH_TXBUF_RESET(bf);
 	bf->bf_frmlen = txctl->frmlen;
-	bf->bf_isdata = ieee80211_is_data(fc);
-	bf->bf_isbar = ieee80211_is_back_req(fc);
-	bf->bf_ispspoll = ieee80211_is_pspoll(fc);
+
+	ieee80211_is_data(fc) ?
+		(bf->bf_state.bf_type |= BUF_DATA) :
+		(bf->bf_state.bf_type &= ~BUF_DATA);
+	ieee80211_is_back_req(fc) ?
+		(bf->bf_state.bf_type |= BUF_BAR) :
+		(bf->bf_state.bf_type &= ~BUF_BAR);
+	ieee80211_is_pspoll(fc) ?
+		(bf->bf_state.bf_type |= BUF_PSPOLL) :
+		(bf->bf_state.bf_type &= ~BUF_PSPOLL);
+	(sc->sc_flags & SC_OP_PREAMBLE_SHORT) ?
+		(bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
+		(bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
+
 	bf->bf_flags = txctl->flags;
-	bf->bf_shpreamble = sc->sc_flags & ATH_PREAMBLE_SHORT;
 	bf->bf_keytype = txctl->keytype;
 	tx_info_priv = (struct ath_tx_info_priv *)tx_info->driver_data[0];
 	rcs = tx_info_priv->rcs;
@@ -2038,8 +1942,7 @@ static int ath_tx_start_dma(struct ath_softc *sc,
 	/*
 	 * Save the DMA context in the first ath_buf
 	 */
-	copy_dma_mem_context(get_dma_mem_context(bf, bf_dmacontext),
-			     get_dma_mem_context(txctl, dmacontext));
+	bf->bf_dmacontext = txctl->dmacontext;
 
 	/*
 	 * Formulate first tx descriptor with tx controls.
@@ -2060,11 +1963,13 @@ static int ath_tx_start_dma(struct ath_softc *sc,
 		       ds); /* first descriptor */
 
 	bf->bf_lastfrm = bf;
-	bf->bf_ht = txctl->ht;
+	(txctl->ht) ?
+		(bf->bf_state.bf_type |= BUF_HT) :
+		(bf->bf_state.bf_type &= ~BUF_HT);
 
 	spin_lock_bh(&txq->axq_lock);
 
-	if (txctl->ht && sc->sc_txaggr) {
+	if (txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
 		struct ath_atx_tid *tid = ATH_AN_2_TID(an, txctl->tidno);
 		if (ath_aggr_query(sc, an, txctl->tidno)) {
 			/*
@@ -2090,27 +1995,7 @@ static int ath_tx_start_dma(struct ath_softc *sc,
 			bf->bf_tidno = txctl->tidno;
 		}
 
-		if (is_multicast_ether_addr(hdr->addr1)) {
-			struct ath_vap *avp = sc->sc_vaps[txctl->if_id];
-
-			/*
-			 * When servicing one or more stations in power-save
-			 * mode (or) if there is some mcast data waiting on
-			 * mcast queue (to prevent out of order delivery of
-			 * mcast,bcast packets) multicast frames must be
-			 * buffered until after the beacon. We use the private
-			 * mcast queue for that.
-			 */
-			/* XXX? more bit in 802.11 frame header */
-			spin_lock_bh(&avp->av_mcastq.axq_lock);
-			if (txctl->ps || avp->av_mcastq.axq_depth)
-				ath_tx_mcastqaddbuf(sc,
-					&avp->av_mcastq, &bf_head);
-			else
-				ath_tx_txqaddbuf(sc, txq, &bf_head);
-			spin_unlock_bh(&avp->av_mcastq.axq_lock);
-		} else
-			ath_tx_txqaddbuf(sc, txq, &bf_head);
+		ath_tx_txqaddbuf(sc, txq, &bf_head);
 	}
 	spin_unlock_bh(&txq->axq_lock);
 	return 0;
@@ -2118,30 +2003,31 @@ static int ath_tx_start_dma(struct ath_softc *sc,
 
 static void xmit_map_sg(struct ath_softc *sc,
 			struct sk_buff *skb,
-			dma_addr_t *pa,
 			struct ath_tx_control *txctl)
 {
 	struct ath_xmit_status tx_status;
 	struct ath_atx_tid *tid;
 	struct scatterlist sg;
 
-	*pa = pci_map_single(sc->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
+	txctl->dmacontext = pci_map_single(sc->pdev, skb->data,
+					   skb->len, PCI_DMA_TODEVICE);
 
 	/* setup S/G list */
 	memset(&sg, 0, sizeof(struct scatterlist));
-	sg_dma_address(&sg) = *pa;
+	sg_dma_address(&sg) = txctl->dmacontext;
 	sg_dma_len(&sg) = skb->len;
 
 	if (ath_tx_start_dma(sc, skb, &sg, 1, txctl) != 0) {
 		/*
 		 * We have to do drop frame here.
 		 */
-		pci_unmap_single(sc->pdev, *pa, skb->len, PCI_DMA_TODEVICE);
+		pci_unmap_single(sc->pdev, txctl->dmacontext,
+				 skb->len, PCI_DMA_TODEVICE);
 
 		tx_status.retries = 0;
 		tx_status.flags = ATH_TX_ERROR;
 
-		if (txctl->ht && sc->sc_txaggr) {
+		if (txctl->ht && (sc->sc_flags & SC_OP_TXAGGR)) {
 			/* Reclaim the seqno. */
 			tid = ATH_AN_2_TID((struct ath_node *)
 				txctl->an, txctl->tidno);
@@ -2162,7 +2048,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
 
 	/* Setup tx descriptors */
 	error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
-				  "tx", nbufs * ATH_FRAG_PER_MSDU, ATH_TXDESC);
+				  "tx", nbufs, 1);
 	if (error != 0) {
 		DPRINTF(sc, ATH_DBG_FATAL,
 			"%s: failed to allocate tx descriptors: %d\n",
@@ -2212,7 +2098,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
 	struct ath9k_tx_queue_info qi;
 	int qnum;
 
-	memzero(&qi, sizeof(qi));
+	memset(&qi, 0, sizeof(qi));
 	qi.tqi_subtype = subtype;
 	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
 	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
@@ -2403,6 +2289,7 @@ int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb)
 	struct ath_tx_control txctl;
 	int error = 0;
 
+	memset(&txctl, 0, sizeof(struct ath_tx_control));
 	error = ath_tx_prepare(sc, skb, &txctl);
 	if (error == 0)
 		/*
@@ -2410,9 +2297,7 @@ int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb)
 		 * ath_tx_start_dma() will be called either synchronously
 		 * or asynchrounsly once DMA is complete.
 		 */
-		xmit_map_sg(sc, skb,
-			    get_dma_mem_context(&txctl, dmacontext),
-			    &txctl);
+		xmit_map_sg(sc, skb, &txctl);
 	else
 		ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE);
 
@@ -2424,8 +2309,7 @@ int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb)
 
 void ath_tx_tasklet(struct ath_softc *sc)
 {
-	u64 tsf = ath9k_hw_gettsf64(sc->sc_ah);
-	int i, nacked = 0;
+	int i;
 	u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);
 
 	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);
@@ -2435,10 +2319,8 @@ void ath_tx_tasklet(struct ath_softc *sc)
 	 */
 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
 		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
-			nacked += ath_tx_processq(sc, &sc->sc_txq[i]);
+			ath_tx_processq(sc, &sc->sc_txq[i]);
 	}
-	if (nacked)
-		sc->sc_lastrx = tsf;
 }
 
 void ath_tx_draintxq(struct ath_softc *sc,
@@ -2486,14 +2368,14 @@ void ath_tx_draintxq(struct ath_softc *sc,
 
 		spin_unlock_bh(&txq->axq_lock);
 
-		if (bf->bf_isampdu)
+		if (bf_isampdu(bf))
 			ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0);
 		else
 			ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
 	}
 
 	/* flush any pending frames if aggregation is enabled */
-	if (sc->sc_txaggr) {
+	if (sc->sc_flags & SC_OP_TXAGGR) {
 		if (!retry_tx) {
 			spin_lock_bh(&txq->axq_lock);
 			ath_txq_drain_pending_buffers(sc, txq,
@@ -2509,7 +2391,7 @@ void ath_draintxq(struct ath_softc *sc, bool retry_tx)
 {
 	/* stop beacon queue. The beacon will be freed when
 	 * we go to INIT state */
-	if (!sc->sc_invalid) {
+	if (!(sc->sc_flags & SC_OP_INVALID)) {
 		(void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
 		DPRINTF(sc, ATH_DBG_XMIT, "%s: beacon queue %x\n", __func__,
 			ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq));
@@ -2536,7 +2418,7 @@ enum ATH_AGGR_CHECK ath_tx_aggr_check(struct ath_softc *sc,
 	struct ath_atx_tid *txtid;
 	DECLARE_MAC_BUF(mac);
 
-	if (!sc->sc_txaggr)
+	if (!(sc->sc_flags & SC_OP_TXAGGR))
 		return AGGR_NOT_REQUIRED;
 
 	/* ADDBA exchange must be completed before sending aggregates */
@@ -2583,7 +2465,7 @@ int ath_tx_aggr_start(struct ath_softc *sc,
 		return -1;
 	}
 
-	if (sc->sc_txaggr) {
+	if (sc->sc_flags & SC_OP_TXAGGR) {
 		txtid = ATH_AN_2_TID(an, tid);
 		txtid->addba_exchangeinprogress = 1;
 		ath_tx_pause_tid(sc, txtid);
@@ -2647,7 +2529,7 @@ void ath_tx_aggr_teardown(struct ath_softc *sc,
 	spin_lock_bh(&txq->axq_lock);
 	while (!list_empty(&txtid->buf_q)) {
 		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
-		if (!bf->bf_isretried) {
+		if (!bf_isretried(bf)) {
 			/*
 			 * NB: it's based on the assumption that
 			 * software retried frame will always stay
@@ -2743,7 +2625,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 
 void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
 {
-	if (sc->sc_txaggr) {
+	if (sc->sc_flags & SC_OP_TXAGGR) {
 		struct ath_atx_tid *tid;
 		struct ath_atx_ac *ac;
 		int tidno, acno;
@@ -2855,7 +2737,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc,
 
 void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an)
 {
-	if (sc->sc_txaggr) {
+	if (sc->sc_flags & SC_OP_TXAGGR) {
 		struct ath_atx_tid *tid;
 		int tidno, i;
 
@@ -2869,3 +2751,57 @@ void ath_tx_node_free(struct ath_softc *sc, struct ath_node *an)
 		}
 	}
 }
+
+void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
+{
+	int hdrlen, padsize;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ath_tx_control txctl;
+
+	/*
+	 * As a temporary workaround, assign seq# here; this will likely need
+	 * to be cleaned up to work better with Beacon transmission and virtual
+	 * BSSes.
+	 */
+	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
+		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
+			sc->seq_no += 0x10;
+		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+		hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
+	}
+
+	/* Add the padding after the header if this is not already done */
+	hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+	if (hdrlen & 3) {
+		padsize = hdrlen % 4;
+		if (skb_headroom(skb) < padsize) {
+			DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ padding "
+				"failed\n", __func__);
+			dev_kfree_skb_any(skb);
+			return;
+		}
+		skb_push(skb, padsize);
+		memmove(skb->data, skb->data + padsize, hdrlen);
+	}
+
+	DPRINTF(sc, ATH_DBG_XMIT, "%s: transmitting CABQ packet, skb: %p\n",
+		__func__,
+		skb);
+
+	memset(&txctl, 0, sizeof(struct ath_tx_control));
+	txctl.flags = ATH9K_TXDESC_CAB;
+	if (ath_tx_prepare(sc, skb, &txctl) == 0) {
+		/*
+		 * Start DMA mapping.
+		 * ath_tx_start_dma() will be called either synchronously
+		 * or asynchrounsly once DMA is complete.
+		 */
+		xmit_map_sg(sc, skb, &txctl);
+	} else {
+		ath_node_put(sc, txctl.an, ATH9K_BH_STATUS_CHANGE);
+		DPRINTF(sc, ATH_DBG_XMIT, "%s: TX CABQ failed\n", __func__);
+		dev_kfree_skb_any(skb);
+	}
+}
+
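
Note on the bf_state conversion: the patch replaces the old per-field booleans on struct ath_buf (bf_isdata, bf_isaggr, bf_isampdu, bf_isretried, ...) with a single bf_type bitmask inside bf_state, queried through bf_is*() helpers. Those helper and flag definitions live in the driver's headers and therefore do not appear in this diff. The standalone C sketch below shows the general pattern under assumed names modelled on what the diff uses; it is an illustration, not the exact ath9k definitions.

/* Minimal sketch of the single-bitmask buffer-state pattern.
 * BUF_* values and bf_is*() helpers are assumptions based on the
 * names visible in the diff; the real ones are in ath9k's headers. */
#include <stdio.h>

#define BIT(n)			(1U << (n))

#define BUF_DATA		BIT(0)
#define BUF_AGGR		BIT(1)
#define BUF_AMPDU		BIT(2)
#define BUF_HT			BIT(3)
#define BUF_RETRY		BIT(4)
#define BUF_XRETRY		BIT(5)
#define BUF_SHORT_PREAMBLE	BIT(6)

struct ath_buf_state {
	unsigned int bf_type;	/* one word replaces many int fields */
};

struct ath_buf {
	struct ath_buf_state bf_state;
};

/* Test helpers in the style of the bf_is*() macros used by the patch. */
#define bf_isdata(bf)		((bf)->bf_state.bf_type & BUF_DATA)
#define bf_isaggr(bf)		((bf)->bf_state.bf_type & BUF_AGGR)
#define bf_isretried(bf)	((bf)->bf_state.bf_type & BUF_RETRY)

int main(void)
{
	struct ath_buf bf = { { 0 } };
	int is_data = 1;

	/* Conditional set/clear, mirroring the ternary idiom the patch
	 * introduces in ath_tx_start_dma(): cond ? set bit : clear bit */
	is_data ?
		(bf.bf_state.bf_type |= BUF_DATA) :
		(bf.bf_state.bf_type &= ~BUF_DATA);

	/* Unconditional set, as in ath_tx_set_retry() */
	bf.bf_state.bf_type |= BUF_RETRY;

	printf("data=%d aggr=%d retried=%d\n",
	       !!bf_isdata(&bf), !!bf_isaggr(&bf), !!bf_isretried(&bf));
	return 0;
}

The win is that clearing or copying the whole TX state becomes a single word assignment (see how ath_tx_complete_aggr_rifs now copies bf_dmacontext directly), at the cost of slightly noisier set/clear sites.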
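Note on the padding step in the new ath_tx_cabq(): 802.11 header lengths are not always a multiple of four, so the function pushes (hdrlen % 4) bytes of headroom and moves the header forward, leaving the pad bytes between header and payload; the effect appears to be keeping the payload 4-byte aligned for the hardware. A minimal userspace sketch of that arithmetic follows; the buffer layout and names are illustrative, not taken from the driver.

/* Userspace sketch of the CABQ header-padding step. skb_push() and
 * memmove() are emulated on a flat buffer: the header is shifted
 * toward the front so the payload lands on a 4-byte boundary, with
 * (hdrlen % 4) pad bytes left between header and payload. */
#include <stdio.h>
#include <string.h>

#define HEADROOM 8

int main(void)
{
	/* 26-byte QoS data header (26 % 4 == 2), then payload */
	unsigned char buf[HEADROOM + 64];
	unsigned char *data = buf + HEADROOM;	/* skb->data analogue */
	int hdrlen = 26, padsize;

	memset(buf, 0, sizeof(buf));
	memcpy(data, "HEADERHEADERHEADERHEADERHE", hdrlen);
	strcpy((char *)data + hdrlen, "PAYLOAD");

	if (hdrlen & 3) {
		padsize = hdrlen % 4;
		data -= padsize;		/* skb_push(skb, padsize) */
		memmove(data, data + padsize, hdrlen);
	}

	/* header now at data..data+hdrlen; payload offset is 4-aligned */
	printf("payload offset from frame start: %d\n",
	       hdrlen + (hdrlen % 4));
	return 0;
}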