Diffstat (limited to 'drivers/net/wireless/ath/ath9k/xmit.c')
-rw-r--r--	drivers/net/wireless/ath/ath9k/xmit.c	| 1334
1 file changed, 666 insertions(+), 668 deletions(-)
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 4dda14e36227..33443bcaa8d9 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008-2009 Atheros Communications Inc.
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -19,7 +19,6 @@
 
 #define BITS_PER_BYTE 8
 #define OFDM_PLCP_BITS 22
-#define HT_RC_2_MCS(_rc) ((_rc) & 0x1f)
 #define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
 #define L_STF 8
 #define L_LTF 8
@@ -32,7 +31,6 @@
 #define NUM_SYMBOLS_PER_USEC(_usec) (_usec >> 2)
 #define NUM_SYMBOLS_PER_USEC_HALFGI(_usec) (((_usec*5)-4)/18)
 
-#define OFDM_SIFS_TIME 16
 
 static u16 bits_per_symbol[][2] = {
 	/* 20MHz 40MHz */
@@ -48,19 +46,20 @@ static u16 bits_per_symbol[][2] = {
 
 #define IS_HT_RATE(_rate) ((_rate) & 0x80)
 
-static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
+static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
 			       struct ath_atx_tid *tid,
 			       struct list_head *bf_head);
 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
 				struct ath_txq *txq, struct list_head *bf_q,
 				struct ath_tx_status *ts, int txok, int sendbar);
 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
 			     struct list_head *head);
-static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
-static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
-			      struct ath_tx_status *ts, int txok);
-static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
-			     int nbad, int txok, bool update_rc);
+static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
+static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
+			     struct ath_tx_status *ts, int nframes, int nbad,
+			     int txok, bool update_rc);
+static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
+			      int seqno);
 
 enum {
 	MCS_HT20,
@@ -122,7 +121,7 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
 
 static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
+	struct ath_txq *txq = tid->ac->txq;
 
 	WARN_ON(!tid->paused);
 
@@ -138,23 +137,40 @@ unlock:
 	spin_unlock_bh(&txq->axq_lock);
 }
 
+static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
+{
+	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+	BUILD_BUG_ON(sizeof(struct ath_frame_info) >
+		     sizeof(tx_info->rate_driver_data));
+	return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
+}
+
 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
+	struct ath_txq *txq = tid->ac->txq;
 	struct ath_buf *bf;
 	struct list_head bf_head;
-	INIT_LIST_HEAD(&bf_head);
+	struct ath_tx_status ts;
+	struct ath_frame_info *fi;
 
-	WARN_ON(!tid->paused);
+	INIT_LIST_HEAD(&bf_head);
 
+	memset(&ts, 0, sizeof(ts));
 	spin_lock_bh(&txq->axq_lock);
-	tid->paused = false;
 
 	while (!list_empty(&tid->buf_q)) {
 		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
-		BUG_ON(bf_isretried(bf));
 		list_move_tail(&bf->list, &bf_head);
-		ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
+
+		spin_unlock_bh(&txq->axq_lock);
+		fi = get_frame_info(bf->bf_mpdu);
+		if (fi->retries) {
+			ath_tx_update_baw(sc, tid, fi->seqno);
+			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
+		} else {
+			ath_tx_send_normal(sc, txq, NULL, &bf_head);
+		}
+		spin_lock_bh(&txq->axq_lock);
 	}
 
 	spin_unlock_bh(&txq->axq_lock);
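The get_frame_info() helper added above stashes driver-private per-frame state in the rate_driver_data scratch area of struct ieee80211_tx_info, guarded by a compile-time size check. A minimal userspace sketch of the same pattern — the structure names and sizes below are illustrative, not taken from mac80211:

    #include <stdio.h>

    /* stand-in for the mac80211 tx control block */
    struct tx_info {
            void *rate_driver_data[4];      /* scratch area owned by the driver */
    };

    /* driver-private per-frame state, like struct ath_frame_info */
    struct frame_info {
            unsigned short seqno;
            unsigned char retries;
    };

    /* compile-time guard, the userspace analogue of BUILD_BUG_ON() */
    _Static_assert(sizeof(struct frame_info) <=
                   sizeof(((struct tx_info *)0)->rate_driver_data),
                   "frame_info must fit in rate_driver_data");

    static struct frame_info *get_frame_info(struct tx_info *tx_info)
    {
            return (struct frame_info *)&tx_info->rate_driver_data[0];
    }

    int main(void)
    {
            struct tx_info cb = { { 0 } };
            struct frame_info *fi = get_frame_info(&cb);

            fi->retries++;                  /* mutate the embedded state in place */
            printf("retries=%d\n", fi->retries);
            return 0;
    }
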
@@ -168,27 +184,22 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
 	index  = ATH_BA_INDEX(tid->seq_start, seqno);
 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
 
-	tid->tx_buf[cindex] = NULL;
+	__clear_bit(cindex, tid->tx_buf);
 
-	while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
+	while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
 		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
 		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
 	}
 }
 
 static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
-			     struct ath_buf *bf)
+			     u16 seqno)
 {
 	int index, cindex;
 
-	if (bf_isretried(bf))
-		return;
-
-	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
+	index  = ATH_BA_INDEX(tid->seq_start, seqno);
 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
-
-	BUG_ON(tid->tx_buf[cindex] != NULL);
-	tid->tx_buf[cindex] = bf;
+	__set_bit(cindex, tid->tx_buf);
 
 	if (index >= ((tid->baw_tail - tid->baw_head) &
 		      (ATH_TID_MAX_BUFS - 1))) {
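The conversion above replaces the tid->tx_buf pointer array with a bitmap driven by __set_bit()/__clear_bit()/test_bit(): a window slot now only records whether a subframe is outstanding, and seq_start slides forward over cleared slots. A self-contained sketch of that sliding block-ack window, with illustrative constants in place of ATH_TID_MAX_BUFS and IEEE80211_SEQ_MAX:

    #include <stdbool.h>
    #include <stdio.h>

    #define WIN_SIZE 64                     /* window slots, power of two */
    #define SEQ_MAX  4096                   /* 802.11 sequence space */

    static unsigned long win_bitmap;        /* one bit per window slot */
    static int seq_start, baw_head, baw_tail;

    static bool test_slot(int i)  { return win_bitmap & (1UL << i); }
    static void set_slot(int i)   { win_bitmap |=  1UL << i; }
    static void clear_slot(int i) { win_bitmap &= ~(1UL << i); }

    /* mark a sequence number as in flight (ath_tx_addto_baw analogue) */
    static void addto_baw(int seqno)
    {
            int index  = (seqno - seq_start) & (SEQ_MAX - 1);
            int cindex = (baw_head + index) & (WIN_SIZE - 1);

            set_slot(cindex);
            if (index >= ((baw_tail - baw_head) & (WIN_SIZE - 1)))
                    baw_tail = (cindex + 1) & (WIN_SIZE - 1);
    }

    /* complete a sequence number and slide the window (ath_tx_update_baw) */
    static void update_baw(int seqno)
    {
            int index  = (seqno - seq_start) & (SEQ_MAX - 1);
            int cindex = (baw_head + index) & (WIN_SIZE - 1);

            clear_slot(cindex);
            while (baw_head != baw_tail && !test_slot(baw_head)) {
                    seq_start = (seq_start + 1) & (SEQ_MAX - 1);
                    baw_head  = (baw_head + 1) & (WIN_SIZE - 1);
            }
    }

    int main(void)
    {
            addto_baw(0);
            addto_baw(1);
            update_baw(1);                          /* out of order: no slide */
            printf("seq_start=%d\n", seq_start);    /* still 0 */
            update_baw(0);
            printf("seq_start=%d\n", seq_start);    /* slides to 2 */
            return 0;
    }
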
@@ -210,6 +221,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
 	struct ath_buf *bf;
 	struct list_head bf_head;
 	struct ath_tx_status ts;
+	struct ath_frame_info *fi;
 
 	memset(&ts, 0, sizeof(ts));
 	INIT_LIST_HEAD(&bf_head);
@@ -221,8 +233,9 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
 		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
 		list_move_tail(&bf->list, &bf_head);
 
-		if (bf_isretried(bf))
-			ath_tx_update_baw(sc, tid, bf->bf_seqno);
+		fi = get_frame_info(bf->bf_mpdu);
+		if (fi->retries)
+			ath_tx_update_baw(sc, tid, fi->seqno);
 
 		spin_unlock(&txq->axq_lock);
 		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
@@ -234,16 +247,15 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
 }
 
 static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
-			     struct ath_buf *bf)
+			     struct sk_buff *skb)
 {
-	struct sk_buff *skb;
+	struct ath_frame_info *fi = get_frame_info(skb);
 	struct ieee80211_hdr *hdr;
 
-	bf->bf_state.bf_type |= BUF_RETRY;
-	bf->bf_retries++;
 	TX_STAT_INC(txq->axq_qnum, a_retries);
+	if (fi->retries++ > 0)
+		return;
 
-	skb = bf->bf_mpdu;
 	hdr = (struct ieee80211_hdr *)skb->data;
 	hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
 }
@@ -284,24 +296,54 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
 
 	ATH_TXBUF_RESET(tbf);
 
-	tbf->aphy = bf->aphy;
 	tbf->bf_mpdu = bf->bf_mpdu;
 	tbf->bf_buf_addr = bf->bf_buf_addr;
 	memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
 	tbf->bf_state = bf->bf_state;
-	tbf->bf_dmacontext = bf->bf_dmacontext;
 
 	return tbf;
 }
 
+static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
+				struct ath_tx_status *ts, int txok,
+				int *nframes, int *nbad)
+{
+	struct ath_frame_info *fi;
+	u16 seq_st = 0;
+	u32 ba[WME_BA_BMP_SIZE >> 5];
+	int ba_index;
+	int isaggr = 0;
+
+	*nbad = 0;
+	*nframes = 0;
+
+	isaggr = bf_isaggr(bf);
+	if (isaggr) {
+		seq_st = ts->ts_seqnum;
+		memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
+	}
+
+	while (bf) {
+		fi = get_frame_info(bf->bf_mpdu);
+		ba_index = ATH_BA_INDEX(seq_st, fi->seqno);
+
+		(*nframes)++;
+		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
+			(*nbad)++;
+
+		bf = bf->bf_next;
+	}
+}
+
+
 static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 				 struct ath_buf *bf, struct list_head *bf_q,
-				 struct ath_tx_status *ts, int txok)
+				 struct ath_tx_status *ts, int txok, bool retry)
 {
 	struct ath_node *an = NULL;
 	struct sk_buff *skb;
 	struct ieee80211_sta *sta;
-	struct ieee80211_hw *hw;
+	struct ieee80211_hw *hw = sc->hw;
 	struct ieee80211_hdr *hdr;
 	struct ieee80211_tx_info *tx_info;
 	struct ath_atx_tid *tid = NULL;
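ath_tx_count_frames(), added above, makes a single pass over the bf_next chain of an aggregate and compares each subframe's sequence number against the block-ack bitmap reported by the hardware. A simplified standalone version of that counting loop, with the driver types pared down to the essentials (names and sizes are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct buf {
            int seqno;
            struct buf *next;
    };

    /* ATH_BA_ISSET analogue for a single 64-bit block-ack bitmap word */
    static bool ba_isset(unsigned long ba, int idx)
    {
            return idx >= 0 && idx < 64 && (ba & (1UL << idx));
    }

    static void count_frames(const struct buf *bf, unsigned long ba, int seq_st,
                             bool isaggr, bool txok, int *nframes, int *nbad)
    {
            *nframes = 0;
            *nbad = 0;

            for (; bf; bf = bf->next) {
                    int ba_index = (bf->seqno - seq_st) & 4095;

                    (*nframes)++;
                    /* bad if the burst failed outright or this BA bit is clear */
                    if (!txok || (isaggr && !ba_isset(ba, ba_index)))
                            (*nbad)++;
            }
    }

    int main(void)
    {
            struct buf b2 = { 12, NULL }, b1 = { 11, &b2 }, b0 = { 10, &b1 };
            int nframes, nbad;

            /* bits 0 and 2 set: sequence numbers 10 and 12 were acked */
            count_frames(&b0, 0x5UL, 10, true, true, &nframes, &nbad);
            printf("%d frames, %d bad\n", nframes, nbad);   /* 3 frames, 1 bad */
            return 0;
    }
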
@@ -312,19 +354,21 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
 	bool rc_update = true;
 	struct ieee80211_tx_rate rates[4];
+	struct ath_frame_info *fi;
+	int nframes;
+	u8 tidno;
+	bool clear_filter;
 
 	skb = bf->bf_mpdu;
 	hdr = (struct ieee80211_hdr *)skb->data;
 
 	tx_info = IEEE80211_SKB_CB(skb);
-	hw = bf->aphy->hw;
 
 	memcpy(rates, tx_info->control.rates, sizeof(rates));
 
 	rcu_read_lock();
 
-	/* XXX: use ieee80211_find_sta! */
-	sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
+	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
 	if (!sta) {
 		rcu_read_unlock();
 
@@ -337,7 +381,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 		    !bf->bf_stale || bf_next != NULL)
 			list_move_tail(&bf->list, &bf_head);
 
-		ath_tx_rc_status(bf, ts, 0, 0, false);
+		ath_tx_rc_status(sc, bf, ts, 1, 1, 0, false);
 		ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
 			0, 0);
 
@@ -347,14 +391,15 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 	}
 
 	an = (struct ath_node *)sta->drv_priv;
-	tid = ATH_AN_2_TID(an, bf->bf_tidno);
+	tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
+	tid = ATH_AN_2_TID(an, tidno);
 
 	/*
 	 * The hardware occasionally sends a tx status for the wrong TID.
 	 * In this case, the BA status cannot be considered valid and all
 	 * subframes need to be retransmitted
 	 */
-	if (bf->bf_tidno != ts->tid)
+	if (tidno != ts->tid)
 		txok = false;
 
 	isaggr = bf_isaggr(bf);
@@ -380,15 +425,16 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 	INIT_LIST_HEAD(&bf_pending);
 	INIT_LIST_HEAD(&bf_head);
 
-	nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
+	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
 	while (bf) {
-		txfail = txpending = 0;
+		txfail = txpending = sendbar = 0;
 		bf_next = bf->bf_next;
 
 		skb = bf->bf_mpdu;
 		tx_info = IEEE80211_SKB_CB(skb);
+		fi = get_frame_info(skb);
 
-		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
+		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
 			/* transmit completion, subframe is
 			 * acked by block ack */
 			acked_cnt++;
@@ -396,23 +442,24 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 			/* transmit completion */
 			acked_cnt++;
 		} else {
-			if (!(tid->state & AGGR_CLEANUP) &&
-			    !bf_last->bf_tx_aborted) {
-				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
-					ath_tx_set_retry(sc, txq, bf);
-					txpending = 1;
-				} else {
-					bf->bf_state.bf_type |= BUF_XRETRY;
-					txfail = 1;
-					sendbar = 1;
-					txfail_cnt++;
-				}
-			} else {
+			if ((tid->state & AGGR_CLEANUP) || !retry) {
 				/*
 				 * cleanup in progress, just fail
 				 * the un-acked sub-frames
 				 */
 				txfail = 1;
+			} else if (fi->retries < ATH_MAX_SW_RETRIES) {
+				if (!(ts->ts_status & ATH9K_TXERR_FILT) ||
+				    !an->sleeping)
+					ath_tx_set_retry(sc, txq, bf->bf_mpdu);
+
+				clear_filter = true;
+				txpending = 1;
+			} else {
+				bf->bf_state.bf_type |= BUF_XRETRY;
+				txfail = 1;
+				sendbar = 1;
+				txfail_cnt++;
 			}
 		}
418 465
@@ -431,27 +478,28 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
431 list_move_tail(&bf->list, &bf_head); 478 list_move_tail(&bf->list, &bf_head);
432 } 479 }
433 480
434 if (!txpending) { 481 if (!txpending || (tid->state & AGGR_CLEANUP)) {
435 /* 482 /*
436 * complete the acked-ones/xretried ones; update 483 * complete the acked-ones/xretried ones; update
437 * block-ack window 484 * block-ack window
438 */ 485 */
439 spin_lock_bh(&txq->axq_lock); 486 spin_lock_bh(&txq->axq_lock);
440 ath_tx_update_baw(sc, tid, bf->bf_seqno); 487 ath_tx_update_baw(sc, tid, fi->seqno);
441 spin_unlock_bh(&txq->axq_lock); 488 spin_unlock_bh(&txq->axq_lock);
442 489
443 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { 490 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
444 memcpy(tx_info->control.rates, rates, sizeof(rates)); 491 memcpy(tx_info->control.rates, rates, sizeof(rates));
445 ath_tx_rc_status(bf, ts, nbad, txok, true); 492 ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, true);
446 rc_update = false; 493 rc_update = false;
447 } else { 494 } else {
448 ath_tx_rc_status(bf, ts, nbad, txok, false); 495 ath_tx_rc_status(sc, bf, ts, nframes, nbad, txok, false);
449 } 496 }
450 497
451 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 498 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
452 !txfail, sendbar); 499 !txfail, sendbar);
453 } else { 500 } else {
454 /* retry the un-acked ones */ 501 /* retry the un-acked ones */
502 ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, false);
455 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) { 503 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
456 if (bf->bf_next == NULL && bf_last->bf_stale) { 504 if (bf->bf_next == NULL && bf_last->bf_stale) {
457 struct ath_buf *tbf; 505 struct ath_buf *tbf;
@@ -464,14 +512,13 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 					 */
 					if (!tbf) {
 						spin_lock_bh(&txq->axq_lock);
-						ath_tx_update_baw(sc, tid,
-								bf->bf_seqno);
+						ath_tx_update_baw(sc, tid, fi->seqno);
 						spin_unlock_bh(&txq->axq_lock);
 
 						bf->bf_state.bf_type |=
 							BUF_XRETRY;
-						ath_tx_rc_status(bf, ts, nbad,
-								0, false);
+						ath_tx_rc_status(sc, bf, ts, nframes,
+								nbad, 0, false);
 						ath_tx_complete_buf(sc, bf, txq,
 								    &bf_head,
 								    ts, 0, 0);
@@ -503,28 +550,33 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 
 	/* prepend un-acked frames to the beginning of the pending frame queue */
 	if (!list_empty(&bf_pending)) {
+		if (an->sleeping)
+			ieee80211_sta_set_tim(sta);
+
 		spin_lock_bh(&txq->axq_lock);
+		if (clear_filter)
+			tid->ac->clear_ps_filter = true;
 		list_splice(&bf_pending, &tid->buf_q);
 		ath_tx_queue_tid(txq, tid);
 		spin_unlock_bh(&txq->axq_lock);
 	}
 
 	if (tid->state & AGGR_CLEANUP) {
+		ath_tx_flush_tid(sc, tid);
+
 		if (tid->baw_head == tid->baw_tail) {
 			tid->state &= ~AGGR_ADDBA_COMPLETE;
 			tid->state &= ~AGGR_CLEANUP;
-
-			/* send buffered frames as singles */
-			ath_tx_flush_tid(sc, tid);
 		}
-		rcu_read_unlock();
-		return;
 	}
 
 	rcu_read_unlock();
 
-	if (needreset)
+	if (needreset) {
+		spin_unlock_bh(&sc->sc_pcu_lock);
 		ath_reset(sc, false);
+		spin_lock_bh(&sc->sc_pcu_lock);
+	}
 }
 
 static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
@@ -585,8 +637,8 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
 			(u32)ATH_AMPDU_LIMIT_MAX);
 
 	/*
-	 * h/w can accept aggregates upto 16 bit lengths (65535).
-	 * The IE, however can hold upto 65536, which shows up here
+	 * h/w can accept aggregates up to 16 bit lengths (65535).
+	 * The IE, however can hold up to 65536, which shows up here
 	 * as zero. Ignore 65536 since we are constrained by hw.
 	 */
 	if (tid->an->maxampdu)
@@ -608,6 +660,7 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
 	u16 minlen;
 	u8 flags, rix;
 	int width, streams, half_gi, ndelim, mindelim;
+	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
 
 	/* Select standard number of delimiters based on frame length alone */
 	ndelim = ATH_AGGR_GET_NDELIM(frmlen);
@@ -618,7 +671,8 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
 	 * TODO - this could be improved to be dependent on the rate.
 	 *      The hardware can keep up at lower rates, but not higher rates
 	 */
-	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
+	if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
+	    !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
 		ndelim += ATH_AGGR_ENCRYPTDELIM;
 
 	/*
@@ -662,7 +716,8 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
 static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
 					     struct ath_txq *txq,
 					     struct ath_atx_tid *tid,
-					     struct list_head *bf_q)
+					     struct list_head *bf_q,
+					     int *aggr_len)
 {
 #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
 	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
@@ -670,14 +725,17 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
 	u16 aggr_limit = 0, al = 0, bpad = 0,
 	    al_delta, h_baw = tid->baw_size / 2;
 	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
+	struct ieee80211_tx_info *tx_info;
+	struct ath_frame_info *fi;
 
 	bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
 
 	do {
 		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
+		fi = get_frame_info(bf->bf_mpdu);
 
 		/* do not step over block-ack window */
-		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
+		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
 			status = ATH_AGGR_BAW_CLOSED;
 			break;
 		}
@@ -688,7 +746,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
 		}
 
 		/* do not exceed aggregation limit */
-		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;
+		al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
 
 		if (nframes &&
 		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
@@ -696,6 +754,11 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
 			break;
 		}
 
+		tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
+		if (nframes && ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
+		    !(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS)))
+			break;
+
 		/* do not exceed subframe limit */
 		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
 			status = ATH_AGGR_LIMITED;
@@ -710,14 +773,15 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
 		 * Get the delimiters needed to meet the MPDU
 		 * density for this node.
 		 */
-		ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
+		ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
 		bpad = PADBYTES(al_delta) + (ndelim << 2);
 
 		bf->bf_next = NULL;
 		ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);
 
 		/* link buffers of this frame to the aggregate */
-		ath_tx_addto_baw(sc, tid, bf);
+		if (!fi->retries)
+			ath_tx_addto_baw(sc, tid, fi->seqno);
 		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
 		list_move_tail(&bf->list, bf_q);
 		if (bf_prev) {
@@ -729,8 +793,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
 
 	} while (!list_empty(&tid->buf_q));
 
-	bf_first->bf_al = al;
-	bf_first->bf_nframes = nframes;
+	*aggr_len = al;
 
 	return status;
 #undef PADBYTES
@@ -741,7 +804,9 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
 {
 	struct ath_buf *bf;
 	enum ATH_AGGR_STATUS status;
+	struct ath_frame_info *fi;
 	struct list_head bf_q;
+	int aggr_len;
 
 	do {
 		if (list_empty(&tid->buf_q))
@@ -749,7 +814,7 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
 
 		INIT_LIST_HEAD(&bf_q);
 
-		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);
+		status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);
 
 		/*
 		 * no frames picked up to be aggregated;
@@ -761,19 +826,26 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
 		bf = list_first_entry(&bf_q, struct ath_buf, list);
 		bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
 
+		if (tid->ac->clear_ps_filter) {
+			tid->ac->clear_ps_filter = false;
+			ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
+		}
+
 		/* if only one frame, send as non-aggregate */
-		if (bf->bf_nframes == 1) {
+		if (bf == bf->bf_lastbf) {
+			fi = get_frame_info(bf->bf_mpdu);
+
 			bf->bf_state.bf_type &= ~BUF_AGGR;
 			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
-			ath_buf_set_rate(sc, bf);
+			ath_buf_set_rate(sc, bf, fi->framelen);
 			ath_tx_txqaddbuf(sc, txq, &bf_q);
 			continue;
 		}
 
 		/* setup first desc of aggregate */
 		bf->bf_state.bf_type |= BUF_AGGR;
-		ath_buf_set_rate(sc, bf);
-		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);
+		ath_buf_set_rate(sc, bf, aggr_len);
+		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);
 
 		/* anchor last desc of aggregate */
 		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
@@ -781,34 +853,37 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
 		ath_tx_txqaddbuf(sc, txq, &bf_q);
 		TX_STAT_INC(txq->axq_qnum, a_aggr);
 
-	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
+	} while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
 		 status != ATH_AGGR_BAW_CLOSED);
 }
 
-void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
+int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
 		      u16 tid, u16 *ssn)
 {
 	struct ath_atx_tid *txtid;
 	struct ath_node *an;
 
 	an = (struct ath_node *)sta->drv_priv;
 	txtid = ATH_AN_2_TID(an, tid);
+
+	if (txtid->state & (AGGR_CLEANUP | AGGR_ADDBA_COMPLETE))
+		return -EAGAIN;
+
 	txtid->state |= AGGR_ADDBA_PROGRESS;
 	txtid->paused = true;
-	*ssn = txtid->seq_start;
+	*ssn = txtid->seq_start = txtid->seq_next;
+
+	memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
+	txtid->baw_head = txtid->baw_tail = 0;
+
+	return 0;
 }
 
 void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
 {
 	struct ath_node *an = (struct ath_node *)sta->drv_priv;
 	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
-	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
-	struct ath_tx_status ts;
-	struct ath_buf *bf;
-	struct list_head bf_head;
-
-	memset(&ts, 0, sizeof(ts));
-	INIT_LIST_HEAD(&bf_head);
+	struct ath_txq *txq = txtid->ac->txq;
 
 	if (txtid->state & AGGR_CLEANUP)
 		return;
@@ -818,30 +893,82 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
 		return;
 	}
 
-	/* drop all software retried frames and mark this TID */
 	spin_lock_bh(&txq->axq_lock);
 	txtid->paused = true;
-	while (!list_empty(&txtid->buf_q)) {
-		bf = list_first_entry(&txtid->buf_q, struct ath_buf, list);
-		if (!bf_isretried(bf)) {
-			/*
-			 * NB: it's based on the assumption that
-			 * software retried frame will always stay
-			 * at the head of software queue.
-			 */
-			break;
-		}
-		list_move_tail(&bf->list, &bf_head);
-		ath_tx_update_baw(sc, txtid, bf->bf_seqno);
-		ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
-	}
-	spin_unlock_bh(&txq->axq_lock);
 
-	if (txtid->baw_head != txtid->baw_tail) {
+	/*
+	 * If frames are still being transmitted for this TID, they will be
+	 * cleaned up during tx completion. To prevent race conditions, this
+	 * TID can only be reused after all in-progress subframes have been
+	 * completed.
+	 */
+	if (txtid->baw_head != txtid->baw_tail)
 		txtid->state |= AGGR_CLEANUP;
-	} else {
+	else
 		txtid->state &= ~AGGR_ADDBA_COMPLETE;
-		ath_tx_flush_tid(sc, txtid);
+	spin_unlock_bh(&txq->axq_lock);
+
+	ath_tx_flush_tid(sc, txtid);
+}
+
+bool ath_tx_aggr_sleep(struct ath_softc *sc, struct ath_node *an)
+{
+	struct ath_atx_tid *tid;
+	struct ath_atx_ac *ac;
+	struct ath_txq *txq;
+	bool buffered = false;
+	int tidno;
+
+	for (tidno = 0, tid = &an->tid[tidno];
+	     tidno < WME_NUM_TID; tidno++, tid++) {
+
+		if (!tid->sched)
+			continue;
+
+		ac = tid->ac;
+		txq = ac->txq;
+
+		spin_lock_bh(&txq->axq_lock);
+
+		if (!list_empty(&tid->buf_q))
+			buffered = true;
+
+		tid->sched = false;
+		list_del(&tid->list);
+
+		if (ac->sched) {
+			ac->sched = false;
+			list_del(&ac->list);
+		}
+
+		spin_unlock_bh(&txq->axq_lock);
+	}
+
+	return buffered;
+}
+
+void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
+{
+	struct ath_atx_tid *tid;
+	struct ath_atx_ac *ac;
+	struct ath_txq *txq;
+	int tidno;
+
+	for (tidno = 0, tid = &an->tid[tidno];
+	     tidno < WME_NUM_TID; tidno++, tid++) {
+
+		ac = tid->ac;
+		txq = ac->txq;
+
+		spin_lock_bh(&txq->axq_lock);
+		ac->clear_ps_filter = true;
+
+		if (!list_empty(&tid->buf_q) && !tid->paused) {
+			ath_tx_queue_tid(txq, tid);
+			ath_txq_schedule(sc, txq);
+		}
+
+		spin_unlock_bh(&txq->axq_lock);
 	}
 }
 
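The comment added in this hunk states the teardown rule: while the block-ack window still holds in-flight subframes, the TID is only flagged AGGR_CLEANUP and the real teardown is deferred to tx-completion time. A toy model of that deferred cleanup — the flag names mirror the driver, everything else is illustrative:

    #include <stdio.h>

    enum { ADDBA_COMPLETE = 1, AGGR_CLEANUP = 2 };

    struct tid {
            unsigned int state;
            int inflight;           /* stands in for baw_head != baw_tail */
    };

    static void aggr_stop(struct tid *t)
    {
            if (t->inflight)
                    t->state |= AGGR_CLEANUP;       /* finish on completion */
            else
                    t->state &= ~ADDBA_COMPLETE;    /* safe to tear down now */
    }

    /* called as each outstanding subframe completes */
    static void tx_complete(struct tid *t)
    {
            if (--t->inflight == 0 && (t->state & AGGR_CLEANUP))
                    t->state &= ~(AGGR_CLEANUP | ADDBA_COMPLETE);
    }

    int main(void)
    {
            struct tid t = { ADDBA_COMPLETE, 2 };

            aggr_stop(&t);                  /* frames still in flight: defer */
            tx_complete(&t);
            tx_complete(&t);                /* last completion finishes it */
            printf("state=%u\n", t.state);  /* 0: TID reusable */
            return 0;
    }
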
@@ -862,20 +989,6 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid
 	}
 }
 
-bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
-{
-	struct ath_atx_tid *txtid;
-
-	if (!(sc->sc_flags & SC_OP_TXAGGR))
-		return false;
-
-	txtid = ATH_AN_2_TID(an, tidno);
-
-	if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
-		return true;
-	return false;
-}
-
 /********************/
 /* Queue Management */
 /********************/
@@ -902,10 +1015,16 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath9k_tx_queue_info qi;
-	int qnum, i;
+	static const int subtype_txq_to_hwq[] = {
+		[WME_AC_BE] = ATH_TXQ_AC_BE,
+		[WME_AC_BK] = ATH_TXQ_AC_BK,
+		[WME_AC_VI] = ATH_TXQ_AC_VI,
+		[WME_AC_VO] = ATH_TXQ_AC_VO,
+	};
+	int axq_qnum, i;
 
 	memset(&qi, 0, sizeof(qi));
-	qi.tqi_subtype = subtype;
+	qi.tqi_subtype = subtype_txq_to_hwq[subtype];
 	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
 	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
 	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
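The subtype_txq_to_hwq[] table introduced above is a designated-initializer lookup mapping mac80211 access-category indices onto hardware queue subtypes, rather than assuming the two numbering schemes coincide. The same idiom in miniature, with made-up enumerators standing in for the WME/ATH constants:

    #include <stdio.h>

    enum wme_ac { AC_BE, AC_BK, AC_VI, AC_VO };     /* software ordering */
    enum hw_q   { HWQ_BK, HWQ_BE, HWQ_VI, HWQ_VO }; /* hardware ordering differs */

    /* designated initializers keep the mapping explicit and reorder-proof */
    static const int ac_to_hwq[] = {
            [AC_BE] = HWQ_BE,
            [AC_BK] = HWQ_BK,
            [AC_VI] = HWQ_VI,
            [AC_VO] = HWQ_VO,
    };

    int main(void)
    {
            printf("AC_BE maps to hw queue %d\n", ac_to_hwq[AC_BE]);
            return 0;
    }
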
@@ -936,40 +1055,40 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
 		qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
 				TXQ_FLAG_TXDESCINT_ENABLE;
 	}
-	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
-	if (qnum == -1) {
+	axq_qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
+	if (axq_qnum == -1) {
 		/*
 		 * NB: don't print a message, this happens
 		 * normally on parts with too few tx queues
 		 */
 		return NULL;
 	}
-	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
-		ath_print(common, ATH_DBG_FATAL,
-			  "qnum %u out of range, max %u!\n",
-			  qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
-		ath9k_hw_releasetxqueue(ah, qnum);
+	if (axq_qnum >= ARRAY_SIZE(sc->tx.txq)) {
+		ath_err(common, "qnum %u out of range, max %zu!\n",
+			axq_qnum, ARRAY_SIZE(sc->tx.txq));
+		ath9k_hw_releasetxqueue(ah, axq_qnum);
 		return NULL;
 	}
-	if (!ATH_TXQ_SETUP(sc, qnum)) {
-		struct ath_txq *txq = &sc->tx.txq[qnum];
+	if (!ATH_TXQ_SETUP(sc, axq_qnum)) {
+		struct ath_txq *txq = &sc->tx.txq[axq_qnum];
 
-		txq->axq_class = subtype;
-		txq->axq_qnum = qnum;
+		txq->axq_qnum = axq_qnum;
+		txq->mac80211_qnum = -1;
 		txq->axq_link = NULL;
 		INIT_LIST_HEAD(&txq->axq_q);
 		INIT_LIST_HEAD(&txq->axq_acq);
 		spin_lock_init(&txq->axq_lock);
 		txq->axq_depth = 0;
+		txq->axq_ampdu_depth = 0;
 		txq->axq_tx_inprogress = false;
-		sc->tx.txqsetup |= 1<<qnum;
+		sc->tx.txqsetup |= 1<<axq_qnum;
 
 		txq->txq_headidx = txq->txq_tailidx = 0;
 		for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
 			INIT_LIST_HEAD(&txq->txq_fifo[i]);
 		INIT_LIST_HEAD(&txq->txq_fifo_pending);
 	}
-	return &sc->tx.txq[qnum];
+	return &sc->tx.txq[axq_qnum];
 }
 
 int ath_txq_update(struct ath_softc *sc, int qnum,
@@ -999,8 +1118,8 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
 		qi.tqi_readyTime = qinfo->tqi_readyTime;
 
 		if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
-			ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
-				  "Unable to update hardware queue %u!\n", qnum);
+			ath_err(ath9k_hw_common(sc->sc_ah),
+				"Unable to update hardware queue %u!\n", qnum);
 			error = -EIO;
 		} else {
 			ath9k_hw_resettxqueue(ah, qnum);
@@ -1012,6 +1131,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
 int ath_cabq_update(struct ath_softc *sc)
 {
 	struct ath9k_tx_queue_info qi;
+	struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
 	int qnum = sc->beacon.cabq->axq_qnum;
 
 	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
@@ -1023,13 +1143,19 @@ int ath_cabq_update(struct ath_softc *sc)
 	else if (sc->config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
 		sc->config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
 
-	qi.tqi_readyTime = (sc->beacon_interval *
+	qi.tqi_readyTime = (cur_conf->beacon_interval *
 			    sc->config.cabqReadytime) / 100;
 	ath_txq_update(sc, qnum, &qi);
 
 	return 0;
 }
 
+static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
+{
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
+	return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
+}
+
 /*
  * Drain a given TX queue (could be Beacon or Data)
  *
@@ -1076,8 +1202,6 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
 		}
 
 		lastbf = bf->bf_lastbf;
-		if (!retry_tx)
-			lastbf->bf_tx_aborted = true;
 
 		if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
 			list_cut_position(&bf_head,
@@ -1090,11 +1214,13 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
 		}
 
 		txq->axq_depth--;
-
+		if (bf_is_ampdu_not_probing(bf))
+			txq->axq_ampdu_depth--;
 		spin_unlock_bh(&txq->axq_lock);
 
 		if (bf_isampdu(bf))
-			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
+			ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
+					     retry_tx);
 		else
 			ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
 	}
@@ -1103,15 +1229,6 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
 	txq->axq_tx_inprogress = false;
 	spin_unlock_bh(&txq->axq_lock);
 
-	/* flush any pending frames if aggregation is enabled */
-	if (sc->sc_flags & SC_OP_TXAGGR) {
-		if (!retry_tx) {
-			spin_lock_bh(&txq->axq_lock);
-			ath_txq_drain_pending_buffers(sc, txq);
-			spin_unlock_bh(&txq->axq_lock);
-		}
-	}
-
 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
 		spin_lock_bh(&txq->axq_lock);
 		while (!list_empty(&txq->txq_fifo_pending)) {
@@ -1124,7 +1241,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
 
 			if (bf_isampdu(bf))
 				ath_tx_complete_aggr(sc, txq, bf, &bf_head,
-						     &ts, 0);
+						     &ts, 0, retry_tx);
 			else
 				ath_tx_complete_buf(sc, bf, txq, &bf_head,
 						    &ts, 0, 0);
@@ -1132,9 +1249,18 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
 		}
 		spin_unlock_bh(&txq->axq_lock);
 	}
+
+	/* flush any pending frames if aggregation is enabled */
+	if (sc->sc_flags & SC_OP_TXAGGR) {
+		if (!retry_tx) {
+			spin_lock_bh(&txq->axq_lock);
+			ath_txq_drain_pending_buffers(sc, txq);
+			spin_unlock_bh(&txq->axq_lock);
+		}
+	}
 }
 
-void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
+bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
 {
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -1142,39 +1268,36 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
 	int i, npend = 0;
 
 	if (sc->sc_flags & SC_OP_INVALID)
-		return;
+		return true;
 
-	/* Stop beacon queue */
-	ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
+	ath9k_hw_abort_tx_dma(ah);
 
-	/* Stop data queues */
+	/* Check if any queue remains active */
 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
-		if (ATH_TXQ_SETUP(sc, i)) {
-			txq = &sc->tx.txq[i];
-			ath9k_hw_stoptxdma(ah, txq->axq_qnum);
-			npend += ath9k_hw_numtxpending(ah, txq->axq_qnum);
-		}
+		if (!ATH_TXQ_SETUP(sc, i))
+			continue;
+
+		npend += ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum);
 	}
 
-	if (npend) {
-		int r;
+	if (npend)
+		ath_err(common, "Failed to stop TX DMA!\n");
 
-		ath_print(common, ATH_DBG_FATAL,
-			  "Failed to stop TX DMA. Resetting hardware!\n");
+	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
+		if (!ATH_TXQ_SETUP(sc, i))
+			continue;
 
-		spin_lock_bh(&sc->sc_resetlock);
-		r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
-		if (r)
-			ath_print(common, ATH_DBG_FATAL,
-				  "Unable to reset hardware; reset status %d\n",
-				  r);
-		spin_unlock_bh(&sc->sc_resetlock);
+		/*
+		 * The caller will resume queues with ieee80211_wake_queues.
+		 * Mark the queue as not stopped to prevent ath_tx_complete
+		 * from waking the queue too early.
+		 */
+		txq = &sc->tx.txq[i];
+		txq->stopped = false;
+		ath_draintxq(sc, txq, retry_tx);
 	}
 
-	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
-		if (ATH_TXQ_SETUP(sc, i))
-			ath_draintxq(sc, &sc->tx.txq[i], retry_tx);
-	}
+	return !npend;
 }
 
 void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
@@ -1183,65 +1306,60 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
 	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
 }
 
+/* For each axq_acq entry, for each tid, try to schedule packets
+ * for transmit until ampdu_depth has reached min Q depth.
+ */
 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 {
-	struct ath_atx_ac *ac;
-	struct ath_atx_tid *tid;
+	struct ath_atx_ac *ac, *ac_tmp, *last_ac;
+	struct ath_atx_tid *tid, *last_tid;
 
-	if (list_empty(&txq->axq_acq))
+	if (list_empty(&txq->axq_acq) ||
+	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
 		return;
 
 	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
-	list_del(&ac->list);
-	ac->sched = false;
-
-	do {
-		if (list_empty(&ac->tid_q))
-			return;
+	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
 
-		tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list);
-		list_del(&tid->list);
-		tid->sched = false;
+	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
+		last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
+		list_del(&ac->list);
+		ac->sched = false;
 
-		if (tid->paused)
-			continue;
+		while (!list_empty(&ac->tid_q)) {
+			tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
+					       list);
+			list_del(&tid->list);
+			tid->sched = false;
 
-		ath_tx_sched_aggr(sc, txq, tid);
+			if (tid->paused)
+				continue;
 
-		/*
-		 * add tid to round-robin queue if more frames
-		 * are pending for the tid
-		 */
-		if (!list_empty(&tid->buf_q))
-			ath_tx_queue_tid(txq, tid);
+			ath_tx_sched_aggr(sc, txq, tid);
 
-		break;
-	} while (!list_empty(&ac->tid_q));
+			/*
+			 * add tid to round-robin queue if more frames
+			 * are pending for the tid
+			 */
+			if (!list_empty(&tid->buf_q))
+				ath_tx_queue_tid(txq, tid);
 
-	if (!list_empty(&ac->tid_q)) {
-		if (!ac->sched) {
-			ac->sched = true;
-			list_add_tail(&ac->list, &txq->axq_acq);
+			if (tid == last_tid ||
+			    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
+				break;
 		}
-	}
-}
 
-int ath_tx_setup(struct ath_softc *sc, int haltype)
-{
-	struct ath_txq *txq;
+		if (!list_empty(&ac->tid_q)) {
+			if (!ac->sched) {
+				ac->sched = true;
+				list_add_tail(&ac->list, &txq->axq_acq);
+			}
+		}
 
-	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
-		ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
-			  "HAL AC %u out of range, max %zu!\n",
-			  haltype, ARRAY_SIZE(sc->tx.hwq_map));
-		return 0;
+		if (ac == last_ac ||
+		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
+			return;
 	}
-	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
-	if (txq != NULL) {
-		sc->tx.hwq_map[haltype] = txq->axq_qnum;
-		return 1;
-	} else
-		return 0;
 }
 
 /***********/
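The rewritten ath_txq_schedule() above visits every queued access category and every TID within it, but bounds the walk with last_ac/last_tid snapshots and an axq_ampdu_depth check so a single pass cannot spin as entries re-queue themselves. A list-free sketch of that bounded round robin, with arrays standing in for the kernel's list_head plumbing:

    #include <stdio.h>

    #define MIN_QDEPTH 2    /* analogue of ATH_AGGR_MIN_QDEPTH */

    /* one pass: service each pending entry at most once, stopping early when
     * the hardware queue is deep enough; leftovers wait for the next pass */
    static void schedule_pass(int *pending, int n, int *hw_depth)
    {
            int last = n - 1;       /* snapshot of the final entry, like last_ac */

            for (int i = 0; i < n; i++) {
                    if (!pending[i])
                            continue;

                    pending[i]--;   /* "transmit" one aggregate from this entry */
                    (*hw_depth)++;

                    if (i == last || *hw_depth >= MIN_QDEPTH)
                            break;  /* bounded: never loop past this pass */
            }
    }

    int main(void)
    {
            int pending[3] = { 2, 1, 3 };
            int hw_depth = 0;

            schedule_pass(pending, 3, &hw_depth);
            printf("hw_depth=%d remaining={%d,%d,%d}\n",
                   hw_depth, pending[0], pending[1], pending[2]);
            return 0;
    }
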
@@ -1269,8 +1387,8 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
 
 	bf = list_first_entry(head, struct ath_buf, list);
 
-	ath_print(common, ATH_DBG_QUEUE,
-		  "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
+	ath_dbg(common, ATH_DBG_QUEUE,
+		"qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
 
 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
 		if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
@@ -1278,49 +1396,49 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
 			return;
 		}
 		if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
-			ath_print(common, ATH_DBG_XMIT,
-				  "Initializing tx fifo %d which "
-				  "is non-empty\n",
-				  txq->txq_headidx);
+			ath_dbg(common, ATH_DBG_XMIT,
+				"Initializing tx fifo %d which is non-empty\n",
+				txq->txq_headidx);
 		INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
 		list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
 		INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
+		TX_STAT_INC(txq->axq_qnum, puttxbuf);
 		ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
-		ath_print(common, ATH_DBG_XMIT,
-			  "TXDP[%u] = %llx (%p)\n",
-			  txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
+		ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
+			txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
 	} else {
 		list_splice_tail_init(head, &txq->axq_q);
 
 		if (txq->axq_link == NULL) {
+			TX_STAT_INC(txq->axq_qnum, puttxbuf);
 			ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
-			ath_print(common, ATH_DBG_XMIT,
-				  "TXDP[%u] = %llx (%p)\n",
-				  txq->axq_qnum, ito64(bf->bf_daddr),
-				  bf->bf_desc);
+			ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
+				txq->axq_qnum, ito64(bf->bf_daddr),
+				bf->bf_desc);
 		} else {
 			*txq->axq_link = bf->bf_daddr;
-			ath_print(common, ATH_DBG_XMIT,
-				  "link[%u] (%p)=%llx (%p)\n",
-				  txq->axq_qnum, txq->axq_link,
-				  ito64(bf->bf_daddr), bf->bf_desc);
+			ath_dbg(common, ATH_DBG_XMIT,
+				"link[%u] (%p)=%llx (%p)\n",
+				txq->axq_qnum, txq->axq_link,
+				ito64(bf->bf_daddr), bf->bf_desc);
 		}
 		ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
 				       &txq->axq_link);
+		TX_STAT_INC(txq->axq_qnum, txstart);
 		ath9k_hw_txstart(ah, txq->axq_qnum);
 	}
 	txq->axq_depth++;
+	if (bf_is_ampdu_not_probing(bf))
+		txq->axq_ampdu_depth++;
 }
 
 static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
-			      struct list_head *bf_head,
-			      struct ath_tx_control *txctl)
+			      struct ath_buf *bf, struct ath_tx_control *txctl)
 {
-	struct ath_buf *bf;
+	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
+	struct list_head bf_head;
 
-	bf = list_first_entry(bf_head, struct ath_buf, list);
 	bf->bf_state.bf_type |= BUF_AMPDU;
-	TX_STAT_INC(txctl->txq->axq_qnum, a_queued);
 
 	/*
 	 * Do not queue to h/w when any of the following conditions is true:
@@ -1330,56 +1448,49 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
 	 * - h/w queue depth exceeds low water mark
 	 */
 	if (!list_empty(&tid->buf_q) || tid->paused ||
-	    !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) ||
-	    txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) {
+	    !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
+	    txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
 		/*
 		 * Add this frame to software queue for scheduling later
 		 * for aggregation.
 		 */
-		list_move_tail(&bf->list, &tid->buf_q);
+		TX_STAT_INC(txctl->txq->axq_qnum, a_queued_sw);
+		list_add_tail(&bf->list, &tid->buf_q);
 		ath_tx_queue_tid(txctl->txq, tid);
 		return;
 	}
 
+	INIT_LIST_HEAD(&bf_head);
+	list_add(&bf->list, &bf_head);
+
 	/* Add sub-frame to BAW */
-	ath_tx_addto_baw(sc, tid, bf);
+	if (!fi->retries)
+		ath_tx_addto_baw(sc, tid, fi->seqno);
 
 	/* Queue to h/w without aggregation */
-	bf->bf_nframes = 1;
+	TX_STAT_INC(txctl->txq->axq_qnum, a_queued_hw);
 	bf->bf_lastbf = bf;
-	ath_buf_set_rate(sc, bf);
-	ath_tx_txqaddbuf(sc, txctl->txq, bf_head);
+	ath_buf_set_rate(sc, bf, fi->framelen);
+	ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
 }
 
-static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
-				  struct ath_atx_tid *tid,
-				  struct list_head *bf_head)
+static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
+			       struct ath_atx_tid *tid,
+			       struct list_head *bf_head)
 {
+	struct ath_frame_info *fi;
 	struct ath_buf *bf;
 
 	bf = list_first_entry(bf_head, struct ath_buf, list);
 	bf->bf_state.bf_type &= ~BUF_AMPDU;
 
 	/* update starting sequence number for subsequent ADDBA request */
-	INCR(tid->seq_start, IEEE80211_SEQ_MAX);
-
-	bf->bf_nframes = 1;
-	bf->bf_lastbf = bf;
-	ath_buf_set_rate(sc, bf);
-	ath_tx_txqaddbuf(sc, txq, bf_head);
-	TX_STAT_INC(txq->axq_qnum, queued);
-}
-
-static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
-			       struct list_head *bf_head)
-{
-	struct ath_buf *bf;
-
-	bf = list_first_entry(bf_head, struct ath_buf, list);
+	if (tid)
+		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
 
 	bf->bf_lastbf = bf;
-	bf->bf_nframes = 1;
-	ath_buf_set_rate(sc, bf);
+	fi = get_frame_info(bf->bf_mpdu);
+	ath_buf_set_rate(sc, bf, fi->framelen);
 	ath_tx_txqaddbuf(sc, txq, bf_head);
 	TX_STAT_INC(txq->axq_qnum, queued);
 }
@@ -1407,67 +1518,65 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
1407 return htype; 1518 return htype;
1408} 1519}
1409 1520
1410static int get_hw_crypto_keytype(struct sk_buff *skb) 1521static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1411{ 1522 int framelen)
1412 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1413
1414 if (tx_info->control.hw_key) {
1415 if (tx_info->control.hw_key->alg == ALG_WEP)
1416 return ATH9K_KEY_TYPE_WEP;
1417 else if (tx_info->control.hw_key->alg == ALG_TKIP)
1418 return ATH9K_KEY_TYPE_TKIP;
1419 else if (tx_info->control.hw_key->alg == ALG_CCMP)
1420 return ATH9K_KEY_TYPE_AES;
1421 }
1422
1423 return ATH9K_KEY_TYPE_CLEAR;
1424}
1425
1426static void assign_aggr_tid_seqno(struct sk_buff *skb,
1427 struct ath_buf *bf)
1428{ 1523{
1524 struct ath_softc *sc = hw->priv;
1429 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1525 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1526 struct ieee80211_sta *sta = tx_info->control.sta;
1527 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
1430 struct ieee80211_hdr *hdr; 1528 struct ieee80211_hdr *hdr;
1431 struct ath_node *an; 1529 struct ath_frame_info *fi = get_frame_info(skb);
1530 struct ath_node *an = NULL;
1432 struct ath_atx_tid *tid; 1531 struct ath_atx_tid *tid;
1433 __le16 fc; 1532 enum ath9k_key_type keytype;
1434 u8 *qc; 1533 u16 seqno = 0;
1534 u8 tidno;
1435 1535
1436 if (!tx_info->control.sta) 1536 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
1437 return; 1537
1538 if (sta)
1539 an = (struct ath_node *) sta->drv_priv;
1438 1540
1439 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1440 hdr = (struct ieee80211_hdr *)skb->data; 1541 hdr = (struct ieee80211_hdr *)skb->data;
1441 fc = hdr->frame_control; 1542 if (an && ieee80211_is_data_qos(hdr->frame_control) &&
1543 conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
1442 1544
1443 if (ieee80211_is_data_qos(fc)) { 1545 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1444 qc = ieee80211_get_qos_ctl(hdr); 1546
1445 bf->bf_tidno = qc[0] & 0xf; 1547 /*
1548 * Override seqno set by upper layer with the one
1549 * in tx aggregation state.
1550 */
1551 tid = ATH_AN_2_TID(an, tidno);
1552 seqno = tid->seq_next;
1553 hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
1554 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1446 } 1555 }
1447 1556
1448 /* 1557 memset(fi, 0, sizeof(*fi));
1449 * For HT capable stations, we save tidno for later use. 1558 if (hw_key)
1450 * We also override seqno set by upper layer with the one 1559 fi->keyix = hw_key->hw_key_idx;
1451 * in tx aggregation state. 1560 else if (an && ieee80211_is_data(hdr->frame_control) && an->ps_key > 0)
1452 */ 1561 fi->keyix = an->ps_key;
1453 tid = ATH_AN_2_TID(an, bf->bf_tidno); 1562 else
1454 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); 1563 fi->keyix = ATH9K_TXKEYIX_INVALID;
1455 bf->bf_seqno = tid->seq_next; 1564 fi->keytype = keytype;
1456 INCR(tid->seq_next, IEEE80211_SEQ_MAX); 1565 fi->framelen = framelen;
1566 fi->seqno = seqno;
1457} 1567}
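
setup_frame_info() stamps the driver's per-TID sequence counter into the 802.11 header, overriding the one mac80211 assigned, then advances the counter modulo IEEE80211_SEQ_MAX. A standalone model of that stamping and wraparound; SEQ_SHIFT = 4 and SEQ_MAX = 4096 follow the 802.11 seq_ctrl layout (the low four bits carry the fragment number), and host byte order stands in for cpu_to_le16():

#include <stdint.h>
#include <stdio.h>

#define SEQ_SHIFT 4
#define SEQ_MAX   4096

static uint16_t stamp_seq_ctrl(uint16_t *seq_next, uint8_t frag)
{
	uint16_t seq_ctrl = (uint16_t)((*seq_next << SEQ_SHIFT) | (frag & 0xf));

	*seq_next = (*seq_next + 1) % SEQ_MAX;  /* INCR(tid->seq_next, ...) */
	return seq_ctrl;
}

int main(void)
{
	uint16_t seq_next = 4094;
	int i;

	for (i = 0; i < 4; i++) {
		uint16_t seqno = seq_next;

		printf("seqno=%4u -> seq_ctrl=%#06x\n", seqno,
		       stamp_seq_ctrl(&seq_next, 0));
	}
	return 0;
}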
1458 1568
1459static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc) 1569static int setup_tx_flags(struct sk_buff *skb)
1460{ 1570{
1461 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1571 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1462 int flags = 0; 1572 int flags = 0;
1463 1573
1464 flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1465 flags |= ATH9K_TXDESC_INTREQ; 1574 flags |= ATH9K_TXDESC_INTREQ;
1466 1575
1467 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) 1576 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1468 flags |= ATH9K_TXDESC_NOACK; 1577 flags |= ATH9K_TXDESC_NOACK;
1469 1578
1470 if (use_ldpc) 1579 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1471 flags |= ATH9K_TXDESC_LDPC; 1580 flags |= ATH9K_TXDESC_LDPC;
1472 1581
1473 return flags; 1582 return flags;
@@ -1479,13 +1588,11 @@ static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
1479 * width - 0 for 20 MHz, 1 for 40 MHz 1588 * width - 0 for 20 MHz, 1 for 40 MHz
1480 * half_gi - to use 4 us vs. 3.6 us for symbol time 1589 * half_gi - to use 4 us vs. 3.6 us for symbol time
1481 */ 1590 */
1482static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf, 1591static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
1483 int width, int half_gi, bool shortPreamble) 1592 int width, int half_gi, bool shortPreamble)
1484{ 1593{
1485 u32 nbits, nsymbits, duration, nsymbols; 1594 u32 nbits, nsymbits, duration, nsymbols;
1486 int streams, pktlen; 1595 int streams;
1487
1488 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
1489 1596
1490 /* find number of symbols: PLCP + data */ 1597 /* find number of symbols: PLCP + data */
1491 streams = HT_RC_2_STREAMS(rix); 1598 streams = HT_RC_2_STREAMS(rix);
@@ -1504,7 +1611,19 @@ static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1504 return duration; 1611 return duration;
1505} 1612}
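
With the buffer argument gone, ath_pkt_duration() is a pure function of frame length: preamble time plus ceil(payload bits / bits-per-symbol) times the symbol time. The userspace model below uses the standard 26 bits/symbol per stream for MCS0 at 20 MHz and the usual 802.11n mixed-format preamble field durations; treat the constants as illustrative, not the driver's tables:

#include <stdio.h>

#define OFDM_PLCP_BITS 22
#define SYMBOL_US      4  /* 800 ns guard interval */

static unsigned int ht_duration_us(unsigned int len,
				   unsigned int bits_per_sym_1ss,
				   unsigned int streams, int half_gi)
{
	unsigned int nbits    = (len << 3) + OFDM_PLCP_BITS;
	unsigned int nsymbits = bits_per_sym_1ss * streams;
	unsigned int nsymbols = (nbits + nsymbits - 1) / nsymbits; /* ceil */
	unsigned int data_us  = half_gi ? (nsymbols * 18 + 4) / 5  /* 3.6 us */
					: nsymbols * SYMBOL_US;
	/* L-STF + L-LTF + L-SIG + HT-SIG + HT-STF + one HT-LTF per stream */
	unsigned int preamble = 8 + 8 + 4 + 8 + 4 + 4 * streams;

	return preamble + data_us;
}

int main(void)
{
	printf("1500-byte MPDU, MCS0 20 MHz: %u us\n",
	       ht_duration_us(1500, 26, 1, 0));
	return 0;
}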
1506 1613
1507static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf) 1614u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1615{
1616 struct ath_hw *ah = sc->sc_ah;
1617 struct ath9k_channel *curchan = ah->curchan;
1618 if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
1619 (curchan->channelFlags & CHANNEL_5GHZ) &&
1620 (chainmask == 0x7) && (rate < 0x90))
1621 return 0x3;
1622 else
1623 return chainmask;
1624}
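
A hedged reading of the new helper: 0x7 enables all three chains, 0x3 keeps two, and rate < 0x90 covers every legacy rate (below 0x80) plus MCS 0-15 (0x80-0x8f), i.e. anything needing at most two spatial streams, so the third chain can be powered down under APM without giving up the rate. Restated as a standalone predicate with illustrative flag parameters:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t chainmask_reduce(uint8_t chainmask, uint32_t rate,
				bool apm_enabled, bool is_5ghz)
{
	if (apm_enabled && is_5ghz && chainmask == 0x7 && rate < 0x90)
		return 0x3;  /* two chains suffice below MCS16 */
	return chainmask;
}

int main(void)
{
	printf("MCS4:  %#x\n", chainmask_reduce(0x7, 0x84, true, true));
	printf("MCS16: %#x\n", chainmask_reduce(0x7, 0x90, true, true));
	return 0;
}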
1625
1626static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
1508{ 1627{
1509 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1628 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1510 struct ath9k_11n_rate_series series[4]; 1629 struct ath9k_11n_rate_series series[4];
@@ -1544,10 +1663,8 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1544 1663
1545 rix = rates[i].idx; 1664 rix = rates[i].idx;
1546 series[i].Tries = rates[i].count; 1665 series[i].Tries = rates[i].count;
1547 series[i].ChSel = common->tx_chainmask;
1548 1666
1549 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) || 1667 if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
1550 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
1551 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; 1668 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1552 flags |= ATH9K_TXDESC_RTSENA; 1669 flags |= ATH9K_TXDESC_RTSENA;
1553 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { 1670 } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
@@ -1567,14 +1684,16 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1567 if (rates[i].flags & IEEE80211_TX_RC_MCS) { 1684 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1568 /* MCS rates */ 1685 /* MCS rates */
1569 series[i].Rate = rix | 0x80; 1686 series[i].Rate = rix | 0x80;
1570 series[i].PktDuration = ath_pkt_duration(sc, rix, bf, 1687 series[i].ChSel = ath_txchainmask_reduction(sc,
1688 common->tx_chainmask, series[i].Rate);
1689 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
1571 is_40, is_sgi, is_sp); 1690 is_40, is_sgi, is_sp);
1572 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC)) 1691 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1573 series[i].RateFlags |= ATH9K_RATESERIES_STBC; 1692 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
1574 continue; 1693 continue;
1575 } 1694 }
1576 1695
1577 /* legcay rates */ 1696 /* legacy rates */
1578 if ((tx_info->band == IEEE80211_BAND_2GHZ) && 1697 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1579 !(rate->flags & IEEE80211_RATE_ERP_G)) 1698 !(rate->flags & IEEE80211_RATE_ERP_G))
1580 phy = WLAN_RC_PHY_CCK; 1699 phy = WLAN_RC_PHY_CCK;
@@ -1590,12 +1709,18 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1590 is_sp = false; 1709 is_sp = false;
1591 } 1710 }
1592 1711
1712 if (bf->bf_state.bfs_paprd)
1713 series[i].ChSel = common->tx_chainmask;
1714 else
1715 series[i].ChSel = ath_txchainmask_reduction(sc,
1716 common->tx_chainmask, series[i].Rate);
1717
1593 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah, 1718 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
1594 phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp); 1719 phy, rate->bitrate * 100, len, rix, is_sp);
1595 } 1720 }
1596 1721
1597 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */ 1722 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1598 if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit)) 1723 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
1599 flags &= ~ATH9K_TXDESC_RTSENA; 1724 flags &= ~ATH9K_TXDESC_RTSENA;
1600 1725
1601 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */ 1726 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
@@ -1608,122 +1733,49 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1608 !is_pspoll, ctsrate, 1733 !is_pspoll, ctsrate,
1609 0, series, 4, flags); 1734 0, series, 4, flags);
1610 1735
1611 if (sc->config.ath_aggr_prot && flags)
1612 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
1613} 1736}
1614 1737
1615static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf, 1738static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
1616 struct sk_buff *skb, 1739 struct ath_txq *txq,
1617 struct ath_tx_control *txctl) 1740 struct sk_buff *skb)
1618{ 1741{
1619 struct ath_wiphy *aphy = hw->priv; 1742 struct ath_softc *sc = hw->priv;
1620 struct ath_softc *sc = aphy->sc; 1743 struct ath_hw *ah = sc->sc_ah;
1621 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1744 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1622 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1745 struct ath_frame_info *fi = get_frame_info(skb);
1623 int hdrlen; 1746 struct ath_buf *bf;
1624 __le16 fc; 1747 struct ath_desc *ds;
1625 int padpos, padsize; 1748 int frm_type;
1626 bool use_ldpc = false;
1627
1628 tx_info->pad[0] = 0;
1629 switch (txctl->frame_type) {
1630 case ATH9K_IFT_NOT_INTERNAL:
1631 break;
1632 case ATH9K_IFT_PAUSE:
1633 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
1634 /* fall through */
1635 case ATH9K_IFT_UNPAUSE:
1636 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
1637 break;
1638 }
1639 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1640 fc = hdr->frame_control;
1641
1642 ATH_TXBUF_RESET(bf);
1643
1644 bf->aphy = aphy;
1645 bf->bf_frmlen = skb->len + FCS_LEN;
1646 /* Remove the padding size from bf_frmlen, if any */
1647 padpos = ath9k_cmn_padpos(hdr->frame_control);
1648 padsize = padpos & 3;
1649 if (padsize && skb->len>padpos+padsize) {
1650 bf->bf_frmlen -= padsize;
1651 }
1652
1653 if (!txctl->paprd && conf_is_ht(&hw->conf)) {
1654 bf->bf_state.bf_type |= BUF_HT;
1655 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1656 use_ldpc = true;
1657 }
1658
1659 bf->bf_state.bfs_paprd = txctl->paprd;
1660 if (txctl->paprd)
1661 bf->bf_state.bfs_paprd_timestamp = jiffies;
1662 bf->bf_flags = setup_tx_flags(skb, use_ldpc);
1663 1749
1664 bf->bf_keytype = get_hw_crypto_keytype(skb); 1750 bf = ath_tx_get_buffer(sc);
1665 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) { 1751 if (!bf) {
1666 bf->bf_frmlen += tx_info->control.hw_key->icv_len; 1752 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
1667 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx; 1753 return NULL;
1668 } else {
1669 bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
1670 } 1754 }
1671 1755
1672 if (ieee80211_is_data_qos(fc) && bf_isht(bf) && 1756 ATH_TXBUF_RESET(bf);
1673 (sc->sc_flags & SC_OP_TXAGGR))
1674 assign_aggr_tid_seqno(skb, bf);
1675 1757
1758 bf->bf_flags = setup_tx_flags(skb);
1676 bf->bf_mpdu = skb; 1759 bf->bf_mpdu = skb;
1677 1760
1678 bf->bf_dmacontext = dma_map_single(sc->dev, skb->data, 1761 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
1679 skb->len, DMA_TO_DEVICE); 1762 skb->len, DMA_TO_DEVICE);
1680 if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) { 1763 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
1681 bf->bf_mpdu = NULL; 1764 bf->bf_mpdu = NULL;
1682 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, 1765 bf->bf_buf_addr = 0;
1683 "dma_mapping_error() on TX\n"); 1766 ath_err(ath9k_hw_common(sc->sc_ah),
1684 return -ENOMEM; 1767 "dma_mapping_error() on TX\n");
1768 ath_tx_return_buffer(sc, bf);
1769 return NULL;
1685 } 1770 }
1686 1771
1687 bf->bf_buf_addr = bf->bf_dmacontext;
1688
1689 /* tag if this is a nullfunc frame to enable PS when AP acks it */
1690 if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
1691 bf->bf_isnullfunc = true;
1692 sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
1693 } else
1694 bf->bf_isnullfunc = false;
1695
1696 bf->bf_tx_aborted = false;
1697
1698 return 0;
1699}
1700
1701/* FIXME: tx power */
1702static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1703 struct ath_tx_control *txctl)
1704{
1705 struct sk_buff *skb = bf->bf_mpdu;
1706 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1707 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1708 struct ath_node *an = NULL;
1709 struct list_head bf_head;
1710 struct ath_desc *ds;
1711 struct ath_atx_tid *tid;
1712 struct ath_hw *ah = sc->sc_ah;
1713 int frm_type;
1714 __le16 fc;
1715
1716 frm_type = get_hw_packet_type(skb); 1772 frm_type = get_hw_packet_type(skb);
1717 fc = hdr->frame_control;
1718
1719 INIT_LIST_HEAD(&bf_head);
1720 list_add_tail(&bf->list, &bf_head);
1721 1773
1722 ds = bf->bf_desc; 1774 ds = bf->bf_desc;
1723 ath9k_hw_set_desc_link(ah, ds, 0); 1775 ath9k_hw_set_desc_link(ah, ds, 0);
1724 1776
1725 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER, 1777 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1726 bf->bf_keyix, bf->bf_keytype, bf->bf_flags); 1778 fi->keyix, fi->keytype, bf->bf_flags);
1727 1779
1728 ath9k_hw_filltxdesc(ah, ds, 1780 ath9k_hw_filltxdesc(ah, ds,
1729 skb->len, /* segment length */ 1781 skb->len, /* segment length */
@@ -1731,109 +1783,83 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1731 true, /* last segment */ 1783 true, /* last segment */
1732 ds, /* first descriptor */ 1784 ds, /* first descriptor */
1733 bf->bf_buf_addr, 1785 bf->bf_buf_addr,
1734 txctl->txq->axq_qnum); 1786 txq->axq_qnum);
1735
1736 if (bf->bf_state.bfs_paprd)
1737 ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd);
1738
1739 spin_lock_bh(&txctl->txq->axq_lock);
1740
1741 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) &&
1742 tx_info->control.sta) {
1743 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1744 tid = ATH_AN_2_TID(an, bf->bf_tidno);
1745
1746 if (!ieee80211_is_data_qos(fc)) {
1747 ath_tx_send_normal(sc, txctl->txq, &bf_head);
1748 goto tx_done;
1749 }
1750 1787
1751 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
1752 /*
1753 * Try aggregation if it's a unicast data frame
1754 * and the destination is HT capable.
1755 */
1756 ath_tx_send_ampdu(sc, tid, &bf_head, txctl);
1757 } else {
1758 /*
1759 * Send this frame as regular when ADDBA
1760 * exchange is neither complete nor pending.
1761 */
1762 ath_tx_send_ht_normal(sc, txctl->txq,
1763 tid, &bf_head);
1764 }
1765 } else {
1766 ath_tx_send_normal(sc, txctl->txq, &bf_head);
1767 }
1768 1788
1769tx_done: 1789 return bf;
1770 spin_unlock_bh(&txctl->txq->axq_lock);
1771} 1790}
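
The reworked ath_tx_setup_buffer() follows an acquire-map-unwind shape: take an ath_buf, DMA-map the skb, and on mapping failure return the buffer before handing NULL back, so the caller only ever has the skb left to free. A userspace model of that ordering, with stand-ins for the buffer pool and the DMA calls:

#include <stdio.h>
#include <stdlib.h>

struct buf { void *mapped; };

static struct buf *buf_get(void) { return calloc(1, sizeof(struct buf)); }
static void buf_return(struct buf *b) { free(b); }

/* stand-in for dma_map_single(); 'fail' simulates dma_mapping_error() */
static void *map_for_dma(void *data, int fail) { return fail ? NULL : data; }

static struct buf *setup_buffer(void *payload, int simulate_failure)
{
	struct buf *b = buf_get();

	if (!b)
		return NULL;  /* the "TX buffers are full" path */
	b->mapped = map_for_dma(payload, simulate_failure);
	if (!b->mapped) {
		buf_return(b);  /* unwind; caller still owns payload */
		return NULL;
	}
	return b;
}

int main(void)
{
	char payload[16] = "frame";
	struct buf *b = setup_buffer(payload, 0);

	printf("setup: %s\n", b ? "ok" : "failed");
	if (b)
		buf_return(b);
	printf("forced map failure: %s\n",
	       setup_buffer(payload, 1) ? "ok" : "NULL");
	return 0;
}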
1772 1791
1773/* Upon failure caller should free skb */ 1792/* FIXME: tx power */
1774int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, 1793static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1775 struct ath_tx_control *txctl) 1794 struct ath_tx_control *txctl)
1776{ 1795{
1777 struct ath_wiphy *aphy = hw->priv; 1796 struct sk_buff *skb = bf->bf_mpdu;
1778 struct ath_softc *sc = aphy->sc; 1797 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1779 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1798 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1780 struct ath_txq *txq = txctl->txq; 1799 struct list_head bf_head;
1781 struct ath_buf *bf; 1800 struct ath_atx_tid *tid = NULL;
1782 int q, r; 1801 u8 tidno;
1783 1802
1784 bf = ath_tx_get_buffer(sc); 1803 spin_lock_bh(&txctl->txq->axq_lock);
1785 if (!bf) { 1804 if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
1786 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n"); 1805 ieee80211_is_data_qos(hdr->frame_control)) {
1787 return -1; 1806 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1807 IEEE80211_QOS_CTL_TID_MASK;
1808 tid = ATH_AN_2_TID(txctl->an, tidno);
1809
1810 WARN_ON(tid->ac->txq != txctl->txq);
1788 } 1811 }
1789 1812
1790 r = ath_tx_setup_buffer(hw, bf, skb, txctl); 1813 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
1791 if (unlikely(r)) { 1814 /*
1792 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n"); 1815 * Try aggregation if it's a unicast data frame
1816 * and the destination is HT capable.
1817 */
1818 ath_tx_send_ampdu(sc, tid, bf, txctl);
1819 } else {
1820 INIT_LIST_HEAD(&bf_head);
1821 list_add_tail(&bf->list, &bf_head);
1793 1822
1794 /* upon ath_tx_processq() this TX queue will be resumed, we 1823 bf->bf_state.bfs_ftype = txctl->frame_type;
1795 * guarantee this will happen by knowing beforehand that 1824 bf->bf_state.bfs_paprd = txctl->paprd;
1796 * we will at least have to run TX completion on one buffer
1797 * on the queue */
1798 spin_lock_bh(&txq->axq_lock);
1799 if (!txq->stopped && txq->axq_depth > 1) {
1800 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1801 txq->stopped = 1;
1802 }
1803 spin_unlock_bh(&txq->axq_lock);
1804 1825
1805 ath_tx_return_buffer(sc, bf); 1826 if (bf->bf_state.bfs_paprd)
1827 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1828 bf->bf_state.bfs_paprd);
1806 1829
1807 return r; 1830 if (txctl->paprd)
1808 } 1831 bf->bf_state.bfs_paprd_timestamp = jiffies;
1809 1832
1810 q = skb_get_queue_mapping(skb); 1833 if (tx_info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
1811 if (q >= 4) 1834 ath9k_hw_set_clrdmask(sc->sc_ah, bf->bf_desc, true);
1812 q = 0;
1813 1835
1814 spin_lock_bh(&txq->axq_lock); 1836 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
1815 if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
1816 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1817 txq->stopped = 1;
1818 } 1837 }
1819 spin_unlock_bh(&txq->axq_lock);
1820 1838
1821 ath_tx_start_dma(sc, bf, txctl); 1839 spin_unlock_bh(&txctl->txq->axq_lock);
1822
1823 return 0;
1824} 1840}
1825 1841
1826void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb) 1842/* Upon failure caller should free skb */
1843int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1844 struct ath_tx_control *txctl)
1827{ 1845{
1828 struct ath_wiphy *aphy = hw->priv;
1829 struct ath_softc *sc = aphy->sc;
1830 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1831 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1846 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1832 int padpos, padsize;
1833 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1847 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1834 struct ath_tx_control txctl; 1848 struct ieee80211_sta *sta = info->control.sta;
1849 struct ieee80211_vif *vif = info->control.vif;
1850 struct ath_softc *sc = hw->priv;
1851 struct ath_txq *txq = txctl->txq;
1852 struct ath_buf *bf;
1853 int padpos, padsize;
1854 int frmlen = skb->len + FCS_LEN;
1855 int q;
1835 1856
1836 memset(&txctl, 0, sizeof(struct ath_tx_control)); 1857 /* NOTE: sta can be NULL according to net/mac80211.h */
1858 if (sta)
1859 txctl->an = (struct ath_node *)sta->drv_priv;
1860
1861 if (info->control.hw_key)
1862 frmlen += info->control.hw_key->icv_len;
1837 1863
1838 /* 1864 /*
1839 * As a temporary workaround, assign seq# here; this will likely need 1865 * As a temporary workaround, assign seq# here; this will likely need
@@ -1850,30 +1876,42 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
1850 /* Add the padding after the header if this is not already done */ 1876 /* Add the padding after the header if this is not already done */
1851 padpos = ath9k_cmn_padpos(hdr->frame_control); 1877 padpos = ath9k_cmn_padpos(hdr->frame_control);
1852 padsize = padpos & 3; 1878 padsize = padpos & 3;
1853 if (padsize && skb->len>padpos) { 1879 if (padsize && skb->len > padpos) {
1854 if (skb_headroom(skb) < padsize) { 1880 if (skb_headroom(skb) < padsize)
1855 ath_print(common, ATH_DBG_XMIT, 1881 return -ENOMEM;
1856 "TX CABQ padding failed\n"); 1882
1857 dev_kfree_skb_any(skb);
1858 return;
1859 }
1860 skb_push(skb, padsize); 1883 skb_push(skb, padsize);
1861 memmove(skb->data, skb->data + padsize, padpos); 1884 memmove(skb->data, skb->data + padsize, padpos);
1862 } 1885 }
1863 1886
1864 txctl.txq = sc->beacon.cabq; 1887 if ((vif && vif->type != NL80211_IFTYPE_AP &&
1888 vif->type != NL80211_IFTYPE_AP_VLAN) ||
1889 !ieee80211_is_data(hdr->frame_control))
1890 info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
1865 1891
1866 ath_print(common, ATH_DBG_XMIT, 1892 setup_frame_info(hw, skb, frmlen);
1867 "transmitting CABQ packet, skb: %p\n", skb);
1868 1893
1869 if (ath_tx_start(hw, skb, &txctl) != 0) { 1894 /*
1870 ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n"); 1895 * At this point, the vif, hw_key and sta pointers in the tx control
1871 goto exit; 1896 * info are no longer valid (overwritten by the ath_frame_info data).
1897 */
1898
1899 bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
1900 if (unlikely(!bf))
1901 return -ENOMEM;
1902
1903 q = skb_get_queue_mapping(skb);
1904 spin_lock_bh(&txq->axq_lock);
1905 if (txq == sc->tx.txq_map[q] &&
1906 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
1907 ieee80211_stop_queue(sc->hw, q);
1908 txq->stopped = 1;
1872 } 1909 }
1910 spin_unlock_bh(&txq->axq_lock);
1911
1912 ath_tx_start_dma(sc, bf, txctl);
1873 1913
1874 return; 1914 return 0;
1875exit:
1876 dev_kfree_skb_any(skb);
1877} 1915}
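
ath_tx_start() now pairs with ath_tx_complete() for per-queue flow control: enqueue stops the mac80211 queue once txq->pending_frames crosses ATH_MAX_QDEPTH, and completion wakes it when the count drops back under the threshold. A compact model of that hysteresis (the threshold value is illustrative):

#include <stdbool.h>
#include <stdio.h>

#define MAX_QDEPTH 8  /* stand-in for ATH_MAX_QDEPTH */

struct txq { int pending_frames; bool stopped; };

static void enqueue(struct txq *q)
{
	if (++q->pending_frames > MAX_QDEPTH && !q->stopped) {
		q->stopped = true;  /* ieee80211_stop_queue() */
		printf("stopped at %d\n", q->pending_frames);
	}
}

static void complete(struct txq *q)
{
	if (--q->pending_frames < 0)
		q->pending_frames = 0;  /* the driver WARN_ON()s here */
	if (q->stopped && q->pending_frames < MAX_QDEPTH) {
		q->stopped = false;  /* ieee80211_wake_queue() */
		printf("woken at %d\n", q->pending_frames);
	}
}

int main(void)
{
	struct txq q = { 0, false };
	int i;

	for (i = 0; i < 10; i++)
		enqueue(&q);   /* stops once the depth exceeds 8 */
	for (i = 0; i < 3; i++)
		complete(&q);  /* wakes once it falls below 8 */
	return 0;
}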
1878 1916
1879/*****************/ 1917/*****************/
@@ -1881,7 +1919,7 @@ exit:
1881/*****************/ 1919/*****************/
1882 1920
1883static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, 1921static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1884 struct ath_wiphy *aphy, int tx_flags) 1922 int tx_flags, int ftype, struct ath_txq *txq)
1885{ 1923{
1886 struct ieee80211_hw *hw = sc->hw; 1924 struct ieee80211_hw *hw = sc->hw;
1887 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1925 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -1889,10 +1927,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1889 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data; 1927 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
1890 int q, padpos, padsize; 1928 int q, padpos, padsize;
1891 1929
1892 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb); 1930 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
1893
1894 if (aphy)
1895 hw = aphy->hw;
1896 1931
1897 if (tx_flags & ATH_TX_BAR) 1932 if (tx_flags & ATH_TX_BAR)
1898 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; 1933 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
@@ -1915,27 +1950,28 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1915 1950
1916 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) { 1951 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1917 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK; 1952 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
1918 ath_print(common, ATH_DBG_PS, 1953 ath_dbg(common, ATH_DBG_PS,
1919 "Going back to sleep after having " 1954 "Going back to sleep after having received TX status (0x%lx)\n",
1920 "received TX status (0x%lx)\n",
1921 sc->ps_flags & (PS_WAIT_FOR_BEACON | 1955 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1922 PS_WAIT_FOR_CAB | 1956 PS_WAIT_FOR_CAB |
1923 PS_WAIT_FOR_PSPOLL_DATA | 1957 PS_WAIT_FOR_PSPOLL_DATA |
1924 PS_WAIT_FOR_TX_ACK)); 1958 PS_WAIT_FOR_TX_ACK));
1925 } 1959 }
1926 1960
1927 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL)) 1961 q = skb_get_queue_mapping(skb);
1928 ath9k_tx_status(hw, skb); 1962 if (txq == sc->tx.txq_map[q]) {
1929 else { 1963 spin_lock_bh(&txq->axq_lock);
1930 q = skb_get_queue_mapping(skb); 1964 if (WARN_ON(--txq->pending_frames < 0))
1931 if (q >= 4) 1965 txq->pending_frames = 0;
1932 q = 0;
1933
1934 if (--sc->tx.pending_frames[q] < 0)
1935 sc->tx.pending_frames[q] = 0;
1936 1966
1937 ieee80211_tx_status(hw, skb); 1967 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
1968 ieee80211_wake_queue(sc->hw, q);
1969 txq->stopped = 0;
1970 }
1971 spin_unlock_bh(&txq->axq_lock);
1938 } 1972 }
1973
1974 ieee80211_tx_status(hw, skb);
1939} 1975}
1940 1976
1941static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 1977static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
@@ -1956,19 +1992,25 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1956 tx_flags |= ATH_TX_XRETRY; 1992 tx_flags |= ATH_TX_XRETRY;
1957 } 1993 }
1958 1994
1959 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE); 1995 dma_unmap_single(sc->dev, bf->bf_buf_addr, skb->len, DMA_TO_DEVICE);
1996 bf->bf_buf_addr = 0;
1960 1997
1961 if (bf->bf_state.bfs_paprd) { 1998 if (bf->bf_state.bfs_paprd) {
1962 if (time_after(jiffies, 1999 if (time_after(jiffies,
1963 bf->bf_state.bfs_paprd_timestamp + 2000 bf->bf_state.bfs_paprd_timestamp +
1964 msecs_to_jiffies(ATH_PAPRD_TIMEOUT))) 2001 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
1965 dev_kfree_skb_any(skb); 2002 dev_kfree_skb_any(skb);
1966 else 2003 else
1967 complete(&sc->paprd_complete); 2004 complete(&sc->paprd_complete);
1968 } else { 2005 } else {
1969 ath_tx_complete(sc, skb, bf->aphy, tx_flags); 2006 ath_debug_stat_tx(sc, bf, ts, txq);
1970 ath_debug_stat_tx(sc, txq, bf, ts); 2007 ath_tx_complete(sc, skb, tx_flags,
2008 bf->bf_state.bfs_ftype, txq);
1971 } 2009 }
2010 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
2011 * accidentally reference it later.
2012 */
2013 bf->bf_mpdu = NULL;
1972 2014
1973 /* 2015 /*
1974 * Return the list of ath_buf of this mpdu to free queue 2016 * Return the list of ath_buf of this mpdu to free queue
@@ -1978,42 +2020,15 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1978 spin_unlock_irqrestore(&sc->tx.txbuflock, flags); 2020 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1979} 2021}
1980 2022
1981static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf, 2023static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
1982 struct ath_tx_status *ts, int txok) 2024 struct ath_tx_status *ts, int nframes, int nbad,
1983{ 2025 int txok, bool update_rc)
1984 u16 seq_st = 0;
1985 u32 ba[WME_BA_BMP_SIZE >> 5];
1986 int ba_index;
1987 int nbad = 0;
1988 int isaggr = 0;
1989
1990 if (bf->bf_lastbf->bf_tx_aborted)
1991 return 0;
1992
1993 isaggr = bf_isaggr(bf);
1994 if (isaggr) {
1995 seq_st = ts->ts_seqnum;
1996 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
1997 }
1998
1999 while (bf) {
2000 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
2001 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
2002 nbad++;
2003
2004 bf = bf->bf_next;
2005 }
2006
2007 return nbad;
2008}
2009
2010static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
2011 int nbad, int txok, bool update_rc)
2012{ 2026{
2013 struct sk_buff *skb = bf->bf_mpdu; 2027 struct sk_buff *skb = bf->bf_mpdu;
2014 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2028 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2015 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 2029 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
2016 struct ieee80211_hw *hw = bf->aphy->hw; 2030 struct ieee80211_hw *hw = sc->hw;
2031 struct ath_hw *ah = sc->sc_ah;
2017 u8 i, tx_rateindex; 2032 u8 i, tx_rateindex;
2018 2033
2019 if (txok) 2034 if (txok)
@@ -2024,21 +2039,35 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
2024 2039
2025 if (ts->ts_status & ATH9K_TXERR_FILT) 2040 if (ts->ts_status & ATH9K_TXERR_FILT)
2026 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 2041 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
2027 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) 2042 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
2028 tx_info->flags |= IEEE80211_TX_STAT_AMPDU; 2043 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
2029 2044
2045 BUG_ON(nbad > nframes);
2046
2047 tx_info->status.ampdu_len = nframes;
2048 tx_info->status.ampdu_ack_len = nframes - nbad;
2049 }
2050
2030 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 && 2051 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
2031 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) { 2052 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
2032 if (ieee80211_is_data(hdr->frame_control)) { 2053 /*
2033 if (ts->ts_flags & 2054 * If an underrun error is seen assume it as an excessive
2034 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN)) 2055 * retry only if max frame trigger level has been reached
2035 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN; 2056 * (2 KB for single stream, and 4 KB for dual stream).
2036 if ((ts->ts_status & ATH9K_TXERR_XRETRY) || 2057 * Adjust the long retry as if the frame was tried
2037 (ts->ts_status & ATH9K_TXERR_FIFO)) 2058 * hw->max_rate_tries times to affect how rate control updates
2038 tx_info->pad[0] |= ATH_TX_INFO_XRETRY; 2059 * PER for the failed rate.
2039 tx_info->status.ampdu_len = bf->bf_nframes; 2060 * In case of congestion on the bus penalizing this type of
2040 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad; 2061 * underruns should help hardware actually transmit new frames
2041 } 2062 * successfully by eventually preferring slower rates.
2063 * This itself should also alleviate congestion on the bus.
2064 */
2065 if (ieee80211_is_data(hdr->frame_control) &&
2066 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
2067 ATH9K_TX_DELIM_UNDERRUN)) &&
2068 ah->tx_trig_level >= sc->sc_ah->config.max_txtrig_level)
2069 tx_info->status.rates[tx_rateindex].count =
2070 hw->max_rate_tries;
2042 } 2071 }
2043 2072
2044 for (i = tx_rateindex + 1; i < hw->max_rates; i++) { 2073 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
@@ -2049,22 +2078,6 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
2049 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; 2078 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
2050} 2079}
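
With the new nframes/nbad arguments, ath_tx_rc_status() hands mac80211 the A-MPDU totals directly, and rate control derives PER from ampdu_len versus ampdu_ack_len. A toy computation with example numbers:

#include <stdio.h>

int main(void)
{
	int nframes = 32, nbad = 5;  /* e.g. from a BA-bitmap walk */
	int ampdu_len = nframes;
	int ampdu_ack_len = nframes - nbad;

	printf("acked %d of %d, PER = %.1f%%\n",
	       ampdu_ack_len, ampdu_len, 100.0 * nbad / ampdu_len);
	return 0;
}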
2051 2080
2052static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
2053{
2054 int qnum;
2055
2056 qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
2057 if (qnum == -1)
2058 return;
2059
2060 spin_lock_bh(&txq->axq_lock);
2061 if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) {
2062 if (ath_mac80211_start_queue(sc, qnum))
2063 txq->stopped = 0;
2064 }
2065 spin_unlock_bh(&txq->axq_lock);
2066}
2067
2068static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) 2081static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2069{ 2082{
2070 struct ath_hw *ah = sc->sc_ah; 2083 struct ath_hw *ah = sc->sc_ah;
@@ -2076,14 +2089,16 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2076 int txok; 2089 int txok;
2077 int status; 2090 int status;
2078 2091
2079 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n", 2092 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2080 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum), 2093 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2081 txq->axq_link); 2094 txq->axq_link);
2082 2095
2083 for (;;) { 2096 for (;;) {
2084 spin_lock_bh(&txq->axq_lock); 2097 spin_lock_bh(&txq->axq_lock);
2085 if (list_empty(&txq->axq_q)) { 2098 if (list_empty(&txq->axq_q)) {
2086 txq->axq_link = NULL; 2099 txq->axq_link = NULL;
2100 if (sc->sc_flags & SC_OP_TXAGGR)
2101 ath_txq_schedule(sc, txq);
2087 spin_unlock_bh(&txq->axq_lock); 2102 spin_unlock_bh(&txq->axq_lock);
2088 break; 2103 break;
2089 } 2104 }
@@ -2118,18 +2133,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2118 spin_unlock_bh(&txq->axq_lock); 2133 spin_unlock_bh(&txq->axq_lock);
2119 break; 2134 break;
2120 } 2135 }
2121 2136 TX_STAT_INC(txq->axq_qnum, txprocdesc);
2122 /*
2123 * We now know the nullfunc frame has been ACKed so we
2124 * can disable RX.
2125 */
2126 if (bf->bf_isnullfunc &&
2127 (ts.ts_status & ATH9K_TX_ACKED)) {
2128 if ((sc->ps_flags & PS_ENABLED))
2129 ath9k_enable_ps(sc);
2130 else
2131 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
2132 }
2133 2137
2134 /* 2138 /*
2135 * Remove ath_buf's of the same transmit unit from txq, 2139 * Remove ath_buf's of the same transmit unit from txq,
@@ -2147,6 +2151,10 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2147 txq->axq_tx_inprogress = false; 2151 txq->axq_tx_inprogress = false;
2148 if (bf_held) 2152 if (bf_held)
2149 list_del(&bf_held->list); 2153 list_del(&bf_held->list);
2154
2155 if (bf_is_ampdu_not_probing(bf))
2156 txq->axq_ampdu_depth--;
2157
2150 spin_unlock_bh(&txq->axq_lock); 2158 spin_unlock_bh(&txq->axq_lock);
2151 2159
2152 if (bf_held) 2160 if (bf_held)
@@ -2159,17 +2167,17 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2159 */ 2167 */
2160 if (ts.ts_status & ATH9K_TXERR_XRETRY) 2168 if (ts.ts_status & ATH9K_TXERR_XRETRY)
2161 bf->bf_state.bf_type |= BUF_XRETRY; 2169 bf->bf_state.bf_type |= BUF_XRETRY;
2162 ath_tx_rc_status(bf, &ts, 0, txok, true); 2170 ath_tx_rc_status(sc, bf, &ts, 1, txok ? 0 : 1, txok, true);
2163 } 2171 }
2164 2172
2165 if (bf_isampdu(bf)) 2173 if (bf_isampdu(bf))
2166 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok); 2174 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
2175 true);
2167 else 2176 else
2168 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0); 2177 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
2169 2178
2170 ath_wake_mac80211_queue(sc, txq);
2171
2172 spin_lock_bh(&txq->axq_lock); 2179 spin_lock_bh(&txq->axq_lock);
2180
2173 if (sc->sc_flags & SC_OP_TXAGGR) 2181 if (sc->sc_flags & SC_OP_TXAGGR)
2174 ath_txq_schedule(sc, txq); 2182 ath_txq_schedule(sc, txq);
2175 spin_unlock_bh(&txq->axq_lock); 2183 spin_unlock_bh(&txq->axq_lock);
@@ -2183,6 +2191,9 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
2183 struct ath_txq *txq; 2191 struct ath_txq *txq;
2184 int i; 2192 int i;
2185 bool needreset = false; 2193 bool needreset = false;
2194#ifdef CONFIG_ATH9K_DEBUGFS
2195 sc->tx_complete_poll_work_seen++;
2196#endif
2186 2197
2187 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) 2198 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
2188 if (ATH_TXQ_SETUP(sc, i)) { 2199 if (ATH_TXQ_SETUP(sc, i)) {
@@ -2201,11 +2212,9 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
2201 } 2212 }
2202 2213
2203 if (needreset) { 2214 if (needreset) {
2204 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET, 2215 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2205 "tx hung, resetting the chip\n"); 2216 "tx hung, resetting the chip\n");
2206 ath9k_ps_wakeup(sc); 2217 ath_reset(sc, true);
2207 ath_reset(sc, false);
2208 ath9k_ps_restore(sc);
2209 } 2218 }
2210 2219
2211 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 2220 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
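
The poll work relies on the axq_tx_inprogress mark: every completion clears it, each poll pass sets it, and finding it still set with frames queued means nothing completed for a whole interval, so the chip is reset. The same pattern modeled standalone:

#include <stdbool.h>
#include <stdio.h>

struct txq { int depth; bool tx_inprogress; };

static bool poll_once(struct txq *q)
{
	bool hung = false;

	if (q->depth) {
		if (q->tx_inprogress)
			hung = true;  /* no completion since last poll */
		else
			q->tx_inprogress = true;
	}
	return hung;
}

static void on_completion(struct txq *q)
{
	q->depth--;
	q->tx_inprogress = false;
}

int main(void)
{
	struct txq q = { .depth = 2, .tx_inprogress = false };

	printf("poll1 hung=%d\n", poll_once(&q));  /* arms the mark */
	on_completion(&q);                         /* progress clears it */
	printf("poll2 hung=%d\n", poll_once(&q));  /* re-arms */
	printf("poll3 hung=%d\n", poll_once(&q));  /* still set -> hung */
	return 0;
}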
@@ -2243,8 +2252,8 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2243 if (status == -EINPROGRESS) 2252 if (status == -EINPROGRESS)
2244 break; 2253 break;
2245 if (status == -EIO) { 2254 if (status == -EIO) {
2246 ath_print(common, ATH_DBG_XMIT, 2255 ath_dbg(common, ATH_DBG_XMIT,
2247 "Error processing tx status\n"); 2256 "Error processing tx status\n");
2248 break; 2257 break;
2249 } 2258 }
2250 2259
@@ -2270,45 +2279,38 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2270 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH); 2279 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2271 txq->axq_depth--; 2280 txq->axq_depth--;
2272 txq->axq_tx_inprogress = false; 2281 txq->axq_tx_inprogress = false;
2282 if (bf_is_ampdu_not_probing(bf))
2283 txq->axq_ampdu_depth--;
2273 spin_unlock_bh(&txq->axq_lock); 2284 spin_unlock_bh(&txq->axq_lock);
2274 2285
2275 txok = !(txs.ts_status & ATH9K_TXERR_MASK); 2286 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2276 2287
2277 /*
2278 * Make sure null func frame is acked before configuring
2279 * hw into ps mode.
2280 */
2281 if (bf->bf_isnullfunc && txok) {
2282 if ((sc->ps_flags & PS_ENABLED))
2283 ath9k_enable_ps(sc);
2284 else
2285 sc->ps_flags |= PS_NULLFUNC_COMPLETED;
2286 }
2287
2288 if (!bf_isampdu(bf)) { 2288 if (!bf_isampdu(bf)) {
2289 if (txs.ts_status & ATH9K_TXERR_XRETRY) 2289 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2290 bf->bf_state.bf_type |= BUF_XRETRY; 2290 bf->bf_state.bf_type |= BUF_XRETRY;
2291 ath_tx_rc_status(bf, &txs, 0, txok, true); 2291 ath_tx_rc_status(sc, bf, &txs, 1, txok ? 0 : 1, txok, true);
2292 } 2292 }
2293 2293
2294 if (bf_isampdu(bf)) 2294 if (bf_isampdu(bf))
2295 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok); 2295 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
2296 txok, true);
2296 else 2297 else
2297 ath_tx_complete_buf(sc, bf, txq, &bf_head, 2298 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2298 &txs, txok, 0); 2299 &txs, txok, 0);
2299 2300
2300 ath_wake_mac80211_queue(sc, txq);
2301
2302 spin_lock_bh(&txq->axq_lock); 2301 spin_lock_bh(&txq->axq_lock);
2302
2303 if (!list_empty(&txq->txq_fifo_pending)) { 2303 if (!list_empty(&txq->txq_fifo_pending)) {
2304 INIT_LIST_HEAD(&bf_head); 2304 INIT_LIST_HEAD(&bf_head);
2305 bf = list_first_entry(&txq->txq_fifo_pending, 2305 bf = list_first_entry(&txq->txq_fifo_pending,
2306 struct ath_buf, list); 2306 struct ath_buf, list);
2307 list_cut_position(&bf_head, &txq->txq_fifo_pending, 2307 list_cut_position(&bf_head,
2308 &bf->bf_lastbf->list); 2308 &txq->txq_fifo_pending,
2309 &bf->bf_lastbf->list);
2309 ath_tx_txqaddbuf(sc, txq, &bf_head); 2310 ath_tx_txqaddbuf(sc, txq, &bf_head);
2310 } else if (sc->sc_flags & SC_OP_TXAGGR) 2311 } else if (sc->sc_flags & SC_OP_TXAGGR)
2311 ath_txq_schedule(sc, txq); 2312 ath_txq_schedule(sc, txq);
2313
2312 spin_unlock_bh(&txq->axq_lock); 2314 spin_unlock_bh(&txq->axq_lock);
2313 } 2315 }
2314} 2316}
@@ -2362,16 +2364,16 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
2362 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf, 2364 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
2363 "tx", nbufs, 1, 1); 2365 "tx", nbufs, 1, 1);
2364 if (error != 0) { 2366 if (error != 0) {
2365 ath_print(common, ATH_DBG_FATAL, 2367 ath_err(common,
2366 "Failed to allocate tx descriptors: %d\n", error); 2368 "Failed to allocate tx descriptors: %d\n", error);
2367 goto err; 2369 goto err;
2368 } 2370 }
2369 2371
2370 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf, 2372 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
2371 "beacon", ATH_BCBUF, 1, 1); 2373 "beacon", ATH_BCBUF, 1, 1);
2372 if (error != 0) { 2374 if (error != 0) {
2373 ath_print(common, ATH_DBG_FATAL, 2375 ath_err(common,
2374 "Failed to allocate beacon descriptors: %d\n", error); 2376 "Failed to allocate beacon descriptors: %d\n", error);
2375 goto err; 2377 goto err;
2376 } 2378 }
2377 2379
@@ -2429,7 +2431,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2429 for (acno = 0, ac = &an->ac[acno]; 2431 for (acno = 0, ac = &an->ac[acno];
2430 acno < WME_NUM_AC; acno++, ac++) { 2432 acno < WME_NUM_AC; acno++, ac++) {
2431 ac->sched = false; 2433 ac->sched = false;
2432 ac->qnum = sc->tx.hwq_map[acno]; 2434 ac->txq = sc->tx.txq_map[acno];
2433 INIT_LIST_HEAD(&ac->tid_q); 2435 INIT_LIST_HEAD(&ac->tid_q);
2434 } 2436 }
2435} 2437}
@@ -2439,17 +2441,13 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
2439 struct ath_atx_ac *ac; 2441 struct ath_atx_ac *ac;
2440 struct ath_atx_tid *tid; 2442 struct ath_atx_tid *tid;
2441 struct ath_txq *txq; 2443 struct ath_txq *txq;
2442 int i, tidno; 2444 int tidno;
2443 2445
2444 for (tidno = 0, tid = &an->tid[tidno]; 2446 for (tidno = 0, tid = &an->tid[tidno];
2445 tidno < WME_NUM_TID; tidno++, tid++) { 2447 tidno < WME_NUM_TID; tidno++, tid++) {
2446 i = tid->ac->qnum;
2447 2448
2448 if (!ATH_TXQ_SETUP(sc, i))
2449 continue;
2450
2451 txq = &sc->tx.txq[i];
2452 ac = tid->ac; 2449 ac = tid->ac;
2450 txq = ac->txq;
2453 2451
2454 spin_lock_bh(&txq->axq_lock); 2452 spin_lock_bh(&txq->axq_lock);
2455 2453