Diffstat (limited to 'drivers/net/wireless/ath/ath9k/xmit.c')
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 186
1 file changed, 105 insertions(+), 81 deletions(-)
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 4ccf48e396df..87762da0383b 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -59,6 +59,7 @@ static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
 				      struct ath_atx_tid *tid,
 				      struct list_head *bf_head);
 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
+				struct ath_txq *txq,
 				struct list_head *bf_q,
 				int txok, int sendbar);
 static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
@@ -73,18 +74,6 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
 /* Aggregation logic */
 /*********************/
 
-static int ath_aggr_query(struct ath_softc *sc, struct ath_node *an, u8 tidno)
-{
-	struct ath_atx_tid *tid;
-	tid = ATH_AN_2_TID(an, tidno);
-
-	if (tid->state & AGGR_ADDBA_COMPLETE ||
-	    tid->state & AGGR_ADDBA_PROGRESS)
-		return 1;
-	else
-		return 0;
-}
-
 static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
 {
 	struct ath_atx_ac *ac = tid->ac;
@@ -224,7 +213,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
 		ath_tx_update_baw(sc, tid, bf->bf_seqno);
 
 		spin_unlock(&txq->axq_lock);
-		ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
+		ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0);
 		spin_lock(&txq->axq_lock);
 	}
 
@@ -232,13 +221,15 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
 	tid->baw_tail = tid->baw_head;
 }
 
-static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
+static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
+			     struct ath_buf *bf)
 {
 	struct sk_buff *skb;
 	struct ieee80211_hdr *hdr;
 
 	bf->bf_state.bf_type |= BUF_RETRY;
 	bf->bf_retries++;
+	TX_STAT_INC(txq->axq_qnum, a_retries);
 
 	skb = bf->bf_mpdu;
 	hdr = (struct ieee80211_hdr *)skb->data;
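[The TX_STAT_INC calls added here and in later hunks feed per-queue TX counters (a_retries, a_aggr, a_queued, queued all appear in this patch). A self-contained sketch of how such a counter macro is commonly gated; the array layout and the DEBUG_STATS switch are illustrative assumptions, not ath9k's definitions:

	#include <stdio.h>

	#define NQUEUES 10

	/* Illustrative per-queue counters matching the names in this patch. */
	struct txq_stats { unsigned queued, a_queued, a_aggr, a_retries; };
	static struct txq_stats txstats[NQUEUES];

	#ifdef DEBUG_STATS
	#define TX_STAT_INC(qnum, elem)	(txstats[qnum].elem++)
	#else
	#define TX_STAT_INC(qnum, elem)	do { } while (0)	/* compiles away */
	#endif

	int main(void)
	{
		TX_STAT_INC(0, a_retries);	/* no-op unless built with -DDEBUG_STATS */
		printf("a_retries on q0: %u\n", txstats[0].a_retries);
		return 0;
	}

The point of the do/while(0) form is that the disabled variant still parses as a single statement after an if/else.]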
@@ -250,7 +241,10 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
 	struct ath_buf *tbf;
 
 	spin_lock_bh(&sc->tx.txbuflock);
-	ASSERT(!list_empty((&sc->tx.txbuf)));
+	if (WARN_ON(list_empty(&sc->tx.txbuf))) {
+		spin_unlock_bh(&sc->tx.txbuflock);
+		return NULL;
+	}
 	tbf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
 	list_del(&tbf->list);
 	spin_unlock_bh(&sc->tx.txbuflock);
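[The old ASSERT took the whole driver down when the free list ran dry under load; the new code drops the lock and lets the caller cope with exhaustion. A standalone userspace sketch of the same take-one-from-a-locked-free-list pattern, with illustrative names and types:

	#include <pthread.h>
	#include <stddef.h>

	struct buf { struct buf *next; };

	static struct buf *free_list;
	static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Pop a buffer, returning NULL instead of asserting when exhausted. */
	static struct buf *clone_buf(void)
	{
		struct buf *tbf;

		pthread_mutex_lock(&free_lock);
		if (!free_list) {			/* was: ASSERT(!empty) */
			pthread_mutex_unlock(&free_lock);
			return NULL;			/* caller handles it */
		}
		tbf = free_list;
		free_list = tbf->next;
		pthread_mutex_unlock(&free_lock);
		return tbf;
	}

Note the unlock on the failure path: the early return must not leave the list lock held.]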
@@ -337,7 +331,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 	if (!(tid->state & AGGR_CLEANUP) &&
 	    ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
 		if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
-			ath_tx_set_retry(sc, bf);
+			ath_tx_set_retry(sc, txq, bf);
 			txpending = 1;
 		} else {
 			bf->bf_state.bf_type |= BUF_XRETRY;
@@ -384,13 +378,31 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 				ath_tx_rc_status(bf, ds, nbad, txok, false);
 			}
 
-			ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar);
+			ath_tx_complete_buf(sc, bf, txq, &bf_head, !txfail, sendbar);
 		} else {
 			/* retry the un-acked ones */
 			if (bf->bf_next == NULL && bf_last->bf_stale) {
 				struct ath_buf *tbf;
 
 				tbf = ath_clone_txbuf(sc, bf_last);
+				/*
+				 * Update tx baw and complete the frame with
+				 * failed status if we run out of tx buf
+				 */
+				if (!tbf) {
+					spin_lock_bh(&txq->axq_lock);
+					ath_tx_update_baw(sc, tid,
+							  bf->bf_seqno);
+					spin_unlock_bh(&txq->axq_lock);
+
+					bf->bf_state.bf_type |= BUF_XRETRY;
+					ath_tx_rc_status(bf, ds, nbad,
+							 0, false);
+					ath_tx_complete_buf(sc, bf, txq,
+							    &bf_head, 0, 0);
+					break;
+				}
+
 				ath9k_hw_cleartxdesc(sc->sc_ah, tbf->bf_desc);
 				list_add_tail(&tbf->list, &bf_head);
 			} else {
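[The !tbf branch matters because a frame that is silently dropped would pin the block-ack window forever: the window origin can only advance once the slot at its head is settled, whether by an ACK or by giving up as here. A standalone sketch of that invariant, with an illustrative window size and field names:

	#include <stdbool.h>

	#define WIN 64			/* illustrative window size */

	struct baw {
		unsigned seq_start;	/* seqno at the head of the window */
		unsigned head, tail;	/* circular indexes into pending[] */
		bool pending[WIN];	/* slot still awaiting completion? */
	};

	/* Settle seqno (acked OR abandoned, as in the !tbf branch above),
	 * then slide the window origin over every settled head slot. */
	static void baw_complete(struct baw *w, unsigned seqno)
	{
		unsigned idx = (w->head + (seqno - w->seq_start)) % WIN;

		w->pending[idx] = false;
		while (w->head != w->tail && !w->pending[w->head]) {
			w->head = (w->head + 1) % WIN;
			w->seq_start++;	/* real 802.11 seqnos wrap at 4096 */
		}
	}

Skipping baw_complete() for a dead frame leaves pending[head] set, so seq_start never moves and the whole TID stalls.]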
@@ -414,7 +426,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
 	if (tid->state & AGGR_CLEANUP) {
 		if (tid->baw_head == tid->baw_tail) {
 			tid->state &= ~AGGR_ADDBA_COMPLETE;
-			tid->addba_exchangeattempts = 0;
 			tid->state &= ~AGGR_CLEANUP;
 
 			/* send buffered frames as singles */
@@ -447,7 +458,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
 	struct ieee80211_tx_rate *rates;
 	struct ath_tx_info_priv *tx_info_priv;
 	u32 max_4ms_framelen, frmlen;
-	u16 aggr_limit, legacy = 0, maxampdu;
+	u16 aggr_limit, legacy = 0;
 	int i;
 
 	skb = bf->bf_mpdu;
@@ -482,16 +493,15 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
 	if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
 		return 0;
 
-	aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_DEFAULT);
+	aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_MAX);
 
 	/*
 	 * h/w can accept aggregates upto 16 bit lengths (65535).
 	 * The IE, however can hold upto 65536, which shows up here
 	 * as zero. Ignore 65536 since we are constrained by hw.
 	 */
-	maxampdu = tid->an->maxampdu;
-	if (maxampdu)
-		aggr_limit = min(aggr_limit, maxampdu);
+	if (tid->an->maxampdu)
+		aggr_limit = min(aggr_limit, tid->an->maxampdu);
 
 	return aggr_limit;
 }
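[The resulting limit is the tightest of three bounds: the 4ms-airtime length for the chosen rate, the hardware cap, and the peer's ADDBA-advertised maximum. A compilable sketch of the clamp; the cap value is an assumption standing in for ATH_AMPDU_LIMIT_MAX:

	#include <stdint.h>

	#define HW_AMPDU_MAX 65532u	/* assumption: a sub-64KiB hw cap */

	static uint16_t aggr_limit(uint32_t max_4ms_framelen, uint16_t maxampdu)
	{
		uint32_t limit = max_4ms_framelen < HW_AMPDU_MAX ?
				 max_4ms_framelen : HW_AMPDU_MAX;

		/* maxampdu == 0 encodes the IE's 65536 case, which the
		 * hardware cannot do anyway, so it adds no extra bound. */
		if (maxampdu && maxampdu < limit)
			limit = maxampdu;

		return (uint16_t)limit;
	}
]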
@@ -499,7 +509,6 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
 /*
  * Returns the number of delimiters to be added to
  * meet the minimum required mpdudensity.
- * caller should make sure that the rate is HT rate .
  */
 static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
 				  struct ath_buf *bf, u16 frmlen)
@@ -507,7 +516,7 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
 	const struct ath_rate_table *rt = sc->cur_rate_table;
 	struct sk_buff *skb = bf->bf_mpdu;
 	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
-	u32 nsymbits, nsymbols, mpdudensity;
+	u32 nsymbits, nsymbols;
 	u16 minlen;
 	u8 rc, flags, rix;
 	int width, half_gi, ndelim, mindelim;
@@ -529,14 +538,12 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
 	 * on highest rate in rate series (i.e. first rate) to determine
 	 * required minimum length for subframe. Take into account
 	 * whether high rate is 20 or 40Mhz and half or full GI.
-	 */
-	mpdudensity = tid->an->mpdudensity;
-
-	/*
+	 *
 	 * If there is no mpdu density restriction, no further calculation
 	 * is needed.
 	 */
-	if (mpdudensity == 0)
+
+	if (tid->an->mpdudensity == 0)
 		return ndelim;
 
 	rix = tx_info->control.rates[0].idx;
@@ -546,9 +553,9 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
 	half_gi = (flags & IEEE80211_TX_RC_SHORT_GI) ? 1 : 0;
 
 	if (half_gi)
-		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
+		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(tid->an->mpdudensity);
 	else
-		nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);
+		nsymbols = NUM_SYMBOLS_PER_USEC(tid->an->mpdudensity);
 
 	if (nsymbols == 0)
 		nsymbols = 1;
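[What the function computes after this point, going by the minlen/mindelim locals declared above: the MPDU density gives a minimum time between MPDU starts, the first rate's bits-per-symbol turns that time into a minimum subframe length, and any shortfall is padded with 4-byte delimiters. A sketch of just that final step; the delimiter size matches 802.11n, the argument names are illustrative:

	#include <stdint.h>

	#define DELIM_SZ 4	/* bytes per MPDU delimiter on the air */

	static int pad_delims(uint32_t nsymbols, uint32_t nsymbits,
			      uint16_t frmlen, int ndelim)
	{
		/* minimum subframe length (bytes) that keeps MPDU starts
		 * at or above the advertised density */
		uint16_t minlen = (nsymbols * nsymbits) / 8;

		if (frmlen < minlen) {
			int mindelim = (minlen - frmlen) / DELIM_SZ;

			if (mindelim > ndelim)
				ndelim = mindelim;
		}
		return ndelim;
	}
]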
@@ -565,6 +572,7 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
 }
 
 static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
+					     struct ath_txq *txq,
 					     struct ath_atx_tid *tid,
 					     struct list_head *bf_q)
 {
@@ -629,6 +637,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
 			bf_prev->bf_desc->ds_link = bf->bf_daddr;
 		}
 		bf_prev = bf;
+
 	} while (!list_empty(&tid->buf_q));
 
 	bf_first->bf_al = al;
@@ -651,7 +660,7 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
 
 		INIT_LIST_HEAD(&bf_q);
 
-		status = ath_tx_form_aggr(sc, tid, &bf_q);
+		status = ath_tx_form_aggr(sc, txq, tid, &bf_q);
 
 		/*
 		 * no frames picked up to be aggregated;
@@ -682,30 +691,26 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
 
 		txq->axq_aggr_depth++;
 		ath_tx_txqaddbuf(sc, txq, &bf_q);
+		TX_STAT_INC(txq->axq_qnum, a_aggr);
 
 	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
 		 status != ATH_AGGR_BAW_CLOSED);
 }
 
-int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
-		      u16 tid, u16 *ssn)
+void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
+		       u16 tid, u16 *ssn)
 {
 	struct ath_atx_tid *txtid;
 	struct ath_node *an;
 
 	an = (struct ath_node *)sta->drv_priv;
-
-	if (sc->sc_flags & SC_OP_TXAGGR) {
-		txtid = ATH_AN_2_TID(an, tid);
-		txtid->state |= AGGR_ADDBA_PROGRESS;
-		ath_tx_pause_tid(sc, txtid);
-		*ssn = txtid->seq_start;
-	}
-
-	return 0;
+	txtid = ATH_AN_2_TID(an, tid);
+	txtid->state |= AGGR_ADDBA_PROGRESS;
+	ath_tx_pause_tid(sc, txtid);
+	*ssn = txtid->seq_start;
 }
 
-int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
+void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
 {
 	struct ath_node *an = (struct ath_node *)sta->drv_priv;
 	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
@@ -715,12 +720,11 @@ int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
 	INIT_LIST_HEAD(&bf_head);
 
 	if (txtid->state & AGGR_CLEANUP)
-		return 0;
+		return;
 
 	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
 		txtid->state &= ~AGGR_ADDBA_PROGRESS;
-		txtid->addba_exchangeattempts = 0;
-		return 0;
+		return;
 	}
 
 	ath_tx_pause_tid(sc, txtid);
@@ -739,7 +743,7 @@ int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
 		}
 		list_move_tail(&bf->list, &bf_head);
 		ath_tx_update_baw(sc, txtid, bf->bf_seqno);
-		ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
+		ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0);
 	}
 	spin_unlock_bh(&txq->axq_lock);
 
@@ -747,11 +751,8 @@ int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
 		txtid->state |= AGGR_CLEANUP;
 	} else {
 		txtid->state &= ~AGGR_ADDBA_COMPLETE;
-		txtid->addba_exchangeattempts = 0;
 		ath_tx_flush_tid(sc, txtid);
 	}
-
-	return 0;
 }
 
 void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
@@ -780,14 +781,8 @@ bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
 
 	txtid = ATH_AN_2_TID(an, tidno);
 
-	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
-		if (!(txtid->state & AGGR_ADDBA_PROGRESS) &&
-		    (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
-			txtid->addba_exchangeattempts++;
-			return true;
-		}
-	}
-
+	if (!(txtid->state & (AGGR_ADDBA_COMPLETE | AGGR_ADDBA_PROGRESS)))
+		return true;
 	return false;
 }
 
@@ -870,8 +865,8 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
 		spin_lock_init(&txq->axq_lock);
 		txq->axq_depth = 0;
 		txq->axq_aggr_depth = 0;
-		txq->axq_totalqueued = 0;
 		txq->axq_linkbuf = NULL;
+		txq->axq_tx_inprogress = false;
 		sc->tx.txqsetup |= 1<<qnum;
 	}
 	return &sc->tx.txq[qnum];
@@ -1035,9 +1030,13 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
 		if (bf_isampdu(bf))
 			ath_tx_complete_aggr(sc, txq, bf, &bf_head, 0);
 		else
			ath_tx_complete_buf(sc, bf, &bf_head, 0, 0);
	}
 
+	spin_lock_bh(&txq->axq_lock);
+	txq->axq_tx_inprogress = false;
+	spin_unlock_bh(&txq->axq_lock);
+
 	/* flush any pending frames if aggregation is enabled */
 	if (sc->sc_flags & SC_OP_TXAGGR) {
 		if (!retry_tx) {
@@ -1118,8 +1117,7 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 		if (tid->paused)
 			continue;
 
-		if ((txq->axq_depth % 2) == 0)
-			ath_tx_sched_aggr(sc, txq, tid);
+		ath_tx_sched_aggr(sc, txq, tid);
 
 		/*
 		 * add tid to round-robin queue if more frames
@@ -1183,7 +1181,6 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
 
 	list_splice_tail_init(head, &txq->axq_q);
 	txq->axq_depth++;
-	txq->axq_totalqueued++;
 	txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
 
 	DPRINTF(sc, ATH_DBG_QUEUE,
@@ -1231,6 +1228,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
 
 	bf = list_first_entry(bf_head, struct ath_buf, list);
 	bf->bf_state.bf_type |= BUF_AMPDU;
+	TX_STAT_INC(txctl->txq->axq_qnum, a_queued);
 
 	/*
 	 * Do not queue to h/w when any of the following conditions is true:
@@ -1277,6 +1275,7 @@ static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
 	bf->bf_lastbf = bf;
 	ath_buf_set_rate(sc, bf);
 	ath_tx_txqaddbuf(sc, txq, bf_head);
+	TX_STAT_INC(txq->axq_qnum, queued);
 }
 
 static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
@@ -1290,6 +1289,7 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
 	bf->bf_nframes = 1;
 	ath_buf_set_rate(sc, bf);
 	ath_tx_txqaddbuf(sc, txq, bf_head);
+	TX_STAT_INC(txq->axq_qnum, queued);
 }
 
 static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
@@ -1636,7 +1636,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
 		goto tx_done;
 	}
 
-	if (ath_aggr_query(sc, an, bf->bf_tidno)) {
+	if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
 		/*
 		 * Try aggregation if it's a unicast data frame
 		 * and the destination is HT capable.
@@ -1815,6 +1815,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
 }
 
 static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
+				struct ath_txq *txq,
 				struct list_head *bf_q,
 				int txok, int sendbar)
 {
@@ -1822,7 +1823,6 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
 	unsigned long flags;
 	int tx_flags = 0;
 
-
 	if (sendbar)
 		tx_flags = ATH_TX_BAR;
 
@@ -1835,6 +1835,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
 
 	dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
 	ath_tx_complete(sc, skb, tx_flags);
+	ath_debug_stat_tx(sc, txq, bf);
 
 	/*
 	 * Return the list of ath_buf of this mpdu to free queue
@@ -1962,19 +1963,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 		if (bf->bf_stale) {
 			bf_held = bf;
 			if (list_is_last(&bf_held->list, &txq->axq_q)) {
-				txq->axq_link = NULL;
-				txq->axq_linkbuf = NULL;
 				spin_unlock_bh(&txq->axq_lock);
-
-				/*
-				 * The holding descriptor is the last
-				 * descriptor in queue. It's safe to remove
-				 * the last holding descriptor in BH context.
-				 */
-				spin_lock_bh(&sc->tx.txbuflock);
-				list_move_tail(&bf_held->list, &sc->tx.txbuf);
-				spin_unlock_bh(&sc->tx.txbuflock);
-
 				break;
 			} else {
 				bf = list_entry(bf_held->list.next,
@@ -2011,6 +2000,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 			txq->axq_aggr_depth--;
 
 		txok = (ds->ds_txstat.ts_status == 0);
+		txq->axq_tx_inprogress = false;
 		spin_unlock_bh(&txq->axq_lock);
 
 		if (bf_held) {
@@ -2033,7 +2023,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 		if (bf_isampdu(bf))
 			ath_tx_complete_aggr(sc, txq, bf, &bf_head, txok);
 		else
-			ath_tx_complete_buf(sc, bf, &bf_head, txok, 0);
+			ath_tx_complete_buf(sc, bf, txq, &bf_head, txok, 0);
 
 		ath_wake_mac80211_queue(sc, txq);
 
@@ -2044,6 +2034,40 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 	}
 }
 
+static void ath_tx_complete_poll_work(struct work_struct *work)
+{
+	struct ath_softc *sc = container_of(work, struct ath_softc,
+					    tx_complete_work.work);
+	struct ath_txq *txq;
+	int i;
+	bool needreset = false;
+
+	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+		if (ATH_TXQ_SETUP(sc, i)) {
+			txq = &sc->tx.txq[i];
+			spin_lock_bh(&txq->axq_lock);
+			if (txq->axq_depth) {
+				if (txq->axq_tx_inprogress) {
+					needreset = true;
+					spin_unlock_bh(&txq->axq_lock);
+					break;
+				} else {
+					txq->axq_tx_inprogress = true;
+				}
+			}
+			spin_unlock_bh(&txq->axq_lock);
+		}
+
+	if (needreset) {
+		DPRINTF(sc, ATH_DBG_RESET, "tx hung, resetting the chip\n");
+		ath_reset(sc, false);
+	}
+
+	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
+				     msecs_to_jiffies(ATH_TX_COMPLETE_POLL_INT));
+}
+
+
 
 void ath_tx_tasklet(struct ath_softc *sc)
 {
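[The poll worker is a two-strike watchdog: one pass arms axq_tx_inprogress on every non-empty queue, any completion (or drain, per the earlier ath_draintxq hunk) clears it, and a flag found still armed one ATH_TX_COMPLETE_POLL_INT later means the queue made no progress, so the chip is reset. A userspace sketch of one pass, with locking omitted and the names, queue count, and reset hook all illustrative:

	#include <stdbool.h>

	#define NUM_QUEUES 10

	struct txq {
		int depth;		/* frames handed to the hardware */
		bool tx_inprogress;	/* armed by the poller, cleared on completion */
	};

	/* One poll pass; returns true when some queue was armed on the
	 * previous pass and has completed nothing since: time to reset. */
	static bool tx_poll_pass(struct txq *q, int n)
	{
		for (int i = 0; i < n; i++) {
			if (!q[i].depth)
				continue;
			if (q[i].tx_inprogress)
				return true;		/* second strike: hung */
			q[i].tx_inprogress = true;	/* first strike: arm */
		}
		return false;
	}

Clearing the flag from the completion path rather than timestamping each frame keeps the hot path to a single store; the trade-off is detection latency of up to two poll intervals.]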
@@ -2084,6 +2108,8 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
 		goto err;
 	}
 
+	INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
+
 err:
 	if (error != 0)
 		ath_tx_cleanup(sc);
@@ -2122,7 +2148,6 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
 		tid->ac = &an->ac[acno];
 		tid->state &= ~AGGR_ADDBA_COMPLETE;
 		tid->state &= ~AGGR_ADDBA_PROGRESS;
-		tid->addba_exchangeattempts = 0;
 	}
 
 	for (acno = 0, ac = &an->ac[acno];
@@ -2179,7 +2204,6 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
 			tid->sched = false;
 			ath_tid_drain(sc, txq, tid);
 			tid->state &= ~AGGR_ADDBA_COMPLETE;
-			tid->addba_exchangeattempts = 0;
 			tid->state &= ~AGGR_CLEANUP;
 		}
 	}