author     Christian Lamparter <chunkeey@googlemail.com>    2010-08-04 19:36:41 -0400
committer  John W. Linville <linville@tuxdriver.com>        2010-08-16 15:26:39 -0400
commit     2bff8ebf32a7c5ec9e5f5eeffef94a8cb622f5f0
tree       ba1a904e56eb202481c039d28a0d769d4bcf796f /net/mac80211/rx.c
parent     071d9ac253ff51154beb7e33967168e30bc96053
mac80211: AMPDU rx reorder timeout timer
This patch introduces a new timer that releases queued-up MPDUs
from the reorder buffer whenever they have waited for more than
HT_RX_REORDER_BUF_TIMEOUT (roughly 100 ms).
The advantage of a dedicated timer, instead of relying on a
constant stream of freshly arriving A-MPDUs to release the old
ones, is particularly noticeable when even a small fraction of
MPDUs is permanently lost at low network speeds.
Previously, under these circumstances, frames would get stuck
in the reorder buffer and the network stacks of both HT peers
would throttle back, instead of revving up and gunning the pipes.
Signed-off-by: Christian Lamparter <chunkeey@googlemail.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'net/mac80211/rx.c')
-rw-r--r--  net/mac80211/rx.c  |  70
1 file changed, 63 insertions(+), 7 deletions(-)
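In short, at the end of every release pass the patch either re-arms a per-TID timer for the oldest MPDU still held back by a gap, or cancels the timer once the buffer has drained. Below is a condensed sketch of that decision, paraphrased from the rx.c hunks that follow; the wrapper function name is made up purely for illustration, and in the patch this logic sits at the tail of ieee80211_sta_reorder_release(), under tid_agg_rx->reorder_lock.

	/* Illustration only: in the patch this runs at the end of
	 * ieee80211_sta_reorder_release(), with reorder_lock held. */
	static void reorder_arm_release_timer(struct tid_ampdu_rx *tid_agg_rx)
	{
		int index, j;

		if (tid_agg_rx->stored_mpdu_num) {
			/* find the oldest MPDU still sitting in the buffer */
			j = index = seq_sub(tid_agg_rx->head_seq_num,
					    tid_agg_rx->ssn) % tid_agg_rx->buf_size;
			for (; j != (index - 1) % tid_agg_rx->buf_size;
			     j = (j + 1) % tid_agg_rx->buf_size)
				if (tid_agg_rx->reorder_buf[j])
					break;

			/* flush it HT_RX_REORDER_BUF_TIMEOUT (~100 ms) after arrival */
			mod_timer(&tid_agg_rx->reorder_timer,
				  tid_agg_rx->reorder_time[j] +
				  HT_RX_REORDER_BUF_TIMEOUT);
		} else {
			/* buffer empty: nothing can time out */
			del_timer(&tid_agg_rx->reorder_timer);
		}
	}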
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index d5b91b6eb120..f24a0a1cff1a 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -572,6 +572,8 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
  * frames that have not yet been received are assumed to be lost and the skb
  * can be released for processing. This may also release other skb's from the
  * reorder buffer if there are no additional gaps between the frames.
+ *
+ * Callers must hold tid_agg_rx->reorder_lock.
  */
 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
 
@@ -579,7 +581,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
 					  struct tid_ampdu_rx *tid_agg_rx,
 					  struct sk_buff_head *frames)
 {
-	int index;
+	int index, j;
 
 	/* release the buffer until next missing frame */
 	index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
@@ -590,7 +592,6 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
 		 * No buffers ready to be released, but check whether any
 		 * frames in the reorder buffer have timed out.
 		 */
-		int j;
 		int skipped = 1;
 		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
 		     j = (j + 1) % tid_agg_rx->buf_size) {
@@ -600,7 +601,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
 			}
 			if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
 					HT_RX_REORDER_BUF_TIMEOUT))
-				break;
+				goto set_release_timer;
 
 #ifdef CONFIG_MAC80211_HT_DEBUG
 			if (net_ratelimit())
@@ -624,6 +625,25 @@ static void ieee80211_sta_reorder_release(struct ieee80211_hw *hw,
 		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
 							tid_agg_rx->buf_size;
 	}
+
+	if (tid_agg_rx->stored_mpdu_num) {
+		j = index = seq_sub(tid_agg_rx->head_seq_num,
+				    tid_agg_rx->ssn) % tid_agg_rx->buf_size;
+
+		for (; j != (index - 1) % tid_agg_rx->buf_size;
+		     j = (j + 1) % tid_agg_rx->buf_size) {
+			if (tid_agg_rx->reorder_buf[j])
+				break;
+		}
+
+ set_release_timer:
+
+		mod_timer(&tid_agg_rx->reorder_timer,
+			  tid_agg_rx->reorder_time[j] +
+			  HT_RX_REORDER_BUF_TIMEOUT);
+	} else {
+		del_timer(&tid_agg_rx->reorder_timer);
+	}
 }
 
 /*
@@ -641,14 +661,16 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
 	u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
 	u16 head_seq_num, buf_size;
 	int index;
+	bool ret = true;
 
 	buf_size = tid_agg_rx->buf_size;
 	head_seq_num = tid_agg_rx->head_seq_num;
 
+	spin_lock(&tid_agg_rx->reorder_lock);
 	/* frame with out of date sequence number */
 	if (seq_less(mpdu_seq_num, head_seq_num)) {
 		dev_kfree_skb(skb);
-		return true;
+		goto out;
 	}
 
 	/*
@@ -669,7 +691,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
 	/* check if we already stored this frame */
 	if (tid_agg_rx->reorder_buf[index]) {
 		dev_kfree_skb(skb);
-		return true;
+		goto out;
 	}
 
 	/*
@@ -679,7 +701,8 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
 	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
 	    tid_agg_rx->stored_mpdu_num == 0) {
 		tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
-		return false;
+		ret = false;
+		goto out;
 	}
 
 	/* put the frame in the reordering buffer */
@@ -688,7 +711,9 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
 	tid_agg_rx->stored_mpdu_num++;
 	ieee80211_sta_reorder_release(hw, tid_agg_rx, frames);
 
-	return true;
+ out:
+	spin_unlock(&tid_agg_rx->reorder_lock);
+	return ret;
 }
 
 /*
@@ -2387,6 +2412,37 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
 #undef CALL_RXH
 }
 
+/*
+ * This function makes calls into the RX path. Therefore the
+ * caller must hold the sta_info->lock and everything has to
+ * be under rcu_read_lock protection as well.
+ */
+void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
+{
+	struct sk_buff_head frames;
+	struct ieee80211_rx_data rx = { };
+
+	__skb_queue_head_init(&frames);
+
+	/* construct rx struct */
+	rx.sta = sta;
+	rx.sdata = sta->sdata;
+	rx.local = sta->local;
+	rx.queue = tid;
+	rx.flags |= IEEE80211_RX_RA_MATCH;
+
+	if (unlikely(test_bit(SCAN_HW_SCANNING, &sta->local->scanning) ||
+		     test_bit(SCAN_OFF_CHANNEL, &sta->local->scanning)))
+		rx.flags |= IEEE80211_RX_IN_SCAN;
+
+	spin_lock(&sta->ampdu_mlme.tid_rx[tid]->reorder_lock);
+	ieee80211_sta_reorder_release(&sta->local->hw,
+				      sta->ampdu_mlme.tid_rx[tid], &frames);
+	spin_unlock(&sta->ampdu_mlme.tid_rx[tid]->reorder_lock);
+
+	ieee80211_rx_handlers(&rx, &frames);
+}
+
 /* main receive path */
 
 static int prepare_for_handlers(struct ieee80211_sub_if_data *sdata,
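
The diff shown here is limited to net/mac80211/rx.c, so the code that actually drives ieee80211_release_reorder_timeout() is not visible above. Going by the locking comment on the new function (sta_info->lock held, everything under rcu_read_lock), the per-TID reorder_timer callback added elsewhere in this commit is expected to look roughly like the sketch below; the callback name and the timer_to_tid trick for recovering the sta and tid are assumptions here, not taken from this diff.

	/* Sketch of the assumed reorder_timer callback (old unsigned-long
	 * timer API of that era; not part of the rx.c diff above). */
	static void sta_rx_agg_reorder_timer_expired(unsigned long data)
	{
		/* recover the sta and tid from the per-TID timer cookie */
		u8 *ptid = (u8 *)data;
		u8 *timer_to_id = ptid - *ptid;
		struct sta_info *sta = container_of(timer_to_id, struct sta_info,
						    timer_to_tid[0]);

		/* honour the locking rules documented on the new function */
		rcu_read_lock();
		spin_lock(&sta->lock);
		ieee80211_release_reorder_timeout(sta, *ptid);
		spin_unlock(&sta->lock);
		rcu_read_unlock();
	}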