author     Johannes Berg <johannes@sipsolutions.net>     2009-11-25 11:46:17 -0500
committer  John W. Linville <linville@tuxdriver.com>     2009-11-28 15:05:02 -0500
commit     2569a826de16ff82302a8a091228275be1aa911c (patch)
tree       ea4448779b95893276d91dc0c84946fecef92a27 /net
parent     1edfb1afba2f6e4114ff09f2e3bc948fcae0c419 (diff)
mac80211: correctly place aMPDU RX reorder code
As indicated by the comment, the aMPDU RX reorder code
should logically be after ieee80211_rx_h_check(). The
previous patch moved the code there, and this patch now
hooks it up in that place by introducing a list of skbs
that are then processed by the remaining handlers. The
list may be empty if the function is buffering the skb
to release it later.
The only change needed to the RX data is that the crypto
handler needs to clear the key that may be set from a
previous loop iteration, and that not everything can be
in the rx flags now.
Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'net')
-rw-r--r--  net/mac80211/ieee80211_i.h |   1
-rw-r--r--  net/mac80211/rx.c          | 168
2 files changed, 96 insertions, 73 deletions
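For readers who want the shape of the change without walking the whole diff, here is a minimal, self-contained userspace C sketch of the pattern the patch introduces: the reorder step now appends releasable frames to a list instead of recursing back into the RX path, and the remaining handlers then run over whatever ended up on that list, which may be empty if the frame was buffered. All names below (frame, frame_list, reorder_ampdu, run_remaining_handlers) are illustrative stand-ins, not mac80211 APIs; the real code uses struct sk_buff_head and the handlers shown in the diff.

/*
 * Illustrative sketch only (not mac80211 code) of the control flow this
 * patch introduces: the reorder step either buffers a frame or puts it on
 * a release list, and the remaining RX handlers then run on every frame
 * that ended up on that list.
 */
#include <stdio.h>
#include <stdlib.h>

struct frame {
        int seq;                        /* stand-in for the 802.11 sequence number */
        struct frame *next;
};

/* minimal FIFO, playing the role of struct sk_buff_head */
struct frame_list {
        struct frame *head, *tail;
};

static void frame_list_add(struct frame_list *list, struct frame *f)
{
        f->next = NULL;
        if (list->tail)
                list->tail->next = f;
        else
                list->head = f;
        list->tail = f;
}

static struct frame *frame_list_dequeue(struct frame_list *list)
{
        struct frame *f = list->head;

        if (f) {
                list->head = f->next;
                if (!list->head)
                        list->tail = NULL;
        }
        return f;
}

/*
 * Either keep an out-of-order frame back or append it to the release
 * list; mirrors ieee80211_rx_reorder_ampdu() in spirit only.
 */
static void reorder_ampdu(struct frame *f, struct frame_list *release)
{
        static int head_seq;            /* next expected sequence number */

        if (f->seq != head_seq) {
                free(f);                /* "buffered"; freed here to keep the sketch short */
                return;
        }
        head_seq++;
        frame_list_add(release, f);
}

/* stands in for the decrypt/defragment/data/... handler chain */
static void run_remaining_handlers(struct frame *f)
{
        printf("handled frame %d\n", f->seq);
}

int main(void)
{
        struct frame_list release = { NULL, NULL };
        struct frame *f;

        for (int seq = 0; seq < 3; seq++) {
                f = calloc(1, sizeof(*f));
                f->seq = seq;

                reorder_ampdu(f, &release);

                /* the list is empty if the frame was buffered */
                while ((f = frame_list_dequeue(&release))) {
                        run_remaining_handlers(f);
                        free(f);
                }
        }
        return 0;
}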
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index ba5d3637b956..7d3178f1b443 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -164,6 +164,7 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
 #define IEEE80211_RX_RA_MATCH          BIT(1)
 #define IEEE80211_RX_AMSDU             BIT(2)
 #define IEEE80211_RX_FRAGMENTED        BIT(3)
+/* only add flags here that do not change with subframes of an aMPDU */
 
 struct ieee80211_rx_data {
        struct sk_buff *skb;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index d71a63e1fd6a..57b8a0a42776 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -27,10 +27,6 @@
 #include "tkip.h"
 #include "wme.h"
 
-static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
-                                         struct sk_buff *skb,
-                                         struct ieee80211_rate *rate);
-
 /*
  * monitor mode reception
  *
@@ -555,7 +551,8 @@ static inline u16 seq_sub(u16 sq1, u16 sq2)
 
 static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
                                             struct tid_ampdu_rx *tid_agg_rx,
-                                            int index)
+                                            int index,
+                                            struct sk_buff_head *frames)
 {
        struct ieee80211_supported_band *sband;
        struct ieee80211_rate *rate = NULL;
@@ -571,9 +568,9 @@ static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
        sband = hw->wiphy->bands[status->band];
        if (!(status->flag & RX_FLAG_HT))
                rate = &sband->bitrates[status->rate_idx];
-       __ieee80211_rx_handle_packet(hw, skb, rate);
        tid_agg_rx->stored_mpdu_num--;
        tid_agg_rx->reorder_buf[index] = NULL;
+       skb_queue_tail(frames, skb);
 
 no_frame:
        tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
@@ -581,14 +578,15 @@ no_frame:
 
 static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
                                              struct tid_ampdu_rx *tid_agg_rx,
-                                             u16 head_seq_num)
+                                             u16 head_seq_num,
+                                             struct sk_buff_head *frames)
 {
        int index;
 
        while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
                index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
                                                        tid_agg_rx->buf_size;
-               ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
+               ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
        }
 }
 
@@ -608,7 +606,8 @@ static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
  */
 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
                                              struct tid_ampdu_rx *tid_agg_rx,
-                                             struct sk_buff *skb)
+                                             struct sk_buff *skb,
+                                             struct sk_buff_head *frames)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
        u16 sc = le16_to_cpu(hdr->seq_ctrl);
@@ -632,7 +631,8 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
        if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
                head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
                /* release stored frames up to new head to stack */
-               ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
+               ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num,
+                                                frames);
        }
 
        /* Now the new frame is always in the range of the reordering buffer */
@@ -687,7 +687,8 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
                                          "frames\n",
                                          wiphy_name(hw->wiphy));
 #endif
-                       ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
+                       ieee80211_release_reorder_frame(hw, tid_agg_rx,
+                                                       j, frames);
 
                        /*
                         * Increment the head seq# also for the skipped slots.
@@ -697,7 +698,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
                        skipped = 0;
                }
        } else while (tid_agg_rx->reorder_buf[index]) {
-               ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
+               ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
                index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
                                                        tid_agg_rx->buf_size;
        }
@@ -709,38 +710,39 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
  * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns
  * true if the MPDU was buffered, false if it should be processed.
  */
-static bool ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
-                                      struct sk_buff *skb)
+static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
+                                      struct sk_buff_head *frames)
 {
+       struct sk_buff *skb = rx->skb;
+       struct ieee80211_local *local = rx->local;
        struct ieee80211_hw *hw = &local->hw;
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-       struct sta_info *sta;
+       struct sta_info *sta = rx->sta;
        struct tid_ampdu_rx *tid_agg_rx;
        u16 sc;
        int tid;
 
        if (!ieee80211_is_data_qos(hdr->frame_control))
-               return false;
+               goto dont_reorder;
 
        /*
         * filter the QoS data rx stream according to
         * STA/TID and check if this STA/TID is on aggregation
         */
 
-       sta = sta_info_get(local, hdr->addr2);
        if (!sta)
-               return false;
+               goto dont_reorder;
 
        tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
 
        if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
-               return false;
+               goto dont_reorder;
 
        tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
 
        /* qos null data frames are excluded */
        if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
-               return false;
+               goto dont_reorder;
 
        /* new, potentially un-ordered, ampdu frame - process it */
 
@@ -755,10 +757,14 @@ static bool ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
                ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
                        tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
                dev_kfree_skb(skb);
-               return true;
+               return;
        }
 
-       return ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb);
+       if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames))
+               return;
+
+ dont_reorder:
+       __skb_queue_tail(frames, skb);
 }
 
 static ieee80211_rx_result debug_noinline
@@ -863,6 +869,9 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
        if (!(rx->flags & IEEE80211_RX_RA_MATCH))
                return RX_CONTINUE;
 
+       /* start without a key */
+       rx->key = NULL;
+
        if (rx->sta)
                stakey = rcu_dereference(rx->sta->key);
 
@@ -1815,7 +1824,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
 }
 
 static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
+ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
 {
        struct ieee80211_local *local = rx->local;
        struct ieee80211_hw *hw = &local->hw;
@@ -1845,7 +1854,8 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
                                TU_TO_EXP_TIME(tid_agg_rx->timeout));
 
                /* release stored frames up to start of BAR */
-               ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num);
+               ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
+                                                frames);
                kfree_skb(skb);
                return RX_QUEUED;
        }
@@ -2168,8 +2178,11 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
                                         struct sk_buff *skb,
                                         struct ieee80211_rate *rate)
 {
+       struct sk_buff_head reorder_release;
        ieee80211_rx_result res = RX_DROP_MONITOR;
 
+       __skb_queue_head_init(&reorder_release);
+
        rx->skb = skb;
        rx->sdata = sdata;
 
@@ -2177,50 +2190,72 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
        do {                            \
                res = rxh(rx);          \
                if (res != RX_CONTINUE) \
-                       goto rxh_done;  \
+                       goto rxh_next;  \
        } while (0);
 
+       /*
+        * NB: the rxh_next label works even if we jump
+        * to it from here because then the list will
+        * be empty, which is a trivial check
+        */
        CALL_RXH(ieee80211_rx_h_passive_scan)
        CALL_RXH(ieee80211_rx_h_check)
-       CALL_RXH(ieee80211_rx_h_decrypt)
-       CALL_RXH(ieee80211_rx_h_check_more_data)
-       CALL_RXH(ieee80211_rx_h_sta_process)
-       CALL_RXH(ieee80211_rx_h_defragment)
-       CALL_RXH(ieee80211_rx_h_ps_poll)
-       CALL_RXH(ieee80211_rx_h_michael_mic_verify)
-       /* must be after MMIC verify so header is counted in MPDU mic */
-       CALL_RXH(ieee80211_rx_h_remove_qos_control)
-       CALL_RXH(ieee80211_rx_h_amsdu)
+
+       ieee80211_rx_reorder_ampdu(rx, &reorder_release);
+
+       while ((skb = __skb_dequeue(&reorder_release))) {
+               /*
+                * all the other fields are valid across frames
+                * that belong to an aMPDU since they are on the
+                * same TID from the same station
+                */
+               rx->skb = skb;
+
+               CALL_RXH(ieee80211_rx_h_decrypt)
+               CALL_RXH(ieee80211_rx_h_check_more_data)
+               CALL_RXH(ieee80211_rx_h_sta_process)
+               CALL_RXH(ieee80211_rx_h_defragment)
+               CALL_RXH(ieee80211_rx_h_ps_poll)
+               CALL_RXH(ieee80211_rx_h_michael_mic_verify)
+               /* must be after MMIC verify so header is counted in MPDU mic */
+               CALL_RXH(ieee80211_rx_h_remove_qos_control)
+               CALL_RXH(ieee80211_rx_h_amsdu)
 #ifdef CONFIG_MAC80211_MESH
-       if (ieee80211_vif_is_mesh(&sdata->vif))
-               CALL_RXH(ieee80211_rx_h_mesh_fwding);
+               if (ieee80211_vif_is_mesh(&sdata->vif))
+                       CALL_RXH(ieee80211_rx_h_mesh_fwding);
 #endif
-       CALL_RXH(ieee80211_rx_h_data)
-       CALL_RXH(ieee80211_rx_h_ctrl)
-       CALL_RXH(ieee80211_rx_h_action)
-       CALL_RXH(ieee80211_rx_h_mgmt)
+               CALL_RXH(ieee80211_rx_h_data)
+
+               /* special treatment -- needs the queue */
+               res = ieee80211_rx_h_ctrl(rx, &reorder_release);
+               if (res != RX_CONTINUE)
+                       goto rxh_next;
+
+               CALL_RXH(ieee80211_rx_h_action)
+               CALL_RXH(ieee80211_rx_h_mgmt)
 
 #undef CALL_RXH
 
- rxh_done:
-       switch (res) {
-       case RX_DROP_MONITOR:
-               I802_DEBUG_INC(sdata->local->rx_handlers_drop);
-               if (rx->sta)
-                       rx->sta->rx_dropped++;
-               /* fall through */
-       case RX_CONTINUE:
-               ieee80211_rx_cooked_monitor(rx, rate);
-               break;
-       case RX_DROP_UNUSABLE:
-               I802_DEBUG_INC(sdata->local->rx_handlers_drop);
-               if (rx->sta)
-                       rx->sta->rx_dropped++;
-               dev_kfree_skb(rx->skb);
-               break;
-       case RX_QUEUED:
-               I802_DEBUG_INC(sdata->local->rx_handlers_queued);
-               break;
+ rxh_next:
+               switch (res) {
+               case RX_DROP_MONITOR:
+                       I802_DEBUG_INC(sdata->local->rx_handlers_drop);
+                       if (rx->sta)
+                               rx->sta->rx_dropped++;
+                       /* fall through */
+               case RX_CONTINUE:
+                       ieee80211_rx_cooked_monitor(rx, rate);
+                       break;
+               case RX_DROP_UNUSABLE:
+                       I802_DEBUG_INC(sdata->local->rx_handlers_drop);
+                       if (rx->sta)
+                               rx->sta->rx_dropped++;
+                       dev_kfree_skb(rx->skb);
+                       break;
+               case RX_QUEUED:
+                       I802_DEBUG_INC(sdata->local->rx_handlers_queued);
+                       break;
+               }
        }
 }
 
@@ -2494,20 +2529,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
                return;
        }
 
-       /*
-        * In theory, the block ack reordering should happen after duplicate
-        * removal (ieee80211_rx_h_check(), which is an RX handler). As such,
-        * the call to ieee80211_rx_reorder_ampdu() should really be moved to
-        * happen as a new RX handler between ieee80211_rx_h_check and
-        * ieee80211_rx_h_decrypt. This cleanup may eventually happen, but for
-        * the time being, the call can be here since RX reorder buf processing
-        * will implicitly skip duplicates. We could, in theory at least,
-        * process frames that ieee80211_rx_h_passive_scan would drop (e.g.,
-        * frames from other than operational channel), but that should not
-        * happen in normal networks.
-        */
-       if (!ieee80211_rx_reorder_ampdu(local, skb))
-               __ieee80211_rx_handle_packet(hw, skb, rate);
+       __ieee80211_rx_handle_packet(hw, skb, rate);
 
        rcu_read_unlock();
 