author     Johannes Berg <johannes@sipsolutions.net>      2009-11-25 11:46:16 -0500
committer  John W. Linville <linville@tuxdriver.com>      2009-11-28 15:05:02 -0500
commit     1edfb1afba2f6e4114ff09f2e3bc948fcae0c419 (patch)
tree       fddd91f803af841550c5ab0f2035dfb638243cac /net
parent     8c0c709eea5cbab97fb464cd68b06f24acc58ee1 (diff)
mac80211: move aMPDU RX reorder code
This code should be part of RX handlers, so move it
to the place where it belongs without changing it.
A follow-up patch will do the changes to hook it up.
The sole purpose of this code move is to make the
other patch readable; it doesn't change the code at
all except that it now requires a different static
function declaration (which will go away too).
Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'net')
-rw-r--r--   net/mac80211/rx.c | 459
1 file changed, 229 insertions, 230 deletions
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 097bb0343b91..d71a63e1fd6a 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -27,9 +27,9 @@
 #include "tkip.h"
 #include "wme.h"
 
-static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
-                                             struct tid_ampdu_rx *tid_agg_rx,
-                                             u16 head_seq_num);
+static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
+                                         struct sk_buff *skb,
+                                         struct ieee80211_rate *rate);
 
 /*
  * monitor mode reception
@@ -534,6 +534,232 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
         return RX_CONTINUE;
 }
 
+#define SEQ_MODULO 0x1000
+#define SEQ_MASK   0xfff
+
+static inline int seq_less(u16 sq1, u16 sq2)
+{
+        return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
+}
+
+static inline u16 seq_inc(u16 sq)
+{
+        return (sq + 1) & SEQ_MASK;
+}
+
+static inline u16 seq_sub(u16 sq1, u16 sq2)
+{
+        return (sq1 - sq2) & SEQ_MASK;
+}
+
+
+static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
+                                            struct tid_ampdu_rx *tid_agg_rx,
+                                            int index)
+{
+        struct ieee80211_supported_band *sband;
+        struct ieee80211_rate *rate = NULL;
+        struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
+        struct ieee80211_rx_status *status;
+
+        if (!skb)
+                goto no_frame;
+
+        status = IEEE80211_SKB_RXCB(skb);
+
+        /* release the reordered frames to stack */
+        sband = hw->wiphy->bands[status->band];
+        if (!(status->flag & RX_FLAG_HT))
+                rate = &sband->bitrates[status->rate_idx];
+        __ieee80211_rx_handle_packet(hw, skb, rate);
+        tid_agg_rx->stored_mpdu_num--;
+        tid_agg_rx->reorder_buf[index] = NULL;
+
+ no_frame:
+        tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
+}
+
+static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
+                                             struct tid_ampdu_rx *tid_agg_rx,
+                                             u16 head_seq_num)
+{
+        int index;
+
+        while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
+                index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
+                                                        tid_agg_rx->buf_size;
+                ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
+        }
+}
+
+/*
+ * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
+ * the skb was added to the buffer longer than this time ago, the earlier
+ * frames that have not yet been received are assumed to be lost and the skb
+ * can be released for processing. This may also release other skb's from the
+ * reorder buffer if there are no additional gaps between the frames.
+ */
+#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
+
+/*
+ * As this function belongs to the RX path it must be under
+ * rcu_read_lock protection. It returns false if the frame
+ * can be processed immediately, true if it was consumed.
+ */
+static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
+                                             struct tid_ampdu_rx *tid_agg_rx,
+                                             struct sk_buff *skb)
+{
+        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+        u16 sc = le16_to_cpu(hdr->seq_ctrl);
+        u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
+        u16 head_seq_num, buf_size;
+        int index;
+
+        buf_size = tid_agg_rx->buf_size;
+        head_seq_num = tid_agg_rx->head_seq_num;
+
+        /* frame with out of date sequence number */
+        if (seq_less(mpdu_seq_num, head_seq_num)) {
+                dev_kfree_skb(skb);
+                return true;
+        }
+
+        /*
+         * If frame the sequence number exceeds our buffering window
+         * size release some previous frames to make room for this one.
+         */
+        if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
+                head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
+                /* release stored frames up to new head to stack */
+                ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
+        }
+
+        /* Now the new frame is always in the range of the reordering buffer */
+
+        index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;
+
+        /* check if we already stored this frame */
+        if (tid_agg_rx->reorder_buf[index]) {
+                dev_kfree_skb(skb);
+                return true;
+        }
+
+        /*
+         * If the current MPDU is in the right order and nothing else
+         * is stored we can process it directly, no need to buffer it.
+         */
+        if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
+            tid_agg_rx->stored_mpdu_num == 0) {
+                tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
+                return false;
+        }
+
+        /* put the frame in the reordering buffer */
+        tid_agg_rx->reorder_buf[index] = skb;
+        tid_agg_rx->reorder_time[index] = jiffies;
+        tid_agg_rx->stored_mpdu_num++;
+        /* release the buffer until next missing frame */
+        index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
+                                                tid_agg_rx->buf_size;
+        if (!tid_agg_rx->reorder_buf[index] &&
+            tid_agg_rx->stored_mpdu_num > 1) {
+                /*
+                 * No buffers ready to be released, but check whether any
+                 * frames in the reorder buffer have timed out.
+                 */
+                int j;
+                int skipped = 1;
+                for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
+                     j = (j + 1) % tid_agg_rx->buf_size) {
+                        if (!tid_agg_rx->reorder_buf[j]) {
+                                skipped++;
+                                continue;
+                        }
+                        if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
+                                        HT_RX_REORDER_BUF_TIMEOUT))
+                                break;
+
+#ifdef CONFIG_MAC80211_HT_DEBUG
+                        if (net_ratelimit())
+                                printk(KERN_DEBUG "%s: release an RX reorder "
+                                        "frame due to timeout on earlier "
+                                        "frames\n",
+                                        wiphy_name(hw->wiphy));
+#endif
+                        ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
+
+                        /*
+                         * Increment the head seq# also for the skipped slots.
+                         */
+                        tid_agg_rx->head_seq_num =
+                                (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
+                        skipped = 0;
+                }
+        } else while (tid_agg_rx->reorder_buf[index]) {
+                ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
+                index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
+                                                        tid_agg_rx->buf_size;
+        }
+
+        return true;
+}
+
+/*
+ * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns
+ * true if the MPDU was buffered, false if it should be processed.
+ */
+static bool ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
+                                       struct sk_buff *skb)
+{
+        struct ieee80211_hw *hw = &local->hw;
+        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+        struct sta_info *sta;
+        struct tid_ampdu_rx *tid_agg_rx;
+        u16 sc;
+        int tid;
+
+        if (!ieee80211_is_data_qos(hdr->frame_control))
+                return false;
+
+        /*
+         * filter the QoS data rx stream according to
+         * STA/TID and check if this STA/TID is on aggregation
+         */
+
+        sta = sta_info_get(local, hdr->addr2);
+        if (!sta)
+                return false;
+
+        tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+
+        if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
+                return false;
+
+        tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
+
+        /* qos null data frames are excluded */
+        if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
+                return false;
+
+        /* new, potentially un-ordered, ampdu frame - process it */
+
+        /* reset session timer */
+        if (tid_agg_rx->timeout)
+                mod_timer(&tid_agg_rx->session_timer,
+                          TU_TO_EXP_TIME(tid_agg_rx->timeout));
+
+        /* if this mpdu is fragmented - terminate rx aggregation session */
+        sc = le16_to_cpu(hdr->seq_ctrl);
+        if (sc & IEEE80211_SCTL_FRAG) {
+                ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
+                        tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
+                dev_kfree_skb(skb);
+                return true;
+        }
+
+        return ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb);
+}
 
 static ieee80211_rx_result debug_noinline
 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
@@ -2187,233 +2413,6 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
         dev_kfree_skb(skb);
 }
 
-#define SEQ_MODULO 0x1000
-#define SEQ_MASK   0xfff
-
-static inline int seq_less(u16 sq1, u16 sq2)
-{
-        return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
-}
-
-static inline u16 seq_inc(u16 sq)
-{
-        return (sq + 1) & SEQ_MASK;
-}
-
-static inline u16 seq_sub(u16 sq1, u16 sq2)
-{
-        return (sq1 - sq2) & SEQ_MASK;
-}
-
-
-static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
-                                            struct tid_ampdu_rx *tid_agg_rx,
-                                            int index)
-{
-        struct ieee80211_supported_band *sband;
-        struct ieee80211_rate *rate = NULL;
-        struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
-        struct ieee80211_rx_status *status;
-
-        if (!skb)
-                goto no_frame;
-
-        status = IEEE80211_SKB_RXCB(skb);
-
-        /* release the reordered frames to stack */
-        sband = hw->wiphy->bands[status->band];
-        if (!(status->flag & RX_FLAG_HT))
-                rate = &sband->bitrates[status->rate_idx];
-        __ieee80211_rx_handle_packet(hw, skb, rate);
-        tid_agg_rx->stored_mpdu_num--;
-        tid_agg_rx->reorder_buf[index] = NULL;
-
- no_frame:
-        tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
-}
-
-static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
-                                             struct tid_ampdu_rx *tid_agg_rx,
-                                             u16 head_seq_num)
-{
-        int index;
-
-        while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
-                index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
-                                                        tid_agg_rx->buf_size;
-                ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
-        }
-}
-
-/*
- * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
- * the skb was added to the buffer longer than this time ago, the earlier
- * frames that have not yet been received are assumed to be lost and the skb
- * can be released for processing. This may also release other skb's from the
- * reorder buffer if there are no additional gaps between the frames.
- */
-#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
-
-/*
- * As this function belongs to the RX path it must be under
- * rcu_read_lock protection. It returns false if the frame
- * can be processed immediately, true if it was consumed.
- */
-static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
-                                             struct tid_ampdu_rx *tid_agg_rx,
-                                             struct sk_buff *skb)
-{
-        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-        u16 sc = le16_to_cpu(hdr->seq_ctrl);
-        u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
-        u16 head_seq_num, buf_size;
-        int index;
-
-        buf_size = tid_agg_rx->buf_size;
-        head_seq_num = tid_agg_rx->head_seq_num;
-
-        /* frame with out of date sequence number */
-        if (seq_less(mpdu_seq_num, head_seq_num)) {
-                dev_kfree_skb(skb);
-                return true;
-        }
-
-        /*
-         * If frame the sequence number exceeds our buffering window
-         * size release some previous frames to make room for this one.
-         */
-        if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
-                head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
-                /* release stored frames up to new head to stack */
-                ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
-        }
-
-        /* Now the new frame is always in the range of the reordering buffer */
-
-        index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;
-
-        /* check if we already stored this frame */
-        if (tid_agg_rx->reorder_buf[index]) {
-                dev_kfree_skb(skb);
-                return true;
-        }
-
-        /*
-         * If the current MPDU is in the right order and nothing else
-         * is stored we can process it directly, no need to buffer it.
-         */
-        if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
-            tid_agg_rx->stored_mpdu_num == 0) {
-                tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
-                return false;
-        }
-
-        /* put the frame in the reordering buffer */
-        tid_agg_rx->reorder_buf[index] = skb;
-        tid_agg_rx->reorder_time[index] = jiffies;
-        tid_agg_rx->stored_mpdu_num++;
-        /* release the buffer until next missing frame */
-        index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
-                                                tid_agg_rx->buf_size;
-        if (!tid_agg_rx->reorder_buf[index] &&
-            tid_agg_rx->stored_mpdu_num > 1) {
-                /*
-                 * No buffers ready to be released, but check whether any
-                 * frames in the reorder buffer have timed out.
-                 */
-                int j;
-                int skipped = 1;
-                for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
-                     j = (j + 1) % tid_agg_rx->buf_size) {
-                        if (!tid_agg_rx->reorder_buf[j]) {
-                                skipped++;
-                                continue;
-                        }
-                        if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
-                                        HT_RX_REORDER_BUF_TIMEOUT))
-                                break;
-
-#ifdef CONFIG_MAC80211_HT_DEBUG
-                        if (net_ratelimit())
-                                printk(KERN_DEBUG "%s: release an RX reorder "
-                                        "frame due to timeout on earlier "
-                                        "frames\n",
-                                        wiphy_name(hw->wiphy));
-#endif
-                        ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
-
-                        /*
-                         * Increment the head seq# also for the skipped slots.
-                         */
-                        tid_agg_rx->head_seq_num =
-                                (tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
-                        skipped = 0;
-                }
-        } else while (tid_agg_rx->reorder_buf[index]) {
-                ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
-                index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
-                                                        tid_agg_rx->buf_size;
-        }
-
-        return true;
-}
-
-/*
- * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns
- * true if the MPDU was buffered, false if it should be processed.
- */
-static bool ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
-                                       struct sk_buff *skb)
-{
-        struct ieee80211_hw *hw = &local->hw;
-        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-        struct sta_info *sta;
-        struct tid_ampdu_rx *tid_agg_rx;
-        u16 sc;
-        int tid;
-
-        if (!ieee80211_is_data_qos(hdr->frame_control))
-                return false;
-
-        /*
-         * filter the QoS data rx stream according to
-         * STA/TID and check if this STA/TID is on aggregation
-         */
-
-        sta = sta_info_get(local, hdr->addr2);
-        if (!sta)
-                return false;
-
-        tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
-
-        if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
-                return false;
-
-        tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
-
-        /* qos null data frames are excluded */
-        if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
-                return false;
-
-        /* new, potentially un-ordered, ampdu frame - process it */
-
-        /* reset session timer */
-        if (tid_agg_rx->timeout)
-                mod_timer(&tid_agg_rx->session_timer,
-                          TU_TO_EXP_TIME(tid_agg_rx->timeout));
-
-        /* if this mpdu is fragmented - terminate rx aggregation session */
-        sc = le16_to_cpu(hdr->seq_ctrl);
-        if (sc & IEEE80211_SCTL_FRAG) {
-                ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
-                        tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
-                dev_kfree_skb(skb);
-                return true;
-        }
-
-        return ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb);
-}
-
 /*
  * This is the receive path handler. It is called by a low level driver when an
  * 802.11 MPDU is received from the hardware.