author	David S. Miller <davem@davemloft.net>	2009-12-04 16:25:15 -0500
committer	David S. Miller <davem@davemloft.net>	2009-12-04 16:25:15 -0500
commit	8f56874bd7e8bee73ed6a1cf80dcec2753616262 (patch)
tree	aebd15dea662ef5efd89402b8fd92fec540a98eb /net/mac80211/rx.c
parent	47e1c323069bcef0acb8a2b48921688573f5ca63 (diff)
parent	159bcfeb9123c91f0dc885a42b6387a98192f896 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next-2.6
Diffstat (limited to 'net/mac80211/rx.c')
-rw-r--r--	net/mac80211/rx.c	589
1 file changed, 305 insertions(+), 284 deletions(-)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index beecf50fbd10..4ed60ae81b99 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -27,10 +27,6 @@
 #include "tkip.h"
 #include "wme.h"
 
-static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
-					     struct tid_ampdu_rx *tid_agg_rx,
-					     u16 head_seq_num);
-
 /*
  * monitor mode reception
  *
@@ -534,6 +530,242 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
 	return RX_CONTINUE;
 }
 
+#define SEQ_MODULO 0x1000
+#define SEQ_MASK 0xfff
+
+static inline int seq_less(u16 sq1, u16 sq2)
+{
+	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
+}
+
+static inline u16 seq_inc(u16 sq)
+{
+	return (sq + 1) & SEQ_MASK;
+}
+
+static inline u16 seq_sub(u16 sq1, u16 sq2)
+{
+	return (sq1 - sq2) & SEQ_MASK;
+}
+
+
+static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
+					    struct tid_ampdu_rx *tid_agg_rx,
+					    int index,
+					    struct sk_buff_head *frames)
+{
+	struct ieee80211_supported_band *sband;
+	struct ieee80211_rate *rate = NULL;
+	struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
+	struct ieee80211_rx_status *status;
+
+	if (!skb)
+		goto no_frame;
+
+	status = IEEE80211_SKB_RXCB(skb);
+
+	/* release the reordered frames to stack */
+	sband = hw->wiphy->bands[status->band];
+	if (!(status->flag & RX_FLAG_HT))
+		rate = &sband->bitrates[status->rate_idx];
+	tid_agg_rx->stored_mpdu_num--;
+	tid_agg_rx->reorder_buf[index] = NULL;
+	skb_queue_tail(frames, skb);
+
+no_frame:
+	tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
+}
+
+static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
+					     struct tid_ampdu_rx *tid_agg_rx,
+					     u16 head_seq_num,
+					     struct sk_buff_head *frames)
+{
+	int index;
+
+	while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
+		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
+							tid_agg_rx->buf_size;
+		ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
+	}
+}
+
+/*
+ * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
+ * the skb was added to the buffer longer than this time ago, the earlier
+ * frames that have not yet been received are assumed to be lost and the skb
+ * can be released for processing. This may also release other skb's from the
+ * reorder buffer if there are no additional gaps between the frames.
+ */
+#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
+
+/*
+ * As this function belongs to the RX path it must be under
+ * rcu_read_lock protection. It returns false if the frame
+ * can be processed immediately, true if it was consumed.
+ */
+static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
+					     struct tid_ampdu_rx *tid_agg_rx,
+					     struct sk_buff *skb,
+					     struct sk_buff_head *frames)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	u16 sc = le16_to_cpu(hdr->seq_ctrl);
+	u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
+	u16 head_seq_num, buf_size;
+	int index;
+
+	buf_size = tid_agg_rx->buf_size;
+	head_seq_num = tid_agg_rx->head_seq_num;
+
+	/* frame with out of date sequence number */
+	if (seq_less(mpdu_seq_num, head_seq_num)) {
+		dev_kfree_skb(skb);
+		return true;
+	}
+
+	/*
+	 * If the frame sequence number exceeds our buffering window
+	 * size, release some previous frames to make room for this one.
+	 */
+	if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
+		head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
+		/* release stored frames up to new head to stack */
+		ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num,
+						 frames);
+	}
+
+	/* Now the new frame is always in the range of the reordering buffer */
+
+	index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;
+
+	/* check if we already stored this frame */
+	if (tid_agg_rx->reorder_buf[index]) {
+		dev_kfree_skb(skb);
+		return true;
+	}
+
+	/*
+	 * If the current MPDU is in the right order and nothing else
+	 * is stored we can process it directly, no need to buffer it.
+	 */
+	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
+	    tid_agg_rx->stored_mpdu_num == 0) {
+		tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
+		return false;
+	}
+
+	/* put the frame in the reordering buffer */
+	tid_agg_rx->reorder_buf[index] = skb;
+	tid_agg_rx->reorder_time[index] = jiffies;
+	tid_agg_rx->stored_mpdu_num++;
+	/* release the buffer until next missing frame */
+	index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
+						tid_agg_rx->buf_size;
+	if (!tid_agg_rx->reorder_buf[index] &&
+	    tid_agg_rx->stored_mpdu_num > 1) {
+		/*
+		 * No buffers ready to be released, but check whether any
+		 * frames in the reorder buffer have timed out.
+		 */
+		int j;
+		int skipped = 1;
+		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
+		     j = (j + 1) % tid_agg_rx->buf_size) {
+			if (!tid_agg_rx->reorder_buf[j]) {
+				skipped++;
+				continue;
+			}
+			if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
+					HT_RX_REORDER_BUF_TIMEOUT))
+				break;
+
+#ifdef CONFIG_MAC80211_HT_DEBUG
+			if (net_ratelimit())
+				printk(KERN_DEBUG "%s: release an RX reorder "
+					"frame due to timeout on earlier "
+					"frames\n",
+					wiphy_name(hw->wiphy));
+#endif
+			ieee80211_release_reorder_frame(hw, tid_agg_rx,
+							j, frames);
+
+			/*
+			 * Increment the head seq# also for the skipped slots.
+			 */
+			tid_agg_rx->head_seq_num =
+				(tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
+			skipped = 0;
+		}
+	} else while (tid_agg_rx->reorder_buf[index]) {
+		ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
+		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
+							tid_agg_rx->buf_size;
+	}
+
+	return true;
+}
+
+/*
+ * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Frames that
+ * can be processed right away are appended to the frames queue;
+ * everything else is buffered for in-order release or dropped.
+ */
+static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
+				       struct sk_buff_head *frames)
+{
+	struct sk_buff *skb = rx->skb;
+	struct ieee80211_local *local = rx->local;
+	struct ieee80211_hw *hw = &local->hw;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	struct sta_info *sta = rx->sta;
+	struct tid_ampdu_rx *tid_agg_rx;
+	u16 sc;
+	int tid;
+
+	if (!ieee80211_is_data_qos(hdr->frame_control))
+		goto dont_reorder;
+
+	/*
+	 * filter the QoS data rx stream according to
+	 * STA/TID and check if this STA/TID is on aggregation
+	 */
+
+	if (!sta)
+		goto dont_reorder;
+
+	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+
+	if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
+		goto dont_reorder;
+
+	tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
+
+	/* qos null data frames are excluded */
+	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
+		goto dont_reorder;
+
+	/* new, potentially un-ordered, ampdu frame - process it */
+
+	/* reset session timer */
+	if (tid_agg_rx->timeout)
+		mod_timer(&tid_agg_rx->session_timer,
+			  TU_TO_EXP_TIME(tid_agg_rx->timeout));
+
+	/* if this mpdu is fragmented - terminate rx aggregation session */
+	sc = le16_to_cpu(hdr->seq_ctrl);
+	if (sc & IEEE80211_SCTL_FRAG) {
+		ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
+			tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames))
+		return;
+
+ dont_reorder:
+	__skb_queue_tail(frames, skb);
+}
 
 static ieee80211_rx_result debug_noinline
 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
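
A note on the sequence-number helpers added by this hunk: 802.11 sequence numbers are 12 bits wide, so all comparisons happen modulo 4096, and seq_less() reads a distance of more than half the space (SEQ_MODULO >> 1) as wraparound. The following standalone sketch (user-space C with stdint types standing in for the kernel's u16; illustrative only, not part of the patch) shows the behavior across the 0xfff/0x000 boundary:

#include <stdint.h>
#include <stdio.h>

#define SEQ_MODULO 0x1000
#define SEQ_MASK 0xfff

/* "less than" in modulo-4096 space: true if sq1 is behind sq2 */
static int seq_less(uint16_t sq1, uint16_t sq2)
{
	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}

static uint16_t seq_inc(uint16_t sq)
{
	return (sq + 1) & SEQ_MASK;
}

static uint16_t seq_sub(uint16_t sq1, uint16_t sq2)
{
	return (sq1 - sq2) & SEQ_MASK;
}

int main(void)
{
	/* 0x002 is "after" 0xffe even though it is numerically smaller */
	printf("%d\n", seq_less(0xffe, 0x002));	/* 1: 0xffe is behind 0x002 */
	printf("%d\n", seq_less(0x002, 0xffe));	/* 0 */
	printf("%#x\n", seq_inc(0xfff));	/* 0: increment wraps to zero */
	printf("%#x\n", seq_sub(0x002, 0xffe));	/* 0x4: distance across the wrap */
	return 0;
}

Because the comparison is modular, 0xffe sorts before 0x002 across the wrap, which is what lets head_seq_num advance monotonically through sequence-number rollover with no special casing.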
@@ -637,6 +869,9 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
 	if (!(rx->flags & IEEE80211_RX_RA_MATCH))
 		return RX_CONTINUE;
 
+	/* start without a key */
+	rx->key = NULL;
+
 	if (rx->sta)
 		stakey = rcu_dereference(rx->sta->key);
 
@@ -1589,7 +1824,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
 }
 
 static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
+ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
 {
 	struct ieee80211_local *local = rx->local;
 	struct ieee80211_hw *hw = &local->hw;
@@ -1619,7 +1854,8 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
 			  TU_TO_EXP_TIME(tid_agg_rx->timeout));
 
 		/* release stored frames up to start of BAR */
-		ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num);
+		ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
+						 frames);
 		kfree_skb(skb);
 		return RX_QUEUED;
 	}
@@ -1868,7 +2104,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
 	struct net_device *prev_dev = NULL;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 
-	if (rx->flags & IEEE80211_RX_CMNTR_REPORTED)
+	if (status->flag & RX_FLAG_INTERNAL_CMTR)
 		goto out_free_skb;
 
 	if (skb_headroom(skb) < sizeof(*rthdr) &&
@@ -1929,7 +2165,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
 	} else
 		goto out_free_skb;
 
-	rx->flags |= IEEE80211_RX_CMNTR_REPORTED;
+	status->flag |= RX_FLAG_INTERNAL_CMTR;
 	return;
 
 out_free_skb:
@@ -1942,8 +2178,11 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
 					 struct sk_buff *skb,
 					 struct ieee80211_rate *rate)
 {
+	struct sk_buff_head reorder_release;
 	ieee80211_rx_result res = RX_DROP_MONITOR;
 
+	__skb_queue_head_init(&reorder_release);
+
 	rx->skb = skb;
 	rx->sdata = sdata;
 
@@ -1951,50 +2190,72 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
 	do {				\
 		res = rxh(rx);		\
 		if (res != RX_CONTINUE)	\
-			goto rxh_done;	\
+			goto rxh_next;	\
 	} while (0);
 
+	/*
+	 * NB: the rxh_next label works even if we jump
+	 * to it from here because then the list will
+	 * be empty, which is a trivial check
+	 */
 	CALL_RXH(ieee80211_rx_h_passive_scan)
 	CALL_RXH(ieee80211_rx_h_check)
-	CALL_RXH(ieee80211_rx_h_decrypt)
-	CALL_RXH(ieee80211_rx_h_check_more_data)
-	CALL_RXH(ieee80211_rx_h_sta_process)
-	CALL_RXH(ieee80211_rx_h_defragment)
-	CALL_RXH(ieee80211_rx_h_ps_poll)
-	CALL_RXH(ieee80211_rx_h_michael_mic_verify)
-	/* must be after MMIC verify so header is counted in MPDU mic */
-	CALL_RXH(ieee80211_rx_h_remove_qos_control)
-	CALL_RXH(ieee80211_rx_h_amsdu)
+
+	ieee80211_rx_reorder_ampdu(rx, &reorder_release);
+
+	while ((skb = __skb_dequeue(&reorder_release))) {
+		/*
+		 * all the other fields are valid across frames
+		 * that belong to an aMPDU since they are on the
+		 * same TID from the same station
+		 */
+		rx->skb = skb;
+
+		CALL_RXH(ieee80211_rx_h_decrypt)
+		CALL_RXH(ieee80211_rx_h_check_more_data)
+		CALL_RXH(ieee80211_rx_h_sta_process)
+		CALL_RXH(ieee80211_rx_h_defragment)
+		CALL_RXH(ieee80211_rx_h_ps_poll)
+		CALL_RXH(ieee80211_rx_h_michael_mic_verify)
+		/* must be after MMIC verify so header is counted in MPDU mic */
+		CALL_RXH(ieee80211_rx_h_remove_qos_control)
+		CALL_RXH(ieee80211_rx_h_amsdu)
 #ifdef CONFIG_MAC80211_MESH
 	if (ieee80211_vif_is_mesh(&sdata->vif))
 		CALL_RXH(ieee80211_rx_h_mesh_fwding);
 #endif
-	CALL_RXH(ieee80211_rx_h_data)
-	CALL_RXH(ieee80211_rx_h_ctrl)
-	CALL_RXH(ieee80211_rx_h_action)
-	CALL_RXH(ieee80211_rx_h_mgmt)
+		CALL_RXH(ieee80211_rx_h_data)
+
+		/* special treatment -- needs the queue */
+		res = ieee80211_rx_h_ctrl(rx, &reorder_release);
+		if (res != RX_CONTINUE)
+			goto rxh_next;
+
+		CALL_RXH(ieee80211_rx_h_action)
+		CALL_RXH(ieee80211_rx_h_mgmt)
 
 #undef CALL_RXH
 
- rxh_done:
+ rxh_next:
 	switch (res) {
 	case RX_DROP_MONITOR:
 		I802_DEBUG_INC(sdata->local->rx_handlers_drop);
 		if (rx->sta)
 			rx->sta->rx_dropped++;
 		/* fall through */
 	case RX_CONTINUE:
 		ieee80211_rx_cooked_monitor(rx, rate);
 		break;
 	case RX_DROP_UNUSABLE:
 		I802_DEBUG_INC(sdata->local->rx_handlers_drop);
 		if (rx->sta)
 			rx->sta->rx_dropped++;
 		dev_kfree_skb(rx->skb);
 		break;
 	case RX_QUEUED:
 		I802_DEBUG_INC(sdata->local->rx_handlers_queued);
 		break;
+	}
 	}
 }
 
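
The rework in this hunk splits RX processing into two phases: ieee80211_rx_h_passive_scan and ieee80211_rx_h_check still run once per received MPDU, then ieee80211_rx_reorder_ampdu() places zero or more frames on the local reorder_release queue, and the remaining handlers drain that queue. ieee80211_rx_h_ctrl is called outside CALL_RXH because it needs the queue itself: a BAR releases buffered frames onto reorder_release while the queue is being drained, which is safe since the loop re-reads the queue head on every iteration. A minimal user-space sketch of that drain-while-appending pattern (toy intrusive list and invented frame type; not kernel code):

#include <stdio.h>
#include <stdlib.h>

struct frame {
	int seq;
	struct frame *next;
};

struct queue {
	struct frame *head, *tail;
};

static void enqueue(struct queue *q, struct frame *f)
{
	f->next = NULL;
	if (q->tail)
		q->tail->next = f;
	else
		q->head = f;
	q->tail = f;
}

static struct frame *dequeue(struct queue *q)
{
	struct frame *f = q->head;

	if (f) {
		q->head = f->next;
		if (!q->head)
			q->tail = NULL;
	}
	return f;
}

/* a "handler" that may release more frames onto the queue it came from */
static void handle(struct frame *f, struct queue *release)
{
	printf("processing seq %d\n", f->seq);
	if (f->seq == 2) {		/* pretend seq 2 is a BAR */
		struct frame *extra = malloc(sizeof(*extra));

		extra->seq = 3;
		enqueue(release, extra);	/* released mid-drain */
	}
	free(f);
}

int main(void)
{
	struct queue release = { NULL, NULL };
	struct frame *f;

	for (int s = 1; s <= 2; s++) {
		f = malloc(sizeof(*f));
		f->seq = s;
		enqueue(&release, f);
	}
	/* mirrors the while (__skb_dequeue(&reorder_release)) loop above */
	while ((f = dequeue(&release)))
		handle(f, &release);
	return 0;
}

The kernel version has the same shape: __skb_dequeue() returns NULL only once nothing is left, including frames that a handler appended mid-drain.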
@@ -2187,233 +2448,6 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
 	dev_kfree_skb(skb);
 }
 
-#define SEQ_MODULO 0x1000
-#define SEQ_MASK 0xfff
-
-static inline int seq_less(u16 sq1, u16 sq2)
-{
-	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
-}
-
-static inline u16 seq_inc(u16 sq)
-{
-	return (sq + 1) & SEQ_MASK;
-}
-
-static inline u16 seq_sub(u16 sq1, u16 sq2)
-{
-	return (sq1 - sq2) & SEQ_MASK;
-}
-
-
-static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
-					    struct tid_ampdu_rx *tid_agg_rx,
-					    int index)
-{
-	struct ieee80211_supported_band *sband;
-	struct ieee80211_rate *rate = NULL;
-	struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
-	struct ieee80211_rx_status *status;
-
-	if (!skb)
-		goto no_frame;
-
-	status = IEEE80211_SKB_RXCB(skb);
-
-	/* release the reordered frames to stack */
-	sband = hw->wiphy->bands[status->band];
-	if (!(status->flag & RX_FLAG_HT))
-		rate = &sband->bitrates[status->rate_idx];
-	__ieee80211_rx_handle_packet(hw, skb, rate);
-	tid_agg_rx->stored_mpdu_num--;
-	tid_agg_rx->reorder_buf[index] = NULL;
-
-no_frame:
-	tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
-}
-
-static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
-					     struct tid_ampdu_rx *tid_agg_rx,
-					     u16 head_seq_num)
-{
-	int index;
-
-	while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
-		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
-							tid_agg_rx->buf_size;
-		ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
-	}
-}
-
-/*
- * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
- * the skb was added to the buffer longer than this time ago, the earlier
- * frames that have not yet been received are assumed to be lost and the skb
- * can be released for processing. This may also release other skb's from the
- * reorder buffer if there are no additional gaps between the frames.
- */
-#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
-
-/*
- * As this function belongs to the RX path it must be under
- * rcu_read_lock protection. It returns false if the frame
- * can be processed immediately, true if it was consumed.
- */
-static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
-					     struct tid_ampdu_rx *tid_agg_rx,
-					     struct sk_buff *skb)
-{
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-	u16 sc = le16_to_cpu(hdr->seq_ctrl);
-	u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
-	u16 head_seq_num, buf_size;
-	int index;
-
-	buf_size = tid_agg_rx->buf_size;
-	head_seq_num = tid_agg_rx->head_seq_num;
-
-	/* frame with out of date sequence number */
-	if (seq_less(mpdu_seq_num, head_seq_num)) {
-		dev_kfree_skb(skb);
-		return true;
-	}
-
-	/*
-	 * If frame the sequence number exceeds our buffering window
-	 * size release some previous frames to make room for this one.
-	 */
-	if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
-		head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
-		/* release stored frames up to new head to stack */
-		ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
-	}
-
-	/* Now the new frame is always in the range of the reordering buffer */
-
-	index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;
-
-	/* check if we already stored this frame */
-	if (tid_agg_rx->reorder_buf[index]) {
-		dev_kfree_skb(skb);
-		return true;
-	}
-
-	/*
-	 * If the current MPDU is in the right order and nothing else
-	 * is stored we can process it directly, no need to buffer it.
-	 */
-	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
-	    tid_agg_rx->stored_mpdu_num == 0) {
-		tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
-		return false;
-	}
-
-	/* put the frame in the reordering buffer */
-	tid_agg_rx->reorder_buf[index] = skb;
-	tid_agg_rx->reorder_time[index] = jiffies;
-	tid_agg_rx->stored_mpdu_num++;
-	/* release the buffer until next missing frame */
-	index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
-						tid_agg_rx->buf_size;
-	if (!tid_agg_rx->reorder_buf[index] &&
-	    tid_agg_rx->stored_mpdu_num > 1) {
-		/*
-		 * No buffers ready to be released, but check whether any
-		 * frames in the reorder buffer have timed out.
-		 */
-		int j;
-		int skipped = 1;
-		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
-		     j = (j + 1) % tid_agg_rx->buf_size) {
-			if (!tid_agg_rx->reorder_buf[j]) {
-				skipped++;
-				continue;
-			}
-			if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
-					HT_RX_REORDER_BUF_TIMEOUT))
-				break;
-
-#ifdef CONFIG_MAC80211_HT_DEBUG
-			if (net_ratelimit())
-				printk(KERN_DEBUG "%s: release an RX reorder "
-					"frame due to timeout on earlier "
-					"frames\n",
-					wiphy_name(hw->wiphy));
-#endif
-			ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
-
-			/*
-			 * Increment the head seq# also for the skipped slots.
-			 */
-			tid_agg_rx->head_seq_num =
-				(tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
-			skipped = 0;
-		}
-	} else while (tid_agg_rx->reorder_buf[index]) {
-		ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
-		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
-							tid_agg_rx->buf_size;
-	}
-
-	return true;
-}
-
-/*
- * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns
- * true if the MPDU was buffered, false if it should be processed.
- */
-static bool ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
-				       struct sk_buff *skb)
-{
-	struct ieee80211_hw *hw = &local->hw;
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-	struct sta_info *sta;
-	struct tid_ampdu_rx *tid_agg_rx;
-	u16 sc;
-	int tid;
-
-	if (!ieee80211_is_data_qos(hdr->frame_control))
-		return false;
-
-	/*
-	 * filter the QoS data rx stream according to
-	 * STA/TID and check if this STA/TID is on aggregation
-	 */
-
-	sta = sta_info_get(local, hdr->addr2);
-	if (!sta)
-		return false;
-
-	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
-
-	if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
-		return false;
-
-	tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
-
-	/* qos null data frames are excluded */
-	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
-		return false;
-
-	/* new, potentially un-ordered, ampdu frame - process it */
-
-	/* reset session timer */
-	if (tid_agg_rx->timeout)
-		mod_timer(&tid_agg_rx->session_timer,
-			  TU_TO_EXP_TIME(tid_agg_rx->timeout));
-
-	/* if this mpdu is fragmented - terminate rx aggregation session */
-	sc = le16_to_cpu(hdr->seq_ctrl);
-	if (sc & IEEE80211_SCTL_FRAG) {
-		ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
-			tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
-		dev_kfree_skb(skb);
-		return true;
-	}
-
-	return ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb);
-}
-
 /*
  * This is the receive path handler. It is called by a low level driver when an
  * 802.11 MPDU is received from the hardware.
@@ -2495,20 +2529,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
 		return;
 	}
 
-	/*
-	 * In theory, the block ack reordering should happen after duplicate
-	 * removal (ieee80211_rx_h_check(), which is an RX handler). As such,
-	 * the call to ieee80211_rx_reorder_ampdu() should really be moved to
-	 * happen as a new RX handler between ieee80211_rx_h_check and
-	 * ieee80211_rx_h_decrypt. This cleanup may eventually happen, but for
-	 * the time being, the call can be here since RX reorder buf processing
-	 * will implicitly skip duplicates. We could, in theory at least,
-	 * process frames that ieee80211_rx_h_passive_scan would drop (e.g.,
-	 * frames from other than operational channel), but that should not
-	 * happen in normal networks.
-	 */
-	if (!ieee80211_rx_reorder_ampdu(local, skb))
-		__ieee80211_rx_handle_packet(hw, skb, rate);
+	__ieee80211_rx_handle_packet(hw, skb, rate);
 
 	rcu_read_unlock();
 
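
For reference, the window-advance expression used by ieee80211_sta_manage_reorder_buf() above, head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size)), repositions the reorder window so the just-received frame lands in its last slot. A worked example with made-up values (standalone C, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define SEQ_MASK 0xfff

static uint16_t seq_inc(uint16_t sq) { return (sq + 1) & SEQ_MASK; }
static uint16_t seq_sub(uint16_t a, uint16_t b) { return (a - b) & SEQ_MASK; }

int main(void)
{
	uint16_t head_seq_num = 100;	/* current window start */
	uint16_t buf_size = 64;		/* negotiated reorder window */
	uint16_t mpdu_seq_num = 200;	/* frame beyond head + buf_size */

	/* same expression as the kernel code */
	head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
	printf("new head: %u\n", head_seq_num);	/* 137: window now spans 137..200 */
	return 0;
}

Everything buffered before the new head is released to the stack first (ieee80211_release_reorder_frames), so the frame always fits within the buffer afterwards.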