author	Christian Lamparter <chunkeey@googlemail.com>	2013-02-04 12:44:44 -0500
committer	Johannes Berg <johannes.berg@intel.com>	2013-02-11 12:44:55 -0500
commit	f9e124fbd8cbea974b5dc7e9dafddd17d21df7e2 (patch)
tree	f09c89096de26a4211d73742933260d5d240b58e /net/mac80211
parent	601513aa208f27ea87400a410d42c978421530ec (diff)
mac80211: protect rx-path with spinlock
This patch fixes the problem discussed in
"mac80211: Fix PN corruption in case of multiple
virtual interface" [1].

Amit Shakya reported a serious issue with my patch
"mac80211: serialize rx path workers" [2]:
"In case ieee80211_rx_handlers processing is going on
for skbs received on one vif and, at the same time, the rx
aggregation reorder timer expires on another vif, then
sta_rx_agg_reorder_timer_expired is invoked and it will
push skbs into the single queue (local->rx_skb_queue).

ieee80211_rx_handlers in the while loop assumes that
the skbs are for the same sdata and sta. This assumption
doesn't hold in this scenario and the PN gets
corrupted by the PN received in the other vif's skb, causing
traffic to stop due to the PN mismatch."
[1] Message-Id: http://mid.gmane.org/201302041844.44436.chunkeey@googlemail.com
[2] Commit-Id: 24a8fdad35835e8d71f7
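
The pattern this patch moves to can be sketched in a few lines of
plain C. This is an illustration only, not part of the patch:
process_one_frame() and reorder_or_queue() are hypothetical stand-ins
for the real rx handlers and the reorder-buffer logic.

	#include <linux/skbuff.h>
	#include <linux/spinlock.h>

	/* plays the role of local->rx_path_lock */
	static DEFINE_SPINLOCK(rx_path_lock);

	static void process_one_frame(struct sk_buff *skb);	/* stand-in */
	static void reorder_or_queue(struct sk_buff *skb,
				     struct sk_buff_head *frames);	/* stand-in */

	static void run_rx_handlers(struct sk_buff_head *frames)
	{
		struct sk_buff *skb;

		/* _bh variant: the rx path runs in tasklet context and
		 * must not race with the reorder-release timer */
		spin_lock_bh(&rx_path_lock);
		while ((skb = __skb_dequeue(frames))) {
			/* every skb on 'frames' belongs to one sdata/sta,
			 * so per-station state such as the PN cannot be
			 * corrupted by frames from another vif */
			process_one_frame(skb);
		}
		spin_unlock_bh(&rx_path_lock);
	}

	static void rx_entry(struct sk_buff *skb)
	{
		struct sk_buff_head frames;

		__skb_queue_head_init(&frames);	/* on-stack, no lock needed */
		reorder_or_queue(skb, &frames);	/* may also release buffered frames */
		run_rx_handlers(&frames);
	}

Unlike the removed rx_skb_queue, the queue here is per-invocation, so
two vifs can never interleave frames on it; rx_path_lock still ensures
that only one CPU runs the rx handlers at a time.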
Reported-by: Amit Shakya <amit.shakya@stericsson.com>
Signed-off-by: Christian Lamparter <chunkeey@googlemail.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Diffstat (limited to 'net/mac80211')
-rw-r--r--	net/mac80211/ieee80211_i.h	9
-rw-r--r--	net/mac80211/main.c	14
-rw-r--r--	net/mac80211/rx.c	81
3 files changed, 48 insertions(+), 56 deletions(-)
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 5fe9db707880..080cf0942ce7 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -958,14 +958,7 @@ struct ieee80211_local {
 	struct sk_buff_head skb_queue;
 	struct sk_buff_head skb_queue_unreliable;
 
-	/*
-	 * Internal FIFO queue which is shared between multiple rx path
-	 * stages. Its main task is to provide a serialization mechanism,
-	 * so all rx handlers can enjoy having exclusive access to their
-	 * private data structures.
-	 */
-	struct sk_buff_head rx_skb_queue;
-	bool running_rx_handler; /* protected by rx_skb_queue.lock */
+	spinlock_t rx_path_lock;
 
 	/* Station data */
 	/*
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 2bdd454e8bcf..2220331f4b7c 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -34,8 +34,6 @@
 #include "cfg.h"
 #include "debugfs.h"
 
-static struct lock_class_key ieee80211_rx_skb_queue_class;
-
 void ieee80211_configure_filter(struct ieee80211_local *local)
 {
 	u64 mc;
@@ -613,21 +611,12 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 
 	mutex_init(&local->key_mtx);
 	spin_lock_init(&local->filter_lock);
+	spin_lock_init(&local->rx_path_lock);
 	spin_lock_init(&local->queue_stop_reason_lock);
 
 	INIT_LIST_HEAD(&local->chanctx_list);
 	mutex_init(&local->chanctx_mtx);
 
-	/*
-	 * The rx_skb_queue is only accessed from tasklets,
-	 * but other SKB queues are used from within IRQ
-	 * context. Therefore, this one needs a different
-	 * locking class so our direct, non-irq-safe use of
-	 * the queue's lock doesn't throw lockdep warnings.
-	 */
-	skb_queue_head_init_class(&local->rx_skb_queue,
-				  &ieee80211_rx_skb_queue_class);
-
 	INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);
 
 	INIT_WORK(&local->restart_work, ieee80211_restart_work);
@@ -1089,7 +1078,6 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
 		wiphy_warn(local->hw.wiphy, "skb_queue not empty\n");
 	skb_queue_purge(&local->skb_queue);
 	skb_queue_purge(&local->skb_queue_unreliable);
-	skb_queue_purge(&local->rx_skb_queue);
 
 	destroy_workqueue(local->workqueue);
 	wiphy_unregister(local->hw.wiphy);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index c98be0593756..b5f1bba7ffe1 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -668,9 +668,9 @@ static inline u16 seq_sub(u16 sq1, u16 sq2)
 
 static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
 					    struct tid_ampdu_rx *tid_agg_rx,
-					    int index)
+					    int index,
+					    struct sk_buff_head *frames)
 {
-	struct ieee80211_local *local = sdata->local;
 	struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
 	struct ieee80211_rx_status *status;
 
@@ -684,7 +684,7 @@ static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
 	tid_agg_rx->reorder_buf[index] = NULL;
 	status = IEEE80211_SKB_RXCB(skb);
 	status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
-	skb_queue_tail(&local->rx_skb_queue, skb);
+	__skb_queue_tail(frames, skb);
 
 no_frame:
 	tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
@@ -692,7 +692,8 @@ no_frame:
 
 static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
 					     struct tid_ampdu_rx *tid_agg_rx,
-					     u16 head_seq_num)
+					     u16 head_seq_num,
+					     struct sk_buff_head *frames)
 {
 	int index;
 
@@ -701,7 +702,8 @@ static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
 	while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
 		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
 							tid_agg_rx->buf_size;
-		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
+		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
+						frames);
 	}
 }
 
@@ -717,7 +719,8 @@ static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
 #define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
 
 static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
-					  struct tid_ampdu_rx *tid_agg_rx)
+					  struct tid_ampdu_rx *tid_agg_rx,
+					  struct sk_buff_head *frames)
 {
 	int index, j;
 
@@ -746,7 +749,8 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
 
 			ht_dbg_ratelimited(sdata,
 					   "release an RX reorder frame due to timeout on earlier frames\n");
-			ieee80211_release_reorder_frame(sdata, tid_agg_rx, j);
+			ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
+							frames);
 
 			/*
 			 * Increment the head seq# also for the skipped slots.
@@ -756,7 +760,8 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
 			skipped = 0;
 		}
 	} else while (tid_agg_rx->reorder_buf[index]) {
-		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index);
+		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
+						frames);
 		index =	seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
 							tid_agg_rx->buf_size;
 	}
@@ -788,7 +793,8 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
  */
 static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
 					     struct tid_ampdu_rx *tid_agg_rx,
-					     struct sk_buff *skb)
+					     struct sk_buff *skb,
+					     struct sk_buff_head *frames)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 	u16 sc = le16_to_cpu(hdr->seq_ctrl);
@@ -816,7 +822,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
 		head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
 		/* release stored frames up to new head to stack */
 		ieee80211_release_reorder_frames(sdata, tid_agg_rx,
-						 head_seq_num);
+						 head_seq_num, frames);
 	}
 
 	/* Now the new frame is always in the range of the reordering buffer */
@@ -846,7 +852,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
 	tid_agg_rx->reorder_buf[index] = skb;
 	tid_agg_rx->reorder_time[index] = jiffies;
 	tid_agg_rx->stored_mpdu_num++;
-	ieee80211_sta_reorder_release(sdata, tid_agg_rx);
+	ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
 
  out:
 	spin_unlock(&tid_agg_rx->reorder_lock);
@@ -857,7 +863,8 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
  * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns
  * true if the MPDU was buffered, false if it should be processed.
  */
-static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
+static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
+				       struct sk_buff_head *frames)
 {
 	struct sk_buff *skb = rx->skb;
 	struct ieee80211_local *local = rx->local;
@@ -922,11 +929,12 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
 	 * sure that we cannot get to it any more before doing
 	 * anything with it.
 	 */
-	if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb))
+	if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
+					     frames))
 		return;
 
  dont_reorder:
-	skb_queue_tail(&local->rx_skb_queue, skb);
+	__skb_queue_tail(frames, skb);
 }
 
 static ieee80211_rx_result debug_noinline
@@ -2184,7 +2192,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
 }
 
 static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
+ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
 {
 	struct sk_buff *skb = rx->skb;
 	struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
@@ -2223,7 +2231,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
 		spin_lock(&tid_agg_rx->reorder_lock);
 		/* release stored frames up to start of BAR */
 		ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
-						 start_seq_num);
+						 start_seq_num, frames);
 		spin_unlock(&tid_agg_rx->reorder_lock);
 
 		kfree_skb(skb);
@@ -2808,7 +2816,8 @@ static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
 	}
 }
 
-static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
+static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
+				  struct sk_buff_head *frames)
 {
 	ieee80211_rx_result res = RX_DROP_MONITOR;
 	struct sk_buff *skb;
@@ -2820,15 +2829,9 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
 			goto rxh_next;  \
 	} while (0);
 
-	spin_lock(&rx->local->rx_skb_queue.lock);
-	if (rx->local->running_rx_handler)
-		goto unlock;
-
-	rx->local->running_rx_handler = true;
-
-	while ((skb = __skb_dequeue(&rx->local->rx_skb_queue))) {
-		spin_unlock(&rx->local->rx_skb_queue.lock);
+	spin_lock_bh(&rx->local->rx_path_lock);
 
+	while ((skb = __skb_dequeue(frames))) {
 		/*
 		 * all the other fields are valid across frames
 		 * that belong to an aMPDU since they are on the
@@ -2849,7 +2852,12 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
 #endif
 		CALL_RXH(ieee80211_rx_h_amsdu)
 		CALL_RXH(ieee80211_rx_h_data)
-		CALL_RXH(ieee80211_rx_h_ctrl);
+
+		/* special treatment -- needs the queue */
+		res = ieee80211_rx_h_ctrl(rx, frames);
+		if (res != RX_CONTINUE)
+			goto rxh_next;
+
 		CALL_RXH(ieee80211_rx_h_mgmt_check)
 		CALL_RXH(ieee80211_rx_h_action)
 		CALL_RXH(ieee80211_rx_h_userspace_mgmt)
@@ -2858,20 +2866,20 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx)
 
 rxh_next:
 		ieee80211_rx_handlers_result(rx, res);
-		spin_lock(&rx->local->rx_skb_queue.lock);
+
 #undef CALL_RXH
 	}
 
-	rx->local->running_rx_handler = false;
-
- unlock:
-	spin_unlock(&rx->local->rx_skb_queue.lock);
+	spin_unlock_bh(&rx->local->rx_path_lock);
 }
 
 static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
 {
+	struct sk_buff_head reorder_release;
 	ieee80211_rx_result res = RX_DROP_MONITOR;
 
+	__skb_queue_head_init(&reorder_release);
+
 #define CALL_RXH(rxh)			\
 	do {				\
 		res = rxh(rx);		\
@@ -2881,9 +2889,9 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
 
 	CALL_RXH(ieee80211_rx_h_check)
 
-	ieee80211_rx_reorder_ampdu(rx);
+	ieee80211_rx_reorder_ampdu(rx, &reorder_release);
 
-	ieee80211_rx_handlers(rx);
+	ieee80211_rx_handlers(rx, &reorder_release);
 	return;
 
 rxh_next:
@@ -2898,6 +2906,7 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
  */
 void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
 {
+	struct sk_buff_head frames;
 	struct ieee80211_rx_data rx = {
 		.sta = sta,
 		.sdata = sta->sdata,
@@ -2913,11 +2922,13 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
 	if (!tid_agg_rx)
 		return;
 
+	__skb_queue_head_init(&frames);
+
 	spin_lock(&tid_agg_rx->reorder_lock);
-	ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx);
+	ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
 	spin_unlock(&tid_agg_rx->reorder_lock);
 
-	ieee80211_rx_handlers(&rx);
+	ieee80211_rx_handlers(&rx, &frames);
 }
 
 /* main receive path */