author	David S. Miller <davem@davemloft.net>	2009-12-04 16:25:15 -0500
committer	David S. Miller <davem@davemloft.net>	2009-12-04 16:25:15 -0500
commit	8f56874bd7e8bee73ed6a1cf80dcec2753616262 (patch)
tree	aebd15dea662ef5efd89402b8fd92fec540a98eb /net
parent	47e1c323069bcef0acb8a2b48921688573f5ca63 (diff)
parent	159bcfeb9123c91f0dc885a42b6387a98192f896 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next-2.6
Diffstat (limited to 'net')
-rw-r--r--	net/mac80211/driver-trace.h	26
-rw-r--r--	net/mac80211/ieee80211_i.h	4
-rw-r--r--	net/mac80211/iface.c	14
-rw-r--r--	net/mac80211/rx.c	589
-rw-r--r--	net/mac80211/status.c	6
-rw-r--r--	net/wireless/core.c	12
-rw-r--r--	net/wireless/core.h	2
-rw-r--r--	net/wireless/ibss.c	2
-rw-r--r--	net/wireless/nl80211.c	120
-rw-r--r--	net/wireless/reg.c	6
-rw-r--r--	net/wireless/scan.c	2
-rw-r--r--	net/wireless/sme.c	6
-rw-r--r--	net/wireless/wext-compat.c	42
-rw-r--r--	net/wireless/wext-core.c	3
14 files changed, 526 insertions, 308 deletions
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index b8fef1d11369..ee94ea0c67e9 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -131,17 +131,35 @@ TRACE_EVENT(drv_config,
 		LOCAL_ENTRY
 		__field(u32, changed)
 		__field(int, ret)
+		__field(u32, flags)
+		__field(int, power_level)
+		__field(int, dynamic_ps_timeout)
+		__field(int, max_sleep_period)
+		__field(u16, listen_interval)
+		__field(u8, long_frame_max_tx_count)
+		__field(u8, short_frame_max_tx_count)
+		__field(int, center_freq)
+		__field(int, channel_type)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		__entry->changed = changed;
 		__entry->ret = ret;
+		__entry->flags = local->hw.conf.flags;
+		__entry->power_level = local->hw.conf.power_level;
+		__entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout;
+		__entry->max_sleep_period = local->hw.conf.max_sleep_period;
+		__entry->listen_interval = local->hw.conf.listen_interval;
+		__entry->long_frame_max_tx_count = local->hw.conf.long_frame_max_tx_count;
+		__entry->short_frame_max_tx_count = local->hw.conf.short_frame_max_tx_count;
+		__entry->center_freq = local->hw.conf.channel->center_freq;
+		__entry->channel_type = local->hw.conf.channel_type;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT " ch:%#x ret:%d",
-		LOCAL_PR_ARG, __entry->changed, __entry->ret
+		LOCAL_PR_FMT " ch:%#x freq:%d ret:%d",
+		LOCAL_PR_ARG, __entry->changed, __entry->center_freq, __entry->ret
 	)
 );
 
@@ -167,6 +185,8 @@ TRACE_EVENT(drv_bss_info_changed,
 		__field(u64, timestamp)
 		__field(u32, basic_rates)
 		__field(u32, changed)
+		__field(bool, enable_beacon)
+		__field(u16, ht_operation_mode)
 	),
 
 	TP_fast_assign(
@@ -183,6 +203,8 @@ TRACE_EVENT(drv_bss_info_changed,
 		__entry->assoc_cap = info->assoc_capability;
 		__entry->timestamp = info->timestamp;
 		__entry->basic_rates = info->basic_rates;
+		__entry->enable_beacon = info->enable_beacon;
+		__entry->ht_operation_mode = info->ht_operation_mode;
 	),
 
 	TP_printk(
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 039affa7c871..419f186cfcf0 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -163,8 +163,8 @@ typedef unsigned __bitwise__ ieee80211_rx_result;
 /* frame is destined to interface currently processed (incl. multicast frames) */
 #define IEEE80211_RX_RA_MATCH		BIT(1)
 #define IEEE80211_RX_AMSDU		BIT(2)
-#define IEEE80211_RX_CMNTR_REPORTED	BIT(3)
-#define IEEE80211_RX_FRAGMENTED	BIT(4)
+#define IEEE80211_RX_FRAGMENTED		BIT(3)
+/* only add flags here that do not change with subframes of an aMPDU */
 
 struct ieee80211_rx_data {
 	struct sk_buff *skb;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 1bf12a26b45e..80c16f6e2af6 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -860,22 +860,18 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
 void ieee80211_remove_interfaces(struct ieee80211_local *local)
 {
 	struct ieee80211_sub_if_data *sdata, *tmp;
+	LIST_HEAD(unreg_list);
 
 	ASSERT_RTNL();
 
+	mutex_lock(&local->iflist_mtx);
 	list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
-		/*
-		 * we cannot hold the iflist_mtx across unregister_netdevice,
-		 * but we only need to hold it for list modifications to lock
-		 * out readers since we're under the RTNL here as all other
-		 * writers.
-		 */
-		mutex_lock(&local->iflist_mtx);
 		list_del(&sdata->list);
-		mutex_unlock(&local->iflist_mtx);
 
-		unregister_netdevice(sdata->dev);
+		unregister_netdevice_queue(sdata->dev, &unreg_list);
 	}
+	mutex_unlock(&local->iflist_mtx);
+	unregister_netdevice_many(&unreg_list);
 }
 
 static u32 ieee80211_idle_off(struct ieee80211_local *local,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index beecf50fbd10..4ed60ae81b99 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -27,10 +27,6 @@
 #include "tkip.h"
 #include "wme.h"
 
-static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
-					     struct tid_ampdu_rx *tid_agg_rx,
-					     u16 head_seq_num);
-
 /*
  * monitor mode reception
  *
@@ -534,6 +530,242 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
 	return RX_CONTINUE;
 }
 
+#define SEQ_MODULO 0x1000
+#define SEQ_MASK 0xfff
+
+static inline int seq_less(u16 sq1, u16 sq2)
+{
+	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
+}
+
+static inline u16 seq_inc(u16 sq)
+{
+	return (sq + 1) & SEQ_MASK;
+}
+
+static inline u16 seq_sub(u16 sq1, u16 sq2)
+{
+	return (sq1 - sq2) & SEQ_MASK;
+}
+
+
+static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
+					    struct tid_ampdu_rx *tid_agg_rx,
+					    int index,
+					    struct sk_buff_head *frames)
+{
+	struct ieee80211_supported_band *sband;
+	struct ieee80211_rate *rate = NULL;
+	struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
+	struct ieee80211_rx_status *status;
+
+	if (!skb)
+		goto no_frame;
+
+	status = IEEE80211_SKB_RXCB(skb);
+
+	/* release the reordered frames to stack */
+	sband = hw->wiphy->bands[status->band];
+	if (!(status->flag & RX_FLAG_HT))
+		rate = &sband->bitrates[status->rate_idx];
+	tid_agg_rx->stored_mpdu_num--;
+	tid_agg_rx->reorder_buf[index] = NULL;
+	skb_queue_tail(frames, skb);
+
+no_frame:
+	tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
+}
+
+static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
+					     struct tid_ampdu_rx *tid_agg_rx,
+					     u16 head_seq_num,
+					     struct sk_buff_head *frames)
+{
+	int index;
+
+	while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
+		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
+							tid_agg_rx->buf_size;
+		ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
+	}
+}
+
+/*
+ * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
+ * the skb was added to the buffer longer than this time ago, the earlier
+ * frames that have not yet been received are assumed to be lost and the skb
+ * can be released for processing. This may also release other skb's from the
+ * reorder buffer if there are no additional gaps between the frames.
+ */
+#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
+
+/*
+ * As this function belongs to the RX path it must be under
+ * rcu_read_lock protection. It returns false if the frame
+ * can be processed immediately, true if it was consumed.
+ */
+static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
+					     struct tid_ampdu_rx *tid_agg_rx,
+					     struct sk_buff *skb,
+					     struct sk_buff_head *frames)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	u16 sc = le16_to_cpu(hdr->seq_ctrl);
+	u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
+	u16 head_seq_num, buf_size;
+	int index;
+
+	buf_size = tid_agg_rx->buf_size;
+	head_seq_num = tid_agg_rx->head_seq_num;
+
+	/* frame with out of date sequence number */
+	if (seq_less(mpdu_seq_num, head_seq_num)) {
+		dev_kfree_skb(skb);
+		return true;
+	}
+
+	/*
+	 * If frame the sequence number exceeds our buffering window
+	 * size release some previous frames to make room for this one.
+	 */
+	if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
+		head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
+		/* release stored frames up to new head to stack */
+		ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num,
+						 frames);
+	}
+
+	/* Now the new frame is always in the range of the reordering buffer */
+
+	index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;
+
+	/* check if we already stored this frame */
+	if (tid_agg_rx->reorder_buf[index]) {
+		dev_kfree_skb(skb);
+		return true;
+	}
+
+	/*
+	 * If the current MPDU is in the right order and nothing else
+	 * is stored we can process it directly, no need to buffer it.
+	 */
+	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
+	    tid_agg_rx->stored_mpdu_num == 0) {
+		tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
+		return false;
+	}
+
+	/* put the frame in the reordering buffer */
+	tid_agg_rx->reorder_buf[index] = skb;
+	tid_agg_rx->reorder_time[index] = jiffies;
+	tid_agg_rx->stored_mpdu_num++;
+	/* release the buffer until next missing frame */
+	index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
+						tid_agg_rx->buf_size;
+	if (!tid_agg_rx->reorder_buf[index] &&
+	    tid_agg_rx->stored_mpdu_num > 1) {
+		/*
+		 * No buffers ready to be released, but check whether any
+		 * frames in the reorder buffer have timed out.
+		 */
+		int j;
+		int skipped = 1;
+		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
+		     j = (j + 1) % tid_agg_rx->buf_size) {
+			if (!tid_agg_rx->reorder_buf[j]) {
+				skipped++;
+				continue;
+			}
+			if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
+					HT_RX_REORDER_BUF_TIMEOUT))
+				break;
+
+#ifdef CONFIG_MAC80211_HT_DEBUG
+			if (net_ratelimit())
+				printk(KERN_DEBUG "%s: release an RX reorder "
+					"frame due to timeout on earlier "
+					"frames\n",
+					wiphy_name(hw->wiphy));
+#endif
+			ieee80211_release_reorder_frame(hw, tid_agg_rx,
+							j, frames);
+
+			/*
+			 * Increment the head seq# also for the skipped slots.
+			 */
+			tid_agg_rx->head_seq_num =
+				(tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
+			skipped = 0;
+		}
+	} else while (tid_agg_rx->reorder_buf[index]) {
+		ieee80211_release_reorder_frame(hw, tid_agg_rx, index, frames);
+		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
+							tid_agg_rx->buf_size;
+	}
+
+	return true;
+}
+
+/*
+ * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns
+ * true if the MPDU was buffered, false if it should be processed.
+ */
+static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
+				       struct sk_buff_head *frames)
+{
+	struct sk_buff *skb = rx->skb;
+	struct ieee80211_local *local = rx->local;
+	struct ieee80211_hw *hw = &local->hw;
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+	struct sta_info *sta = rx->sta;
+	struct tid_ampdu_rx *tid_agg_rx;
+	u16 sc;
+	int tid;
+
+	if (!ieee80211_is_data_qos(hdr->frame_control))
+		goto dont_reorder;
+
+	/*
+	 * filter the QoS data rx stream according to
+	 * STA/TID and check if this STA/TID is on aggregation
+	 */
+
+	if (!sta)
+		goto dont_reorder;
+
+	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+
+	if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
+		goto dont_reorder;
+
+	tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
+
+	/* qos null data frames are excluded */
+	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
+		goto dont_reorder;
+
+	/* new, potentially un-ordered, ampdu frame - process it */
+
+	/* reset session timer */
+	if (tid_agg_rx->timeout)
+		mod_timer(&tid_agg_rx->session_timer,
+			  TU_TO_EXP_TIME(tid_agg_rx->timeout));
+
+	/* if this mpdu is fragmented - terminate rx aggregation session */
+	sc = le16_to_cpu(hdr->seq_ctrl);
+	if (sc & IEEE80211_SCTL_FRAG) {
+		ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
+			tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames))
+		return;
+
+ dont_reorder:
+	__skb_queue_tail(frames, skb);
+}
 
 static ieee80211_rx_result debug_noinline
 ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
@@ -637,6 +869,9 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
 	if (!(rx->flags & IEEE80211_RX_RA_MATCH))
 		return RX_CONTINUE;
 
+	/* start without a key */
+	rx->key = NULL;
+
 	if (rx->sta)
 		stakey = rcu_dereference(rx->sta->key);
 
@@ -1589,7 +1824,7 @@ ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
 }
 
 static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
+ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
 {
 	struct ieee80211_local *local = rx->local;
 	struct ieee80211_hw *hw = &local->hw;
@@ -1619,7 +1854,8 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
 				  TU_TO_EXP_TIME(tid_agg_rx->timeout));
 
 		/* release stored frames up to start of BAR */
-		ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num);
+		ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
+						 frames);
 		kfree_skb(skb);
 		return RX_QUEUED;
 	}
@@ -1868,7 +2104,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
 	struct net_device *prev_dev = NULL;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 
-	if (rx->flags & IEEE80211_RX_CMNTR_REPORTED)
+	if (status->flag & RX_FLAG_INTERNAL_CMTR)
 		goto out_free_skb;
 
 	if (skb_headroom(skb) < sizeof(*rthdr) &&
@@ -1929,7 +2165,7 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
 	} else
 		goto out_free_skb;
 
-	rx->flags |= IEEE80211_RX_CMNTR_REPORTED;
+	status->flag |= RX_FLAG_INTERNAL_CMTR;
 	return;
 
  out_free_skb:
@@ -1942,8 +2178,11 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
 					 struct sk_buff *skb,
 					 struct ieee80211_rate *rate)
 {
+	struct sk_buff_head reorder_release;
 	ieee80211_rx_result res = RX_DROP_MONITOR;
 
+	__skb_queue_head_init(&reorder_release);
+
 	rx->skb = skb;
 	rx->sdata = sdata;
 
@@ -1951,50 +2190,72 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_sub_if_data *sdata,
 	do {				\
 		res = rxh(rx);		\
 		if (res != RX_CONTINUE)	\
-			goto rxh_done;	\
+			goto rxh_next;	\
 	} while (0);
 
+	/*
+	 * NB: the rxh_next label works even if we jump
+	 * to it from here because then the list will
+	 * be empty, which is a trivial check
+	 */
 	CALL_RXH(ieee80211_rx_h_passive_scan)
 	CALL_RXH(ieee80211_rx_h_check)
-	CALL_RXH(ieee80211_rx_h_decrypt)
-	CALL_RXH(ieee80211_rx_h_check_more_data)
-	CALL_RXH(ieee80211_rx_h_sta_process)
-	CALL_RXH(ieee80211_rx_h_defragment)
-	CALL_RXH(ieee80211_rx_h_ps_poll)
-	CALL_RXH(ieee80211_rx_h_michael_mic_verify)
-	/* must be after MMIC verify so header is counted in MPDU mic */
-	CALL_RXH(ieee80211_rx_h_remove_qos_control)
-	CALL_RXH(ieee80211_rx_h_amsdu)
+
+	ieee80211_rx_reorder_ampdu(rx, &reorder_release);
+
+	while ((skb = __skb_dequeue(&reorder_release))) {
+		/*
+		 * all the other fields are valid across frames
+		 * that belong to an aMPDU since they are on the
+		 * same TID from the same station
+		 */
+		rx->skb = skb;
+
+		CALL_RXH(ieee80211_rx_h_decrypt)
+		CALL_RXH(ieee80211_rx_h_check_more_data)
+		CALL_RXH(ieee80211_rx_h_sta_process)
+		CALL_RXH(ieee80211_rx_h_defragment)
+		CALL_RXH(ieee80211_rx_h_ps_poll)
+		CALL_RXH(ieee80211_rx_h_michael_mic_verify)
+		/* must be after MMIC verify so header is counted in MPDU mic */
+		CALL_RXH(ieee80211_rx_h_remove_qos_control)
+		CALL_RXH(ieee80211_rx_h_amsdu)
 #ifdef CONFIG_MAC80211_MESH
 		if (ieee80211_vif_is_mesh(&sdata->vif))
 			CALL_RXH(ieee80211_rx_h_mesh_fwding);
 #endif
 		CALL_RXH(ieee80211_rx_h_data)
-	CALL_RXH(ieee80211_rx_h_ctrl)
-	CALL_RXH(ieee80211_rx_h_action)
-	CALL_RXH(ieee80211_rx_h_mgmt)
+
+		/* special treatment -- needs the queue */
+		res = ieee80211_rx_h_ctrl(rx, &reorder_release);
+		if (res != RX_CONTINUE)
+			goto rxh_next;
+
+		CALL_RXH(ieee80211_rx_h_action)
+		CALL_RXH(ieee80211_rx_h_mgmt)
 
 #undef CALL_RXH
 
- rxh_done:
+ rxh_next:
 		switch (res) {
 		case RX_DROP_MONITOR:
 			I802_DEBUG_INC(sdata->local->rx_handlers_drop);
 			if (rx->sta)
 				rx->sta->rx_dropped++;
 			/* fall through */
 		case RX_CONTINUE:
 			ieee80211_rx_cooked_monitor(rx, rate);
 			break;
 		case RX_DROP_UNUSABLE:
 			I802_DEBUG_INC(sdata->local->rx_handlers_drop);
 			if (rx->sta)
 				rx->sta->rx_dropped++;
 			dev_kfree_skb(rx->skb);
 			break;
 		case RX_QUEUED:
 			I802_DEBUG_INC(sdata->local->rx_handlers_queued);
 			break;
+		}
 	}
 }
 
@@ -2187,233 +2448,6 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
 	dev_kfree_skb(skb);
 }
 
-#define SEQ_MODULO 0x1000
-#define SEQ_MASK 0xfff
-
-static inline int seq_less(u16 sq1, u16 sq2)
-{
-	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
-}
-
-static inline u16 seq_inc(u16 sq)
-{
-	return (sq + 1) & SEQ_MASK;
-}
-
-static inline u16 seq_sub(u16 sq1, u16 sq2)
-{
-	return (sq1 - sq2) & SEQ_MASK;
-}
-
-
-static void ieee80211_release_reorder_frame(struct ieee80211_hw *hw,
-					    struct tid_ampdu_rx *tid_agg_rx,
-					    int index)
-{
-	struct ieee80211_supported_band *sband;
-	struct ieee80211_rate *rate = NULL;
-	struct sk_buff *skb = tid_agg_rx->reorder_buf[index];
-	struct ieee80211_rx_status *status;
-
-	if (!skb)
-		goto no_frame;
-
-	status = IEEE80211_SKB_RXCB(skb);
-
-	/* release the reordered frames to stack */
-	sband = hw->wiphy->bands[status->band];
-	if (!(status->flag & RX_FLAG_HT))
-		rate = &sband->bitrates[status->rate_idx];
-	__ieee80211_rx_handle_packet(hw, skb, rate);
-	tid_agg_rx->stored_mpdu_num--;
-	tid_agg_rx->reorder_buf[index] = NULL;
-
-no_frame:
-	tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
-}
-
-static void ieee80211_release_reorder_frames(struct ieee80211_hw *hw,
-					     struct tid_ampdu_rx *tid_agg_rx,
-					     u16 head_seq_num)
-{
-	int index;
-
-	while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
-		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
-							tid_agg_rx->buf_size;
-		ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
-	}
-}
-
-/*
- * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
- * the skb was added to the buffer longer than this time ago, the earlier
- * frames that have not yet been received are assumed to be lost and the skb
- * can be released for processing. This may also release other skb's from the
- * reorder buffer if there are no additional gaps between the frames.
- */
-#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
-
-/*
- * As this function belongs to the RX path it must be under
- * rcu_read_lock protection. It returns false if the frame
- * can be processed immediately, true if it was consumed.
- */
-static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
-					     struct tid_ampdu_rx *tid_agg_rx,
-					     struct sk_buff *skb)
-{
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-	u16 sc = le16_to_cpu(hdr->seq_ctrl);
-	u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
-	u16 head_seq_num, buf_size;
-	int index;
-
-	buf_size = tid_agg_rx->buf_size;
-	head_seq_num = tid_agg_rx->head_seq_num;
-
-	/* frame with out of date sequence number */
-	if (seq_less(mpdu_seq_num, head_seq_num)) {
-		dev_kfree_skb(skb);
-		return true;
-	}
-
-	/*
-	 * If frame the sequence number exceeds our buffering window
-	 * size release some previous frames to make room for this one.
-	 */
-	if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
-		head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
-		/* release stored frames up to new head to stack */
-		ieee80211_release_reorder_frames(hw, tid_agg_rx, head_seq_num);
-	}
-
-	/* Now the new frame is always in the range of the reordering buffer */
-
-	index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size;
-
-	/* check if we already stored this frame */
-	if (tid_agg_rx->reorder_buf[index]) {
-		dev_kfree_skb(skb);
-		return true;
-	}
-
-	/*
-	 * If the current MPDU is in the right order and nothing else
-	 * is stored we can process it directly, no need to buffer it.
-	 */
-	if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
-	    tid_agg_rx->stored_mpdu_num == 0) {
-		tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
-		return false;
-	}
-
-	/* put the frame in the reordering buffer */
-	tid_agg_rx->reorder_buf[index] = skb;
-	tid_agg_rx->reorder_time[index] = jiffies;
-	tid_agg_rx->stored_mpdu_num++;
-	/* release the buffer until next missing frame */
-	index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
-						tid_agg_rx->buf_size;
-	if (!tid_agg_rx->reorder_buf[index] &&
-	    tid_agg_rx->stored_mpdu_num > 1) {
-		/*
-		 * No buffers ready to be released, but check whether any
-		 * frames in the reorder buffer have timed out.
-		 */
-		int j;
-		int skipped = 1;
-		for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
-		     j = (j + 1) % tid_agg_rx->buf_size) {
-			if (!tid_agg_rx->reorder_buf[j]) {
-				skipped++;
-				continue;
-			}
-			if (!time_after(jiffies, tid_agg_rx->reorder_time[j] +
-					HT_RX_REORDER_BUF_TIMEOUT))
-				break;
-
-#ifdef CONFIG_MAC80211_HT_DEBUG
-			if (net_ratelimit())
-				printk(KERN_DEBUG "%s: release an RX reorder "
-					"frame due to timeout on earlier "
-					"frames\n",
-					wiphy_name(hw->wiphy));
-#endif
-			ieee80211_release_reorder_frame(hw, tid_agg_rx, j);
-
-			/*
-			 * Increment the head seq# also for the skipped slots.
-			 */
-			tid_agg_rx->head_seq_num =
-				(tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
-			skipped = 0;
-		}
-	} else while (tid_agg_rx->reorder_buf[index]) {
-		ieee80211_release_reorder_frame(hw, tid_agg_rx, index);
-		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
-							tid_agg_rx->buf_size;
-	}
-
-	return true;
-}
-
-/*
- * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns
- * true if the MPDU was buffered, false if it should be processed.
- */
-static bool ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
-				       struct sk_buff *skb)
-{
-	struct ieee80211_hw *hw = &local->hw;
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-	struct sta_info *sta;
-	struct tid_ampdu_rx *tid_agg_rx;
-	u16 sc;
-	int tid;
-
-	if (!ieee80211_is_data_qos(hdr->frame_control))
-		return false;
-
-	/*
-	 * filter the QoS data rx stream according to
-	 * STA/TID and check if this STA/TID is on aggregation
-	 */
-
-	sta = sta_info_get(local, hdr->addr2);
-	if (!sta)
-		return false;
-
-	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
-
-	if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
-		return false;
-
-	tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
-
-	/* qos null data frames are excluded */
-	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
-		return false;
-
-	/* new, potentially un-ordered, ampdu frame - process it */
-
-	/* reset session timer */
-	if (tid_agg_rx->timeout)
-		mod_timer(&tid_agg_rx->session_timer,
-			  TU_TO_EXP_TIME(tid_agg_rx->timeout));
-
-	/* if this mpdu is fragmented - terminate rx aggregation session */
-	sc = le16_to_cpu(hdr->seq_ctrl);
-	if (sc & IEEE80211_SCTL_FRAG) {
-		ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
-			tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
-		dev_kfree_skb(skb);
-		return true;
-	}
-
-	return ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb);
-}
-
 /*
  * This is the receive path handler. It is called by a low level driver when an
  * 802.11 MPDU is received from the hardware.
@@ -2495,20 +2529,7 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
 		return;
 	}
 
-	/*
-	 * In theory, the block ack reordering should happen after duplicate
-	 * removal (ieee80211_rx_h_check(), which is an RX handler). As such,
-	 * the call to ieee80211_rx_reorder_ampdu() should really be moved to
-	 * happen as a new RX handler between ieee80211_rx_h_check and
-	 * ieee80211_rx_h_decrypt. This cleanup may eventually happen, but for
-	 * the time being, the call can be here since RX reorder buf processing
-	 * will implicitly skip duplicates. We could, in theory at least,
-	 * process frames that ieee80211_rx_h_passive_scan would drop (e.g.,
-	 * frames from other than operational channel), but that should not
-	 * happen in normal networks.
-	 */
-	if (!ieee80211_rx_reorder_ampdu(local, skb))
-		__ieee80211_rx_handle_packet(hw, skb, rate);
+	__ieee80211_rx_handle_packet(hw, skb, rate);
 
 	rcu_read_unlock();
 
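The reorder code added above leans on 12-bit sequence-number arithmetic (seq_less/seq_inc/seq_sub, modulo SEQ_MODULO) and on indexing reorder_buf by the MPDU's offset from the session start sequence number (ssn). A minimal standalone sketch of just that arithmetic follows -- plain userspace C, with hypothetical ssn/buf_size values, not part of the patch itself:

/* Userspace illustration of the 12-bit sequence arithmetic used by the
 * reorder buffer above; the ssn/mpdu_seq_num/buf_size values are made up. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SEQ_MODULO 0x1000
#define SEQ_MASK   0xfff

static int seq_less(uint16_t sq1, uint16_t sq2)
{
	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}

static uint16_t seq_inc(uint16_t sq)
{
	return (sq + 1) & SEQ_MASK;
}

static uint16_t seq_sub(uint16_t sq1, uint16_t sq2)
{
	return (sq1 - sq2) & SEQ_MASK;
}

int main(void)
{
	/* comparisons are modulo 4096, so ordering survives wrap-around */
	assert(seq_less(0xffe, 0x002));   /* 0xffe is "before" 0x002 */
	assert(!seq_less(0x002, 0xffe));
	assert(seq_inc(0xfff) == 0x000);

	/* slot index, computed as in ieee80211_sta_manage_reorder_buf():
	 * offset of the MPDU from the session's ssn, wrapped into the buffer */
	uint16_t ssn = 0xff0, mpdu_seq_num = 0x005, buf_size = 64;
	int index = seq_sub(mpdu_seq_num, ssn) % buf_size;
	printf("index = %d\n", index);    /* seq_sub(0x005, 0xff0) = 0x15 -> 21 */
	return 0;
}

The half-range test in seq_less is what lets a frame that arrived just after a wrap (e.g. 0x002) still count as newer than the current head (0xffe), which is why the reorder window keeps working across the 4095-to-0 rollover.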
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 9f91fd8e6efb..d78f36c64c7b 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -148,6 +148,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
 	struct net_device *prev_dev = NULL;
 	struct sta_info *sta;
 	int retry_count = -1, i;
+	bool injected;
 
 	for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
 		/* the HW cannot have attempted that rate */
@@ -297,6 +298,9 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
 	/* for now report the total retry_count */
 	rthdr->data_retries = retry_count;
 
+	/* Need to make a copy before skb->cb gets cleared */
+	injected = !!(info->flags & IEEE80211_TX_CTL_INJECTED);
+
 	/* XXX: is this sufficient for BPF? */
 	skb_set_mac_header(skb, 0);
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -311,7 +315,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
 			continue;
 
 		if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
-		    !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
+		    !injected &&
 		    (type == IEEE80211_FTYPE_DATA))
 			continue;
 
diff --git a/net/wireless/core.c b/net/wireless/core.c
index fe6f402a22af..c2a2c563d21a 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -45,6 +45,9 @@ DEFINE_MUTEX(cfg80211_mutex);
 /* for debugfs */
 static struct dentry *ieee80211_debugfs_dir;
 
+/* for the cleanup, scan and event works */
+struct workqueue_struct *cfg80211_wq;
+
 /* requires cfg80211_mutex to be held! */
 struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx)
 {
@@ -727,7 +730,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
 		break;
 	case NETDEV_DOWN:
 		dev_hold(dev);
-		schedule_work(&wdev->cleanup_work);
+		queue_work(cfg80211_wq, &wdev->cleanup_work);
 		break;
 	case NETDEV_UP:
 		/*
@@ -845,8 +848,14 @@ static int __init cfg80211_init(void)
 	if (err)
 		goto out_fail_reg;
 
+	cfg80211_wq = create_singlethread_workqueue("cfg80211");
+	if (!cfg80211_wq)
+		goto out_fail_wq;
+
 	return 0;
 
+out_fail_wq:
+	regulatory_exit();
 out_fail_reg:
 	debugfs_remove(ieee80211_debugfs_dir);
 out_fail_nl80211:
@@ -868,5 +877,6 @@ static void cfg80211_exit(void)
 	wiphy_sysfs_exit();
 	regulatory_exit();
 	unregister_pernet_device(&cfg80211_pernet_ops);
+	destroy_workqueue(cfg80211_wq);
 }
 module_exit(cfg80211_exit);
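With this change cfg80211 funnels its cleanup, scan and event works through a private single-threaded workqueue (cfg80211_wq) instead of schedule_work()'s shared queue, so the subsystem can flush and destroy its own queue at module exit. A minimal module-style sketch of the same create/queue/destroy pattern, with made-up names (demo_wq, demo_work) purely to illustrate the APIs involved:

/* Sketch of the private-workqueue pattern adopted by cfg80211 here;
 * "demo_*" names are illustrative and not taken from the patch. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo: running on the private workqueue\n");
}

static DECLARE_WORK(demo_work, demo_work_fn);

static int __init demo_init(void)
{
	demo_wq = create_singlethread_workqueue("demo");
	if (!demo_wq)
		return -ENOMEM;

	/* the equivalent of the schedule_work() -> queue_work(cfg80211_wq, ...) swap */
	queue_work(demo_wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	/* flushes any pending work before tearing the queue down */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");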
diff --git a/net/wireless/core.h b/net/wireless/core.h
index a9db9e6255bb..4ef3efc94106 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -91,6 +91,8 @@ bool wiphy_idx_valid(int wiphy_idx)
 	return (wiphy_idx >= 0);
 }
 
+
+extern struct workqueue_struct *cfg80211_wq;
 extern struct mutex cfg80211_mutex;
 extern struct list_head cfg80211_rdev_list;
 extern int cfg80211_rdev_list_generation;
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 34dfc93fa713..6ef5a491fb4b 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -70,7 +70,7 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp)
 	spin_lock_irqsave(&wdev->event_lock, flags);
 	list_add_tail(&ev->list, &wdev->event_list);
 	spin_unlock_irqrestore(&wdev->event_lock, flags);
-	schedule_work(&rdev->event_work);
+	queue_work(cfg80211_wq, &rdev->event_work);
 }
 EXPORT_SYMBOL(cfg80211_ibss_joined);
 
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 149539ade15e..a6028433e3a0 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -139,6 +139,8 @@ static struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] __read_mostly = {
 	[NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
 	[NL80211_ATTR_PID] = { .type = NLA_U32 },
 	[NL80211_ATTR_4ADDR] = { .type = NLA_U8 },
+	[NL80211_ATTR_PMKID] = { .type = NLA_BINARY,
+				 .len = WLAN_PMKID_LEN },
 };
 
 /* policy for the attributes */
@@ -450,6 +452,9 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 			    sizeof(u32) * dev->wiphy.n_cipher_suites,
 			    dev->wiphy.cipher_suites);
 
+	NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_PMKIDS,
+		   dev->wiphy.max_num_pmkids);
+
 	nl_modes = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_IFTYPES);
 	if (!nl_modes)
 		goto nla_put_failure;
@@ -561,6 +566,9 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
 	CMD(deauth, DEAUTHENTICATE);
 	CMD(disassoc, DISASSOCIATE);
 	CMD(join_ibss, JOIN_IBSS);
+	CMD(set_pmksa, SET_PMKSA);
+	CMD(del_pmksa, DEL_PMKSA);
+	CMD(flush_pmksa, FLUSH_PMKSA);
 	if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
 		i++;
 		NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS);
@@ -4221,6 +4229,99 @@ static int nl80211_wiphy_netns(struct sk_buff *skb, struct genl_info *info)
 	return err;
 }
 
+static int nl80211_setdel_pmksa(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev;
+	int (*rdev_ops)(struct wiphy *wiphy, struct net_device *dev,
+			struct cfg80211_pmksa *pmksa) = NULL;
+	int err;
+	struct net_device *dev;
+	struct cfg80211_pmksa pmksa;
+
+	memset(&pmksa, 0, sizeof(struct cfg80211_pmksa));
+
+	if (!info->attrs[NL80211_ATTR_MAC])
+		return -EINVAL;
+
+	if (!info->attrs[NL80211_ATTR_PMKID])
+		return -EINVAL;
+
+	rtnl_lock();
+
+	err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+	if (err)
+		goto out_rtnl;
+
+	pmksa.pmkid = nla_data(info->attrs[NL80211_ATTR_PMKID]);
+	pmksa.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
+
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	switch (info->genlhdr->cmd) {
+	case NL80211_CMD_SET_PMKSA:
+		rdev_ops = rdev->ops->set_pmksa;
+		break;
+	case NL80211_CMD_DEL_PMKSA:
+		rdev_ops = rdev->ops->del_pmksa;
+		break;
+	default:
+		WARN_ON(1);
+		break;
+	}
+
+	if (!rdev_ops) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	err = rdev_ops(&rdev->wiphy, dev, &pmksa);
+
+ out:
+	cfg80211_unlock_rdev(rdev);
+	dev_put(dev);
+ out_rtnl:
+	rtnl_unlock();
+
+	return err;
+}
+
+static int nl80211_flush_pmksa(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev;
+	int err;
+	struct net_device *dev;
+
+	rtnl_lock();
+
+	err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+	if (err)
+		goto out_rtnl;
+
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	if (!rdev->ops->flush_pmksa) {
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	err = rdev->ops->flush_pmksa(&rdev->wiphy, dev);
+
+ out:
+	cfg80211_unlock_rdev(rdev);
+	dev_put(dev);
+ out_rtnl:
+	rtnl_unlock();
+
+	return err;
+
+}
+
 static struct genl_ops nl80211_ops[] = {
 	{
 		.cmd = NL80211_CMD_GET_WIPHY,
@@ -4465,6 +4566,25 @@ static struct genl_ops nl80211_ops[] = {
 		.policy = nl80211_policy,
 		.dumpit = nl80211_dump_survey,
 	},
+	{
+		.cmd = NL80211_CMD_SET_PMKSA,
+		.doit = nl80211_setdel_pmksa,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+	},
+	{
+		.cmd = NL80211_CMD_DEL_PMKSA,
+		.doit = nl80211_setdel_pmksa,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+	},
+	{
+		.cmd = NL80211_CMD_FLUSH_PMKSA,
+		.doit = nl80211_flush_pmksa,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+	},
+
 };
 static struct genl_multicast_group nl80211_mlme_mcgrp = {
 	.name = "mlme",
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 1f33017737fd..c01470e7de15 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1931,7 +1931,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
 	const struct ieee80211_freq_range *freq_range = NULL;
 	const struct ieee80211_power_rule *power_rule = NULL;
 
-	printk(KERN_INFO "\t(start_freq - end_freq @ bandwidth), "
+	printk(KERN_INFO " (start_freq - end_freq @ bandwidth), "
 		"(max_antenna_gain, max_eirp)\n");
 
 	for (i = 0; i < rd->n_reg_rules; i++) {
@@ -1944,7 +1944,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
 		 * in certain regions
 		 */
 		if (power_rule->max_antenna_gain)
-			printk(KERN_INFO "\t(%d KHz - %d KHz @ %d KHz), "
+			printk(KERN_INFO " (%d KHz - %d KHz @ %d KHz), "
 				"(%d mBi, %d mBm)\n",
 				freq_range->start_freq_khz,
 				freq_range->end_freq_khz,
@@ -1952,7 +1952,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
 				power_rule->max_antenna_gain,
 				power_rule->max_eirp);
 		else
-			printk(KERN_INFO "\t(%d KHz - %d KHz @ %d KHz), "
+			printk(KERN_INFO " (%d KHz - %d KHz @ %d KHz), "
 				"(N/A, %d mBm)\n",
 				freq_range->start_freq_khz,
 				freq_range->end_freq_khz,
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 96df34c3c6ee..12dfa62aad18 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -88,7 +88,7 @@ void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
 	WARN_ON(request != wiphy_to_dev(request->wiphy)->scan_req);
 
 	request->aborted = aborted;
-	schedule_work(&wiphy_to_dev(request->wiphy)->scan_done_wk);
+	queue_work(cfg80211_wq, &wiphy_to_dev(request->wiphy)->scan_done_wk);
 }
 EXPORT_SYMBOL(cfg80211_scan_done);
 
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 0115d07d2c1a..2333d78187e4 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -488,7 +488,7 @@ void cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
 	spin_lock_irqsave(&wdev->event_lock, flags);
 	list_add_tail(&ev->list, &wdev->event_list);
 	spin_unlock_irqrestore(&wdev->event_lock, flags);
-	schedule_work(&rdev->event_work);
+	queue_work(cfg80211_wq, &rdev->event_work);
 }
 EXPORT_SYMBOL(cfg80211_connect_result);
 
@@ -583,7 +583,7 @@ void cfg80211_roamed(struct net_device *dev, const u8 *bssid,
 	spin_lock_irqsave(&wdev->event_lock, flags);
 	list_add_tail(&ev->list, &wdev->event_list);
 	spin_unlock_irqrestore(&wdev->event_lock, flags);
-	schedule_work(&rdev->event_work);
+	queue_work(cfg80211_wq, &rdev->event_work);
 }
 EXPORT_SYMBOL(cfg80211_roamed);
 
@@ -681,7 +681,7 @@ void cfg80211_disconnected(struct net_device *dev, u16 reason,
 	spin_lock_irqsave(&wdev->event_lock, flags);
 	list_add_tail(&ev->list, &wdev->event_list);
 	spin_unlock_irqrestore(&wdev->event_lock, flags);
-	schedule_work(&rdev->event_work);
+	queue_work(cfg80211_wq, &rdev->event_work);
 }
 EXPORT_SYMBOL(cfg80211_disconnected);
 
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 29091ac9f989..584eb4826e02 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -1401,6 +1401,47 @@ int cfg80211_wext_giwessid(struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(cfg80211_wext_giwessid);
 
+int cfg80211_wext_siwpmksa(struct net_device *dev,
+			   struct iw_request_info *info,
+			   struct iw_point *data, char *extra)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+	struct cfg80211_pmksa cfg_pmksa;
+	struct iw_pmksa *pmksa = (struct iw_pmksa *)extra;
+
+	memset(&cfg_pmksa, 0, sizeof(struct cfg80211_pmksa));
+
+	if (wdev->iftype != NL80211_IFTYPE_STATION)
+		return -EINVAL;
+
+	cfg_pmksa.bssid = pmksa->bssid.sa_data;
+	cfg_pmksa.pmkid = pmksa->pmkid;
+
+	switch (pmksa->cmd) {
+	case IW_PMKSA_ADD:
+		if (!rdev->ops->set_pmksa)
+			return -EOPNOTSUPP;
+
+		return rdev->ops->set_pmksa(&rdev->wiphy, dev, &cfg_pmksa);
+
+	case IW_PMKSA_REMOVE:
+		if (!rdev->ops->del_pmksa)
+			return -EOPNOTSUPP;
+
+		return rdev->ops->del_pmksa(&rdev->wiphy, dev, &cfg_pmksa);
+
+	case IW_PMKSA_FLUSH:
+		if (!rdev->ops->flush_pmksa)
+			return -EOPNOTSUPP;
+
+		return rdev->ops->flush_pmksa(&rdev->wiphy, dev);
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
 static const iw_handler cfg80211_handlers[] = {
 	[IW_IOCTL_IDX(SIOCGIWNAME)]	= (iw_handler) cfg80211_wext_giwname,
 	[IW_IOCTL_IDX(SIOCSIWFREQ)]	= (iw_handler) cfg80211_wext_siwfreq,
@@ -1433,6 +1474,7 @@ static const iw_handler cfg80211_handlers[] = {
 	[IW_IOCTL_IDX(SIOCSIWAUTH)]	= (iw_handler) cfg80211_wext_siwauth,
 	[IW_IOCTL_IDX(SIOCGIWAUTH)]	= (iw_handler) cfg80211_wext_giwauth,
 	[IW_IOCTL_IDX(SIOCSIWENCODEEXT)]= (iw_handler) cfg80211_wext_siwencodeext,
+	[IW_IOCTL_IDX(SIOCSIWPMKSA)]	= (iw_handler) cfg80211_wext_siwpmksa,
 };
 
 const struct iw_handler_def cfg80211_wext_handler = {
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 58dfb954974a..5e1656bdf23b 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -802,7 +802,8 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
 	}
 
 	/* Generate an event to notify listeners of the change */
-	if ((descr->flags & IW_DESCR_FLAG_EVENT) && err == -EIWCOMMIT) {
+	if ((descr->flags & IW_DESCR_FLAG_EVENT) &&
+	    ((err == 0) || (err == -EIWCOMMIT))) {
 		union iwreq_data *data = (union iwreq_data *) iwp;
 
 		if (descr->flags & IW_DESCR_FLAG_RESTRICT)