Diffstat (limited to 'net')
39 files changed, 2620 insertions, 1389 deletions
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 8a91f6c0bb18..83eec7a8bd1f 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -33,6 +33,13 @@ config MAC80211_RC_MINSTREL | |||
33 | ---help--- | 33 | ---help--- |
34 | This option enables the 'minstrel' TX rate control algorithm | 34 | This option enables the 'minstrel' TX rate control algorithm |
35 | 35 | ||
36 | config MAC80211_RC_MINSTREL_HT | ||
37 | bool "Minstrel 802.11n support" if EMBEDDED | ||
38 | depends on MAC80211_RC_MINSTREL | ||
39 | default y | ||
40 | ---help--- | ||
41 | This option enables the 'minstrel_ht' TX rate control algorithm | ||
42 | |||
36 | choice | 43 | choice |
37 | prompt "Default rate control algorithm" | 44 | prompt "Default rate control algorithm" |
38 | depends on MAC80211_HAS_RC | 45 | depends on MAC80211_HAS_RC |
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 84b48ba8a77e..fdb54e61d637 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -51,7 +51,11 @@ rc80211_pid-$(CONFIG_MAC80211_DEBUGFS) += rc80211_pid_debugfs.o | |||
51 | rc80211_minstrel-y := rc80211_minstrel.o | 51 | rc80211_minstrel-y := rc80211_minstrel.o |
52 | rc80211_minstrel-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_debugfs.o | 52 | rc80211_minstrel-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_debugfs.o |
53 | 53 | ||
54 | rc80211_minstrel_ht-y := rc80211_minstrel_ht.o | ||
55 | rc80211_minstrel_ht-$(CONFIG_MAC80211_DEBUGFS) += rc80211_minstrel_ht_debugfs.o | ||
56 | |||
54 | mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc80211_pid-y) | 57 | mac80211-$(CONFIG_MAC80211_RC_PID) += $(rc80211_pid-y) |
55 | mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y) | 58 | mac80211-$(CONFIG_MAC80211_RC_MINSTREL) += $(rc80211_minstrel-y) |
59 | mac80211-$(CONFIG_MAC80211_RC_MINSTREL_HT) += $(rc80211_minstrel_ht-y) | ||
56 | 60 | ||
57 | ccflags-y += -D__CHECK_ENDIAN__ | 61 | ccflags-y += -D__CHECK_ENDIAN__ |
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 6bb9a9a94960..965b272499fd 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -6,39 +6,70 @@ | |||
6 | * Copyright 2005-2006, Devicescape Software, Inc. | 6 | * Copyright 2005-2006, Devicescape Software, Inc. |
7 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> | 7 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> |
8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> | 8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> |
9 | * Copyright 2007-2008, Intel Corporation | 9 | * Copyright 2007-2010, Intel Corporation |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
12 | * it under the terms of the GNU General Public License version 2 as | 12 | * it under the terms of the GNU General Public License version 2 as |
13 | * published by the Free Software Foundation. | 13 | * published by the Free Software Foundation. |
14 | */ | 14 | */ |
15 | 15 | ||
16 | /** | ||
17 | * DOC: RX A-MPDU aggregation | ||
18 | * | ||
19 | * Aggregation on the RX side requires only implementing the | ||
20 | * @ampdu_action callback that is invoked to start/stop any | ||
21 | * block-ack sessions for RX aggregation. | ||
22 | * | ||
23 | * When RX aggregation is started by the peer, the driver is | ||
24 | * notified via @ampdu_action function, with the | ||
25 | * %IEEE80211_AMPDU_RX_START action, and may reject the request | ||
26 | * in which case a negative response is sent to the peer, if it | ||
27 | * accepts it a positive response is sent. | ||
28 | * | ||
29 | * While the session is active, the device/driver are required | ||
30 | * to de-aggregate frames and pass them up one by one to mac80211, | ||
31 | * which will handle the reorder buffer. | ||
32 | * | ||
33 | * When the aggregation session is stopped again by the peer or | ||
34 | * ourselves, the driver's @ampdu_action function will be called | ||
35 | * with the action %IEEE80211_AMPDU_RX_STOP. In this case, the | ||
36 | * call must not fail. | ||
37 | */ | ||
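An illustrative aside, not part of this patch: the DOC comment above is the whole RX-side contract, so a driver only has to service the two RX actions of its @ampdu_action callback. The sketch below assumes a hypothetical driver; my_hw, my_hw_fw_addba_rx() and my_hw_fw_delba_rx() are invented names. The TX-side arms of the same switch are sketched under agg-tx.c further down.

static int my_ampdu_action(struct ieee80211_hw *hw,
			   struct ieee80211_vif *vif,
			   enum ieee80211_ampdu_mlme_action action,
			   struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct my_hw *priv = hw->priv;	/* hypothetical driver state */

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		/* A non-zero return here makes mac80211 send a negative
		 * ADDBA response to the peer. */
		return my_hw_fw_addba_rx(priv, sta, tid, *ssn);
	case IEEE80211_AMPDU_RX_STOP:
		/* Must not fail; the device keeps passing frames up
		 * de-aggregated and mac80211 handles the reorder buffer. */
		my_hw_fw_delba_rx(priv, sta, tid);
		return 0;
	default:
		/* TX actions are sketched in the agg-tx.c section below. */
		return -EOPNOTSUPP;
	}
}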
38 | |||
16 | #include <linux/ieee80211.h> | 39 | #include <linux/ieee80211.h> |
17 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
18 | #include <net/mac80211.h> | 41 | #include <net/mac80211.h> |
19 | #include "ieee80211_i.h" | 42 | #include "ieee80211_i.h" |
20 | #include "driver-ops.h" | 43 | #include "driver-ops.h" |
21 | 44 | ||
22 | static void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, | 45 | static void ieee80211_free_tid_rx(struct rcu_head *h) |
23 | u16 initiator, u16 reason, | ||
24 | bool from_timer) | ||
25 | { | 46 | { |
26 | struct ieee80211_local *local = sta->local; | 47 | struct tid_ampdu_rx *tid_rx = |
27 | struct tid_ampdu_rx *tid_rx; | 48 | container_of(h, struct tid_ampdu_rx, rcu_head); |
28 | int i; | 49 | int i; |
29 | 50 | ||
30 | spin_lock_bh(&sta->lock); | 51 | for (i = 0; i < tid_rx->buf_size; i++) |
52 | dev_kfree_skb(tid_rx->reorder_buf[i]); | ||
53 | kfree(tid_rx->reorder_buf); | ||
54 | kfree(tid_rx->reorder_time); | ||
55 | kfree(tid_rx); | ||
56 | } | ||
31 | 57 | ||
32 | /* check if TID is in operational state */ | 58 | void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, |
33 | if (!sta->ampdu_mlme.tid_active_rx[tid]) { | 59 | u16 initiator, u16 reason) |
34 | spin_unlock_bh(&sta->lock); | 60 | { |
35 | return; | 61 | struct ieee80211_local *local = sta->local; |
36 | } | 62 | struct tid_ampdu_rx *tid_rx; |
37 | 63 | ||
38 | sta->ampdu_mlme.tid_active_rx[tid] = false; | 64 | lockdep_assert_held(&sta->ampdu_mlme.mtx); |
39 | 65 | ||
40 | tid_rx = sta->ampdu_mlme.tid_rx[tid]; | 66 | tid_rx = sta->ampdu_mlme.tid_rx[tid]; |
41 | 67 | ||
68 | if (!tid_rx) | ||
69 | return; | ||
70 | |||
71 | rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], NULL); | ||
72 | |||
42 | #ifdef CONFIG_MAC80211_HT_DEBUG | 73 | #ifdef CONFIG_MAC80211_HT_DEBUG |
43 | printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n", | 74 | printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n", |
44 | sta->sta.addr, tid); | 75 | sta->sta.addr, tid); |
@@ -54,32 +85,17 @@ static void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, | |||
54 | ieee80211_send_delba(sta->sdata, sta->sta.addr, | 85 | ieee80211_send_delba(sta->sdata, sta->sta.addr, |
55 | tid, 0, reason); | 86 | tid, 0, reason); |
56 | 87 | ||
57 | /* free the reordering buffer */ | 88 | del_timer_sync(&tid_rx->session_timer); |
58 | for (i = 0; i < tid_rx->buf_size; i++) { | ||
59 | if (tid_rx->reorder_buf[i]) { | ||
60 | /* release the reordered frames */ | ||
61 | dev_kfree_skb(tid_rx->reorder_buf[i]); | ||
62 | tid_rx->stored_mpdu_num--; | ||
63 | tid_rx->reorder_buf[i] = NULL; | ||
64 | } | ||
65 | } | ||
66 | |||
67 | /* free resources */ | ||
68 | kfree(tid_rx->reorder_buf); | ||
69 | kfree(tid_rx->reorder_time); | ||
70 | sta->ampdu_mlme.tid_rx[tid] = NULL; | ||
71 | |||
72 | spin_unlock_bh(&sta->lock); | ||
73 | 89 | ||
74 | if (!from_timer) | 90 | call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx); |
75 | del_timer_sync(&tid_rx->session_timer); | ||
76 | kfree(tid_rx); | ||
77 | } | 91 | } |
78 | 92 | ||
79 | void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, | 93 | void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, |
80 | u16 initiator, u16 reason) | 94 | u16 initiator, u16 reason) |
81 | { | 95 | { |
82 | ___ieee80211_stop_rx_ba_session(sta, tid, initiator, reason, false); | 96 | mutex_lock(&sta->ampdu_mlme.mtx); |
97 | ___ieee80211_stop_rx_ba_session(sta, tid, initiator, reason); | ||
98 | mutex_unlock(&sta->ampdu_mlme.mtx); | ||
83 | } | 99 | } |
84 | 100 | ||
85 | /* | 101 | /* |
@@ -100,8 +116,8 @@ static void sta_rx_agg_session_timer_expired(unsigned long data) | |||
100 | #ifdef CONFIG_MAC80211_HT_DEBUG | 116 | #ifdef CONFIG_MAC80211_HT_DEBUG |
101 | printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); | 117 | printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); |
102 | #endif | 118 | #endif |
103 | ___ieee80211_stop_rx_ba_session(sta, *ptid, WLAN_BACK_RECIPIENT, | 119 | set_bit(*ptid, sta->ampdu_mlme.tid_rx_timer_expired); |
104 | WLAN_REASON_QSTA_TIMEOUT, true); | 120 | ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work); |
105 | } | 121 | } |
106 | 122 | ||
107 | static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid, | 123 | static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid, |
@@ -212,9 +228,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, | |||
212 | 228 | ||
213 | 229 | ||
214 | /* examine state machine */ | 230 | /* examine state machine */ |
215 | spin_lock_bh(&sta->lock); | 231 | mutex_lock(&sta->ampdu_mlme.mtx); |
216 | 232 | ||
217 | if (sta->ampdu_mlme.tid_active_rx[tid]) { | 233 | if (sta->ampdu_mlme.tid_rx[tid]) { |
218 | #ifdef CONFIG_MAC80211_HT_DEBUG | 234 | #ifdef CONFIG_MAC80211_HT_DEBUG |
219 | if (net_ratelimit()) | 235 | if (net_ratelimit()) |
220 | printk(KERN_DEBUG "unexpected AddBA Req from " | 236 | printk(KERN_DEBUG "unexpected AddBA Req from " |
@@ -225,9 +241,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, | |||
225 | } | 241 | } |
226 | 242 | ||
227 | /* prepare A-MPDU MLME for Rx aggregation */ | 243 | /* prepare A-MPDU MLME for Rx aggregation */ |
228 | sta->ampdu_mlme.tid_rx[tid] = | 244 | tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC); |
229 | kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC); | 245 | if (!tid_agg_rx) { |
230 | if (!sta->ampdu_mlme.tid_rx[tid]) { | ||
231 | #ifdef CONFIG_MAC80211_HT_DEBUG | 246 | #ifdef CONFIG_MAC80211_HT_DEBUG |
232 | if (net_ratelimit()) | 247 | if (net_ratelimit()) |
233 | printk(KERN_ERR "allocate rx mlme to tid %d failed\n", | 248 | printk(KERN_ERR "allocate rx mlme to tid %d failed\n", |
@@ -235,14 +250,11 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, | |||
235 | #endif | 250 | #endif |
236 | goto end; | 251 | goto end; |
237 | } | 252 | } |
238 | /* rx timer */ | ||
239 | sta->ampdu_mlme.tid_rx[tid]->session_timer.function = | ||
240 | sta_rx_agg_session_timer_expired; | ||
241 | sta->ampdu_mlme.tid_rx[tid]->session_timer.data = | ||
242 | (unsigned long)&sta->timer_to_tid[tid]; | ||
243 | init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer); | ||
244 | 253 | ||
245 | tid_agg_rx = sta->ampdu_mlme.tid_rx[tid]; | 254 | /* rx timer */ |
255 | tid_agg_rx->session_timer.function = sta_rx_agg_session_timer_expired; | ||
256 | tid_agg_rx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid]; | ||
257 | init_timer(&tid_agg_rx->session_timer); | ||
246 | 258 | ||
247 | /* prepare reordering buffer */ | 259 | /* prepare reordering buffer */ |
248 | tid_agg_rx->reorder_buf = | 260 | tid_agg_rx->reorder_buf = |
@@ -257,8 +269,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, | |||
257 | #endif | 269 | #endif |
258 | kfree(tid_agg_rx->reorder_buf); | 270 | kfree(tid_agg_rx->reorder_buf); |
259 | kfree(tid_agg_rx->reorder_time); | 271 | kfree(tid_agg_rx->reorder_time); |
260 | kfree(sta->ampdu_mlme.tid_rx[tid]); | 272 | kfree(tid_agg_rx); |
261 | sta->ampdu_mlme.tid_rx[tid] = NULL; | ||
262 | goto end; | 273 | goto end; |
263 | } | 274 | } |
264 | 275 | ||
@@ -270,13 +281,12 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, | |||
270 | 281 | ||
271 | if (ret) { | 282 | if (ret) { |
272 | kfree(tid_agg_rx->reorder_buf); | 283 | kfree(tid_agg_rx->reorder_buf); |
284 | kfree(tid_agg_rx->reorder_time); | ||
273 | kfree(tid_agg_rx); | 285 | kfree(tid_agg_rx); |
274 | sta->ampdu_mlme.tid_rx[tid] = NULL; | ||
275 | goto end; | 286 | goto end; |
276 | } | 287 | } |
277 | 288 | ||
278 | /* change state and send addba resp */ | 289 | /* update data */ |
279 | sta->ampdu_mlme.tid_active_rx[tid] = true; | ||
280 | tid_agg_rx->dialog_token = dialog_token; | 290 | tid_agg_rx->dialog_token = dialog_token; |
281 | tid_agg_rx->ssn = start_seq_num; | 291 | tid_agg_rx->ssn = start_seq_num; |
282 | tid_agg_rx->head_seq_num = start_seq_num; | 292 | tid_agg_rx->head_seq_num = start_seq_num; |
@@ -284,8 +294,15 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, | |||
284 | tid_agg_rx->timeout = timeout; | 294 | tid_agg_rx->timeout = timeout; |
285 | tid_agg_rx->stored_mpdu_num = 0; | 295 | tid_agg_rx->stored_mpdu_num = 0; |
286 | status = WLAN_STATUS_SUCCESS; | 296 | status = WLAN_STATUS_SUCCESS; |
297 | |||
298 | /* activate it for RX */ | ||
299 | rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx); | ||
300 | |||
301 | if (timeout) | ||
302 | mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout)); | ||
303 | |||
287 | end: | 304 | end: |
288 | spin_unlock_bh(&sta->lock); | 305 | mutex_unlock(&sta->ampdu_mlme.mtx); |
289 | 306 | ||
290 | end_no_lock: | 307 | end_no_lock: |
291 | ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid, | 308 | ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid, |
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 98258b7341e3..c893f236acea 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -6,7 +6,7 @@ | |||
6 | * Copyright 2005-2006, Devicescape Software, Inc. | 6 | * Copyright 2005-2006, Devicescape Software, Inc. |
7 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> | 7 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> |
8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> | 8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> |
9 | * Copyright 2007-2009, Intel Corporation | 9 | * Copyright 2007-2010, Intel Corporation |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
12 | * it under the terms of the GNU General Public License version 2 as | 12 | * it under the terms of the GNU General Public License version 2 as |
@@ -21,28 +21,39 @@ | |||
21 | #include "wme.h" | 21 | #include "wme.h" |
22 | 22 | ||
23 | /** | 23 | /** |
24 | * DOC: TX aggregation | 24 | * DOC: TX A-MPDU aggregation |
25 | * | 25 | * |
26 | * Aggregation on the TX side requires setting the hardware flag | 26 | * Aggregation on the TX side requires setting the hardware flag |
27 | * %IEEE80211_HW_AMPDU_AGGREGATION as well as, if present, the @ampdu_queues | 27 | * %IEEE80211_HW_AMPDU_AGGREGATION. The driver will then be handed |
28 | * hardware parameter to the number of hardware AMPDU queues. If there are no | 28 | * packets with a flag indicating A-MPDU aggregation. The driver |
29 | * hardware queues then the driver will (currently) have to do all frame | 29 | * or device is responsible for actually aggregating the frames, |
30 | * buffering. | 30 | * as well as deciding how many and which to aggregate. |
31 | * | 31 | * |
32 | * When TX aggregation is started by some subsystem (usually the rate control | 32 | * When TX aggregation is started by some subsystem (usually the rate |
33 | * algorithm would be appropriate) by calling the | 33 | * control algorithm would be appropriate) by calling the |
34 | * ieee80211_start_tx_ba_session() function, the driver will be notified via | 34 | * ieee80211_start_tx_ba_session() function, the driver will be |
35 | * its @ampdu_action function, with the %IEEE80211_AMPDU_TX_START action. | 35 | * notified via its @ampdu_action function, with the |
36 | * %IEEE80211_AMPDU_TX_START action. | ||
36 | * | 37 | * |
37 | * In response to that, the driver is later required to call the | 38 | * In response to that, the driver is later required to call the |
38 | * ieee80211_start_tx_ba_cb() (or ieee80211_start_tx_ba_cb_irqsafe()) | 39 | * ieee80211_start_tx_ba_cb_irqsafe() function, which will really |
39 | * function, which will start the aggregation session. | 40 | * start the aggregation session after the peer has also responded. |
41 | * If the peer responds negatively, the session will be stopped | ||
42 | * again right away. Note that it is possible for the aggregation | ||
43 | * session to be stopped before the driver has indicated that it | ||
44 | * is done setting it up, in which case it must not indicate the | ||
45 | * setup completion. | ||
40 | * | 46 | * |
41 | * Similarly, when the aggregation session is stopped by | 47 | * Also note that, since we also need to wait for a response from |
42 | * ieee80211_stop_tx_ba_session(), the driver's @ampdu_action function will | 48 | * the peer, the driver is notified of the completion of the |
43 | * be called with the action %IEEE80211_AMPDU_TX_STOP. In this case, the | 49 | * handshake by the %IEEE80211_AMPDU_TX_OPERATIONAL action to the |
44 | * call must not fail, and the driver must later call ieee80211_stop_tx_ba_cb() | 50 | * @ampdu_action callback. |
45 | * (or ieee80211_stop_tx_ba_cb_irqsafe()). | 51 | * |
52 | * Similarly, when the aggregation session is stopped by the peer | ||
53 | * or something calling ieee80211_stop_tx_ba_session(), the driver's | ||
54 | * @ampdu_action function will be called with the action | ||
55 | * %IEEE80211_AMPDU_TX_STOP. In this case, the call must not fail, | ||
56 | * and the driver must later call ieee80211_stop_tx_ba_cb_irqsafe(). | ||
46 | */ | 57 | */ |
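Another illustrative aside, not part of this patch: the handshake described above adds three TX arms to the same hypothetical my_ampdu_action() switch sketched in the agg-rx.c section; my_hw_tx_agg_prepare(), my_hw_tx_agg_enable() and my_hw_tx_agg_teardown() are invented helpers. The flow itself would typically be started by a rate control module calling ieee80211_start_tx_ba_session(pubsta, tid).

	case IEEE80211_AMPDU_TX_START:
		/* Prepare device/firmware state; *ssn carries the starting
		 * sequence number.  Report completion so the session can
		 * become operational once the peer's ADDBA response has
		 * also arrived. */
		my_hw_tx_agg_prepare(priv, sta, tid, ssn);
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/* Both sides agreed; frames for this TID may now really be
		 * aggregated by the device. */
		my_hw_tx_agg_enable(priv, sta, tid);
		return 0;
	case IEEE80211_AMPDU_TX_STOP:
		/* Must not fail; acknowledge the teardown when finished. */
		my_hw_tx_agg_teardown(priv, sta, tid);
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;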
47 | 58 | ||
48 | static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, | 59 | static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, |
@@ -125,25 +136,53 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1 | |||
125 | ieee80211_tx_skb(sdata, skb); | 136 | ieee80211_tx_skb(sdata, skb); |
126 | } | 137 | } |
127 | 138 | ||
139 | static void kfree_tid_tx(struct rcu_head *rcu_head) | ||
140 | { | ||
141 | struct tid_ampdu_tx *tid_tx = | ||
142 | container_of(rcu_head, struct tid_ampdu_tx, rcu_head); | ||
143 | |||
144 | kfree(tid_tx); | ||
145 | } | ||
146 | |||
128 | int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | 147 | int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, |
129 | enum ieee80211_back_parties initiator) | 148 | enum ieee80211_back_parties initiator) |
130 | { | 149 | { |
131 | struct ieee80211_local *local = sta->local; | 150 | struct ieee80211_local *local = sta->local; |
151 | struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid]; | ||
132 | int ret; | 152 | int ret; |
133 | u8 *state; | 153 | |
154 | lockdep_assert_held(&sta->ampdu_mlme.mtx); | ||
155 | |||
156 | if (!tid_tx) | ||
157 | return -ENOENT; | ||
158 | |||
159 | spin_lock_bh(&sta->lock); | ||
160 | |||
161 | if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) { | ||
162 | /* not even started yet! */ | ||
163 | rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL); | ||
164 | spin_unlock_bh(&sta->lock); | ||
165 | call_rcu(&tid_tx->rcu_head, kfree_tid_tx); | ||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | spin_unlock_bh(&sta->lock); | ||
134 | 170 | ||
135 | #ifdef CONFIG_MAC80211_HT_DEBUG | 171 | #ifdef CONFIG_MAC80211_HT_DEBUG |
136 | printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n", | 172 | printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n", |
137 | sta->sta.addr, tid); | 173 | sta->sta.addr, tid); |
138 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | 174 | #endif /* CONFIG_MAC80211_HT_DEBUG */ |
139 | 175 | ||
140 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | 176 | set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state); |
141 | 177 | ||
142 | if (*state == HT_AGG_STATE_OPERATIONAL) | 178 | /* |
143 | sta->ampdu_mlme.addba_req_num[tid] = 0; | 179 | * After this packets are no longer handed right through |
180 | * to the driver but are put onto tid_tx->pending instead, | ||
181 | * with locking to ensure proper access. | ||
182 | */ | ||
183 | clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state); | ||
144 | 184 | ||
145 | *state = HT_AGG_STATE_REQ_STOP_BA_MSK | | 185 | tid_tx->stop_initiator = initiator; |
146 | (initiator << HT_AGG_STATE_INITIATOR_SHIFT); | ||
147 | 186 | ||
148 | ret = drv_ampdu_action(local, sta->sdata, | 187 | ret = drv_ampdu_action(local, sta->sdata, |
149 | IEEE80211_AMPDU_TX_STOP, | 188 | IEEE80211_AMPDU_TX_STOP, |
@@ -174,16 +213,14 @@ static void sta_addba_resp_timer_expired(unsigned long data) | |||
174 | u16 tid = *(u8 *)data; | 213 | u16 tid = *(u8 *)data; |
175 | struct sta_info *sta = container_of((void *)data, | 214 | struct sta_info *sta = container_of((void *)data, |
176 | struct sta_info, timer_to_tid[tid]); | 215 | struct sta_info, timer_to_tid[tid]); |
177 | u8 *state; | 216 | struct tid_ampdu_tx *tid_tx; |
178 | |||
179 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
180 | 217 | ||
181 | /* check if the TID waits for addBA response */ | 218 | /* check if the TID waits for addBA response */ |
182 | spin_lock_bh(&sta->lock); | 219 | rcu_read_lock(); |
183 | if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK | | 220 | tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]); |
184 | HT_AGG_STATE_REQ_STOP_BA_MSK)) != | 221 | if (!tid_tx || |
185 | HT_ADDBA_REQUESTED_MSK) { | 222 | test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) { |
186 | spin_unlock_bh(&sta->lock); | 223 | rcu_read_unlock(); |
187 | #ifdef CONFIG_MAC80211_HT_DEBUG | 224 | #ifdef CONFIG_MAC80211_HT_DEBUG |
188 | printk(KERN_DEBUG "timer expired on tid %d but we are not " | 225 | printk(KERN_DEBUG "timer expired on tid %d but we are not " |
189 | "(or no longer) expecting addBA response there\n", | 226 | "(or no longer) expecting addBA response there\n", |
@@ -196,8 +233,8 @@ static void sta_addba_resp_timer_expired(unsigned long data) | |||
196 | printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid); | 233 | printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid); |
197 | #endif | 234 | #endif |
198 | 235 | ||
199 | ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR); | 236 | ieee80211_stop_tx_ba_session(&sta->sta, tid); |
200 | spin_unlock_bh(&sta->lock); | 237 | rcu_read_unlock(); |
201 | } | 238 | } |
202 | 239 | ||
203 | static inline int ieee80211_ac_from_tid(int tid) | 240 | static inline int ieee80211_ac_from_tid(int tid) |
@@ -205,14 +242,112 @@ static inline int ieee80211_ac_from_tid(int tid) | |||
205 | return ieee802_1d_to_ac[tid & 7]; | 242 | return ieee802_1d_to_ac[tid & 7]; |
206 | } | 243 | } |
207 | 244 | ||
245 | /* | ||
246 | * When multiple aggregation sessions on multiple stations | ||
247 | * are being created/destroyed simultaneously, we need to | ||
248 | * refcount the global queue stop caused by that in order | ||
249 | * to not get into a situation where one of the aggregation | ||
250 | * setup or teardown re-enables queues before the other is | ||
251 | * ready to handle that. | ||
252 | * | ||
253 | * These two functions take care of this issue by keeping | ||
254 | * a global "agg_queue_stop" refcount. | ||
255 | */ | ||
256 | static void __acquires(agg_queue) | ||
257 | ieee80211_stop_queue_agg(struct ieee80211_local *local, int tid) | ||
258 | { | ||
259 | int queue = ieee80211_ac_from_tid(tid); | ||
260 | |||
261 | if (atomic_inc_return(&local->agg_queue_stop[queue]) == 1) | ||
262 | ieee80211_stop_queue_by_reason( | ||
263 | &local->hw, queue, | ||
264 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION); | ||
265 | __acquire(agg_queue); | ||
266 | } | ||
267 | |||
268 | static void __releases(agg_queue) | ||
269 | ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid) | ||
270 | { | ||
271 | int queue = ieee80211_ac_from_tid(tid); | ||
272 | |||
273 | if (atomic_dec_return(&local->agg_queue_stop[queue]) == 0) | ||
274 | ieee80211_wake_queue_by_reason( | ||
275 | &local->hw, queue, | ||
276 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION); | ||
277 | __release(agg_queue); | ||
278 | } | ||
279 | |||
280 | void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) | ||
281 | { | ||
282 | struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid]; | ||
283 | struct ieee80211_local *local = sta->local; | ||
284 | struct ieee80211_sub_if_data *sdata = sta->sdata; | ||
285 | u16 start_seq_num; | ||
286 | int ret; | ||
287 | |||
288 | lockdep_assert_held(&sta->ampdu_mlme.mtx); | ||
289 | |||
290 | /* | ||
291 | * While we're asking the driver about the aggregation, | ||
292 | * stop the AC queue so that we don't have to worry | ||
293 | * about frames that came in while we were doing that, | ||
294 | * which would require us to put them to the AC pending | ||
295 | * afterwards which just makes the code more complex. | ||
296 | */ | ||
297 | ieee80211_stop_queue_agg(local, tid); | ||
298 | |||
299 | clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state); | ||
300 | |||
301 | /* | ||
302 | * make sure no packets are being processed to get | ||
303 | * valid starting sequence number | ||
304 | */ | ||
305 | synchronize_net(); | ||
306 | |||
307 | start_seq_num = sta->tid_seq[tid] >> 4; | ||
308 | |||
309 | ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START, | ||
310 | &sta->sta, tid, &start_seq_num); | ||
311 | if (ret) { | ||
312 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
313 | printk(KERN_DEBUG "BA request denied - HW unavailable for" | ||
314 | " tid %d\n", tid); | ||
315 | #endif | ||
316 | spin_lock_bh(&sta->lock); | ||
317 | rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL); | ||
318 | spin_unlock_bh(&sta->lock); | ||
319 | |||
320 | ieee80211_wake_queue_agg(local, tid); | ||
321 | call_rcu(&tid_tx->rcu_head, kfree_tid_tx); | ||
322 | return; | ||
323 | } | ||
324 | |||
325 | /* we can take packets again now */ | ||
326 | ieee80211_wake_queue_agg(local, tid); | ||
327 | |||
328 | /* activate the timer for the recipient's addBA response */ | ||
329 | mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL); | ||
330 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
331 | printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid); | ||
332 | #endif | ||
333 | |||
334 | spin_lock_bh(&sta->lock); | ||
335 | sta->ampdu_mlme.addba_req_num[tid]++; | ||
336 | spin_unlock_bh(&sta->lock); | ||
337 | |||
338 | /* send AddBA request */ | ||
339 | ieee80211_send_addba_request(sdata, sta->sta.addr, tid, | ||
340 | tid_tx->dialog_token, start_seq_num, | ||
341 | 0x40, 5000); | ||
342 | } | ||
343 | |||
208 | int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) | 344 | int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) |
209 | { | 345 | { |
210 | struct sta_info *sta = container_of(pubsta, struct sta_info, sta); | 346 | struct sta_info *sta = container_of(pubsta, struct sta_info, sta); |
211 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 347 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
212 | struct ieee80211_local *local = sdata->local; | 348 | struct ieee80211_local *local = sdata->local; |
213 | u8 *state; | 349 | struct tid_ampdu_tx *tid_tx; |
214 | int ret = 0; | 350 | int ret = 0; |
215 | u16 start_seq_num; | ||
216 | 351 | ||
217 | trace_api_start_tx_ba_session(pubsta, tid); | 352 | trace_api_start_tx_ba_session(pubsta, tid); |
218 | 353 | ||
@@ -239,24 +374,15 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) | |||
239 | sdata->vif.type != NL80211_IFTYPE_AP) | 374 | sdata->vif.type != NL80211_IFTYPE_AP) |
240 | return -EINVAL; | 375 | return -EINVAL; |
241 | 376 | ||
242 | if (test_sta_flags(sta, WLAN_STA_DISASSOC)) { | ||
243 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
244 | printk(KERN_DEBUG "Disassociation is in progress. " | ||
245 | "Denying BA session request\n"); | ||
246 | #endif | ||
247 | return -EINVAL; | ||
248 | } | ||
249 | |||
250 | if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) { | 377 | if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) { |
251 | #ifdef CONFIG_MAC80211_HT_DEBUG | 378 | #ifdef CONFIG_MAC80211_HT_DEBUG |
252 | printk(KERN_DEBUG "Suspend in progress. " | 379 | printk(KERN_DEBUG "BA sessions blocked. " |
253 | "Denying BA session request\n"); | 380 | "Denying BA session request\n"); |
254 | #endif | 381 | #endif |
255 | return -EINVAL; | 382 | return -EINVAL; |
256 | } | 383 | } |
257 | 384 | ||
258 | spin_lock_bh(&sta->lock); | 385 | spin_lock_bh(&sta->lock); |
259 | spin_lock(&local->ampdu_lock); | ||
260 | 386 | ||
261 | /* we have tried too many times, receiver does not want A-MPDU */ | 387 | /* we have tried too many times, receiver does not want A-MPDU */ |
262 | if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) { | 388 | if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) { |
@@ -264,9 +390,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) | |||
264 | goto err_unlock_sta; | 390 | goto err_unlock_sta; |
265 | } | 391 | } |
266 | 392 | ||
267 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | 393 | tid_tx = sta->ampdu_mlme.tid_tx[tid]; |
268 | /* check if the TID is not in aggregation flow already */ | 394 | /* check if the TID is not in aggregation flow already */ |
269 | if (*state != HT_AGG_STATE_IDLE) { | 395 | if (tid_tx) { |
270 | #ifdef CONFIG_MAC80211_HT_DEBUG | 396 | #ifdef CONFIG_MAC80211_HT_DEBUG |
271 | printk(KERN_DEBUG "BA request denied - session is not " | 397 | printk(KERN_DEBUG "BA request denied - session is not " |
272 | "idle on tid %u\n", tid); | 398 | "idle on tid %u\n", tid); |
@@ -275,96 +401,37 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) | |||
275 | goto err_unlock_sta; | 401 | goto err_unlock_sta; |
276 | } | 402 | } |
277 | 403 | ||
278 | /* | ||
279 | * While we're asking the driver about the aggregation, | ||
280 | * stop the AC queue so that we don't have to worry | ||
281 | * about frames that came in while we were doing that, | ||
282 | * which would require us to put them to the AC pending | ||
283 | * afterwards which just makes the code more complex. | ||
284 | */ | ||
285 | ieee80211_stop_queue_by_reason( | ||
286 | &local->hw, ieee80211_ac_from_tid(tid), | ||
287 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION); | ||
288 | |||
289 | /* prepare A-MPDU MLME for Tx aggregation */ | 404 | /* prepare A-MPDU MLME for Tx aggregation */ |
290 | sta->ampdu_mlme.tid_tx[tid] = | 405 | tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); |
291 | kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); | 406 | if (!tid_tx) { |
292 | if (!sta->ampdu_mlme.tid_tx[tid]) { | ||
293 | #ifdef CONFIG_MAC80211_HT_DEBUG | 407 | #ifdef CONFIG_MAC80211_HT_DEBUG |
294 | if (net_ratelimit()) | 408 | if (net_ratelimit()) |
295 | printk(KERN_ERR "allocate tx mlme to tid %d failed\n", | 409 | printk(KERN_ERR "allocate tx mlme to tid %d failed\n", |
296 | tid); | 410 | tid); |
297 | #endif | 411 | #endif |
298 | ret = -ENOMEM; | 412 | ret = -ENOMEM; |
299 | goto err_wake_queue; | 413 | goto err_unlock_sta; |
300 | } | 414 | } |
301 | 415 | ||
302 | skb_queue_head_init(&sta->ampdu_mlme.tid_tx[tid]->pending); | 416 | skb_queue_head_init(&tid_tx->pending); |
417 | __set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state); | ||
303 | 418 | ||
304 | /* Tx timer */ | 419 | /* Tx timer */ |
305 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function = | 420 | tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired; |
306 | sta_addba_resp_timer_expired; | 421 | tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid]; |
307 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data = | 422 | init_timer(&tid_tx->addba_resp_timer); |
308 | (unsigned long)&sta->timer_to_tid[tid]; | ||
309 | init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); | ||
310 | |||
311 | /* Ok, the Addba frame hasn't been sent yet, but if the driver calls the | ||
312 | * call back right away, it must see that the flow has begun */ | ||
313 | *state |= HT_ADDBA_REQUESTED_MSK; | ||
314 | |||
315 | start_seq_num = sta->tid_seq[tid] >> 4; | ||
316 | |||
317 | ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START, | ||
318 | pubsta, tid, &start_seq_num); | ||
319 | 423 | ||
320 | if (ret) { | 424 | /* assign a dialog token */ |
321 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
322 | printk(KERN_DEBUG "BA request denied - HW unavailable for" | ||
323 | " tid %d\n", tid); | ||
324 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
325 | *state = HT_AGG_STATE_IDLE; | ||
326 | goto err_free; | ||
327 | } | ||
328 | |||
329 | /* Driver vetoed or OKed, but we can take packets again now */ | ||
330 | ieee80211_wake_queue_by_reason( | ||
331 | &local->hw, ieee80211_ac_from_tid(tid), | ||
332 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION); | ||
333 | |||
334 | spin_unlock(&local->ampdu_lock); | ||
335 | |||
336 | /* prepare tid data */ | ||
337 | sta->ampdu_mlme.dialog_token_allocator++; | 425 | sta->ampdu_mlme.dialog_token_allocator++; |
338 | sta->ampdu_mlme.tid_tx[tid]->dialog_token = | 426 | tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator; |
339 | sta->ampdu_mlme.dialog_token_allocator; | ||
340 | sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num; | ||
341 | 427 | ||
342 | spin_unlock_bh(&sta->lock); | 428 | /* finally, assign it to the array */ |
429 | rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx); | ||
343 | 430 | ||
344 | /* send AddBA request */ | 431 | ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); |
345 | ieee80211_send_addba_request(sdata, pubsta->addr, tid, | 432 | |
346 | sta->ampdu_mlme.tid_tx[tid]->dialog_token, | 433 | /* this flow continues off the work */ |
347 | sta->ampdu_mlme.tid_tx[tid]->ssn, | ||
348 | 0x40, 5000); | ||
349 | sta->ampdu_mlme.addba_req_num[tid]++; | ||
350 | /* activate the timer for the recipient's addBA response */ | ||
351 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires = | ||
352 | jiffies + ADDBA_RESP_INTERVAL; | ||
353 | add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); | ||
354 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
355 | printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid); | ||
356 | #endif | ||
357 | return 0; | ||
358 | |||
359 | err_free: | ||
360 | kfree(sta->ampdu_mlme.tid_tx[tid]); | ||
361 | sta->ampdu_mlme.tid_tx[tid] = NULL; | ||
362 | err_wake_queue: | ||
363 | ieee80211_wake_queue_by_reason( | ||
364 | &local->hw, ieee80211_ac_from_tid(tid), | ||
365 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION); | ||
366 | err_unlock_sta: | 434 | err_unlock_sta: |
367 | spin_unlock(&local->ampdu_lock); | ||
368 | spin_unlock_bh(&sta->lock); | 435 | spin_unlock_bh(&sta->lock); |
369 | return ret; | 436 | return ret; |
370 | } | 437 | } |
@@ -372,69 +439,65 @@ EXPORT_SYMBOL(ieee80211_start_tx_ba_session); | |||
372 | 439 | ||
373 | /* | 440 | /* |
374 | * splice packets from the STA's pending to the local pending, | 441 | * splice packets from the STA's pending to the local pending, |
375 | * requires a call to ieee80211_agg_splice_finish and holding | 442 | * requires a call to ieee80211_agg_splice_finish later |
376 | * local->ampdu_lock across both calls. | ||
377 | */ | 443 | */ |
378 | static void ieee80211_agg_splice_packets(struct ieee80211_local *local, | 444 | static void __acquires(agg_queue) |
379 | struct sta_info *sta, u16 tid) | 445 | ieee80211_agg_splice_packets(struct ieee80211_local *local, |
446 | struct tid_ampdu_tx *tid_tx, u16 tid) | ||
380 | { | 447 | { |
448 | int queue = ieee80211_ac_from_tid(tid); | ||
381 | unsigned long flags; | 449 | unsigned long flags; |
382 | u16 queue = ieee80211_ac_from_tid(tid); | ||
383 | |||
384 | ieee80211_stop_queue_by_reason( | ||
385 | &local->hw, queue, | ||
386 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION); | ||
387 | 450 | ||
388 | if (!(sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK)) | 451 | ieee80211_stop_queue_agg(local, tid); |
389 | return; | ||
390 | 452 | ||
391 | if (WARN(!sta->ampdu_mlme.tid_tx[tid], | 453 | if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates" |
392 | "TID %d gone but expected when splicing aggregates from" | 454 | " from the pending queue\n", tid)) |
393 | "the pending queue\n", tid)) | ||
394 | return; | 455 | return; |
395 | 456 | ||
396 | if (!skb_queue_empty(&sta->ampdu_mlme.tid_tx[tid]->pending)) { | 457 | if (!skb_queue_empty(&tid_tx->pending)) { |
397 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); | 458 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); |
398 | /* copy over remaining packets */ | 459 | /* copy over remaining packets */ |
399 | skb_queue_splice_tail_init( | 460 | skb_queue_splice_tail_init(&tid_tx->pending, |
400 | &sta->ampdu_mlme.tid_tx[tid]->pending, | 461 | &local->pending[queue]); |
401 | &local->pending[queue]); | ||
402 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | 462 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); |
403 | } | 463 | } |
404 | } | 464 | } |
405 | 465 | ||
406 | static void ieee80211_agg_splice_finish(struct ieee80211_local *local, | 466 | static void __releases(agg_queue) |
407 | struct sta_info *sta, u16 tid) | 467 | ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid) |
408 | { | 468 | { |
409 | u16 queue = ieee80211_ac_from_tid(tid); | 469 | ieee80211_wake_queue_agg(local, tid); |
410 | |||
411 | ieee80211_wake_queue_by_reason( | ||
412 | &local->hw, queue, | ||
413 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION); | ||
414 | } | 470 | } |
415 | 471 | ||
416 | /* caller must hold sta->lock */ | ||
417 | static void ieee80211_agg_tx_operational(struct ieee80211_local *local, | 472 | static void ieee80211_agg_tx_operational(struct ieee80211_local *local, |
418 | struct sta_info *sta, u16 tid) | 473 | struct sta_info *sta, u16 tid) |
419 | { | 474 | { |
475 | lockdep_assert_held(&sta->ampdu_mlme.mtx); | ||
476 | |||
420 | #ifdef CONFIG_MAC80211_HT_DEBUG | 477 | #ifdef CONFIG_MAC80211_HT_DEBUG |
421 | printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid); | 478 | printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid); |
422 | #endif | 479 | #endif |
423 | 480 | ||
424 | spin_lock(&local->ampdu_lock); | ||
425 | ieee80211_agg_splice_packets(local, sta, tid); | ||
426 | /* | ||
427 | * NB: we rely on sta->lock being taken in the TX | ||
428 | * processing here when adding to the pending queue, | ||
429 | * otherwise we could only change the state of the | ||
430 | * session to OPERATIONAL _here_. | ||
431 | */ | ||
432 | ieee80211_agg_splice_finish(local, sta, tid); | ||
433 | spin_unlock(&local->ampdu_lock); | ||
434 | |||
435 | drv_ampdu_action(local, sta->sdata, | 481 | drv_ampdu_action(local, sta->sdata, |
436 | IEEE80211_AMPDU_TX_OPERATIONAL, | 482 | IEEE80211_AMPDU_TX_OPERATIONAL, |
437 | &sta->sta, tid, NULL); | 483 | &sta->sta, tid, NULL); |
484 | |||
485 | /* | ||
486 | * synchronize with TX path, while splicing the TX path | ||
487 | * should block so it won't put more packets onto pending. | ||
488 | */ | ||
489 | spin_lock_bh(&sta->lock); | ||
490 | |||
491 | ieee80211_agg_splice_packets(local, sta->ampdu_mlme.tid_tx[tid], tid); | ||
492 | /* | ||
493 | * Now mark as operational. This will be visible | ||
494 | * in the TX path, and lets it go lock-free in | ||
495 | * the common case. | ||
496 | */ | ||
497 | set_bit(HT_AGG_STATE_OPERATIONAL, &sta->ampdu_mlme.tid_tx[tid]->state); | ||
498 | ieee80211_agg_splice_finish(local, tid); | ||
499 | |||
500 | spin_unlock_bh(&sta->lock); | ||
438 | } | 501 | } |
439 | 502 | ||
440 | void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid) | 503 | void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid) |
@@ -442,7 +505,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid) | |||
442 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); | 505 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); |
443 | struct ieee80211_local *local = sdata->local; | 506 | struct ieee80211_local *local = sdata->local; |
444 | struct sta_info *sta; | 507 | struct sta_info *sta; |
445 | u8 *state; | 508 | struct tid_ampdu_tx *tid_tx; |
446 | 509 | ||
447 | trace_api_start_tx_ba_cb(sdata, ra, tid); | 510 | trace_api_start_tx_ba_cb(sdata, ra, tid); |
448 | 511 | ||
@@ -454,42 +517,36 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid) | |||
454 | return; | 517 | return; |
455 | } | 518 | } |
456 | 519 | ||
457 | rcu_read_lock(); | 520 | mutex_lock(&local->sta_mtx); |
458 | sta = sta_info_get(sdata, ra); | 521 | sta = sta_info_get(sdata, ra); |
459 | if (!sta) { | 522 | if (!sta) { |
460 | rcu_read_unlock(); | 523 | mutex_unlock(&local->sta_mtx); |
461 | #ifdef CONFIG_MAC80211_HT_DEBUG | 524 | #ifdef CONFIG_MAC80211_HT_DEBUG |
462 | printk(KERN_DEBUG "Could not find station: %pM\n", ra); | 525 | printk(KERN_DEBUG "Could not find station: %pM\n", ra); |
463 | #endif | 526 | #endif |
464 | return; | 527 | return; |
465 | } | 528 | } |
466 | 529 | ||
467 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | 530 | mutex_lock(&sta->ampdu_mlme.mtx); |
468 | spin_lock_bh(&sta->lock); | 531 | tid_tx = sta->ampdu_mlme.tid_tx[tid]; |
469 | 532 | ||
470 | if (WARN_ON(!(*state & HT_ADDBA_REQUESTED_MSK))) { | 533 | if (WARN_ON(!tid_tx)) { |
471 | #ifdef CONFIG_MAC80211_HT_DEBUG | 534 | #ifdef CONFIG_MAC80211_HT_DEBUG |
472 | printk(KERN_DEBUG "addBA was not requested yet, state is %d\n", | 535 | printk(KERN_DEBUG "addBA was not requested!\n"); |
473 | *state); | ||
474 | #endif | 536 | #endif |
475 | spin_unlock_bh(&sta->lock); | 537 | goto unlock; |
476 | rcu_read_unlock(); | ||
477 | return; | ||
478 | } | 538 | } |
479 | 539 | ||
480 | if (WARN_ON(*state & HT_ADDBA_DRV_READY_MSK)) | 540 | if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))) |
481 | goto out; | 541 | goto unlock; |
482 | |||
483 | *state |= HT_ADDBA_DRV_READY_MSK; | ||
484 | 542 | ||
485 | if (*state == HT_AGG_STATE_OPERATIONAL) | 543 | if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) |
486 | ieee80211_agg_tx_operational(local, sta, tid); | 544 | ieee80211_agg_tx_operational(local, sta, tid); |
487 | 545 | ||
488 | out: | 546 | unlock: |
489 | spin_unlock_bh(&sta->lock); | 547 | mutex_unlock(&sta->ampdu_mlme.mtx); |
490 | rcu_read_unlock(); | 548 | mutex_unlock(&local->sta_mtx); |
491 | } | 549 | } |
492 | EXPORT_SYMBOL(ieee80211_start_tx_ba_cb); | ||
493 | 550 | ||
494 | void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, | 551 | void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, |
495 | const u8 *ra, u16 tid) | 552 | const u8 *ra, u16 tid) |
@@ -510,44 +567,36 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, | |||
510 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | 567 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; |
511 | memcpy(&ra_tid->ra, ra, ETH_ALEN); | 568 | memcpy(&ra_tid->ra, ra, ETH_ALEN); |
512 | ra_tid->tid = tid; | 569 | ra_tid->tid = tid; |
513 | ra_tid->vif = vif; | ||
514 | 570 | ||
515 | skb->pkt_type = IEEE80211_ADDBA_MSG; | 571 | skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START; |
516 | skb_queue_tail(&local->skb_queue, skb); | 572 | skb_queue_tail(&sdata->skb_queue, skb); |
517 | tasklet_schedule(&local->tasklet); | 573 | ieee80211_queue_work(&local->hw, &sdata->work); |
518 | } | 574 | } |
519 | EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe); | 575 | EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe); |
520 | 576 | ||
521 | int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | 577 | int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, |
522 | enum ieee80211_back_parties initiator) | 578 | enum ieee80211_back_parties initiator) |
523 | { | 579 | { |
524 | u8 *state; | ||
525 | int ret; | 580 | int ret; |
526 | 581 | ||
527 | /* check if the TID is in aggregation */ | 582 | mutex_lock(&sta->ampdu_mlme.mtx); |
528 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
529 | spin_lock_bh(&sta->lock); | ||
530 | |||
531 | if (*state != HT_AGG_STATE_OPERATIONAL) { | ||
532 | ret = -ENOENT; | ||
533 | goto unlock; | ||
534 | } | ||
535 | 583 | ||
536 | ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator); | 584 | ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator); |
537 | 585 | ||
538 | unlock: | 586 | mutex_unlock(&sta->ampdu_mlme.mtx); |
539 | spin_unlock_bh(&sta->lock); | 587 | |
540 | return ret; | 588 | return ret; |
541 | } | 589 | } |
542 | 590 | ||
543 | int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, | 591 | int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) |
544 | enum ieee80211_back_parties initiator) | ||
545 | { | 592 | { |
546 | struct sta_info *sta = container_of(pubsta, struct sta_info, sta); | 593 | struct sta_info *sta = container_of(pubsta, struct sta_info, sta); |
547 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 594 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
548 | struct ieee80211_local *local = sdata->local; | 595 | struct ieee80211_local *local = sdata->local; |
596 | struct tid_ampdu_tx *tid_tx; | ||
597 | int ret = 0; | ||
549 | 598 | ||
550 | trace_api_stop_tx_ba_session(pubsta, tid, initiator); | 599 | trace_api_stop_tx_ba_session(pubsta, tid); |
551 | 600 | ||
552 | if (!local->ops->ampdu_action) | 601 | if (!local->ops->ampdu_action) |
553 | return -EINVAL; | 602 | return -EINVAL; |
@@ -555,7 +604,26 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, | |||
555 | if (tid >= STA_TID_NUM) | 604 | if (tid >= STA_TID_NUM) |
556 | return -EINVAL; | 605 | return -EINVAL; |
557 | 606 | ||
558 | return __ieee80211_stop_tx_ba_session(sta, tid, initiator); | 607 | spin_lock_bh(&sta->lock); |
608 | tid_tx = sta->ampdu_mlme.tid_tx[tid]; | ||
609 | |||
610 | if (!tid_tx) { | ||
611 | ret = -ENOENT; | ||
612 | goto unlock; | ||
613 | } | ||
614 | |||
615 | if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { | ||
616 | /* already in progress stopping it */ | ||
617 | ret = 0; | ||
618 | goto unlock; | ||
619 | } | ||
620 | |||
621 | set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state); | ||
622 | ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); | ||
623 | |||
624 | unlock: | ||
625 | spin_unlock_bh(&sta->lock); | ||
626 | return ret; | ||
559 | } | 627 | } |
560 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); | 628 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); |
561 | 629 | ||
@@ -564,7 +632,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid) | |||
564 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); | 632 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); |
565 | struct ieee80211_local *local = sdata->local; | 633 | struct ieee80211_local *local = sdata->local; |
566 | struct sta_info *sta; | 634 | struct sta_info *sta; |
567 | u8 *state; | 635 | struct tid_ampdu_tx *tid_tx; |
568 | 636 | ||
569 | trace_api_stop_tx_ba_cb(sdata, ra, tid); | 637 | trace_api_stop_tx_ba_cb(sdata, ra, tid); |
570 | 638 | ||
@@ -581,51 +649,56 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid) | |||
581 | ra, tid); | 649 | ra, tid); |
582 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | 650 | #endif /* CONFIG_MAC80211_HT_DEBUG */ |
583 | 651 | ||
584 | rcu_read_lock(); | 652 | mutex_lock(&local->sta_mtx); |
653 | |||
585 | sta = sta_info_get(sdata, ra); | 654 | sta = sta_info_get(sdata, ra); |
586 | if (!sta) { | 655 | if (!sta) { |
587 | #ifdef CONFIG_MAC80211_HT_DEBUG | 656 | #ifdef CONFIG_MAC80211_HT_DEBUG |
588 | printk(KERN_DEBUG "Could not find station: %pM\n", ra); | 657 | printk(KERN_DEBUG "Could not find station: %pM\n", ra); |
589 | #endif | 658 | #endif |
590 | rcu_read_unlock(); | 659 | goto unlock; |
591 | return; | ||
592 | } | 660 | } |
593 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
594 | 661 | ||
595 | /* NOTE: no need to use sta->lock in this state check, as | 662 | mutex_lock(&sta->ampdu_mlme.mtx); |
596 | * ieee80211_stop_tx_ba_session will let only one stop call to | 663 | spin_lock_bh(&sta->lock); |
597 | * pass through per sta/tid | 664 | tid_tx = sta->ampdu_mlme.tid_tx[tid]; |
598 | */ | 665 | |
599 | if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) { | 666 | if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { |
600 | #ifdef CONFIG_MAC80211_HT_DEBUG | 667 | #ifdef CONFIG_MAC80211_HT_DEBUG |
601 | printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n"); | 668 | printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n"); |
602 | #endif | 669 | #endif |
603 | rcu_read_unlock(); | 670 | goto unlock_sta; |
604 | return; | ||
605 | } | 671 | } |
606 | 672 | ||
607 | if (*state & HT_AGG_STATE_INITIATOR_MSK) | 673 | if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR) |
608 | ieee80211_send_delba(sta->sdata, ra, tid, | 674 | ieee80211_send_delba(sta->sdata, ra, tid, |
609 | WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); | 675 | WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); |
610 | 676 | ||
611 | spin_lock_bh(&sta->lock); | 677 | /* |
612 | spin_lock(&local->ampdu_lock); | 678 | * When we get here, the TX path will not be lockless any more wrt. |
679 | * aggregation, since the OPERATIONAL bit has long been cleared. | ||
680 | * Thus it will block on getting the lock, if it occurs. So if we | ||
681 | * stop the queue now, we will not get any more packets, and any | ||
682 | * that might be being processed will wait for us here, thereby | ||
683 | * guaranteeing that no packets go to the tid_tx pending queue any | ||
684 | * more. | ||
685 | */ | ||
613 | 686 | ||
614 | ieee80211_agg_splice_packets(local, sta, tid); | 687 | ieee80211_agg_splice_packets(local, tid_tx, tid); |
615 | 688 | ||
616 | *state = HT_AGG_STATE_IDLE; | 689 | /* future packets must not find the tid_tx struct any more */ |
617 | /* from now on packets are no longer put onto sta->pending */ | 690 | rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL); |
618 | kfree(sta->ampdu_mlme.tid_tx[tid]); | ||
619 | sta->ampdu_mlme.tid_tx[tid] = NULL; | ||
620 | 691 | ||
621 | ieee80211_agg_splice_finish(local, sta, tid); | 692 | ieee80211_agg_splice_finish(local, tid); |
622 | 693 | ||
623 | spin_unlock(&local->ampdu_lock); | 694 | call_rcu(&tid_tx->rcu_head, kfree_tid_tx); |
624 | spin_unlock_bh(&sta->lock); | ||
625 | 695 | ||
626 | rcu_read_unlock(); | 696 | unlock_sta: |
697 | spin_unlock_bh(&sta->lock); | ||
698 | mutex_unlock(&sta->ampdu_mlme.mtx); | ||
699 | unlock: | ||
700 | mutex_unlock(&local->sta_mtx); | ||
627 | } | 701 | } |
628 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb); | ||
629 | 702 | ||
630 | void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, | 703 | void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, |
631 | const u8 *ra, u16 tid) | 704 | const u8 *ra, u16 tid) |
@@ -646,11 +719,10 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, | |||
646 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | 719 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; |
647 | memcpy(&ra_tid->ra, ra, ETH_ALEN); | 720 | memcpy(&ra_tid->ra, ra, ETH_ALEN); |
648 | ra_tid->tid = tid; | 721 | ra_tid->tid = tid; |
649 | ra_tid->vif = vif; | ||
650 | 722 | ||
651 | skb->pkt_type = IEEE80211_DELBA_MSG; | 723 | skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP; |
652 | skb_queue_tail(&local->skb_queue, skb); | 724 | skb_queue_tail(&sdata->skb_queue, skb); |
653 | tasklet_schedule(&local->tasklet); | 725 | ieee80211_queue_work(&local->hw, &sdata->work); |
654 | } | 726 | } |
655 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe); | 727 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe); |
656 | 728 | ||
@@ -660,40 +732,40 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, | |||
660 | struct ieee80211_mgmt *mgmt, | 732 | struct ieee80211_mgmt *mgmt, |
661 | size_t len) | 733 | size_t len) |
662 | { | 734 | { |
735 | struct tid_ampdu_tx *tid_tx; | ||
663 | u16 capab, tid; | 736 | u16 capab, tid; |
664 | u8 *state; | ||
665 | 737 | ||
666 | capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); | 738 | capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); |
667 | tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; | 739 | tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; |
668 | 740 | ||
669 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | 741 | mutex_lock(&sta->ampdu_mlme.mtx); |
670 | |||
671 | spin_lock_bh(&sta->lock); | ||
672 | 742 | ||
673 | if (!(*state & HT_ADDBA_REQUESTED_MSK)) | 743 | tid_tx = sta->ampdu_mlme.tid_tx[tid]; |
744 | if (!tid_tx) | ||
674 | goto out; | 745 | goto out; |
675 | 746 | ||
676 | if (mgmt->u.action.u.addba_resp.dialog_token != | 747 | if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) { |
677 | sta->ampdu_mlme.tid_tx[tid]->dialog_token) { | ||
678 | #ifdef CONFIG_MAC80211_HT_DEBUG | 748 | #ifdef CONFIG_MAC80211_HT_DEBUG |
679 | printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid); | 749 | printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid); |
680 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | 750 | #endif |
681 | goto out; | 751 | goto out; |
682 | } | 752 | } |
683 | 753 | ||
684 | del_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); | 754 | del_timer(&tid_tx->addba_resp_timer); |
685 | 755 | ||
686 | #ifdef CONFIG_MAC80211_HT_DEBUG | 756 | #ifdef CONFIG_MAC80211_HT_DEBUG |
687 | printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid); | 757 | printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid); |
688 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | 758 | #endif |
689 | 759 | ||
690 | if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) | 760 | if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) |
691 | == WLAN_STATUS_SUCCESS) { | 761 | == WLAN_STATUS_SUCCESS) { |
692 | u8 curstate = *state; | 762 | if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED, |
693 | 763 | &tid_tx->state)) { | |
694 | *state |= HT_ADDBA_RECEIVED_MSK; | 764 | /* ignore duplicate response */ |
765 | goto out; | ||
766 | } | ||
695 | 767 | ||
696 | if (*state != curstate && *state == HT_AGG_STATE_OPERATIONAL) | 768 | if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)) |
697 | ieee80211_agg_tx_operational(local, sta, tid); | 769 | ieee80211_agg_tx_operational(local, sta, tid); |
698 | 770 | ||
699 | sta->ampdu_mlme.addba_req_num[tid] = 0; | 771 | sta->ampdu_mlme.addba_req_num[tid] = 0; |
@@ -702,5 +774,5 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, | |||
702 | } | 774 | } |
703 | 775 | ||
704 | out: | 776 | out: |
705 | spin_unlock_bh(&sta->lock); | 777 | mutex_unlock(&sta->ampdu_mlme.mtx); |
706 | } | 778 | } |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index c7000a6ca379..003b6addf5fa 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -120,6 +120,9 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, | |||
120 | struct ieee80211_key *key; | 120 | struct ieee80211_key *key; |
121 | int err; | 121 | int err; |
122 | 122 | ||
123 | if (!netif_running(dev)) | ||
124 | return -ENETDOWN; | ||
125 | |||
123 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 126 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
124 | 127 | ||
125 | switch (params->cipher) { | 128 | switch (params->cipher) { |
@@ -145,7 +148,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, | |||
145 | if (!key) | 148 | if (!key) |
146 | return -ENOMEM; | 149 | return -ENOMEM; |
147 | 150 | ||
148 | rcu_read_lock(); | 151 | mutex_lock(&sdata->local->sta_mtx); |
149 | 152 | ||
150 | if (mac_addr) { | 153 | if (mac_addr) { |
151 | sta = sta_info_get_bss(sdata, mac_addr); | 154 | sta = sta_info_get_bss(sdata, mac_addr); |
@@ -160,7 +163,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev, | |||
160 | 163 | ||
161 | err = 0; | 164 | err = 0; |
162 | out_unlock: | 165 | out_unlock: |
163 | rcu_read_unlock(); | 166 | mutex_unlock(&sdata->local->sta_mtx); |
164 | 167 | ||
165 | return err; | 168 | return err; |
166 | } | 169 | } |
@@ -174,7 +177,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, | |||
174 | 177 | ||
175 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 178 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
176 | 179 | ||
177 | rcu_read_lock(); | 180 | mutex_lock(&sdata->local->sta_mtx); |
178 | 181 | ||
179 | if (mac_addr) { | 182 | if (mac_addr) { |
180 | ret = -ENOENT; | 183 | ret = -ENOENT; |
@@ -202,7 +205,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev, | |||
202 | 205 | ||
203 | ret = 0; | 206 | ret = 0; |
204 | out_unlock: | 207 | out_unlock: |
205 | rcu_read_unlock(); | 208 | mutex_unlock(&sdata->local->sta_mtx); |
206 | 209 | ||
207 | return ret; | 210 | return ret; |
208 | } | 211 | } |
@@ -305,15 +308,10 @@ static int ieee80211_config_default_key(struct wiphy *wiphy, | |||
305 | struct net_device *dev, | 308 | struct net_device *dev, |
306 | u8 key_idx) | 309 | u8 key_idx) |
307 | { | 310 | { |
308 | struct ieee80211_sub_if_data *sdata; | 311 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
309 | |||
310 | rcu_read_lock(); | ||
311 | 312 | ||
312 | sdata = IEEE80211_DEV_TO_SUB_IF(dev); | ||
313 | ieee80211_set_default_key(sdata, key_idx); | 313 | ieee80211_set_default_key(sdata, key_idx); |
314 | 314 | ||
315 | rcu_read_unlock(); | ||
316 | |||
317 | return 0; | 315 | return 0; |
318 | } | 316 | } |
319 | 317 | ||
@@ -1448,7 +1446,6 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, | |||
1448 | { | 1446 | { |
1449 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1447 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
1450 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1448 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
1451 | struct ieee80211_conf *conf = &local->hw.conf; | ||
1452 | 1449 | ||
1453 | if (sdata->vif.type != NL80211_IFTYPE_STATION) | 1450 | if (sdata->vif.type != NL80211_IFTYPE_STATION) |
1454 | return -EOPNOTSUPP; | 1451 | return -EOPNOTSUPP; |
@@ -1457,11 +1454,11 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, | |||
1457 | return -EOPNOTSUPP; | 1454 | return -EOPNOTSUPP; |
1458 | 1455 | ||
1459 | if (enabled == sdata->u.mgd.powersave && | 1456 | if (enabled == sdata->u.mgd.powersave && |
1460 | timeout == conf->dynamic_ps_forced_timeout) | 1457 | timeout == local->dynamic_ps_forced_timeout) |
1461 | return 0; | 1458 | return 0; |
1462 | 1459 | ||
1463 | sdata->u.mgd.powersave = enabled; | 1460 | sdata->u.mgd.powersave = enabled; |
1464 | conf->dynamic_ps_forced_timeout = timeout; | 1461 | local->dynamic_ps_forced_timeout = timeout; |
1465 | 1462 | ||
1466 | /* no change, but if automatic follow powersave */ | 1463 | /* no change, but if automatic follow powersave */ |
1467 | mutex_lock(&sdata->u.mgd.mtx); | 1464 | mutex_lock(&sdata->u.mgd.mtx); |
@@ -1554,10 +1551,58 @@ static int ieee80211_cancel_remain_on_channel(struct wiphy *wiphy, | |||
1554 | static int ieee80211_action(struct wiphy *wiphy, struct net_device *dev, | 1551 | static int ieee80211_action(struct wiphy *wiphy, struct net_device *dev, |
1555 | struct ieee80211_channel *chan, | 1552 | struct ieee80211_channel *chan, |
1556 | enum nl80211_channel_type channel_type, | 1553 | enum nl80211_channel_type channel_type, |
1554 | bool channel_type_valid, | ||
1557 | const u8 *buf, size_t len, u64 *cookie) | 1555 | const u8 *buf, size_t len, u64 *cookie) |
1558 | { | 1556 | { |
1559 | return ieee80211_mgd_action(IEEE80211_DEV_TO_SUB_IF(dev), chan, | 1557 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
1560 | channel_type, buf, len, cookie); | 1558 | struct ieee80211_local *local = sdata->local; |
1559 | struct sk_buff *skb; | ||
1560 | struct sta_info *sta; | ||
1561 | const struct ieee80211_mgmt *mgmt = (void *)buf; | ||
1562 | u32 flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX | | ||
1563 | IEEE80211_TX_CTL_REQ_TX_STATUS; | ||
1564 | |||
1565 | /* Check that we are on the requested channel for transmission */ | ||
1566 | if (chan != local->tmp_channel && | ||
1567 | chan != local->oper_channel) | ||
1568 | return -EBUSY; | ||
1569 | if (channel_type_valid && | ||
1570 | (channel_type != local->tmp_channel_type && | ||
1571 | channel_type != local->_oper_channel_type)) | ||
1572 | return -EBUSY; | ||
1573 | |||
1574 | switch (sdata->vif.type) { | ||
1575 | case NL80211_IFTYPE_ADHOC: | ||
1576 | if (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC) | ||
1577 | break; | ||
1578 | rcu_read_lock(); | ||
1579 | sta = sta_info_get(sdata, mgmt->da); | ||
1580 | rcu_read_unlock(); | ||
1581 | if (!sta) | ||
1582 | return -ENOLINK; | ||
1583 | break; | ||
1584 | case NL80211_IFTYPE_STATION: | ||
1585 | if (!(sdata->u.mgd.flags & IEEE80211_STA_MFP_ENABLED)) | ||
1586 | flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; | ||
1587 | break; | ||
1588 | default: | ||
1589 | return -EOPNOTSUPP; | ||
1590 | } | ||
1591 | |||
1592 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + len); | ||
1593 | if (!skb) | ||
1594 | return -ENOMEM; | ||
1595 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
1596 | |||
1597 | memcpy(skb_put(skb, len), buf, len); | ||
1598 | |||
1599 | IEEE80211_SKB_CB(skb)->flags = flags; | ||
1600 | |||
1601 | skb->dev = sdata->dev; | ||
1602 | ieee80211_tx_skb(sdata, skb); | ||
1603 | |||
1604 | *cookie = (unsigned long) skb; | ||
1605 | return 0; | ||
1561 | } | 1606 | } |
1562 | 1607 | ||
1563 | struct cfg80211_ops mac80211_config_ops = { | 1608 | struct cfg80211_ops mac80211_config_ops = { |
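The new ieee80211_action() above validates the channel and interface type, copies the caller's frame into a buffer with driver headroom in front, tags it with the TX-status/no-encrypt flags, transmits it, and returns the skb pointer as the cookie for later status matching. Below is a compilable userspace sketch of that allocate-with-headroom, cookie-by-pointer shape; struct frame, HEADROOM and tx_mgmt() are inventions for the illustration, not mac80211 API.

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define HEADROOM 64   /* stand-in for hw.extra_tx_headroom */

    struct frame {
        uint32_t flags;
        size_t len;
        unsigned char data[];
    };

    /*
     * Copy the caller's frame into a fresh buffer that leaves HEADROOM
     * bytes in front for the driver, and hand back the buffer address as
     * an opaque cookie so TX status can later be matched to this frame.
     */
    static int tx_mgmt(const void *buf, size_t len, uint32_t flags,
                       uint64_t *cookie)
    {
        struct frame *f = malloc(sizeof(*f) + HEADROOM + len);

        if (!f)
            return -ENOMEM;
        f->flags = flags;
        f->len = len;
        memcpy(f->data + HEADROOM, buf, len);
        /* ...hand f to the TX queue here; ownership moves with it... */
        *cookie = (uintptr_t)f;
        return 0;
    }

    int main(void)
    {
        const unsigned char action[] = { 0x04, 0x0a };   /* arbitrary payload */
        uint64_t cookie;

        if (tx_mgmt(action, sizeof(action), 0x1, &cookie) == 0)
            printf("queued, cookie=%#llx\n", (unsigned long long)cookie);
        return 0;
    }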
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 637929b65ccc..a694c593ff6a 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c | |||
@@ -307,9 +307,6 @@ static const struct file_operations queues_ops = { | |||
307 | 307 | ||
308 | /* statistics stuff */ | 308 | /* statistics stuff */ |
309 | 309 | ||
310 | #define DEBUGFS_STATS_FILE(name, buflen, fmt, value...) \ | ||
311 | DEBUGFS_READONLY_FILE(stats_ ##name, buflen, fmt, ##value) | ||
312 | |||
313 | static ssize_t format_devstat_counter(struct ieee80211_local *local, | 310 | static ssize_t format_devstat_counter(struct ieee80211_local *local, |
314 | char __user *userbuf, | 311 | char __user *userbuf, |
315 | size_t count, loff_t *ppos, | 312 | size_t count, loff_t *ppos, |
@@ -351,75 +348,16 @@ static const struct file_operations stats_ ##name## _ops = { \ | |||
351 | .open = mac80211_open_file_generic, \ | 348 | .open = mac80211_open_file_generic, \ |
352 | }; | 349 | }; |
353 | 350 | ||
354 | #define DEBUGFS_STATS_ADD(name) \ | 351 | #define DEBUGFS_STATS_ADD(name, field) \ |
352 | debugfs_create_u32(#name, 0400, statsd, (u32 *) &field); | ||
353 | #define DEBUGFS_DEVSTATS_ADD(name) \ | ||
355 | debugfs_create_file(#name, 0400, statsd, local, &stats_ ##name## _ops); | 354 | debugfs_create_file(#name, 0400, statsd, local, &stats_ ##name## _ops); |
356 | 355 | ||
357 | DEBUGFS_STATS_FILE(transmitted_fragment_count, 20, "%u", | ||
358 | local->dot11TransmittedFragmentCount); | ||
359 | DEBUGFS_STATS_FILE(multicast_transmitted_frame_count, 20, "%u", | ||
360 | local->dot11MulticastTransmittedFrameCount); | ||
361 | DEBUGFS_STATS_FILE(failed_count, 20, "%u", | ||
362 | local->dot11FailedCount); | ||
363 | DEBUGFS_STATS_FILE(retry_count, 20, "%u", | ||
364 | local->dot11RetryCount); | ||
365 | DEBUGFS_STATS_FILE(multiple_retry_count, 20, "%u", | ||
366 | local->dot11MultipleRetryCount); | ||
367 | DEBUGFS_STATS_FILE(frame_duplicate_count, 20, "%u", | ||
368 | local->dot11FrameDuplicateCount); | ||
369 | DEBUGFS_STATS_FILE(received_fragment_count, 20, "%u", | ||
370 | local->dot11ReceivedFragmentCount); | ||
371 | DEBUGFS_STATS_FILE(multicast_received_frame_count, 20, "%u", | ||
372 | local->dot11MulticastReceivedFrameCount); | ||
373 | DEBUGFS_STATS_FILE(transmitted_frame_count, 20, "%u", | ||
374 | local->dot11TransmittedFrameCount); | ||
375 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS | ||
376 | DEBUGFS_STATS_FILE(tx_handlers_drop, 20, "%u", | ||
377 | local->tx_handlers_drop); | ||
378 | DEBUGFS_STATS_FILE(tx_handlers_queued, 20, "%u", | ||
379 | local->tx_handlers_queued); | ||
380 | DEBUGFS_STATS_FILE(tx_handlers_drop_unencrypted, 20, "%u", | ||
381 | local->tx_handlers_drop_unencrypted); | ||
382 | DEBUGFS_STATS_FILE(tx_handlers_drop_fragment, 20, "%u", | ||
383 | local->tx_handlers_drop_fragment); | ||
384 | DEBUGFS_STATS_FILE(tx_handlers_drop_wep, 20, "%u", | ||
385 | local->tx_handlers_drop_wep); | ||
386 | DEBUGFS_STATS_FILE(tx_handlers_drop_not_assoc, 20, "%u", | ||
387 | local->tx_handlers_drop_not_assoc); | ||
388 | DEBUGFS_STATS_FILE(tx_handlers_drop_unauth_port, 20, "%u", | ||
389 | local->tx_handlers_drop_unauth_port); | ||
390 | DEBUGFS_STATS_FILE(rx_handlers_drop, 20, "%u", | ||
391 | local->rx_handlers_drop); | ||
392 | DEBUGFS_STATS_FILE(rx_handlers_queued, 20, "%u", | ||
393 | local->rx_handlers_queued); | ||
394 | DEBUGFS_STATS_FILE(rx_handlers_drop_nullfunc, 20, "%u", | ||
395 | local->rx_handlers_drop_nullfunc); | ||
396 | DEBUGFS_STATS_FILE(rx_handlers_drop_defrag, 20, "%u", | ||
397 | local->rx_handlers_drop_defrag); | ||
398 | DEBUGFS_STATS_FILE(rx_handlers_drop_short, 20, "%u", | ||
399 | local->rx_handlers_drop_short); | ||
400 | DEBUGFS_STATS_FILE(rx_handlers_drop_passive_scan, 20, "%u", | ||
401 | local->rx_handlers_drop_passive_scan); | ||
402 | DEBUGFS_STATS_FILE(tx_expand_skb_head, 20, "%u", | ||
403 | local->tx_expand_skb_head); | ||
404 | DEBUGFS_STATS_FILE(tx_expand_skb_head_cloned, 20, "%u", | ||
405 | local->tx_expand_skb_head_cloned); | ||
406 | DEBUGFS_STATS_FILE(rx_expand_skb_head, 20, "%u", | ||
407 | local->rx_expand_skb_head); | ||
408 | DEBUGFS_STATS_FILE(rx_expand_skb_head2, 20, "%u", | ||
409 | local->rx_expand_skb_head2); | ||
410 | DEBUGFS_STATS_FILE(rx_handlers_fragments, 20, "%u", | ||
411 | local->rx_handlers_fragments); | ||
412 | DEBUGFS_STATS_FILE(tx_status_drop, 20, "%u", | ||
413 | local->tx_status_drop); | ||
414 | |||
415 | #endif | ||
416 | |||
417 | DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount); | 356 | DEBUGFS_DEVSTATS_FILE(dot11ACKFailureCount); |
418 | DEBUGFS_DEVSTATS_FILE(dot11RTSFailureCount); | 357 | DEBUGFS_DEVSTATS_FILE(dot11RTSFailureCount); |
419 | DEBUGFS_DEVSTATS_FILE(dot11FCSErrorCount); | 358 | DEBUGFS_DEVSTATS_FILE(dot11FCSErrorCount); |
420 | DEBUGFS_DEVSTATS_FILE(dot11RTSSuccessCount); | 359 | DEBUGFS_DEVSTATS_FILE(dot11RTSSuccessCount); |
421 | 360 | ||
422 | |||
423 | void debugfs_hw_add(struct ieee80211_local *local) | 361 | void debugfs_hw_add(struct ieee80211_local *local) |
424 | { | 362 | { |
425 | struct dentry *phyd = local->hw.wiphy->debugfsdir; | 363 | struct dentry *phyd = local->hw.wiphy->debugfsdir; |
@@ -448,38 +386,60 @@ void debugfs_hw_add(struct ieee80211_local *local) | |||
448 | if (!statsd) | 386 | if (!statsd) |
449 | return; | 387 | return; |
450 | 388 | ||
451 | DEBUGFS_STATS_ADD(transmitted_fragment_count); | 389 | DEBUGFS_STATS_ADD(transmitted_fragment_count, |
452 | DEBUGFS_STATS_ADD(multicast_transmitted_frame_count); | 390 | local->dot11TransmittedFragmentCount); |
453 | DEBUGFS_STATS_ADD(failed_count); | 391 | DEBUGFS_STATS_ADD(multicast_transmitted_frame_count, |
454 | DEBUGFS_STATS_ADD(retry_count); | 392 | local->dot11MulticastTransmittedFrameCount); |
455 | DEBUGFS_STATS_ADD(multiple_retry_count); | 393 | DEBUGFS_STATS_ADD(failed_count, local->dot11FailedCount); |
456 | DEBUGFS_STATS_ADD(frame_duplicate_count); | 394 | DEBUGFS_STATS_ADD(retry_count, local->dot11RetryCount); |
457 | DEBUGFS_STATS_ADD(received_fragment_count); | 395 | DEBUGFS_STATS_ADD(multiple_retry_count, |
458 | DEBUGFS_STATS_ADD(multicast_received_frame_count); | 396 | local->dot11MultipleRetryCount); |
459 | DEBUGFS_STATS_ADD(transmitted_frame_count); | 397 | DEBUGFS_STATS_ADD(frame_duplicate_count, |
398 | local->dot11FrameDuplicateCount); | ||
399 | DEBUGFS_STATS_ADD(received_fragment_count, | ||
400 | local->dot11ReceivedFragmentCount); | ||
401 | DEBUGFS_STATS_ADD(multicast_received_frame_count, | ||
402 | local->dot11MulticastReceivedFrameCount); | ||
403 | DEBUGFS_STATS_ADD(transmitted_frame_count, | ||
404 | local->dot11TransmittedFrameCount); | ||
460 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS | 405 | #ifdef CONFIG_MAC80211_DEBUG_COUNTERS |
461 | DEBUGFS_STATS_ADD(tx_handlers_drop); | 406 | DEBUGFS_STATS_ADD(tx_handlers_drop, local->tx_handlers_drop); |
462 | DEBUGFS_STATS_ADD(tx_handlers_queued); | 407 | DEBUGFS_STATS_ADD(tx_handlers_queued, local->tx_handlers_queued); |
463 | DEBUGFS_STATS_ADD(tx_handlers_drop_unencrypted); | 408 | DEBUGFS_STATS_ADD(tx_handlers_drop_unencrypted, |
464 | DEBUGFS_STATS_ADD(tx_handlers_drop_fragment); | 409 | local->tx_handlers_drop_unencrypted); |
465 | DEBUGFS_STATS_ADD(tx_handlers_drop_wep); | 410 | DEBUGFS_STATS_ADD(tx_handlers_drop_fragment, |
466 | DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc); | 411 | local->tx_handlers_drop_fragment); |
467 | DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port); | 412 | DEBUGFS_STATS_ADD(tx_handlers_drop_wep, |
468 | DEBUGFS_STATS_ADD(rx_handlers_drop); | 413 | local->tx_handlers_drop_wep); |
469 | DEBUGFS_STATS_ADD(rx_handlers_queued); | 414 | DEBUGFS_STATS_ADD(tx_handlers_drop_not_assoc, |
470 | DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc); | 415 | local->tx_handlers_drop_not_assoc); |
471 | DEBUGFS_STATS_ADD(rx_handlers_drop_defrag); | 416 | DEBUGFS_STATS_ADD(tx_handlers_drop_unauth_port, |
472 | DEBUGFS_STATS_ADD(rx_handlers_drop_short); | 417 | local->tx_handlers_drop_unauth_port); |
473 | DEBUGFS_STATS_ADD(rx_handlers_drop_passive_scan); | 418 | DEBUGFS_STATS_ADD(rx_handlers_drop, local->rx_handlers_drop); |
474 | DEBUGFS_STATS_ADD(tx_expand_skb_head); | 419 | DEBUGFS_STATS_ADD(rx_handlers_queued, local->rx_handlers_queued); |
475 | DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned); | 420 | DEBUGFS_STATS_ADD(rx_handlers_drop_nullfunc, |
476 | DEBUGFS_STATS_ADD(rx_expand_skb_head); | 421 | local->rx_handlers_drop_nullfunc); |
477 | DEBUGFS_STATS_ADD(rx_expand_skb_head2); | 422 | DEBUGFS_STATS_ADD(rx_handlers_drop_defrag, |
478 | DEBUGFS_STATS_ADD(rx_handlers_fragments); | 423 | local->rx_handlers_drop_defrag); |
479 | DEBUGFS_STATS_ADD(tx_status_drop); | 424 | DEBUGFS_STATS_ADD(rx_handlers_drop_short, |
425 | local->rx_handlers_drop_short); | ||
426 | DEBUGFS_STATS_ADD(rx_handlers_drop_passive_scan, | ||
427 | local->rx_handlers_drop_passive_scan); | ||
428 | DEBUGFS_STATS_ADD(tx_expand_skb_head, | ||
429 | local->tx_expand_skb_head); | ||
430 | DEBUGFS_STATS_ADD(tx_expand_skb_head_cloned, | ||
431 | local->tx_expand_skb_head_cloned); | ||
432 | DEBUGFS_STATS_ADD(rx_expand_skb_head, | ||
433 | local->rx_expand_skb_head); | ||
434 | DEBUGFS_STATS_ADD(rx_expand_skb_head2, | ||
435 | local->rx_expand_skb_head2); | ||
436 | DEBUGFS_STATS_ADD(rx_handlers_fragments, | ||
437 | local->rx_handlers_fragments); | ||
438 | DEBUGFS_STATS_ADD(tx_status_drop, | ||
439 | local->tx_status_drop); | ||
480 | #endif | 440 | #endif |
481 | DEBUGFS_STATS_ADD(dot11ACKFailureCount); | 441 | DEBUGFS_DEVSTATS_ADD(dot11ACKFailureCount); |
482 | DEBUGFS_STATS_ADD(dot11RTSFailureCount); | 442 | DEBUGFS_DEVSTATS_ADD(dot11RTSFailureCount); |
483 | DEBUGFS_STATS_ADD(dot11FCSErrorCount); | 443 | DEBUGFS_DEVSTATS_ADD(dot11FCSErrorCount); |
484 | DEBUGFS_STATS_ADD(dot11RTSSuccessCount); | 444 | DEBUGFS_DEVSTATS_ADD(dot11RTSSuccessCount); |
485 | } | 445 | } |
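The debugfs rework above drops the per-counter DEBUGFS_STATS_FILE read handlers and instead exports each u32 counter directly with debugfs_create_u32(), so DEBUGFS_STATS_ADD() now takes the counter field as a second argument. A small userspace analogue of that pattern, with expose_u32() and the static table standing in for debugfs:

    #include <stdint.h>
    #include <stdio.h>

    struct counters {
        uint32_t tx_frames;
        uint32_t rx_frags;
    };

    /* Toy stand-in for debugfs_create_u32(): remember name and address. */
    struct stat_entry { const char *name; const uint32_t *val; };
    static struct stat_entry table[16];
    static int n_entries;

    static void expose_u32(const char *name, const uint32_t *val)
    {
        table[n_entries++] = (struct stat_entry){ name, val };
    }

    /* One line per counter instead of a hand-written read() handler each. */
    #define STATS_ADD(name, field) expose_u32(#name, &(field))

    int main(void)
    {
        struct counters c = { .tx_frames = 3, .rx_frags = 7 };

        STATS_ADD(transmitted_frame_count, c.tx_frames);
        STATS_ADD(received_fragment_count, c.rx_frags);

        for (int i = 0; i < n_entries; i++)
            printf("%s = %u\n", table[i].name, *table[i].val);
        return 0;
    }

The exported file then always reads the live counter value, which is why the fixed-size snprintf buffers and per-file file_operations boilerplate can go away.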
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c index 97c9e46e859e..fa5e76e658ef 100644 --- a/net/mac80211/debugfs_key.c +++ b/net/mac80211/debugfs_key.c | |||
@@ -143,7 +143,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf, | |||
143 | len = p - buf; | 143 | len = p - buf; |
144 | break; | 144 | break; |
145 | case ALG_CCMP: | 145 | case ALG_CCMP: |
146 | for (i = 0; i < NUM_RX_DATA_QUEUES; i++) { | 146 | for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++) { |
147 | rpn = key->u.ccmp.rx_pn[i]; | 147 | rpn = key->u.ccmp.rx_pn[i]; |
148 | p += scnprintf(p, sizeof(buf)+buf-p, | 148 | p += scnprintf(p, sizeof(buf)+buf-p, |
149 | "%02x%02x%02x%02x%02x%02x\n", | 149 | "%02x%02x%02x%02x%02x%02x\n", |
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index e763f1529ddb..76839d4dfaac 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c | |||
@@ -30,7 +30,6 @@ static ssize_t sta_ ##name## _read(struct file *file, \ | |||
30 | } | 30 | } |
31 | #define STA_READ_D(name, field) STA_READ(name, 20, field, "%d\n") | 31 | #define STA_READ_D(name, field) STA_READ(name, 20, field, "%d\n") |
32 | #define STA_READ_U(name, field) STA_READ(name, 20, field, "%u\n") | 32 | #define STA_READ_U(name, field) STA_READ(name, 20, field, "%u\n") |
33 | #define STA_READ_LU(name, field) STA_READ(name, 20, field, "%lu\n") | ||
34 | #define STA_READ_S(name, field) STA_READ(name, 20, field, "%s\n") | 33 | #define STA_READ_S(name, field) STA_READ(name, 20, field, "%s\n") |
35 | 34 | ||
36 | #define STA_OPS(name) \ | 35 | #define STA_OPS(name) \ |
@@ -52,19 +51,7 @@ static const struct file_operations sta_ ##name## _ops = { \ | |||
52 | 51 | ||
53 | STA_FILE(aid, sta.aid, D); | 52 | STA_FILE(aid, sta.aid, D); |
54 | STA_FILE(dev, sdata->name, S); | 53 | STA_FILE(dev, sdata->name, S); |
55 | STA_FILE(rx_packets, rx_packets, LU); | ||
56 | STA_FILE(tx_packets, tx_packets, LU); | ||
57 | STA_FILE(rx_bytes, rx_bytes, LU); | ||
58 | STA_FILE(tx_bytes, tx_bytes, LU); | ||
59 | STA_FILE(rx_duplicates, num_duplicates, LU); | ||
60 | STA_FILE(rx_fragments, rx_fragments, LU); | ||
61 | STA_FILE(rx_dropped, rx_dropped, LU); | ||
62 | STA_FILE(tx_fragments, tx_fragments, LU); | ||
63 | STA_FILE(tx_filtered, tx_filtered_count, LU); | ||
64 | STA_FILE(tx_retry_failed, tx_retry_failed, LU); | ||
65 | STA_FILE(tx_retry_count, tx_retry_count, LU); | ||
66 | STA_FILE(last_signal, last_signal, D); | 54 | STA_FILE(last_signal, last_signal, D); |
67 | STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU); | ||
68 | 55 | ||
69 | static ssize_t sta_flags_read(struct file *file, char __user *userbuf, | 56 | static ssize_t sta_flags_read(struct file *file, char __user *userbuf, |
70 | size_t count, loff_t *ppos) | 57 | size_t count, loff_t *ppos) |
@@ -134,28 +121,25 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, | |||
134 | p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n", | 121 | p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n", |
135 | sta->ampdu_mlme.dialog_token_allocator + 1); | 122 | sta->ampdu_mlme.dialog_token_allocator + 1); |
136 | p += scnprintf(p, sizeof(buf) + buf - p, | 123 | p += scnprintf(p, sizeof(buf) + buf - p, |
137 | "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tSSN\tpending\n"); | 124 | "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tpending\n"); |
138 | for (i = 0; i < STA_TID_NUM; i++) { | 125 | for (i = 0; i < STA_TID_NUM; i++) { |
139 | p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i); | 126 | p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i); |
140 | p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", | 127 | p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", |
141 | sta->ampdu_mlme.tid_active_rx[i]); | 128 | !!sta->ampdu_mlme.tid_rx[i]); |
142 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", | 129 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", |
143 | sta->ampdu_mlme.tid_active_rx[i] ? | 130 | sta->ampdu_mlme.tid_rx[i] ? |
144 | sta->ampdu_mlme.tid_rx[i]->dialog_token : 0); | 131 | sta->ampdu_mlme.tid_rx[i]->dialog_token : 0); |
145 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x", | 132 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x", |
146 | sta->ampdu_mlme.tid_active_rx[i] ? | 133 | sta->ampdu_mlme.tid_rx[i] ? |
147 | sta->ampdu_mlme.tid_rx[i]->ssn : 0); | 134 | sta->ampdu_mlme.tid_rx[i]->ssn : 0); |
148 | 135 | ||
149 | p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", | 136 | p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", |
150 | sta->ampdu_mlme.tid_state_tx[i]); | 137 | !!sta->ampdu_mlme.tid_tx[i]); |
151 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", | 138 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", |
152 | sta->ampdu_mlme.tid_state_tx[i] ? | 139 | sta->ampdu_mlme.tid_tx[i] ? |
153 | sta->ampdu_mlme.tid_tx[i]->dialog_token : 0); | 140 | sta->ampdu_mlme.tid_tx[i]->dialog_token : 0); |
154 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x", | ||
155 | sta->ampdu_mlme.tid_state_tx[i] ? | ||
156 | sta->ampdu_mlme.tid_tx[i]->ssn : 0); | ||
157 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d", | 141 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d", |
158 | sta->ampdu_mlme.tid_state_tx[i] ? | 142 | sta->ampdu_mlme.tid_tx[i] ? |
159 | skb_queue_len(&sta->ampdu_mlme.tid_tx[i]->pending) : 0); | 143 | skb_queue_len(&sta->ampdu_mlme.tid_tx[i]->pending) : 0); |
160 | p += scnprintf(p, sizeof(buf) + buf - p, "\n"); | 144 | p += scnprintf(p, sizeof(buf) + buf - p, "\n"); |
161 | } | 145 | } |
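In the agg_status dump above, the separate tid_active_rx[]/tid_state_tx[] arrays are gone: the presence of the per-TID session pointer itself now means "active", hence the !! tests. A tiny sketch of that convention (names invented):

    #include <stdio.h>

    #define NUM_TIDS 16

    struct tid_rx { unsigned int dialog_token; unsigned int ssn; };

    /* A NULL slot means "no session"; the pointer doubles as the flag. */
    static struct tid_rx *tid_rx[NUM_TIDS];

    static void dump(void)
    {
        for (int i = 0; i < NUM_TIDS; i++)
            printf("%02d active:%d dtkn:%#.2x ssn:%#.3x\n", i,
                   !!tid_rx[i],
                   tid_rx[i] ? tid_rx[i]->dialog_token : 0,
                   tid_rx[i] ? tid_rx[i]->ssn : 0);
    }

    int main(void)
    {
        struct tid_rx session = { .dialog_token = 0x2a, .ssn = 0x10 };

        tid_rx[5] = &session;
        dump();
        return 0;
    }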
@@ -210,8 +194,7 @@ static ssize_t sta_agg_status_write(struct file *file, const char __user *userbu | |||
210 | if (start) | 194 | if (start) |
211 | ret = ieee80211_start_tx_ba_session(&sta->sta, tid); | 195 | ret = ieee80211_start_tx_ba_session(&sta->sta, tid); |
212 | else | 196 | else |
213 | ret = ieee80211_stop_tx_ba_session(&sta->sta, tid, | 197 | ret = ieee80211_stop_tx_ba_session(&sta->sta, tid); |
214 | WLAN_BACK_RECIPIENT); | ||
215 | } else { | 198 | } else { |
216 | __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, 3); | 199 | __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, 3); |
217 | ret = 0; | 200 | ret = 0; |
@@ -307,6 +290,13 @@ STA_OPS(ht_capa); | |||
307 | debugfs_create_file(#name, 0400, \ | 290 | debugfs_create_file(#name, 0400, \ |
308 | sta->debugfs.dir, sta, &sta_ ##name## _ops); | 291 | sta->debugfs.dir, sta, &sta_ ##name## _ops); |
309 | 292 | ||
293 | #define DEBUGFS_ADD_COUNTER(name, field) \ | ||
294 | if (sizeof(sta->field) == sizeof(u32)) \ | ||
295 | debugfs_create_u32(#name, 0400, sta->debugfs.dir, \ | ||
296 | (u32 *) &sta->field); \ | ||
297 | else \ | ||
298 | debugfs_create_u64(#name, 0400, sta->debugfs.dir, \ | ||
299 | (u64 *) &sta->field); | ||
310 | 300 | ||
311 | void ieee80211_sta_debugfs_add(struct sta_info *sta) | 301 | void ieee80211_sta_debugfs_add(struct sta_info *sta) |
312 | { | 302 | { |
@@ -339,20 +329,21 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta) | |||
339 | DEBUGFS_ADD(last_seq_ctrl); | 329 | DEBUGFS_ADD(last_seq_ctrl); |
340 | DEBUGFS_ADD(agg_status); | 330 | DEBUGFS_ADD(agg_status); |
341 | DEBUGFS_ADD(dev); | 331 | DEBUGFS_ADD(dev); |
342 | DEBUGFS_ADD(rx_packets); | ||
343 | DEBUGFS_ADD(tx_packets); | ||
344 | DEBUGFS_ADD(rx_bytes); | ||
345 | DEBUGFS_ADD(tx_bytes); | ||
346 | DEBUGFS_ADD(rx_duplicates); | ||
347 | DEBUGFS_ADD(rx_fragments); | ||
348 | DEBUGFS_ADD(rx_dropped); | ||
349 | DEBUGFS_ADD(tx_fragments); | ||
350 | DEBUGFS_ADD(tx_filtered); | ||
351 | DEBUGFS_ADD(tx_retry_failed); | ||
352 | DEBUGFS_ADD(tx_retry_count); | ||
353 | DEBUGFS_ADD(last_signal); | 332 | DEBUGFS_ADD(last_signal); |
354 | DEBUGFS_ADD(wep_weak_iv_count); | ||
355 | DEBUGFS_ADD(ht_capa); | 333 | DEBUGFS_ADD(ht_capa); |
334 | |||
335 | DEBUGFS_ADD_COUNTER(rx_packets, rx_packets); | ||
336 | DEBUGFS_ADD_COUNTER(tx_packets, tx_packets); | ||
337 | DEBUGFS_ADD_COUNTER(rx_bytes, rx_bytes); | ||
338 | DEBUGFS_ADD_COUNTER(tx_bytes, tx_bytes); | ||
339 | DEBUGFS_ADD_COUNTER(rx_duplicates, num_duplicates); | ||
340 | DEBUGFS_ADD_COUNTER(rx_fragments, rx_fragments); | ||
341 | DEBUGFS_ADD_COUNTER(rx_dropped, rx_dropped); | ||
342 | DEBUGFS_ADD_COUNTER(tx_fragments, tx_fragments); | ||
343 | DEBUGFS_ADD_COUNTER(tx_filtered, tx_filtered_count); | ||
344 | DEBUGFS_ADD_COUNTER(tx_retry_failed, tx_retry_failed); | ||
345 | DEBUGFS_ADD_COUNTER(tx_retry_count, tx_retry_count); | ||
346 | DEBUGFS_ADD_COUNTER(wep_weak_iv_count, wep_weak_iv_count); | ||
356 | } | 347 | } |
357 | 348 | ||
358 | void ieee80211_sta_debugfs_remove(struct sta_info *sta) | 349 | void ieee80211_sta_debugfs_remove(struct sta_info *sta) |
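DEBUGFS_ADD_COUNTER above picks debugfs_create_u32() or debugfs_create_u64() from the size of the station field, which is what lets the long list of hand-rolled STA_FILE/STA_READ_LU readers go away. A compilable userspace sketch of that sizeof dispatch; add_u32()/add_u64() and the struct layout are stand-ins, not the debugfs API:

    #include <stdint.h>
    #include <stdio.h>

    struct sta_info {
        uint32_t rx_packets;
        uint64_t rx_bytes;
    };

    static void add_u32(const char *name, uint32_t *v)
    {
        printf("u32 %s = %u\n", name, *v);
    }

    static void add_u64(const char *name, uint64_t *v)
    {
        printf("u64 %s = %llu\n", name, (unsigned long long)*v);
    }

    /* Choose the exporter from the field's size, as the macro does. */
    #define ADD_COUNTER(sta, name, field)                              \
        do {                                                           \
            if (sizeof((sta)->field) == sizeof(uint32_t))              \
                add_u32(#name, (uint32_t *)&(sta)->field);             \
            else                                                       \
                add_u64(#name, (uint64_t *)&(sta)->field);             \
        } while (0)

    int main(void)
    {
        struct sta_info sta = { .rx_packets = 12, .rx_bytes = 34567 };

        ADD_COUNTER(&sta, rx_packets, rx_packets);
        ADD_COUNTER(&sta, rx_bytes, rx_bytes);
        return 0;
    }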
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 9c1da0809160..c33317320eee 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h | |||
@@ -16,10 +16,11 @@ static inline int drv_start(struct ieee80211_local *local) | |||
16 | 16 | ||
17 | might_sleep(); | 17 | might_sleep(); |
18 | 18 | ||
19 | trace_drv_start(local); | ||
19 | local->started = true; | 20 | local->started = true; |
20 | smp_mb(); | 21 | smp_mb(); |
21 | ret = local->ops->start(&local->hw); | 22 | ret = local->ops->start(&local->hw); |
22 | trace_drv_start(local, ret); | 23 | trace_drv_return_int(local, ret); |
23 | return ret; | 24 | return ret; |
24 | } | 25 | } |
25 | 26 | ||
@@ -27,8 +28,9 @@ static inline void drv_stop(struct ieee80211_local *local) | |||
27 | { | 28 | { |
28 | might_sleep(); | 29 | might_sleep(); |
29 | 30 | ||
30 | local->ops->stop(&local->hw); | ||
31 | trace_drv_stop(local); | 31 | trace_drv_stop(local); |
32 | local->ops->stop(&local->hw); | ||
33 | trace_drv_return_void(local); | ||
32 | 34 | ||
33 | /* sync away all work on the tasklet before clearing started */ | 35 | /* sync away all work on the tasklet before clearing started */ |
34 | tasklet_disable(&local->tasklet); | 36 | tasklet_disable(&local->tasklet); |
@@ -46,8 +48,9 @@ static inline int drv_add_interface(struct ieee80211_local *local, | |||
46 | 48 | ||
47 | might_sleep(); | 49 | might_sleep(); |
48 | 50 | ||
51 | trace_drv_add_interface(local, vif_to_sdata(vif)); | ||
49 | ret = local->ops->add_interface(&local->hw, vif); | 52 | ret = local->ops->add_interface(&local->hw, vif); |
50 | trace_drv_add_interface(local, vif_to_sdata(vif), ret); | 53 | trace_drv_return_int(local, ret); |
51 | return ret; | 54 | return ret; |
52 | } | 55 | } |
53 | 56 | ||
@@ -56,8 +59,9 @@ static inline void drv_remove_interface(struct ieee80211_local *local, | |||
56 | { | 59 | { |
57 | might_sleep(); | 60 | might_sleep(); |
58 | 61 | ||
59 | local->ops->remove_interface(&local->hw, vif); | ||
60 | trace_drv_remove_interface(local, vif_to_sdata(vif)); | 62 | trace_drv_remove_interface(local, vif_to_sdata(vif)); |
63 | local->ops->remove_interface(&local->hw, vif); | ||
64 | trace_drv_return_void(local); | ||
61 | } | 65 | } |
62 | 66 | ||
63 | static inline int drv_config(struct ieee80211_local *local, u32 changed) | 67 | static inline int drv_config(struct ieee80211_local *local, u32 changed) |
@@ -66,8 +70,9 @@ static inline int drv_config(struct ieee80211_local *local, u32 changed) | |||
66 | 70 | ||
67 | might_sleep(); | 71 | might_sleep(); |
68 | 72 | ||
73 | trace_drv_config(local, changed); | ||
69 | ret = local->ops->config(&local->hw, changed); | 74 | ret = local->ops->config(&local->hw, changed); |
70 | trace_drv_config(local, changed, ret); | 75 | trace_drv_return_int(local, ret); |
71 | return ret; | 76 | return ret; |
72 | } | 77 | } |
73 | 78 | ||
@@ -78,9 +83,10 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local, | |||
78 | { | 83 | { |
79 | might_sleep(); | 84 | might_sleep(); |
80 | 85 | ||
86 | trace_drv_bss_info_changed(local, sdata, info, changed); | ||
81 | if (local->ops->bss_info_changed) | 87 | if (local->ops->bss_info_changed) |
82 | local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed); | 88 | local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed); |
83 | trace_drv_bss_info_changed(local, sdata, info, changed); | 89 | trace_drv_return_void(local); |
84 | } | 90 | } |
85 | 91 | ||
86 | static inline u64 drv_prepare_multicast(struct ieee80211_local *local, | 92 | static inline u64 drv_prepare_multicast(struct ieee80211_local *local, |
@@ -88,10 +94,12 @@ static inline u64 drv_prepare_multicast(struct ieee80211_local *local, | |||
88 | { | 94 | { |
89 | u64 ret = 0; | 95 | u64 ret = 0; |
90 | 96 | ||
97 | trace_drv_prepare_multicast(local, mc_list->count); | ||
98 | |||
91 | if (local->ops->prepare_multicast) | 99 | if (local->ops->prepare_multicast) |
92 | ret = local->ops->prepare_multicast(&local->hw, mc_list); | 100 | ret = local->ops->prepare_multicast(&local->hw, mc_list); |
93 | 101 | ||
94 | trace_drv_prepare_multicast(local, mc_list->count, ret); | 102 | trace_drv_return_u64(local, ret); |
95 | 103 | ||
96 | return ret; | 104 | return ret; |
97 | } | 105 | } |
@@ -103,19 +111,21 @@ static inline void drv_configure_filter(struct ieee80211_local *local, | |||
103 | { | 111 | { |
104 | might_sleep(); | 112 | might_sleep(); |
105 | 113 | ||
106 | local->ops->configure_filter(&local->hw, changed_flags, total_flags, | ||
107 | multicast); | ||
108 | trace_drv_configure_filter(local, changed_flags, total_flags, | 114 | trace_drv_configure_filter(local, changed_flags, total_flags, |
109 | multicast); | 115 | multicast); |
116 | local->ops->configure_filter(&local->hw, changed_flags, total_flags, | ||
117 | multicast); | ||
118 | trace_drv_return_void(local); | ||
110 | } | 119 | } |
111 | 120 | ||
112 | static inline int drv_set_tim(struct ieee80211_local *local, | 121 | static inline int drv_set_tim(struct ieee80211_local *local, |
113 | struct ieee80211_sta *sta, bool set) | 122 | struct ieee80211_sta *sta, bool set) |
114 | { | 123 | { |
115 | int ret = 0; | 124 | int ret = 0; |
125 | trace_drv_set_tim(local, sta, set); | ||
116 | if (local->ops->set_tim) | 126 | if (local->ops->set_tim) |
117 | ret = local->ops->set_tim(&local->hw, sta, set); | 127 | ret = local->ops->set_tim(&local->hw, sta, set); |
118 | trace_drv_set_tim(local, sta, set, ret); | 128 | trace_drv_return_int(local, ret); |
119 | return ret; | 129 | return ret; |
120 | } | 130 | } |
121 | 131 | ||
@@ -129,8 +139,9 @@ static inline int drv_set_key(struct ieee80211_local *local, | |||
129 | 139 | ||
130 | might_sleep(); | 140 | might_sleep(); |
131 | 141 | ||
142 | trace_drv_set_key(local, cmd, sdata, sta, key); | ||
132 | ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key); | 143 | ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key); |
133 | trace_drv_set_key(local, cmd, sdata, sta, key, ret); | 144 | trace_drv_return_int(local, ret); |
134 | return ret; | 145 | return ret; |
135 | } | 146 | } |
136 | 147 | ||
@@ -145,10 +156,11 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local, | |||
145 | if (sta) | 156 | if (sta) |
146 | ista = &sta->sta; | 157 | ista = &sta->sta; |
147 | 158 | ||
159 | trace_drv_update_tkip_key(local, sdata, conf, ista, iv32); | ||
148 | if (local->ops->update_tkip_key) | 160 | if (local->ops->update_tkip_key) |
149 | local->ops->update_tkip_key(&local->hw, &sdata->vif, conf, | 161 | local->ops->update_tkip_key(&local->hw, &sdata->vif, conf, |
150 | ista, iv32, phase1key); | 162 | ista, iv32, phase1key); |
151 | trace_drv_update_tkip_key(local, sdata, conf, ista, iv32); | 163 | trace_drv_return_void(local); |
152 | } | 164 | } |
153 | 165 | ||
154 | static inline int drv_hw_scan(struct ieee80211_local *local, | 166 | static inline int drv_hw_scan(struct ieee80211_local *local, |
@@ -159,8 +171,9 @@ static inline int drv_hw_scan(struct ieee80211_local *local, | |||
159 | 171 | ||
160 | might_sleep(); | 172 | might_sleep(); |
161 | 173 | ||
174 | trace_drv_hw_scan(local, sdata, req); | ||
162 | ret = local->ops->hw_scan(&local->hw, &sdata->vif, req); | 175 | ret = local->ops->hw_scan(&local->hw, &sdata->vif, req); |
163 | trace_drv_hw_scan(local, sdata, req, ret); | 176 | trace_drv_return_int(local, ret); |
164 | return ret; | 177 | return ret; |
165 | } | 178 | } |
166 | 179 | ||
@@ -168,18 +181,20 @@ static inline void drv_sw_scan_start(struct ieee80211_local *local) | |||
168 | { | 181 | { |
169 | might_sleep(); | 182 | might_sleep(); |
170 | 183 | ||
184 | trace_drv_sw_scan_start(local); | ||
171 | if (local->ops->sw_scan_start) | 185 | if (local->ops->sw_scan_start) |
172 | local->ops->sw_scan_start(&local->hw); | 186 | local->ops->sw_scan_start(&local->hw); |
173 | trace_drv_sw_scan_start(local); | 187 | trace_drv_return_void(local); |
174 | } | 188 | } |
175 | 189 | ||
176 | static inline void drv_sw_scan_complete(struct ieee80211_local *local) | 190 | static inline void drv_sw_scan_complete(struct ieee80211_local *local) |
177 | { | 191 | { |
178 | might_sleep(); | 192 | might_sleep(); |
179 | 193 | ||
194 | trace_drv_sw_scan_complete(local); | ||
180 | if (local->ops->sw_scan_complete) | 195 | if (local->ops->sw_scan_complete) |
181 | local->ops->sw_scan_complete(&local->hw); | 196 | local->ops->sw_scan_complete(&local->hw); |
182 | trace_drv_sw_scan_complete(local); | 197 | trace_drv_return_void(local); |
183 | } | 198 | } |
184 | 199 | ||
185 | static inline int drv_get_stats(struct ieee80211_local *local, | 200 | static inline int drv_get_stats(struct ieee80211_local *local, |
@@ -211,9 +226,10 @@ static inline int drv_set_rts_threshold(struct ieee80211_local *local, | |||
211 | 226 | ||
212 | might_sleep(); | 227 | might_sleep(); |
213 | 228 | ||
229 | trace_drv_set_rts_threshold(local, value); | ||
214 | if (local->ops->set_rts_threshold) | 230 | if (local->ops->set_rts_threshold) |
215 | ret = local->ops->set_rts_threshold(&local->hw, value); | 231 | ret = local->ops->set_rts_threshold(&local->hw, value); |
216 | trace_drv_set_rts_threshold(local, value, ret); | 232 | trace_drv_return_int(local, ret); |
217 | return ret; | 233 | return ret; |
218 | } | 234 | } |
219 | 235 | ||
@@ -223,12 +239,13 @@ static inline int drv_set_coverage_class(struct ieee80211_local *local, | |||
223 | int ret = 0; | 239 | int ret = 0; |
224 | might_sleep(); | 240 | might_sleep(); |
225 | 241 | ||
242 | trace_drv_set_coverage_class(local, value); | ||
226 | if (local->ops->set_coverage_class) | 243 | if (local->ops->set_coverage_class) |
227 | local->ops->set_coverage_class(&local->hw, value); | 244 | local->ops->set_coverage_class(&local->hw, value); |
228 | else | 245 | else |
229 | ret = -EOPNOTSUPP; | 246 | ret = -EOPNOTSUPP; |
230 | 247 | ||
231 | trace_drv_set_coverage_class(local, value, ret); | 248 | trace_drv_return_int(local, ret); |
232 | return ret; | 249 | return ret; |
233 | } | 250 | } |
234 | 251 | ||
@@ -237,9 +254,10 @@ static inline void drv_sta_notify(struct ieee80211_local *local, | |||
237 | enum sta_notify_cmd cmd, | 254 | enum sta_notify_cmd cmd, |
238 | struct ieee80211_sta *sta) | 255 | struct ieee80211_sta *sta) |
239 | { | 256 | { |
257 | trace_drv_sta_notify(local, sdata, cmd, sta); | ||
240 | if (local->ops->sta_notify) | 258 | if (local->ops->sta_notify) |
241 | local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta); | 259 | local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta); |
242 | trace_drv_sta_notify(local, sdata, cmd, sta); | 260 | trace_drv_return_void(local); |
243 | } | 261 | } |
244 | 262 | ||
245 | static inline int drv_sta_add(struct ieee80211_local *local, | 263 | static inline int drv_sta_add(struct ieee80211_local *local, |
@@ -250,13 +268,11 @@ static inline int drv_sta_add(struct ieee80211_local *local, | |||
250 | 268 | ||
251 | might_sleep(); | 269 | might_sleep(); |
252 | 270 | ||
271 | trace_drv_sta_add(local, sdata, sta); | ||
253 | if (local->ops->sta_add) | 272 | if (local->ops->sta_add) |
254 | ret = local->ops->sta_add(&local->hw, &sdata->vif, sta); | 273 | ret = local->ops->sta_add(&local->hw, &sdata->vif, sta); |
255 | else if (local->ops->sta_notify) | ||
256 | local->ops->sta_notify(&local->hw, &sdata->vif, | ||
257 | STA_NOTIFY_ADD, sta); | ||
258 | 274 | ||
259 | trace_drv_sta_add(local, sdata, sta, ret); | 275 | trace_drv_return_int(local, ret); |
260 | 276 | ||
261 | return ret; | 277 | return ret; |
262 | } | 278 | } |
@@ -267,13 +283,11 @@ static inline void drv_sta_remove(struct ieee80211_local *local, | |||
267 | { | 283 | { |
268 | might_sleep(); | 284 | might_sleep(); |
269 | 285 | ||
286 | trace_drv_sta_remove(local, sdata, sta); | ||
270 | if (local->ops->sta_remove) | 287 | if (local->ops->sta_remove) |
271 | local->ops->sta_remove(&local->hw, &sdata->vif, sta); | 288 | local->ops->sta_remove(&local->hw, &sdata->vif, sta); |
272 | else if (local->ops->sta_notify) | ||
273 | local->ops->sta_notify(&local->hw, &sdata->vif, | ||
274 | STA_NOTIFY_REMOVE, sta); | ||
275 | 289 | ||
276 | trace_drv_sta_remove(local, sdata, sta); | 290 | trace_drv_return_void(local); |
277 | } | 291 | } |
278 | 292 | ||
279 | static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue, | 293 | static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue, |
@@ -283,9 +297,10 @@ static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue, | |||
283 | 297 | ||
284 | might_sleep(); | 298 | might_sleep(); |
285 | 299 | ||
300 | trace_drv_conf_tx(local, queue, params); | ||
286 | if (local->ops->conf_tx) | 301 | if (local->ops->conf_tx) |
287 | ret = local->ops->conf_tx(&local->hw, queue, params); | 302 | ret = local->ops->conf_tx(&local->hw, queue, params); |
288 | trace_drv_conf_tx(local, queue, params, ret); | 303 | trace_drv_return_int(local, ret); |
289 | return ret; | 304 | return ret; |
290 | } | 305 | } |
291 | 306 | ||
@@ -295,9 +310,10 @@ static inline u64 drv_get_tsf(struct ieee80211_local *local) | |||
295 | 310 | ||
296 | might_sleep(); | 311 | might_sleep(); |
297 | 312 | ||
313 | trace_drv_get_tsf(local); | ||
298 | if (local->ops->get_tsf) | 314 | if (local->ops->get_tsf) |
299 | ret = local->ops->get_tsf(&local->hw); | 315 | ret = local->ops->get_tsf(&local->hw); |
300 | trace_drv_get_tsf(local, ret); | 316 | trace_drv_return_u64(local, ret); |
301 | return ret; | 317 | return ret; |
302 | } | 318 | } |
303 | 319 | ||
@@ -305,18 +321,20 @@ static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf) | |||
305 | { | 321 | { |
306 | might_sleep(); | 322 | might_sleep(); |
307 | 323 | ||
324 | trace_drv_set_tsf(local, tsf); | ||
308 | if (local->ops->set_tsf) | 325 | if (local->ops->set_tsf) |
309 | local->ops->set_tsf(&local->hw, tsf); | 326 | local->ops->set_tsf(&local->hw, tsf); |
310 | trace_drv_set_tsf(local, tsf); | 327 | trace_drv_return_void(local); |
311 | } | 328 | } |
312 | 329 | ||
313 | static inline void drv_reset_tsf(struct ieee80211_local *local) | 330 | static inline void drv_reset_tsf(struct ieee80211_local *local) |
314 | { | 331 | { |
315 | might_sleep(); | 332 | might_sleep(); |
316 | 333 | ||
334 | trace_drv_reset_tsf(local); | ||
317 | if (local->ops->reset_tsf) | 335 | if (local->ops->reset_tsf) |
318 | local->ops->reset_tsf(&local->hw); | 336 | local->ops->reset_tsf(&local->hw); |
319 | trace_drv_reset_tsf(local); | 337 | trace_drv_return_void(local); |
320 | } | 338 | } |
321 | 339 | ||
322 | static inline int drv_tx_last_beacon(struct ieee80211_local *local) | 340 | static inline int drv_tx_last_beacon(struct ieee80211_local *local) |
@@ -325,9 +343,10 @@ static inline int drv_tx_last_beacon(struct ieee80211_local *local) | |||
325 | 343 | ||
326 | might_sleep(); | 344 | might_sleep(); |
327 | 345 | ||
346 | trace_drv_tx_last_beacon(local); | ||
328 | if (local->ops->tx_last_beacon) | 347 | if (local->ops->tx_last_beacon) |
329 | ret = local->ops->tx_last_beacon(&local->hw); | 348 | ret = local->ops->tx_last_beacon(&local->hw); |
330 | trace_drv_tx_last_beacon(local, ret); | 349 | trace_drv_return_int(local, ret); |
331 | return ret; | 350 | return ret; |
332 | } | 351 | } |
333 | 352 | ||
@@ -338,10 +357,17 @@ static inline int drv_ampdu_action(struct ieee80211_local *local, | |||
338 | u16 *ssn) | 357 | u16 *ssn) |
339 | { | 358 | { |
340 | int ret = -EOPNOTSUPP; | 359 | int ret = -EOPNOTSUPP; |
360 | |||
361 | might_sleep(); | ||
362 | |||
363 | trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn); | ||
364 | |||
341 | if (local->ops->ampdu_action) | 365 | if (local->ops->ampdu_action) |
342 | ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action, | 366 | ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action, |
343 | sta, tid, ssn); | 367 | sta, tid, ssn); |
344 | trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, ret); | 368 | |
369 | trace_drv_return_int(local, ret); | ||
370 | |||
345 | return ret; | 371 | return ret; |
346 | } | 372 | } |
347 | 373 | ||
@@ -370,6 +396,7 @@ static inline void drv_flush(struct ieee80211_local *local, bool drop) | |||
370 | trace_drv_flush(local, drop); | 396 | trace_drv_flush(local, drop); |
371 | if (local->ops->flush) | 397 | if (local->ops->flush) |
372 | local->ops->flush(&local->hw, drop); | 398 | local->ops->flush(&local->hw, drop); |
399 | trace_drv_return_void(local); | ||
373 | } | 400 | } |
374 | 401 | ||
375 | static inline void drv_channel_switch(struct ieee80211_local *local, | 402 | static inline void drv_channel_switch(struct ieee80211_local *local, |
@@ -377,9 +404,9 @@ static inline void drv_channel_switch(struct ieee80211_local *local, | |||
377 | { | 404 | { |
378 | might_sleep(); | 405 | might_sleep(); |
379 | 406 | ||
380 | local->ops->channel_switch(&local->hw, ch_switch); | ||
381 | |||
382 | trace_drv_channel_switch(local, ch_switch); | 407 | trace_drv_channel_switch(local, ch_switch); |
408 | local->ops->channel_switch(&local->hw, ch_switch); | ||
409 | trace_drv_return_void(local); | ||
383 | } | 410 | } |
384 | 411 | ||
385 | #endif /* __MAC80211_DRIVER_OPS */ | 412 | #endif /* __MAC80211_DRIVER_OPS */ |
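Throughout driver-ops.h the tracing is reordered: the per-callback trace event now fires before the driver op is invoked, and the result is reported through a small set of shared drv_return_void/int/u64 events instead of a ret field in every event. A compilable userspace sketch of that wrapper shape; the op_* callbacks and the printf "tracepoints" are stand-ins, not real tracepoints:

    #include <stdio.h>

    /* Shared return "events" replace a per-callback ret field. */
    static void trace_return_int(int ret) { printf("  <- ret:%d\n", ret); }
    static void trace_return_void(void)   { printf("  <- void\n"); }

    static void trace_drv_config(unsigned int changed)
    {
        printf("drv_config changed:%#x\n", changed);
    }

    static void trace_drv_stop(void)
    {
        printf("drv_stop\n");
    }

    /* Hypothetical driver callbacks for the sketch. */
    static int op_config(unsigned int changed) { (void)changed; return 0; }
    static void op_stop(void) { }

    static int drv_config(unsigned int changed)
    {
        int ret;

        /* Trace the call first, then the driver's answer. */
        trace_drv_config(changed);
        ret = op_config(changed);
        trace_return_int(ret);
        return ret;
    }

    static void drv_stop(void)
    {
        trace_drv_stop();
        op_stop();
        trace_return_void();
    }

    int main(void)
    {
        drv_config(0x40);
        drv_stop();
        return 0;
    }

Tracing the entry before the callback means the call shows up in the trace even if the driver blocks or never returns, and the three generic return events spare every per-callback TRACE_EVENT definition its own ret plumbing.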
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h index 6a9b2342a9c2..8da31caff931 100644 --- a/net/mac80211/driver-trace.h +++ b/net/mac80211/driver-trace.h | |||
@@ -36,20 +36,58 @@ static inline void trace_ ## name(proto) {} | |||
36 | * Tracing for driver callbacks. | 36 | * Tracing for driver callbacks. |
37 | */ | 37 | */ |
38 | 38 | ||
39 | TRACE_EVENT(drv_start, | 39 | TRACE_EVENT(drv_return_void, |
40 | TP_PROTO(struct ieee80211_local *local, int ret), | 40 | TP_PROTO(struct ieee80211_local *local), |
41 | TP_ARGS(local), | ||
42 | TP_STRUCT__entry( | ||
43 | LOCAL_ENTRY | ||
44 | ), | ||
45 | TP_fast_assign( | ||
46 | LOCAL_ASSIGN; | ||
47 | ), | ||
48 | TP_printk(LOCAL_PR_FMT, LOCAL_PR_ARG) | ||
49 | ); | ||
41 | 50 | ||
51 | TRACE_EVENT(drv_return_int, | ||
52 | TP_PROTO(struct ieee80211_local *local, int ret), | ||
42 | TP_ARGS(local, ret), | 53 | TP_ARGS(local, ret), |
43 | |||
44 | TP_STRUCT__entry( | 54 | TP_STRUCT__entry( |
45 | LOCAL_ENTRY | 55 | LOCAL_ENTRY |
46 | __field(int, ret) | 56 | __field(int, ret) |
47 | ), | 57 | ), |
58 | TP_fast_assign( | ||
59 | LOCAL_ASSIGN; | ||
60 | __entry->ret = ret; | ||
61 | ), | ||
62 | TP_printk(LOCAL_PR_FMT " - %d", LOCAL_PR_ARG, __entry->ret) | ||
63 | ); | ||
48 | 64 | ||
65 | TRACE_EVENT(drv_return_u64, | ||
66 | TP_PROTO(struct ieee80211_local *local, u64 ret), | ||
67 | TP_ARGS(local, ret), | ||
68 | TP_STRUCT__entry( | ||
69 | LOCAL_ENTRY | ||
70 | __field(u64, ret) | ||
71 | ), | ||
49 | TP_fast_assign( | 72 | TP_fast_assign( |
50 | LOCAL_ASSIGN; | 73 | LOCAL_ASSIGN; |
51 | __entry->ret = ret; | 74 | __entry->ret = ret; |
52 | ), | 75 | ), |
76 | TP_printk(LOCAL_PR_FMT " - %llu", LOCAL_PR_ARG, __entry->ret) | ||
77 | ); | ||
78 | |||
79 | TRACE_EVENT(drv_start, | ||
80 | TP_PROTO(struct ieee80211_local *local), | ||
81 | |||
82 | TP_ARGS(local), | ||
83 | |||
84 | TP_STRUCT__entry( | ||
85 | LOCAL_ENTRY | ||
86 | ), | ||
87 | |||
88 | TP_fast_assign( | ||
89 | LOCAL_ASSIGN; | ||
90 | ), | ||
53 | 91 | ||
54 | TP_printk( | 92 | TP_printk( |
55 | LOCAL_PR_FMT, LOCAL_PR_ARG | 93 | LOCAL_PR_FMT, LOCAL_PR_ARG |
@@ -76,28 +114,25 @@ TRACE_EVENT(drv_stop, | |||
76 | 114 | ||
77 | TRACE_EVENT(drv_add_interface, | 115 | TRACE_EVENT(drv_add_interface, |
78 | TP_PROTO(struct ieee80211_local *local, | 116 | TP_PROTO(struct ieee80211_local *local, |
79 | struct ieee80211_sub_if_data *sdata, | 117 | struct ieee80211_sub_if_data *sdata), |
80 | int ret), | ||
81 | 118 | ||
82 | TP_ARGS(local, sdata, ret), | 119 | TP_ARGS(local, sdata), |
83 | 120 | ||
84 | TP_STRUCT__entry( | 121 | TP_STRUCT__entry( |
85 | LOCAL_ENTRY | 122 | LOCAL_ENTRY |
86 | VIF_ENTRY | 123 | VIF_ENTRY |
87 | __array(char, addr, 6) | 124 | __array(char, addr, 6) |
88 | __field(int, ret) | ||
89 | ), | 125 | ), |
90 | 126 | ||
91 | TP_fast_assign( | 127 | TP_fast_assign( |
92 | LOCAL_ASSIGN; | 128 | LOCAL_ASSIGN; |
93 | VIF_ASSIGN; | 129 | VIF_ASSIGN; |
94 | memcpy(__entry->addr, sdata->vif.addr, 6); | 130 | memcpy(__entry->addr, sdata->vif.addr, 6); |
95 | __entry->ret = ret; | ||
96 | ), | 131 | ), |
97 | 132 | ||
98 | TP_printk( | 133 | TP_printk( |
99 | LOCAL_PR_FMT VIF_PR_FMT " addr:%pM ret:%d", | 134 | LOCAL_PR_FMT VIF_PR_FMT " addr:%pM", |
100 | LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr, __entry->ret | 135 | LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr |
101 | ) | 136 | ) |
102 | ); | 137 | ); |
103 | 138 | ||
@@ -126,15 +161,13 @@ TRACE_EVENT(drv_remove_interface, | |||
126 | 161 | ||
127 | TRACE_EVENT(drv_config, | 162 | TRACE_EVENT(drv_config, |
128 | TP_PROTO(struct ieee80211_local *local, | 163 | TP_PROTO(struct ieee80211_local *local, |
129 | u32 changed, | 164 | u32 changed), |
130 | int ret), | ||
131 | 165 | ||
132 | TP_ARGS(local, changed, ret), | 166 | TP_ARGS(local, changed), |
133 | 167 | ||
134 | TP_STRUCT__entry( | 168 | TP_STRUCT__entry( |
135 | LOCAL_ENTRY | 169 | LOCAL_ENTRY |
136 | __field(u32, changed) | 170 | __field(u32, changed) |
137 | __field(int, ret) | ||
138 | __field(u32, flags) | 171 | __field(u32, flags) |
139 | __field(int, power_level) | 172 | __field(int, power_level) |
140 | __field(int, dynamic_ps_timeout) | 173 | __field(int, dynamic_ps_timeout) |
@@ -150,7 +183,6 @@ TRACE_EVENT(drv_config, | |||
150 | TP_fast_assign( | 183 | TP_fast_assign( |
151 | LOCAL_ASSIGN; | 184 | LOCAL_ASSIGN; |
152 | __entry->changed = changed; | 185 | __entry->changed = changed; |
153 | __entry->ret = ret; | ||
154 | __entry->flags = local->hw.conf.flags; | 186 | __entry->flags = local->hw.conf.flags; |
155 | __entry->power_level = local->hw.conf.power_level; | 187 | __entry->power_level = local->hw.conf.power_level; |
156 | __entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout; | 188 | __entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout; |
@@ -164,8 +196,8 @@ TRACE_EVENT(drv_config, | |||
164 | ), | 196 | ), |
165 | 197 | ||
166 | TP_printk( | 198 | TP_printk( |
167 | LOCAL_PR_FMT " ch:%#x freq:%d ret:%d", | 199 | LOCAL_PR_FMT " ch:%#x freq:%d", |
168 | LOCAL_PR_ARG, __entry->changed, __entry->center_freq, __entry->ret | 200 | LOCAL_PR_ARG, __entry->changed, __entry->center_freq |
169 | ) | 201 | ) |
170 | ); | 202 | ); |
171 | 203 | ||
@@ -220,26 +252,23 @@ TRACE_EVENT(drv_bss_info_changed, | |||
220 | ); | 252 | ); |
221 | 253 | ||
222 | TRACE_EVENT(drv_prepare_multicast, | 254 | TRACE_EVENT(drv_prepare_multicast, |
223 | TP_PROTO(struct ieee80211_local *local, int mc_count, u64 ret), | 255 | TP_PROTO(struct ieee80211_local *local, int mc_count), |
224 | 256 | ||
225 | TP_ARGS(local, mc_count, ret), | 257 | TP_ARGS(local, mc_count), |
226 | 258 | ||
227 | TP_STRUCT__entry( | 259 | TP_STRUCT__entry( |
228 | LOCAL_ENTRY | 260 | LOCAL_ENTRY |
229 | __field(int, mc_count) | 261 | __field(int, mc_count) |
230 | __field(u64, ret) | ||
231 | ), | 262 | ), |
232 | 263 | ||
233 | TP_fast_assign( | 264 | TP_fast_assign( |
234 | LOCAL_ASSIGN; | 265 | LOCAL_ASSIGN; |
235 | __entry->mc_count = mc_count; | 266 | __entry->mc_count = mc_count; |
236 | __entry->ret = ret; | ||
237 | ), | 267 | ), |
238 | 268 | ||
239 | TP_printk( | 269 | TP_printk( |
240 | LOCAL_PR_FMT " prepare mc (%d): %llx", | 270 | LOCAL_PR_FMT " prepare mc (%d)", |
241 | LOCAL_PR_ARG, __entry->mc_count, | 271 | LOCAL_PR_ARG, __entry->mc_count |
242 | (unsigned long long) __entry->ret | ||
243 | ) | 272 | ) |
244 | ); | 273 | ); |
245 | 274 | ||
@@ -273,27 +302,25 @@ TRACE_EVENT(drv_configure_filter, | |||
273 | 302 | ||
274 | TRACE_EVENT(drv_set_tim, | 303 | TRACE_EVENT(drv_set_tim, |
275 | TP_PROTO(struct ieee80211_local *local, | 304 | TP_PROTO(struct ieee80211_local *local, |
276 | struct ieee80211_sta *sta, bool set, int ret), | 305 | struct ieee80211_sta *sta, bool set), |
277 | 306 | ||
278 | TP_ARGS(local, sta, set, ret), | 307 | TP_ARGS(local, sta, set), |
279 | 308 | ||
280 | TP_STRUCT__entry( | 309 | TP_STRUCT__entry( |
281 | LOCAL_ENTRY | 310 | LOCAL_ENTRY |
282 | STA_ENTRY | 311 | STA_ENTRY |
283 | __field(bool, set) | 312 | __field(bool, set) |
284 | __field(int, ret) | ||
285 | ), | 313 | ), |
286 | 314 | ||
287 | TP_fast_assign( | 315 | TP_fast_assign( |
288 | LOCAL_ASSIGN; | 316 | LOCAL_ASSIGN; |
289 | STA_ASSIGN; | 317 | STA_ASSIGN; |
290 | __entry->set = set; | 318 | __entry->set = set; |
291 | __entry->ret = ret; | ||
292 | ), | 319 | ), |
293 | 320 | ||
294 | TP_printk( | 321 | TP_printk( |
295 | LOCAL_PR_FMT STA_PR_FMT " set:%d ret:%d", | 322 | LOCAL_PR_FMT STA_PR_FMT " set:%d", |
296 | LOCAL_PR_ARG, STA_PR_FMT, __entry->set, __entry->ret | 323 | LOCAL_PR_ARG, STA_PR_FMT, __entry->set |
297 | ) | 324 | ) |
298 | ); | 325 | ); |
299 | 326 | ||
@@ -301,9 +328,9 @@ TRACE_EVENT(drv_set_key, | |||
301 | TP_PROTO(struct ieee80211_local *local, | 328 | TP_PROTO(struct ieee80211_local *local, |
302 | enum set_key_cmd cmd, struct ieee80211_sub_if_data *sdata, | 329 | enum set_key_cmd cmd, struct ieee80211_sub_if_data *sdata, |
303 | struct ieee80211_sta *sta, | 330 | struct ieee80211_sta *sta, |
304 | struct ieee80211_key_conf *key, int ret), | 331 | struct ieee80211_key_conf *key), |
305 | 332 | ||
306 | TP_ARGS(local, cmd, sdata, sta, key, ret), | 333 | TP_ARGS(local, cmd, sdata, sta, key), |
307 | 334 | ||
308 | TP_STRUCT__entry( | 335 | TP_STRUCT__entry( |
309 | LOCAL_ENTRY | 336 | LOCAL_ENTRY |
@@ -313,7 +340,6 @@ TRACE_EVENT(drv_set_key, | |||
313 | __field(u8, hw_key_idx) | 340 | __field(u8, hw_key_idx) |
314 | __field(u8, flags) | 341 | __field(u8, flags) |
315 | __field(s8, keyidx) | 342 | __field(s8, keyidx) |
316 | __field(int, ret) | ||
317 | ), | 343 | ), |
318 | 344 | ||
319 | TP_fast_assign( | 345 | TP_fast_assign( |
@@ -324,12 +350,11 @@ TRACE_EVENT(drv_set_key, | |||
324 | __entry->flags = key->flags; | 350 | __entry->flags = key->flags; |
325 | __entry->keyidx = key->keyidx; | 351 | __entry->keyidx = key->keyidx; |
326 | __entry->hw_key_idx = key->hw_key_idx; | 352 | __entry->hw_key_idx = key->hw_key_idx; |
327 | __entry->ret = ret; | ||
328 | ), | 353 | ), |
329 | 354 | ||
330 | TP_printk( | 355 | TP_printk( |
331 | LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ret:%d", | 356 | LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT, |
332 | LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ret | 357 | LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG |
333 | ) | 358 | ) |
334 | ); | 359 | ); |
335 | 360 | ||
@@ -364,25 +389,23 @@ TRACE_EVENT(drv_update_tkip_key, | |||
364 | TRACE_EVENT(drv_hw_scan, | 389 | TRACE_EVENT(drv_hw_scan, |
365 | TP_PROTO(struct ieee80211_local *local, | 390 | TP_PROTO(struct ieee80211_local *local, |
366 | struct ieee80211_sub_if_data *sdata, | 391 | struct ieee80211_sub_if_data *sdata, |
367 | struct cfg80211_scan_request *req, int ret), | 392 | struct cfg80211_scan_request *req), |
368 | 393 | ||
369 | TP_ARGS(local, sdata, req, ret), | 394 | TP_ARGS(local, sdata, req), |
370 | 395 | ||
371 | TP_STRUCT__entry( | 396 | TP_STRUCT__entry( |
372 | LOCAL_ENTRY | 397 | LOCAL_ENTRY |
373 | VIF_ENTRY | 398 | VIF_ENTRY |
374 | __field(int, ret) | ||
375 | ), | 399 | ), |
376 | 400 | ||
377 | TP_fast_assign( | 401 | TP_fast_assign( |
378 | LOCAL_ASSIGN; | 402 | LOCAL_ASSIGN; |
379 | VIF_ASSIGN; | 403 | VIF_ASSIGN; |
380 | __entry->ret = ret; | ||
381 | ), | 404 | ), |
382 | 405 | ||
383 | TP_printk( | 406 | TP_printk( |
384 | LOCAL_PR_FMT VIF_PR_FMT " ret:%d", | 407 | LOCAL_PR_FMT VIF_PR_FMT, |
385 | LOCAL_PR_ARG,VIF_PR_ARG, __entry->ret | 408 | LOCAL_PR_ARG,VIF_PR_ARG |
386 | ) | 409 | ) |
387 | ); | 410 | ); |
388 | 411 | ||
@@ -479,48 +502,44 @@ TRACE_EVENT(drv_get_tkip_seq, | |||
479 | ); | 502 | ); |
480 | 503 | ||
481 | TRACE_EVENT(drv_set_rts_threshold, | 504 | TRACE_EVENT(drv_set_rts_threshold, |
482 | TP_PROTO(struct ieee80211_local *local, u32 value, int ret), | 505 | TP_PROTO(struct ieee80211_local *local, u32 value), |
483 | 506 | ||
484 | TP_ARGS(local, value, ret), | 507 | TP_ARGS(local, value), |
485 | 508 | ||
486 | TP_STRUCT__entry( | 509 | TP_STRUCT__entry( |
487 | LOCAL_ENTRY | 510 | LOCAL_ENTRY |
488 | __field(u32, value) | 511 | __field(u32, value) |
489 | __field(int, ret) | ||
490 | ), | 512 | ), |
491 | 513 | ||
492 | TP_fast_assign( | 514 | TP_fast_assign( |
493 | LOCAL_ASSIGN; | 515 | LOCAL_ASSIGN; |
494 | __entry->ret = ret; | ||
495 | __entry->value = value; | 516 | __entry->value = value; |
496 | ), | 517 | ), |
497 | 518 | ||
498 | TP_printk( | 519 | TP_printk( |
499 | LOCAL_PR_FMT " value:%d ret:%d", | 520 | LOCAL_PR_FMT " value:%d", |
500 | LOCAL_PR_ARG, __entry->value, __entry->ret | 521 | LOCAL_PR_ARG, __entry->value |
501 | ) | 522 | ) |
502 | ); | 523 | ); |
503 | 524 | ||
504 | TRACE_EVENT(drv_set_coverage_class, | 525 | TRACE_EVENT(drv_set_coverage_class, |
505 | TP_PROTO(struct ieee80211_local *local, u8 value, int ret), | 526 | TP_PROTO(struct ieee80211_local *local, u8 value), |
506 | 527 | ||
507 | TP_ARGS(local, value, ret), | 528 | TP_ARGS(local, value), |
508 | 529 | ||
509 | TP_STRUCT__entry( | 530 | TP_STRUCT__entry( |
510 | LOCAL_ENTRY | 531 | LOCAL_ENTRY |
511 | __field(u8, value) | 532 | __field(u8, value) |
512 | __field(int, ret) | ||
513 | ), | 533 | ), |
514 | 534 | ||
515 | TP_fast_assign( | 535 | TP_fast_assign( |
516 | LOCAL_ASSIGN; | 536 | LOCAL_ASSIGN; |
517 | __entry->ret = ret; | ||
518 | __entry->value = value; | 537 | __entry->value = value; |
519 | ), | 538 | ), |
520 | 539 | ||
521 | TP_printk( | 540 | TP_printk( |
522 | LOCAL_PR_FMT " value:%d ret:%d", | 541 | LOCAL_PR_FMT " value:%d", |
523 | LOCAL_PR_ARG, __entry->value, __entry->ret | 542 | LOCAL_PR_ARG, __entry->value |
524 | ) | 543 | ) |
525 | ); | 544 | ); |
526 | 545 | ||
@@ -555,27 +574,25 @@ TRACE_EVENT(drv_sta_notify, | |||
555 | TRACE_EVENT(drv_sta_add, | 574 | TRACE_EVENT(drv_sta_add, |
556 | TP_PROTO(struct ieee80211_local *local, | 575 | TP_PROTO(struct ieee80211_local *local, |
557 | struct ieee80211_sub_if_data *sdata, | 576 | struct ieee80211_sub_if_data *sdata, |
558 | struct ieee80211_sta *sta, int ret), | 577 | struct ieee80211_sta *sta), |
559 | 578 | ||
560 | TP_ARGS(local, sdata, sta, ret), | 579 | TP_ARGS(local, sdata, sta), |
561 | 580 | ||
562 | TP_STRUCT__entry( | 581 | TP_STRUCT__entry( |
563 | LOCAL_ENTRY | 582 | LOCAL_ENTRY |
564 | VIF_ENTRY | 583 | VIF_ENTRY |
565 | STA_ENTRY | 584 | STA_ENTRY |
566 | __field(int, ret) | ||
567 | ), | 585 | ), |
568 | 586 | ||
569 | TP_fast_assign( | 587 | TP_fast_assign( |
570 | LOCAL_ASSIGN; | 588 | LOCAL_ASSIGN; |
571 | VIF_ASSIGN; | 589 | VIF_ASSIGN; |
572 | STA_ASSIGN; | 590 | STA_ASSIGN; |
573 | __entry->ret = ret; | ||
574 | ), | 591 | ), |
575 | 592 | ||
576 | TP_printk( | 593 | TP_printk( |
577 | LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ret:%d", | 594 | LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT, |
578 | LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ret | 595 | LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG |
579 | ) | 596 | ) |
580 | ); | 597 | ); |
581 | 598 | ||
@@ -606,10 +623,9 @@ TRACE_EVENT(drv_sta_remove, | |||
606 | 623 | ||
607 | TRACE_EVENT(drv_conf_tx, | 624 | TRACE_EVENT(drv_conf_tx, |
608 | TP_PROTO(struct ieee80211_local *local, u16 queue, | 625 | TP_PROTO(struct ieee80211_local *local, u16 queue, |
609 | const struct ieee80211_tx_queue_params *params, | 626 | const struct ieee80211_tx_queue_params *params), |
610 | int ret), | ||
611 | 627 | ||
612 | TP_ARGS(local, queue, params, ret), | 628 | TP_ARGS(local, queue, params), |
613 | 629 | ||
614 | TP_STRUCT__entry( | 630 | TP_STRUCT__entry( |
615 | LOCAL_ENTRY | 631 | LOCAL_ENTRY |
@@ -618,13 +634,11 @@ TRACE_EVENT(drv_conf_tx, | |||
618 | __field(u16, cw_min) | 634 | __field(u16, cw_min) |
619 | __field(u16, cw_max) | 635 | __field(u16, cw_max) |
620 | __field(u8, aifs) | 636 | __field(u8, aifs) |
621 | __field(int, ret) | ||
622 | ), | 637 | ), |
623 | 638 | ||
624 | TP_fast_assign( | 639 | TP_fast_assign( |
625 | LOCAL_ASSIGN; | 640 | LOCAL_ASSIGN; |
626 | __entry->queue = queue; | 641 | __entry->queue = queue; |
627 | __entry->ret = ret; | ||
628 | __entry->txop = params->txop; | 642 | __entry->txop = params->txop; |
629 | __entry->cw_max = params->cw_max; | 643 | __entry->cw_max = params->cw_max; |
630 | __entry->cw_min = params->cw_min; | 644 | __entry->cw_min = params->cw_min; |
@@ -632,29 +646,27 @@ TRACE_EVENT(drv_conf_tx, | |||
632 | ), | 646 | ), |
633 | 647 | ||
634 | TP_printk( | 648 | TP_printk( |
635 | LOCAL_PR_FMT " queue:%d ret:%d", | 649 | LOCAL_PR_FMT " queue:%d", |
636 | LOCAL_PR_ARG, __entry->queue, __entry->ret | 650 | LOCAL_PR_ARG, __entry->queue |
637 | ) | 651 | ) |
638 | ); | 652 | ); |
639 | 653 | ||
640 | TRACE_EVENT(drv_get_tsf, | 654 | TRACE_EVENT(drv_get_tsf, |
641 | TP_PROTO(struct ieee80211_local *local, u64 ret), | 655 | TP_PROTO(struct ieee80211_local *local), |
642 | 656 | ||
643 | TP_ARGS(local, ret), | 657 | TP_ARGS(local), |
644 | 658 | ||
645 | TP_STRUCT__entry( | 659 | TP_STRUCT__entry( |
646 | LOCAL_ENTRY | 660 | LOCAL_ENTRY |
647 | __field(u64, ret) | ||
648 | ), | 661 | ), |
649 | 662 | ||
650 | TP_fast_assign( | 663 | TP_fast_assign( |
651 | LOCAL_ASSIGN; | 664 | LOCAL_ASSIGN; |
652 | __entry->ret = ret; | ||
653 | ), | 665 | ), |
654 | 666 | ||
655 | TP_printk( | 667 | TP_printk( |
656 | LOCAL_PR_FMT " ret:%llu", | 668 | LOCAL_PR_FMT, |
657 | LOCAL_PR_ARG, (unsigned long long)__entry->ret | 669 | LOCAL_PR_ARG |
658 | ) | 670 | ) |
659 | ); | 671 | ); |
660 | 672 | ||
@@ -698,23 +710,21 @@ TRACE_EVENT(drv_reset_tsf, | |||
698 | ); | 710 | ); |
699 | 711 | ||
700 | TRACE_EVENT(drv_tx_last_beacon, | 712 | TRACE_EVENT(drv_tx_last_beacon, |
701 | TP_PROTO(struct ieee80211_local *local, int ret), | 713 | TP_PROTO(struct ieee80211_local *local), |
702 | 714 | ||
703 | TP_ARGS(local, ret), | 715 | TP_ARGS(local), |
704 | 716 | ||
705 | TP_STRUCT__entry( | 717 | TP_STRUCT__entry( |
706 | LOCAL_ENTRY | 718 | LOCAL_ENTRY |
707 | __field(int, ret) | ||
708 | ), | 719 | ), |
709 | 720 | ||
710 | TP_fast_assign( | 721 | TP_fast_assign( |
711 | LOCAL_ASSIGN; | 722 | LOCAL_ASSIGN; |
712 | __entry->ret = ret; | ||
713 | ), | 723 | ), |
714 | 724 | ||
715 | TP_printk( | 725 | TP_printk( |
716 | LOCAL_PR_FMT " ret:%d", | 726 | LOCAL_PR_FMT, |
717 | LOCAL_PR_ARG, __entry->ret | 727 | LOCAL_PR_ARG |
718 | ) | 728 | ) |
719 | ); | 729 | ); |
720 | 730 | ||
@@ -723,9 +733,9 @@ TRACE_EVENT(drv_ampdu_action, | |||
723 | struct ieee80211_sub_if_data *sdata, | 733 | struct ieee80211_sub_if_data *sdata, |
724 | enum ieee80211_ampdu_mlme_action action, | 734 | enum ieee80211_ampdu_mlme_action action, |
725 | struct ieee80211_sta *sta, u16 tid, | 735 | struct ieee80211_sta *sta, u16 tid, |
726 | u16 *ssn, int ret), | 736 | u16 *ssn), |
727 | 737 | ||
728 | TP_ARGS(local, sdata, action, sta, tid, ssn, ret), | 738 | TP_ARGS(local, sdata, action, sta, tid, ssn), |
729 | 739 | ||
730 | TP_STRUCT__entry( | 740 | TP_STRUCT__entry( |
731 | LOCAL_ENTRY | 741 | LOCAL_ENTRY |
@@ -733,7 +743,6 @@ TRACE_EVENT(drv_ampdu_action, | |||
733 | __field(u32, action) | 743 | __field(u32, action) |
734 | __field(u16, tid) | 744 | __field(u16, tid) |
735 | __field(u16, ssn) | 745 | __field(u16, ssn) |
736 | __field(int, ret) | ||
737 | VIF_ENTRY | 746 | VIF_ENTRY |
738 | ), | 747 | ), |
739 | 748 | ||
@@ -741,15 +750,14 @@ TRACE_EVENT(drv_ampdu_action, | |||
741 | LOCAL_ASSIGN; | 750 | LOCAL_ASSIGN; |
742 | VIF_ASSIGN; | 751 | VIF_ASSIGN; |
743 | STA_ASSIGN; | 752 | STA_ASSIGN; |
744 | __entry->ret = ret; | ||
745 | __entry->action = action; | 753 | __entry->action = action; |
746 | __entry->tid = tid; | 754 | __entry->tid = tid; |
747 | __entry->ssn = ssn ? *ssn : 0; | 755 | __entry->ssn = ssn ? *ssn : 0; |
748 | ), | 756 | ), |
749 | 757 | ||
750 | TP_printk( | 758 | TP_printk( |
751 | LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d ret:%d", | 759 | LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d", |
752 | LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret | 760 | LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid |
753 | ) | 761 | ) |
754 | ); | 762 | ); |
755 | 763 | ||
@@ -851,25 +859,23 @@ TRACE_EVENT(api_start_tx_ba_cb, | |||
851 | ); | 859 | ); |
852 | 860 | ||
853 | TRACE_EVENT(api_stop_tx_ba_session, | 861 | TRACE_EVENT(api_stop_tx_ba_session, |
854 | TP_PROTO(struct ieee80211_sta *sta, u16 tid, u16 initiator), | 862 | TP_PROTO(struct ieee80211_sta *sta, u16 tid), |
855 | 863 | ||
856 | TP_ARGS(sta, tid, initiator), | 864 | TP_ARGS(sta, tid), |
857 | 865 | ||
858 | TP_STRUCT__entry( | 866 | TP_STRUCT__entry( |
859 | STA_ENTRY | 867 | STA_ENTRY |
860 | __field(u16, tid) | 868 | __field(u16, tid) |
861 | __field(u16, initiator) | ||
862 | ), | 869 | ), |
863 | 870 | ||
864 | TP_fast_assign( | 871 | TP_fast_assign( |
865 | STA_ASSIGN; | 872 | STA_ASSIGN; |
866 | __entry->tid = tid; | 873 | __entry->tid = tid; |
867 | __entry->initiator = initiator; | ||
868 | ), | 874 | ), |
869 | 875 | ||
870 | TP_printk( | 876 | TP_printk( |
871 | STA_PR_FMT " tid:%d initiator:%d", | 877 | STA_PR_FMT " tid:%d", |
872 | STA_PR_ARG, __entry->tid, __entry->initiator | 878 | STA_PR_ARG, __entry->tid |
873 | ) | 879 | ) |
874 | ); | 880 | ); |
875 | 881 | ||
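The hunks above strip the return-value field from the driver and API trace events, so each event now records only the parameters visible at the call site. A minimal sketch of how a driver-ops wrapper could still expose the outcome under that scheme: trace the parameters before invoking the callback and report the result through a separate, shared return event. The wrapper body and the trace_drv_return_int() name are assumptions for illustration, not part of this diff; only trace_drv_conf_tx() and its new argument list come from the hunk above.

static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue,
                              const struct ieee80211_tx_queue_params *params)
{
        int ret = -EOPNOTSUPP;

        might_sleep();

        /* parameter-only event, matching the new TP_PROTO above */
        trace_drv_conf_tx(local, queue, params);
        if (local->ops->conf_tx)
                ret = local->ops->conf_tx(&local->hw, queue, params);
        /* hypothetical shared event carrying the return value once */
        trace_drv_return_int(local, ret);

        return ret;
}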
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index 2ab106a0a491..be928ef7ef51 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * Copyright 2005-2006, Devicescape Software, Inc. | 6 | * Copyright 2005-2006, Devicescape Software, Inc. |
7 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> | 7 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> |
8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> | 8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> |
9 | * Copyright 2007-2008, Intel Corporation | 9 | * Copyright 2007-2010, Intel Corporation |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
12 | * it under the terms of the GNU General Public License version 2 as | 12 | * it under the terms of the GNU General Public License version 2 as |
@@ -105,6 +105,8 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta) | |||
105 | { | 105 | { |
106 | int i; | 106 | int i; |
107 | 107 | ||
108 | cancel_work_sync(&sta->ampdu_mlme.work); | ||
109 | |||
108 | for (i = 0; i < STA_TID_NUM; i++) { | 110 | for (i = 0; i < STA_TID_NUM; i++) { |
109 | __ieee80211_stop_tx_ba_session(sta, i, WLAN_BACK_INITIATOR); | 111 | __ieee80211_stop_tx_ba_session(sta, i, WLAN_BACK_INITIATOR); |
110 | __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT, | 112 | __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT, |
@@ -112,6 +114,43 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta) | |||
112 | } | 114 | } |
113 | } | 115 | } |
114 | 116 | ||
117 | void ieee80211_ba_session_work(struct work_struct *work) | ||
118 | { | ||
119 | struct sta_info *sta = | ||
120 | container_of(work, struct sta_info, ampdu_mlme.work); | ||
121 | struct tid_ampdu_tx *tid_tx; | ||
122 | int tid; | ||
123 | |||
124 | /* | ||
125 | * When this flag is set, new sessions should be | ||
126 | * blocked, and existing sessions will be torn | ||
127 | * down by the code that set the flag, so this | ||
128 | * need not run. | ||
129 | */ | ||
130 | if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) | ||
131 | return; | ||
132 | |||
133 | mutex_lock(&sta->ampdu_mlme.mtx); | ||
134 | for (tid = 0; tid < STA_TID_NUM; tid++) { | ||
135 | if (test_and_clear_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired)) | ||
136 | ___ieee80211_stop_rx_ba_session( | ||
137 | sta, tid, WLAN_BACK_RECIPIENT, | ||
138 | WLAN_REASON_QSTA_TIMEOUT); | ||
139 | |||
140 | tid_tx = sta->ampdu_mlme.tid_tx[tid]; | ||
141 | if (!tid_tx) | ||
142 | continue; | ||
143 | |||
144 | if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) | ||
145 | ieee80211_tx_ba_session_handle_start(sta, tid); | ||
146 | else if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP, | ||
147 | &tid_tx->state)) | ||
148 | ___ieee80211_stop_tx_ba_session(sta, tid, | ||
149 | WLAN_BACK_INITIATOR); | ||
150 | } | ||
151 | mutex_unlock(&sta->ampdu_mlme.mtx); | ||
152 | } | ||
153 | |||
115 | void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, | 154 | void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, |
116 | const u8 *da, u16 tid, | 155 | const u8 *da, u16 tid, |
117 | u16 initiator, u16 reason_code) | 156 | u16 initiator, u16 reason_code) |
@@ -176,13 +215,8 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, | |||
176 | 215 | ||
177 | if (initiator == WLAN_BACK_INITIATOR) | 216 | if (initiator == WLAN_BACK_INITIATOR) |
178 | __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0); | 217 | __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0); |
179 | else { /* WLAN_BACK_RECIPIENT */ | 218 | else |
180 | spin_lock_bh(&sta->lock); | 219 | __ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_RECIPIENT); |
181 | if (sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK) | ||
182 | ___ieee80211_stop_tx_ba_session(sta, tid, | ||
183 | WLAN_BACK_RECIPIENT); | ||
184 | spin_unlock_bh(&sta->lock); | ||
185 | } | ||
186 | } | 220 | } |
187 | 221 | ||
188 | int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata, | 222 | int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata, |
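The new ieee80211_ba_session_work() centralises block-ack session maintenance in a context that may sleep and take ampdu_mlme.mtx. A short sketch of how a non-sleeping caller, for example an RX session timeout handler, could hand teardown off to it; the surrounding timer plumbing is an assumption here, only the tid_rx_timer_expired bitmap and the ampdu_mlme.work item come from the hunk above.

static void example_rx_agg_timeout(struct sta_info *sta, u16 tid)
{
        /* mark which TID expired ... */
        set_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired);
        /* ... and let ieee80211_ba_session_work() stop it under the mutex */
        ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
}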
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index b2cc1fda6cfd..d4e84b22a66d 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c | |||
@@ -172,11 +172,13 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, | |||
172 | rcu_assign_pointer(ifibss->presp, skb); | 172 | rcu_assign_pointer(ifibss->presp, skb); |
173 | 173 | ||
174 | sdata->vif.bss_conf.beacon_int = beacon_int; | 174 | sdata->vif.bss_conf.beacon_int = beacon_int; |
175 | sdata->vif.bss_conf.basic_rates = basic_rates; | ||
175 | bss_change = BSS_CHANGED_BEACON_INT; | 176 | bss_change = BSS_CHANGED_BEACON_INT; |
176 | bss_change |= ieee80211_reset_erp_info(sdata); | 177 | bss_change |= ieee80211_reset_erp_info(sdata); |
177 | bss_change |= BSS_CHANGED_BSSID; | 178 | bss_change |= BSS_CHANGED_BSSID; |
178 | bss_change |= BSS_CHANGED_BEACON; | 179 | bss_change |= BSS_CHANGED_BEACON; |
179 | bss_change |= BSS_CHANGED_BEACON_ENABLED; | 180 | bss_change |= BSS_CHANGED_BEACON_ENABLED; |
181 | bss_change |= BSS_CHANGED_BASIC_RATES; | ||
180 | bss_change |= BSS_CHANGED_IBSS; | 182 | bss_change |= BSS_CHANGED_IBSS; |
181 | sdata->vif.bss_conf.ibss_joined = true; | 183 | sdata->vif.bss_conf.ibss_joined = true; |
182 | ieee80211_bss_info_change_notify(sdata, bss_change); | 184 | ieee80211_bss_info_change_notify(sdata, bss_change); |
@@ -529,7 +531,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) | |||
529 | sdata->drop_unencrypted = 0; | 531 | sdata->drop_unencrypted = 0; |
530 | 532 | ||
531 | __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int, | 533 | __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int, |
532 | ifibss->channel, 3, /* first two are basic */ | 534 | ifibss->channel, ifibss->basic_rates, |
533 | capability, 0); | 535 | capability, 0); |
534 | } | 536 | } |
535 | 537 | ||
@@ -727,8 +729,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
727 | ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, true); | 729 | ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, true); |
728 | } | 730 | } |
729 | 731 | ||
730 | static void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | 732 | void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, |
731 | struct sk_buff *skb) | 733 | struct sk_buff *skb) |
732 | { | 734 | { |
733 | struct ieee80211_rx_status *rx_status; | 735 | struct ieee80211_rx_status *rx_status; |
734 | struct ieee80211_mgmt *mgmt; | 736 | struct ieee80211_mgmt *mgmt; |
@@ -754,33 +756,11 @@ static void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
754 | ieee80211_rx_mgmt_auth_ibss(sdata, mgmt, skb->len); | 756 | ieee80211_rx_mgmt_auth_ibss(sdata, mgmt, skb->len); |
755 | break; | 757 | break; |
756 | } | 758 | } |
757 | |||
758 | kfree_skb(skb); | ||
759 | } | 759 | } |
760 | 760 | ||
761 | static void ieee80211_ibss_work(struct work_struct *work) | 761 | void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata) |
762 | { | 762 | { |
763 | struct ieee80211_sub_if_data *sdata = | 763 | struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; |
764 | container_of(work, struct ieee80211_sub_if_data, u.ibss.work); | ||
765 | struct ieee80211_local *local = sdata->local; | ||
766 | struct ieee80211_if_ibss *ifibss; | ||
767 | struct sk_buff *skb; | ||
768 | |||
769 | if (WARN_ON(local->suspended)) | ||
770 | return; | ||
771 | |||
772 | if (!ieee80211_sdata_running(sdata)) | ||
773 | return; | ||
774 | |||
775 | if (local->scanning) | ||
776 | return; | ||
777 | |||
778 | if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_ADHOC)) | ||
779 | return; | ||
780 | ifibss = &sdata->u.ibss; | ||
781 | |||
782 | while ((skb = skb_dequeue(&ifibss->skb_queue))) | ||
783 | ieee80211_ibss_rx_queued_mgmt(sdata, skb); | ||
784 | 764 | ||
785 | if (!test_and_clear_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request)) | 765 | if (!test_and_clear_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request)) |
786 | return; | 766 | return; |
@@ -798,6 +778,15 @@ static void ieee80211_ibss_work(struct work_struct *work) | |||
798 | } | 778 | } |
799 | } | 779 | } |
800 | 780 | ||
781 | static void ieee80211_queue_ibss_work(struct ieee80211_sub_if_data *sdata) | ||
782 | { | ||
783 | struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; | ||
784 | struct ieee80211_local *local = sdata->local; | ||
785 | |||
786 | set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request); | ||
787 | ieee80211_queue_work(&local->hw, &sdata->work); | ||
788 | } | ||
789 | |||
801 | static void ieee80211_ibss_timer(unsigned long data) | 790 | static void ieee80211_ibss_timer(unsigned long data) |
802 | { | 791 | { |
803 | struct ieee80211_sub_if_data *sdata = | 792 | struct ieee80211_sub_if_data *sdata = |
@@ -810,8 +799,7 @@ static void ieee80211_ibss_timer(unsigned long data) | |||
810 | return; | 799 | return; |
811 | } | 800 | } |
812 | 801 | ||
813 | set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request); | 802 | ieee80211_queue_ibss_work(sdata); |
814 | ieee80211_queue_work(&local->hw, &ifibss->work); | ||
815 | } | 803 | } |
816 | 804 | ||
817 | #ifdef CONFIG_PM | 805 | #ifdef CONFIG_PM |
@@ -819,7 +807,6 @@ void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata) | |||
819 | { | 807 | { |
820 | struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; | 808 | struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; |
821 | 809 | ||
822 | cancel_work_sync(&ifibss->work); | ||
823 | if (del_timer_sync(&ifibss->timer)) | 810 | if (del_timer_sync(&ifibss->timer)) |
824 | ifibss->timer_running = true; | 811 | ifibss->timer_running = true; |
825 | } | 812 | } |
@@ -839,10 +826,8 @@ void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata) | |||
839 | { | 826 | { |
840 | struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; | 827 | struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; |
841 | 828 | ||
842 | INIT_WORK(&ifibss->work, ieee80211_ibss_work); | ||
843 | setup_timer(&ifibss->timer, ieee80211_ibss_timer, | 829 | setup_timer(&ifibss->timer, ieee80211_ibss_timer, |
844 | (unsigned long) sdata); | 830 | (unsigned long) sdata); |
845 | skb_queue_head_init(&ifibss->skb_queue); | ||
846 | } | 831 | } |
847 | 832 | ||
848 | /* scan finished notification */ | 833 | /* scan finished notification */ |
@@ -859,37 +844,11 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local) | |||
859 | if (!sdata->u.ibss.ssid_len) | 844 | if (!sdata->u.ibss.ssid_len) |
860 | continue; | 845 | continue; |
861 | sdata->u.ibss.last_scan_completed = jiffies; | 846 | sdata->u.ibss.last_scan_completed = jiffies; |
862 | mod_timer(&sdata->u.ibss.timer, 0); | 847 | ieee80211_queue_ibss_work(sdata); |
863 | } | 848 | } |
864 | mutex_unlock(&local->iflist_mtx); | 849 | mutex_unlock(&local->iflist_mtx); |
865 | } | 850 | } |
866 | 851 | ||
867 | ieee80211_rx_result | ||
868 | ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) | ||
869 | { | ||
870 | struct ieee80211_local *local = sdata->local; | ||
871 | struct ieee80211_mgmt *mgmt; | ||
872 | u16 fc; | ||
873 | |||
874 | if (skb->len < 24) | ||
875 | return RX_DROP_MONITOR; | ||
876 | |||
877 | mgmt = (struct ieee80211_mgmt *) skb->data; | ||
878 | fc = le16_to_cpu(mgmt->frame_control); | ||
879 | |||
880 | switch (fc & IEEE80211_FCTL_STYPE) { | ||
881 | case IEEE80211_STYPE_PROBE_RESP: | ||
882 | case IEEE80211_STYPE_BEACON: | ||
883 | case IEEE80211_STYPE_PROBE_REQ: | ||
884 | case IEEE80211_STYPE_AUTH: | ||
885 | skb_queue_tail(&sdata->u.ibss.skb_queue, skb); | ||
886 | ieee80211_queue_work(&local->hw, &sdata->u.ibss.work); | ||
887 | return RX_QUEUED; | ||
888 | } | ||
889 | |||
890 | return RX_DROP_MONITOR; | ||
891 | } | ||
892 | |||
893 | int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, | 852 | int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, |
894 | struct cfg80211_ibss_params *params) | 853 | struct cfg80211_ibss_params *params) |
895 | { | 854 | { |
@@ -902,6 +861,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, | |||
902 | sdata->u.ibss.fixed_bssid = false; | 861 | sdata->u.ibss.fixed_bssid = false; |
903 | 862 | ||
904 | sdata->u.ibss.privacy = params->privacy; | 863 | sdata->u.ibss.privacy = params->privacy; |
864 | sdata->u.ibss.basic_rates = params->basic_rates; | ||
905 | 865 | ||
906 | sdata->vif.bss_conf.beacon_int = params->beacon_interval; | 866 | sdata->vif.bss_conf.beacon_int = params->beacon_interval; |
907 | 867 | ||
@@ -949,7 +909,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, | |||
949 | ieee80211_recalc_idle(sdata->local); | 909 | ieee80211_recalc_idle(sdata->local); |
950 | 910 | ||
951 | set_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request); | 911 | set_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request); |
952 | ieee80211_queue_work(&sdata->local->hw, &sdata->u.ibss.work); | 912 | ieee80211_queue_work(&sdata->local->hw, &sdata->work); |
953 | 913 | ||
954 | return 0; | 914 | return 0; |
955 | } | 915 | } |
@@ -957,10 +917,35 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, | |||
957 | int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) | 917 | int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) |
958 | { | 918 | { |
959 | struct sk_buff *skb; | 919 | struct sk_buff *skb; |
920 | struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; | ||
921 | struct ieee80211_local *local = sdata->local; | ||
922 | struct cfg80211_bss *cbss; | ||
923 | u16 capability; | ||
924 | int active_ibss = 0; | ||
925 | |||
926 | active_ibss = ieee80211_sta_active_ibss(sdata); | ||
927 | |||
928 | if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) { | ||
929 | capability = WLAN_CAPABILITY_IBSS; | ||
930 | |||
931 | if (ifibss->privacy) | ||
932 | capability |= WLAN_CAPABILITY_PRIVACY; | ||
933 | |||
934 | cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->channel, | ||
935 | ifibss->bssid, ifibss->ssid, | ||
936 | ifibss->ssid_len, WLAN_CAPABILITY_IBSS | | ||
937 | WLAN_CAPABILITY_PRIVACY, | ||
938 | capability); | ||
939 | |||
940 | if (cbss) { | ||
941 | cfg80211_unlink_bss(local->hw.wiphy, cbss); | ||
942 | cfg80211_put_bss(cbss); | ||
943 | } | ||
944 | } | ||
960 | 945 | ||
961 | del_timer_sync(&sdata->u.ibss.timer); | 946 | del_timer_sync(&sdata->u.ibss.timer); |
962 | clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request); | 947 | clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request); |
963 | cancel_work_sync(&sdata->u.ibss.work); | 948 | cancel_work_sync(&sdata->work); |
964 | clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request); | 949 | clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request); |
965 | 950 | ||
966 | sta_info_flush(sdata->local, sdata); | 951 | sta_info_flush(sdata->local, sdata); |
@@ -975,7 +960,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) | |||
975 | synchronize_rcu(); | 960 | synchronize_rcu(); |
976 | kfree_skb(skb); | 961 | kfree_skb(skb); |
977 | 962 | ||
978 | skb_queue_purge(&sdata->u.ibss.skb_queue); | 963 | skb_queue_purge(&sdata->skb_queue); |
979 | memset(sdata->u.ibss.bssid, 0, ETH_ALEN); | 964 | memset(sdata->u.ibss.bssid, 0, ETH_ALEN); |
980 | sdata->u.ibss.ssid_len = 0; | 965 | sdata->u.ibss.ssid_len = 0; |
981 | 966 | ||
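With ieee80211_ibss_rx_mgmt() removed, IBSS management frames are expected to reach ieee80211_ibss_rx_queued_mgmt() through the per-interface skb queue and work item added elsewhere in this series. A sketch of what the queuing side could look like; the helper name is illustrative, and IEEE80211_SDATA_QUEUE_TYPE_FRAME is the marker introduced in ieee80211_i.h below.

static void example_queue_ibss_mgmt(struct ieee80211_sub_if_data *sdata,
                                    struct sk_buff *skb)
{
        /* mark the skb as an ordinary queued frame, not an agg start/stop */
        skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
        skb_queue_tail(&sdata->skb_queue, skb);
        /* ieee80211_iface_work() dispatches it by interface type */
        ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}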
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 1a9e2da37a93..fb5430188e87 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -325,7 +325,6 @@ struct ieee80211_if_managed { | |||
325 | struct timer_list conn_mon_timer; | 325 | struct timer_list conn_mon_timer; |
326 | struct timer_list bcn_mon_timer; | 326 | struct timer_list bcn_mon_timer; |
327 | struct timer_list chswitch_timer; | 327 | struct timer_list chswitch_timer; |
328 | struct work_struct work; | ||
329 | struct work_struct monitor_work; | 328 | struct work_struct monitor_work; |
330 | struct work_struct chswitch_work; | 329 | struct work_struct chswitch_work; |
331 | struct work_struct beacon_connection_loss_work; | 330 | struct work_struct beacon_connection_loss_work; |
@@ -340,8 +339,6 @@ struct ieee80211_if_managed { | |||
340 | 339 | ||
341 | u16 aid; | 340 | u16 aid; |
342 | 341 | ||
343 | struct sk_buff_head skb_queue; | ||
344 | |||
345 | unsigned long timers_running; /* used for quiesce/restart */ | 342 | unsigned long timers_running; /* used for quiesce/restart */ |
346 | bool powersave; /* powersave requested for this iface */ | 343 | bool powersave; /* powersave requested for this iface */ |
347 | enum ieee80211_smps_mode req_smps, /* requested smps mode */ | 344 | enum ieee80211_smps_mode req_smps, /* requested smps mode */ |
@@ -386,13 +383,12 @@ enum ieee80211_ibss_request { | |||
386 | 383 | ||
387 | struct ieee80211_if_ibss { | 384 | struct ieee80211_if_ibss { |
388 | struct timer_list timer; | 385 | struct timer_list timer; |
389 | struct work_struct work; | ||
390 | |||
391 | struct sk_buff_head skb_queue; | ||
392 | 386 | ||
393 | unsigned long request; | 387 | unsigned long request; |
394 | unsigned long last_scan_completed; | 388 | unsigned long last_scan_completed; |
395 | 389 | ||
390 | u32 basic_rates; | ||
391 | |||
396 | bool timer_running; | 392 | bool timer_running; |
397 | 393 | ||
398 | bool fixed_bssid; | 394 | bool fixed_bssid; |
@@ -416,11 +412,9 @@ struct ieee80211_if_ibss { | |||
416 | }; | 412 | }; |
417 | 413 | ||
418 | struct ieee80211_if_mesh { | 414 | struct ieee80211_if_mesh { |
419 | struct work_struct work; | ||
420 | struct timer_list housekeeping_timer; | 415 | struct timer_list housekeeping_timer; |
421 | struct timer_list mesh_path_timer; | 416 | struct timer_list mesh_path_timer; |
422 | struct timer_list mesh_path_root_timer; | 417 | struct timer_list mesh_path_root_timer; |
423 | struct sk_buff_head skb_queue; | ||
424 | 418 | ||
425 | unsigned long timers_running; | 419 | unsigned long timers_running; |
426 | 420 | ||
@@ -517,6 +511,11 @@ struct ieee80211_sub_if_data { | |||
517 | 511 | ||
518 | u16 sequence_number; | 512 | u16 sequence_number; |
519 | 513 | ||
514 | struct work_struct work; | ||
515 | struct sk_buff_head skb_queue; | ||
516 | |||
517 | bool arp_filter_state; | ||
518 | |||
520 | /* | 519 | /* |
521 | * AP this belongs to: self in AP mode and | 520 | * AP this belongs to: self in AP mode and |
522 | * corresponding AP in VLAN mode, NULL for | 521 | * corresponding AP in VLAN mode, NULL for |
@@ -569,11 +568,15 @@ ieee80211_sdata_set_mesh_id(struct ieee80211_sub_if_data *sdata, | |||
569 | #endif | 568 | #endif |
570 | } | 569 | } |
571 | 570 | ||
571 | enum sdata_queue_type { | ||
572 | IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0, | ||
573 | IEEE80211_SDATA_QUEUE_AGG_START = 1, | ||
574 | IEEE80211_SDATA_QUEUE_AGG_STOP = 2, | ||
575 | }; | ||
576 | |||
572 | enum { | 577 | enum { |
573 | IEEE80211_RX_MSG = 1, | 578 | IEEE80211_RX_MSG = 1, |
574 | IEEE80211_TX_STATUS_MSG = 2, | 579 | IEEE80211_TX_STATUS_MSG = 2, |
575 | IEEE80211_DELBA_MSG = 3, | ||
576 | IEEE80211_ADDBA_MSG = 4, | ||
577 | }; | 580 | }; |
578 | 581 | ||
579 | enum queue_stop_reason { | 582 | enum queue_stop_reason { |
@@ -724,13 +727,7 @@ struct ieee80211_local { | |||
724 | struct sk_buff_head pending[IEEE80211_MAX_QUEUES]; | 727 | struct sk_buff_head pending[IEEE80211_MAX_QUEUES]; |
725 | struct tasklet_struct tx_pending_tasklet; | 728 | struct tasklet_struct tx_pending_tasklet; |
726 | 729 | ||
727 | /* | 730 | atomic_t agg_queue_stop[IEEE80211_MAX_QUEUES]; |
728 | * This lock is used to prevent concurrent A-MPDU | ||
729 | * session start/stop processing, this thus also | ||
730 | * synchronises the ->ampdu_action() callback to | ||
731 | * drivers and limits it to one at a time. | ||
732 | */ | ||
733 | spinlock_t ampdu_lock; | ||
734 | 731 | ||
735 | /* number of interfaces with corresponding IFF_ flags */ | 732 | /* number of interfaces with corresponding IFF_ flags */ |
736 | atomic_t iff_allmultis, iff_promiscs; | 733 | atomic_t iff_allmultis, iff_promiscs; |
@@ -746,10 +743,10 @@ struct ieee80211_local { | |||
746 | struct mutex iflist_mtx; | 743 | struct mutex iflist_mtx; |
747 | 744 | ||
748 | /* | 745 | /* |
749 | * Key lock, protects sdata's key_list and sta_info's | 746 | * Key mutex, protects sdata's key_list and sta_info's |
750 | * key pointers (write access, they're RCU.) | 747 | * key pointers (write access, they're RCU.) |
751 | */ | 748 | */ |
752 | spinlock_t key_lock; | 749 | struct mutex key_mtx; |
753 | 750 | ||
754 | 751 | ||
755 | /* Scanning and BSS list */ | 752 | /* Scanning and BSS list */ |
@@ -851,6 +848,13 @@ struct ieee80211_local { | |||
851 | struct work_struct dynamic_ps_disable_work; | 848 | struct work_struct dynamic_ps_disable_work; |
852 | struct timer_list dynamic_ps_timer; | 849 | struct timer_list dynamic_ps_timer; |
853 | struct notifier_block network_latency_notifier; | 850 | struct notifier_block network_latency_notifier; |
851 | struct notifier_block ifa_notifier; | ||
852 | |||
853 | /* | ||
854 | * The dynamic ps timeout configured from user space via WEXT - | ||
855 | * this will override whatever chosen by mac80211 internally. | ||
856 | */ | ||
857 | int dynamic_ps_forced_timeout; | ||
854 | 858 | ||
855 | int user_power_level; /* in dBm */ | 859 | int user_power_level; /* in dBm */ |
856 | int power_constr_level; /* in dBm */ | 860 | int power_constr_level; /* in dBm */ |
@@ -874,9 +878,8 @@ IEEE80211_DEV_TO_SUB_IF(struct net_device *dev) | |||
874 | return netdev_priv(dev); | 878 | return netdev_priv(dev); |
875 | } | 879 | } |
876 | 880 | ||
877 | /* this struct represents 802.11n's RA/TID combination along with our vif */ | 881 | /* this struct represents 802.11n's RA/TID combination */ |
878 | struct ieee80211_ra_tid { | 882 | struct ieee80211_ra_tid { |
879 | struct ieee80211_vif *vif; | ||
880 | u8 ra[ETH_ALEN]; | 883 | u8 ra[ETH_ALEN]; |
881 | u16 tid; | 884 | u16 tid; |
882 | }; | 885 | }; |
@@ -985,29 +988,25 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, | |||
985 | int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, | 988 | int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, |
986 | struct cfg80211_disassoc_request *req, | 989 | struct cfg80211_disassoc_request *req, |
987 | void *cookie); | 990 | void *cookie); |
988 | int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata, | ||
989 | struct ieee80211_channel *chan, | ||
990 | enum nl80211_channel_type channel_type, | ||
991 | const u8 *buf, size_t len, u64 *cookie); | ||
992 | ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, | ||
993 | struct sk_buff *skb); | ||
994 | void ieee80211_send_pspoll(struct ieee80211_local *local, | 991 | void ieee80211_send_pspoll(struct ieee80211_local *local, |
995 | struct ieee80211_sub_if_data *sdata); | 992 | struct ieee80211_sub_if_data *sdata); |
996 | void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency); | 993 | void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency); |
997 | int ieee80211_max_network_latency(struct notifier_block *nb, | 994 | int ieee80211_max_network_latency(struct notifier_block *nb, |
998 | unsigned long data, void *dummy); | 995 | unsigned long data, void *dummy); |
996 | int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata); | ||
999 | void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | 997 | void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, |
1000 | struct ieee80211_channel_sw_ie *sw_elem, | 998 | struct ieee80211_channel_sw_ie *sw_elem, |
1001 | struct ieee80211_bss *bss, | 999 | struct ieee80211_bss *bss, |
1002 | u64 timestamp); | 1000 | u64 timestamp); |
1003 | void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata); | 1001 | void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata); |
1004 | void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata); | 1002 | void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata); |
1003 | void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata); | ||
1004 | void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | ||
1005 | struct sk_buff *skb); | ||
1005 | 1006 | ||
1006 | /* IBSS code */ | 1007 | /* IBSS code */ |
1007 | void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); | 1008 | void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); |
1008 | void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata); | 1009 | void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata); |
1009 | ieee80211_rx_result | ||
1010 | ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); | ||
1011 | struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, | 1010 | struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, |
1012 | u8 *bssid, u8 *addr, u32 supp_rates, | 1011 | u8 *bssid, u8 *addr, u32 supp_rates, |
1013 | gfp_t gfp); | 1012 | gfp_t gfp); |
@@ -1016,6 +1015,14 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, | |||
1016 | int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata); | 1015 | int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata); |
1017 | void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata); | 1016 | void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata); |
1018 | void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata); | 1017 | void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata); |
1018 | void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata); | ||
1019 | void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | ||
1020 | struct sk_buff *skb); | ||
1021 | |||
1022 | /* mesh code */ | ||
1023 | void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata); | ||
1024 | void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | ||
1025 | struct sk_buff *skb); | ||
1019 | 1026 | ||
1020 | /* scan/BSS handling */ | 1027 | /* scan/BSS handling */ |
1021 | void ieee80211_scan_work(struct work_struct *work); | 1028 | void ieee80211_scan_work(struct work_struct *work); |
@@ -1099,6 +1106,8 @@ int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata, | |||
1099 | enum ieee80211_smps_mode smps, const u8 *da, | 1106 | enum ieee80211_smps_mode smps, const u8 *da, |
1100 | const u8 *bssid); | 1107 | const u8 *bssid); |
1101 | 1108 | ||
1109 | void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, | ||
1110 | u16 initiator, u16 reason); | ||
1102 | void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, | 1111 | void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, |
1103 | u16 initiator, u16 reason); | 1112 | u16 initiator, u16 reason); |
1104 | void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta); | 1113 | void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta); |
@@ -1118,6 +1127,10 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | |||
1118 | enum ieee80211_back_parties initiator); | 1127 | enum ieee80211_back_parties initiator); |
1119 | int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | 1128 | int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, |
1120 | enum ieee80211_back_parties initiator); | 1129 | enum ieee80211_back_parties initiator); |
1130 | void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid); | ||
1131 | void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid); | ||
1132 | void ieee80211_ba_session_work(struct work_struct *work); | ||
1133 | void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid); | ||
1121 | 1134 | ||
1122 | /* Spectrum management */ | 1135 | /* Spectrum management */ |
1123 | void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, | 1136 | void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, |
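The sdata_queue_type markers and the trimmed ieee80211_ra_tid (the vif pointer is no longer needed because the skb is queued on the owning sdata) combine as follows: an IRQ-safe aggregation callback can stash the RA/TID in a zero-length skb's cb[], tag the skb with the marker, and queue it for ieee80211_iface_work() to dispatch. The allocation and error handling below are simplified assumptions; the cb[] layout matches what the interface work reads back.

static void example_agg_start_irqsafe(struct ieee80211_sub_if_data *sdata,
                                      const u8 *ra, u16 tid)
{
        struct sk_buff *skb = dev_alloc_skb(0);
        struct ieee80211_ra_tid *ra_tid;

        if (!skb)
                return;

        /* same cb[] layout that ieee80211_iface_work() casts back out */
        ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
        memcpy(ra_tid->ra, ra, ETH_ALEN);
        ra_tid->tid = tid;

        skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
        skb_queue_tail(&sdata->skb_queue, skb);
        ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}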
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 50deb017fd6e..910729fc18cd 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -268,7 +268,6 @@ static int ieee80211_open(struct net_device *dev) | |||
268 | 268 | ||
269 | changed |= ieee80211_reset_erp_info(sdata); | 269 | changed |= ieee80211_reset_erp_info(sdata); |
270 | ieee80211_bss_info_change_notify(sdata, changed); | 270 | ieee80211_bss_info_change_notify(sdata, changed); |
271 | ieee80211_enable_keys(sdata); | ||
272 | 271 | ||
273 | if (sdata->vif.type == NL80211_IFTYPE_STATION) | 272 | if (sdata->vif.type == NL80211_IFTYPE_STATION) |
274 | netif_carrier_off(dev); | 273 | netif_carrier_off(dev); |
@@ -321,15 +320,6 @@ static int ieee80211_open(struct net_device *dev) | |||
321 | 320 | ||
322 | ieee80211_recalc_ps(local, -1); | 321 | ieee80211_recalc_ps(local, -1); |
323 | 322 | ||
324 | /* | ||
325 | * ieee80211_sta_work is disabled while network interface | ||
326 | * is down. Therefore, some configuration changes may not | ||
327 | * yet be effective. Trigger execution of ieee80211_sta_work | ||
328 | * to fix this. | ||
329 | */ | ||
330 | if (sdata->vif.type == NL80211_IFTYPE_STATION) | ||
331 | ieee80211_queue_work(&local->hw, &sdata->u.mgd.work); | ||
332 | |||
333 | netif_tx_start_all_queues(dev); | 323 | netif_tx_start_all_queues(dev); |
334 | 324 | ||
335 | return 0; | 325 | return 0; |
@@ -349,7 +339,6 @@ static int ieee80211_stop(struct net_device *dev) | |||
349 | { | 339 | { |
350 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 340 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
351 | struct ieee80211_local *local = sdata->local; | 341 | struct ieee80211_local *local = sdata->local; |
352 | struct sta_info *sta; | ||
353 | unsigned long flags; | 342 | unsigned long flags; |
354 | struct sk_buff *skb, *tmp; | 343 | struct sk_buff *skb, *tmp; |
355 | u32 hw_reconf_flags = 0; | 344 | u32 hw_reconf_flags = 0; |
@@ -366,18 +355,6 @@ static int ieee80211_stop(struct net_device *dev) | |||
366 | ieee80211_work_purge(sdata); | 355 | ieee80211_work_purge(sdata); |
367 | 356 | ||
368 | /* | 357 | /* |
369 | * Now delete all active aggregation sessions. | ||
370 | */ | ||
371 | rcu_read_lock(); | ||
372 | |||
373 | list_for_each_entry_rcu(sta, &local->sta_list, list) { | ||
374 | if (sta->sdata == sdata) | ||
375 | ieee80211_sta_tear_down_BA_sessions(sta); | ||
376 | } | ||
377 | |||
378 | rcu_read_unlock(); | ||
379 | |||
380 | /* | ||
381 | * Remove all stations associated with this interface. | 358 | * Remove all stations associated with this interface. |
382 | * | 359 | * |
383 | * This must be done before calling ops->remove_interface() | 360 | * This must be done before calling ops->remove_interface() |
@@ -483,27 +460,14 @@ static int ieee80211_stop(struct net_device *dev) | |||
483 | * whether the interface is running, which, at this point, | 460 | * whether the interface is running, which, at this point, |
484 | * it no longer is. | 461 | * it no longer is. |
485 | */ | 462 | */ |
486 | cancel_work_sync(&sdata->u.mgd.work); | ||
487 | cancel_work_sync(&sdata->u.mgd.chswitch_work); | 463 | cancel_work_sync(&sdata->u.mgd.chswitch_work); |
488 | cancel_work_sync(&sdata->u.mgd.monitor_work); | 464 | cancel_work_sync(&sdata->u.mgd.monitor_work); |
489 | cancel_work_sync(&sdata->u.mgd.beacon_connection_loss_work); | 465 | cancel_work_sync(&sdata->u.mgd.beacon_connection_loss_work); |
490 | 466 | ||
491 | /* | ||
492 | * When we get here, the interface is marked down. | ||
493 | * Call synchronize_rcu() to wait for the RX path | ||
494 | * should it be using the interface and enqueuing | ||
495 | * frames at this very time on another CPU. | ||
496 | */ | ||
497 | synchronize_rcu(); | ||
498 | skb_queue_purge(&sdata->u.mgd.skb_queue); | ||
499 | /* fall through */ | 467 | /* fall through */ |
500 | case NL80211_IFTYPE_ADHOC: | 468 | case NL80211_IFTYPE_ADHOC: |
501 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { | 469 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC) |
502 | del_timer_sync(&sdata->u.ibss.timer); | 470 | del_timer_sync(&sdata->u.ibss.timer); |
503 | cancel_work_sync(&sdata->u.ibss.work); | ||
504 | synchronize_rcu(); | ||
505 | skb_queue_purge(&sdata->u.ibss.skb_queue); | ||
506 | } | ||
507 | /* fall through */ | 471 | /* fall through */ |
508 | case NL80211_IFTYPE_MESH_POINT: | 472 | case NL80211_IFTYPE_MESH_POINT: |
509 | if (ieee80211_vif_is_mesh(&sdata->vif)) { | 473 | if (ieee80211_vif_is_mesh(&sdata->vif)) { |
@@ -518,6 +482,16 @@ static int ieee80211_stop(struct net_device *dev) | |||
518 | } | 482 | } |
519 | /* fall through */ | 483 | /* fall through */ |
520 | default: | 484 | default: |
485 | flush_work(&sdata->work); | ||
486 | /* | ||
487 | * When we get here, the interface is marked down. | ||
488 | * Call synchronize_rcu() to wait for the RX path | ||
489 | * should it be using the interface and enqueuing | ||
490 | * frames at this very time on another CPU. | ||
491 | */ | ||
492 | synchronize_rcu(); | ||
493 | skb_queue_purge(&sdata->skb_queue); | ||
494 | |||
521 | if (local->scan_sdata == sdata) | 495 | if (local->scan_sdata == sdata) |
522 | ieee80211_scan_cancel(local); | 496 | ieee80211_scan_cancel(local); |
523 | 497 | ||
@@ -531,8 +505,8 @@ static int ieee80211_stop(struct net_device *dev) | |||
531 | BSS_CHANGED_BEACON_ENABLED); | 505 | BSS_CHANGED_BEACON_ENABLED); |
532 | } | 506 | } |
533 | 507 | ||
534 | /* disable all keys for as long as this netdev is down */ | 508 | /* free all remaining keys, there shouldn't be any */ |
535 | ieee80211_disable_keys(sdata); | 509 | ieee80211_free_keys(sdata); |
536 | drv_remove_interface(local, &sdata->vif); | 510 | drv_remove_interface(local, &sdata->vif); |
537 | } | 511 | } |
538 | 512 | ||
@@ -727,6 +701,136 @@ static void ieee80211_if_setup(struct net_device *dev) | |||
727 | dev->destructor = free_netdev; | 701 | dev->destructor = free_netdev; |
728 | } | 702 | } |
729 | 703 | ||
704 | static void ieee80211_iface_work(struct work_struct *work) | ||
705 | { | ||
706 | struct ieee80211_sub_if_data *sdata = | ||
707 | container_of(work, struct ieee80211_sub_if_data, work); | ||
708 | struct ieee80211_local *local = sdata->local; | ||
709 | struct sk_buff *skb; | ||
710 | struct sta_info *sta; | ||
711 | struct ieee80211_ra_tid *ra_tid; | ||
712 | |||
713 | if (!ieee80211_sdata_running(sdata)) | ||
714 | return; | ||
715 | |||
716 | if (local->scanning) | ||
717 | return; | ||
718 | |||
719 | /* | ||
720 | * ieee80211_queue_work() should have picked up most cases, | ||
721 | * here we'll pick the rest. | ||
722 | */ | ||
723 | if (WARN(local->suspended, | ||
724 | "interface work scheduled while going to suspend\n")) | ||
725 | return; | ||
726 | |||
727 | /* first process frames */ | ||
728 | while ((skb = skb_dequeue(&sdata->skb_queue))) { | ||
729 | struct ieee80211_mgmt *mgmt = (void *)skb->data; | ||
730 | |||
731 | if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) { | ||
732 | ra_tid = (void *)&skb->cb; | ||
733 | ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra, | ||
734 | ra_tid->tid); | ||
735 | } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) { | ||
736 | ra_tid = (void *)&skb->cb; | ||
737 | ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra, | ||
738 | ra_tid->tid); | ||
739 | } else if (ieee80211_is_action(mgmt->frame_control) && | ||
740 | mgmt->u.action.category == WLAN_CATEGORY_BACK) { | ||
741 | int len = skb->len; | ||
742 | |||
743 | mutex_lock(&local->sta_mtx); | ||
744 | sta = sta_info_get(sdata, mgmt->sa); | ||
745 | if (sta) { | ||
746 | switch (mgmt->u.action.u.addba_req.action_code) { | ||
747 | case WLAN_ACTION_ADDBA_REQ: | ||
748 | ieee80211_process_addba_request( | ||
749 | local, sta, mgmt, len); | ||
750 | break; | ||
751 | case WLAN_ACTION_ADDBA_RESP: | ||
752 | ieee80211_process_addba_resp(local, sta, | ||
753 | mgmt, len); | ||
754 | break; | ||
755 | case WLAN_ACTION_DELBA: | ||
756 | ieee80211_process_delba(sdata, sta, | ||
757 | mgmt, len); | ||
758 | break; | ||
759 | default: | ||
760 | WARN_ON(1); | ||
761 | break; | ||
762 | } | ||
763 | } | ||
764 | mutex_unlock(&local->sta_mtx); | ||
765 | } else if (ieee80211_is_data_qos(mgmt->frame_control)) { | ||
766 | struct ieee80211_hdr *hdr = (void *)mgmt; | ||
767 | /* | ||
768 | * So the frame isn't mgmt, but frame_control | ||
769 | * is at the right place anyway, of course, so | ||
770 | * the if statement is correct. | ||
771 | * | ||
772 | * Warn if we have other data frame types here, | ||
773 | * they must not get here. | ||
774 | */ | ||
775 | WARN_ON(hdr->frame_control & | ||
776 | cpu_to_le16(IEEE80211_STYPE_NULLFUNC)); | ||
777 | WARN_ON(!(hdr->seq_ctrl & | ||
778 | cpu_to_le16(IEEE80211_SCTL_FRAG))); | ||
779 | /* | ||
780 | * This was a fragment of a frame, received while | ||
781 | * a block-ack session was active. That cannot be | ||
782 | * right, so terminate the session. | ||
783 | */ | ||
784 | mutex_lock(&local->sta_mtx); | ||
785 | sta = sta_info_get(sdata, mgmt->sa); | ||
786 | if (sta) { | ||
787 | u16 tid = *ieee80211_get_qos_ctl(hdr) & | ||
788 | IEEE80211_QOS_CTL_TID_MASK; | ||
789 | |||
790 | __ieee80211_stop_rx_ba_session( | ||
791 | sta, tid, WLAN_BACK_RECIPIENT, | ||
792 | WLAN_REASON_QSTA_REQUIRE_SETUP); | ||
793 | } | ||
794 | mutex_unlock(&local->sta_mtx); | ||
795 | } else switch (sdata->vif.type) { | ||
796 | case NL80211_IFTYPE_STATION: | ||
797 | ieee80211_sta_rx_queued_mgmt(sdata, skb); | ||
798 | break; | ||
799 | case NL80211_IFTYPE_ADHOC: | ||
800 | ieee80211_ibss_rx_queued_mgmt(sdata, skb); | ||
801 | break; | ||
802 | case NL80211_IFTYPE_MESH_POINT: | ||
803 | if (!ieee80211_vif_is_mesh(&sdata->vif)) | ||
804 | break; | ||
805 | ieee80211_mesh_rx_queued_mgmt(sdata, skb); | ||
806 | break; | ||
807 | default: | ||
808 | WARN(1, "frame for unexpected interface type"); | ||
809 | break; | ||
810 | } | ||
811 | |||
812 | kfree_skb(skb); | ||
813 | } | ||
814 | |||
815 | /* then other type-dependent work */ | ||
816 | switch (sdata->vif.type) { | ||
817 | case NL80211_IFTYPE_STATION: | ||
818 | ieee80211_sta_work(sdata); | ||
819 | break; | ||
820 | case NL80211_IFTYPE_ADHOC: | ||
821 | ieee80211_ibss_work(sdata); | ||
822 | break; | ||
823 | case NL80211_IFTYPE_MESH_POINT: | ||
824 | if (!ieee80211_vif_is_mesh(&sdata->vif)) | ||
825 | break; | ||
826 | ieee80211_mesh_work(sdata); | ||
827 | break; | ||
828 | default: | ||
829 | break; | ||
830 | } | ||
831 | } | ||
832 | |||
833 | |||
730 | /* | 834 | /* |
731 | * Helper function to initialise an interface to a specific type. | 835 | * Helper function to initialise an interface to a specific type. |
732 | */ | 836 | */ |
@@ -744,6 +848,9 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, | |||
744 | /* only monitor differs */ | 848 | /* only monitor differs */ |
745 | sdata->dev->type = ARPHRD_ETHER; | 849 | sdata->dev->type = ARPHRD_ETHER; |
746 | 850 | ||
851 | skb_queue_head_init(&sdata->skb_queue); | ||
852 | INIT_WORK(&sdata->work, ieee80211_iface_work); | ||
853 | |||
747 | switch (type) { | 854 | switch (type) { |
748 | case NL80211_IFTYPE_AP: | 855 | case NL80211_IFTYPE_AP: |
749 | skb_queue_head_init(&sdata->u.ap.ps_bc_buf); | 856 | skb_queue_head_init(&sdata->u.ap.ps_bc_buf); |
@@ -969,6 +1076,9 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, | |||
969 | sdata->wdev.wiphy = local->hw.wiphy; | 1076 | sdata->wdev.wiphy = local->hw.wiphy; |
970 | sdata->local = local; | 1077 | sdata->local = local; |
971 | sdata->dev = ndev; | 1078 | sdata->dev = ndev; |
1079 | #ifdef CONFIG_INET | ||
1080 | sdata->arp_filter_state = true; | ||
1081 | #endif | ||
972 | 1082 | ||
973 | for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) | 1083 | for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) |
974 | skb_queue_head_init(&sdata->fragments[i].skb_list); | 1084 | skb_queue_head_init(&sdata->fragments[i].skb_list); |
diff --git a/net/mac80211/key.c b/net/mac80211/key.c index e8f6e3b252d8..50d1cff23d8e 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c | |||
@@ -36,80 +36,20 @@ | |||
36 | * There is currently no way of knowing this except by looking into | 36 | * There is currently no way of knowing this except by looking into |
37 | * debugfs. | 37 | * debugfs. |
38 | * | 38 | * |
39 | * All key operations are protected internally so you can call them at | 39 | * All key operations are protected internally. |
40 | * any time. | ||
41 | * | 40 | * |
42 | * Within mac80211, key references are, just as STA structure references, | 41 | * Within mac80211, key references are, just as STA structure references, |
43 | * protected by RCU. Note, however, that some things are unprotected, | 42 | * protected by RCU. Note, however, that some things are unprotected, |
44 | * namely the key->sta dereferences within the hardware acceleration | 43 | * namely the key->sta dereferences within the hardware acceleration |
45 | * functions. This means that sta_info_destroy() must flush the key todo | 44 | * functions. This means that sta_info_destroy() must remove the key |
46 | * list. | 45 | * which waits for an RCU grace period. |
47 | * | ||
48 | * All the direct key list manipulation functions must not sleep because | ||
49 | * they can operate on STA info structs that are protected by RCU. | ||
50 | */ | 46 | */ |
51 | 47 | ||
52 | static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; | 48 | static const u8 bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; |
53 | 49 | ||
54 | /* key mutex: used to synchronise todo runners */ | 50 | static void assert_key_lock(struct ieee80211_local *local) |
55 | static DEFINE_MUTEX(key_mutex); | ||
56 | static DEFINE_SPINLOCK(todo_lock); | ||
57 | static LIST_HEAD(todo_list); | ||
58 | |||
59 | static void key_todo(struct work_struct *work) | ||
60 | { | 51 | { |
61 | ieee80211_key_todo(); | 52 | WARN_ON(!mutex_is_locked(&local->key_mtx)); |
62 | } | ||
63 | |||
64 | static DECLARE_WORK(todo_work, key_todo); | ||
65 | |||
66 | /** | ||
67 | * add_todo - add todo item for a key | ||
68 | * | ||
69 | * @key: key to add to do item for | ||
70 | * @flag: todo flag(s) | ||
71 | * | ||
72 | * Must be called with IRQs or softirqs disabled. | ||
73 | */ | ||
74 | static void add_todo(struct ieee80211_key *key, u32 flag) | ||
75 | { | ||
76 | if (!key) | ||
77 | return; | ||
78 | |||
79 | spin_lock(&todo_lock); | ||
80 | key->flags |= flag; | ||
81 | /* | ||
82 | * Remove again if already on the list so that we move it to the end. | ||
83 | */ | ||
84 | if (!list_empty(&key->todo)) | ||
85 | list_del(&key->todo); | ||
86 | list_add_tail(&key->todo, &todo_list); | ||
87 | schedule_work(&todo_work); | ||
88 | spin_unlock(&todo_lock); | ||
89 | } | ||
90 | |||
91 | /** | ||
92 | * ieee80211_key_lock - lock the mac80211 key operation lock | ||
93 | * | ||
94 | * This locks the (global) mac80211 key operation lock, all | ||
95 | * key operations must be done under this lock. | ||
96 | */ | ||
97 | static void ieee80211_key_lock(void) | ||
98 | { | ||
99 | mutex_lock(&key_mutex); | ||
100 | } | ||
101 | |||
102 | /** | ||
103 | * ieee80211_key_unlock - unlock the mac80211 key operation lock | ||
104 | */ | ||
105 | static void ieee80211_key_unlock(void) | ||
106 | { | ||
107 | mutex_unlock(&key_mutex); | ||
108 | } | ||
109 | |||
110 | static void assert_key_lock(void) | ||
111 | { | ||
112 | WARN_ON(!mutex_is_locked(&key_mutex)); | ||
113 | } | 53 | } |
114 | 54 | ||
115 | static struct ieee80211_sta *get_sta_for_key(struct ieee80211_key *key) | 55 | static struct ieee80211_sta *get_sta_for_key(struct ieee80211_key *key) |
@@ -126,12 +66,13 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key) | |||
126 | struct ieee80211_sta *sta; | 66 | struct ieee80211_sta *sta; |
127 | int ret; | 67 | int ret; |
128 | 68 | ||
129 | assert_key_lock(); | ||
130 | might_sleep(); | 69 | might_sleep(); |
131 | 70 | ||
132 | if (!key->local->ops->set_key) | 71 | if (!key->local->ops->set_key) |
133 | return; | 72 | return; |
134 | 73 | ||
74 | assert_key_lock(key->local); | ||
75 | |||
135 | sta = get_sta_for_key(key); | 76 | sta = get_sta_for_key(key); |
136 | 77 | ||
137 | sdata = key->sdata; | 78 | sdata = key->sdata; |
@@ -142,11 +83,8 @@ static void ieee80211_key_enable_hw_accel(struct ieee80211_key *key) | |||
142 | 83 | ||
143 | ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf); | 84 | ret = drv_set_key(key->local, SET_KEY, sdata, sta, &key->conf); |
144 | 85 | ||
145 | if (!ret) { | 86 | if (!ret) |
146 | spin_lock_bh(&todo_lock); | ||
147 | key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; | 87 | key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; |
148 | spin_unlock_bh(&todo_lock); | ||
149 | } | ||
150 | 88 | ||
151 | if (ret && ret != -ENOSPC && ret != -EOPNOTSUPP) | 89 | if (ret && ret != -ENOSPC && ret != -EOPNOTSUPP) |
152 | printk(KERN_ERR "mac80211-%s: failed to set key " | 90 | printk(KERN_ERR "mac80211-%s: failed to set key " |
@@ -161,18 +99,15 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) | |||
161 | struct ieee80211_sta *sta; | 99 | struct ieee80211_sta *sta; |
162 | int ret; | 100 | int ret; |
163 | 101 | ||
164 | assert_key_lock(); | ||
165 | might_sleep(); | 102 | might_sleep(); |
166 | 103 | ||
167 | if (!key || !key->local->ops->set_key) | 104 | if (!key || !key->local->ops->set_key) |
168 | return; | 105 | return; |
169 | 106 | ||
170 | spin_lock_bh(&todo_lock); | 107 | assert_key_lock(key->local); |
171 | if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) { | 108 | |
172 | spin_unlock_bh(&todo_lock); | 109 | if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)) |
173 | return; | 110 | return; |
174 | } | ||
175 | spin_unlock_bh(&todo_lock); | ||
176 | 111 | ||
177 | sta = get_sta_for_key(key); | 112 | sta = get_sta_for_key(key); |
178 | sdata = key->sdata; | 113 | sdata = key->sdata; |
@@ -191,9 +126,7 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) | |||
191 | wiphy_name(key->local->hw.wiphy), | 126 | wiphy_name(key->local->hw.wiphy), |
192 | key->conf.keyidx, sta ? sta->addr : bcast_addr, ret); | 127 | key->conf.keyidx, sta ? sta->addr : bcast_addr, ret); |
193 | 128 | ||
194 | spin_lock_bh(&todo_lock); | ||
195 | key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; | 129 | key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; |
196 | spin_unlock_bh(&todo_lock); | ||
197 | } | 130 | } |
198 | 131 | ||
199 | static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, | 132 | static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, |
@@ -201,22 +134,24 @@ static void __ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, | |||
201 | { | 134 | { |
202 | struct ieee80211_key *key = NULL; | 135 | struct ieee80211_key *key = NULL; |
203 | 136 | ||
137 | assert_key_lock(sdata->local); | ||
138 | |||
204 | if (idx >= 0 && idx < NUM_DEFAULT_KEYS) | 139 | if (idx >= 0 && idx < NUM_DEFAULT_KEYS) |
205 | key = sdata->keys[idx]; | 140 | key = sdata->keys[idx]; |
206 | 141 | ||
207 | rcu_assign_pointer(sdata->default_key, key); | 142 | rcu_assign_pointer(sdata->default_key, key); |
208 | 143 | ||
209 | if (key) | 144 | if (key) { |
210 | add_todo(key, KEY_FLAG_TODO_DEFKEY); | 145 | ieee80211_debugfs_key_remove_default(key->sdata); |
146 | ieee80211_debugfs_key_add_default(key->sdata); | ||
147 | } | ||
211 | } | 148 | } |
212 | 149 | ||
213 | void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx) | 150 | void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx) |
214 | { | 151 | { |
215 | unsigned long flags; | 152 | mutex_lock(&sdata->local->key_mtx); |
216 | |||
217 | spin_lock_irqsave(&sdata->local->key_lock, flags); | ||
218 | __ieee80211_set_default_key(sdata, idx); | 153 | __ieee80211_set_default_key(sdata, idx); |
219 | spin_unlock_irqrestore(&sdata->local->key_lock, flags); | 154 | mutex_unlock(&sdata->local->key_mtx); |
220 | } | 155 | } |
221 | 156 | ||
222 | static void | 157 | static void |
@@ -224,24 +159,26 @@ __ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, int idx) | |||
224 | { | 159 | { |
225 | struct ieee80211_key *key = NULL; | 160 | struct ieee80211_key *key = NULL; |
226 | 161 | ||
162 | assert_key_lock(sdata->local); | ||
163 | |||
227 | if (idx >= NUM_DEFAULT_KEYS && | 164 | if (idx >= NUM_DEFAULT_KEYS && |
228 | idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) | 165 | idx < NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) |
229 | key = sdata->keys[idx]; | 166 | key = sdata->keys[idx]; |
230 | 167 | ||
231 | rcu_assign_pointer(sdata->default_mgmt_key, key); | 168 | rcu_assign_pointer(sdata->default_mgmt_key, key); |
232 | 169 | ||
233 | if (key) | 170 | if (key) { |
234 | add_todo(key, KEY_FLAG_TODO_DEFMGMTKEY); | 171 | ieee80211_debugfs_key_remove_mgmt_default(key->sdata); |
172 | ieee80211_debugfs_key_add_mgmt_default(key->sdata); | ||
173 | } | ||
235 | } | 174 | } |
236 | 175 | ||
237 | void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, | 176 | void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata, |
238 | int idx) | 177 | int idx) |
239 | { | 178 | { |
240 | unsigned long flags; | 179 | mutex_lock(&sdata->local->key_mtx); |
241 | |||
242 | spin_lock_irqsave(&sdata->local->key_lock, flags); | ||
243 | __ieee80211_set_default_mgmt_key(sdata, idx); | 180 | __ieee80211_set_default_mgmt_key(sdata, idx); |
244 | spin_unlock_irqrestore(&sdata->local->key_lock, flags); | 181 | mutex_unlock(&sdata->local->key_mtx); |
245 | } | 182 | } |
246 | 183 | ||
247 | 184 | ||
@@ -336,7 +273,7 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg, | |||
336 | key->conf.iv_len = CCMP_HDR_LEN; | 273 | key->conf.iv_len = CCMP_HDR_LEN; |
337 | key->conf.icv_len = CCMP_MIC_LEN; | 274 | key->conf.icv_len = CCMP_MIC_LEN; |
338 | if (seq) { | 275 | if (seq) { |
339 | for (i = 0; i < NUM_RX_DATA_QUEUES; i++) | 276 | for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++) |
340 | for (j = 0; j < CCMP_PN_LEN; j++) | 277 | for (j = 0; j < CCMP_PN_LEN; j++) |
341 | key->u.ccmp.rx_pn[i][j] = | 278 | key->u.ccmp.rx_pn[i][j] = |
342 | seq[CCMP_PN_LEN - j - 1]; | 279 | seq[CCMP_PN_LEN - j - 1]; |
@@ -352,7 +289,6 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg, | |||
352 | } | 289 | } |
353 | memcpy(key->conf.key, key_data, key_len); | 290 | memcpy(key->conf.key, key_data, key_len); |
354 | INIT_LIST_HEAD(&key->list); | 291 | INIT_LIST_HEAD(&key->list); |
355 | INIT_LIST_HEAD(&key->todo); | ||
356 | 292 | ||
357 | if (alg == ALG_CCMP) { | 293 | if (alg == ALG_CCMP) { |
358 | /* | 294 | /* |
@@ -382,12 +318,27 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg, | |||
382 | return key; | 318 | return key; |
383 | } | 319 | } |
384 | 320 | ||
321 | static void __ieee80211_key_destroy(struct ieee80211_key *key) | ||
322 | { | ||
323 | if (!key) | ||
324 | return; | ||
325 | |||
326 | ieee80211_key_disable_hw_accel(key); | ||
327 | |||
328 | if (key->conf.alg == ALG_CCMP) | ||
329 | ieee80211_aes_key_free(key->u.ccmp.tfm); | ||
330 | if (key->conf.alg == ALG_AES_CMAC) | ||
331 | ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm); | ||
332 | ieee80211_debugfs_key_remove(key); | ||
333 | |||
334 | kfree(key); | ||
335 | } | ||
336 | |||
385 | void ieee80211_key_link(struct ieee80211_key *key, | 337 | void ieee80211_key_link(struct ieee80211_key *key, |
386 | struct ieee80211_sub_if_data *sdata, | 338 | struct ieee80211_sub_if_data *sdata, |
387 | struct sta_info *sta) | 339 | struct sta_info *sta) |
388 | { | 340 | { |
389 | struct ieee80211_key *old_key; | 341 | struct ieee80211_key *old_key; |
390 | unsigned long flags; | ||
391 | int idx; | 342 | int idx; |
392 | 343 | ||
393 | BUG_ON(!sdata); | 344 | BUG_ON(!sdata); |
@@ -431,7 +382,7 @@ void ieee80211_key_link(struct ieee80211_key *key, | |||
431 | } | 382 | } |
432 | } | 383 | } |
433 | 384 | ||
434 | spin_lock_irqsave(&sdata->local->key_lock, flags); | 385 | mutex_lock(&sdata->local->key_mtx); |
435 | 386 | ||
436 | if (sta) | 387 | if (sta) |
437 | old_key = sta->key; | 388 | old_key = sta->key; |
@@ -439,15 +390,13 @@ void ieee80211_key_link(struct ieee80211_key *key, | |||
439 | old_key = sdata->keys[idx]; | 390 | old_key = sdata->keys[idx]; |
440 | 391 | ||
441 | __ieee80211_key_replace(sdata, sta, old_key, key); | 392 | __ieee80211_key_replace(sdata, sta, old_key, key); |
393 | __ieee80211_key_destroy(old_key); | ||
442 | 394 | ||
443 | /* free old key later */ | 395 | ieee80211_debugfs_key_add(key); |
444 | add_todo(old_key, KEY_FLAG_TODO_DELETE); | ||
445 | 396 | ||
446 | add_todo(key, KEY_FLAG_TODO_ADD_DEBUGFS); | 397 | ieee80211_key_enable_hw_accel(key); |
447 | if (ieee80211_sdata_running(sdata)) | ||
448 | add_todo(key, KEY_FLAG_TODO_HWACCEL_ADD); | ||
449 | 398 | ||
450 | spin_unlock_irqrestore(&sdata->local->key_lock, flags); | 399 | mutex_unlock(&sdata->local->key_mtx); |
451 | } | 400 | } |
452 | 401 | ||
453 | static void __ieee80211_key_free(struct ieee80211_key *key) | 402 | static void __ieee80211_key_free(struct ieee80211_key *key) |
@@ -458,170 +407,65 @@ static void __ieee80211_key_free(struct ieee80211_key *key) | |||
458 | if (key->sdata) | 407 | if (key->sdata) |
459 | __ieee80211_key_replace(key->sdata, key->sta, | 408 | __ieee80211_key_replace(key->sdata, key->sta, |
460 | key, NULL); | 409 | key, NULL); |
461 | 410 | __ieee80211_key_destroy(key); | |
462 | add_todo(key, KEY_FLAG_TODO_DELETE); | ||
463 | } | 411 | } |
464 | 412 | ||
465 | void ieee80211_key_free(struct ieee80211_key *key) | 413 | void ieee80211_key_free(struct ieee80211_key *key) |
466 | { | 414 | { |
467 | unsigned long flags; | 415 | struct ieee80211_local *local; |
468 | 416 | ||
469 | if (!key) | 417 | if (!key) |
470 | return; | 418 | return; |
471 | 419 | ||
472 | if (!key->sdata) { | 420 | local = key->sdata->local; |
473 | /* The key has not been linked yet, simply free it | ||
474 | * and don't Oops */ | ||
475 | if (key->conf.alg == ALG_CCMP) | ||
476 | ieee80211_aes_key_free(key->u.ccmp.tfm); | ||
477 | kfree(key); | ||
478 | return; | ||
479 | } | ||
480 | 421 | ||
481 | spin_lock_irqsave(&key->sdata->local->key_lock, flags); | 422 | mutex_lock(&local->key_mtx); |
482 | __ieee80211_key_free(key); | 423 | __ieee80211_key_free(key); |
483 | spin_unlock_irqrestore(&key->sdata->local->key_lock, flags); | 424 | mutex_unlock(&local->key_mtx); |
484 | } | 425 | } |
485 | 426 | ||
486 | /* | 427 | void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata) |
487 | * To be safe against concurrent manipulations of the list (which shouldn't | ||
488 | * actually happen) we need to hold the spinlock. But under the spinlock we | ||
489 | * can't actually do much, so we defer processing to the todo list. Then run | ||
490 | * the todo list to be sure the operation and possibly previously pending | ||
491 | * operations are completed. | ||
492 | */ | ||
493 | static void ieee80211_todo_for_each_key(struct ieee80211_sub_if_data *sdata, | ||
494 | u32 todo_flags) | ||
495 | { | 428 | { |
496 | struct ieee80211_key *key; | 429 | struct ieee80211_key *key; |
497 | unsigned long flags; | ||
498 | |||
499 | might_sleep(); | ||
500 | |||
501 | spin_lock_irqsave(&sdata->local->key_lock, flags); | ||
502 | list_for_each_entry(key, &sdata->key_list, list) | ||
503 | add_todo(key, todo_flags); | ||
504 | spin_unlock_irqrestore(&sdata->local->key_lock, flags); | ||
505 | |||
506 | ieee80211_key_todo(); | ||
507 | } | ||
508 | 430 | ||
509 | void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata) | ||
510 | { | ||
511 | ASSERT_RTNL(); | 431 | ASSERT_RTNL(); |
512 | 432 | ||
513 | if (WARN_ON(!ieee80211_sdata_running(sdata))) | 433 | if (WARN_ON(!ieee80211_sdata_running(sdata))) |
514 | return; | 434 | return; |
515 | 435 | ||
516 | ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_ADD); | 436 | mutex_lock(&sdata->local->key_mtx); |
517 | } | ||
518 | |||
519 | void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata) | ||
520 | { | ||
521 | ASSERT_RTNL(); | ||
522 | |||
523 | ieee80211_todo_for_each_key(sdata, KEY_FLAG_TODO_HWACCEL_REMOVE); | ||
524 | } | ||
525 | |||
526 | static void __ieee80211_key_destroy(struct ieee80211_key *key) | ||
527 | { | ||
528 | if (!key) | ||
529 | return; | ||
530 | |||
531 | ieee80211_key_disable_hw_accel(key); | ||
532 | 437 | ||
533 | if (key->conf.alg == ALG_CCMP) | 438 | list_for_each_entry(key, &sdata->key_list, list) |
534 | ieee80211_aes_key_free(key->u.ccmp.tfm); | 439 | ieee80211_key_enable_hw_accel(key); |
535 | if (key->conf.alg == ALG_AES_CMAC) | ||
536 | ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm); | ||
537 | ieee80211_debugfs_key_remove(key); | ||
538 | 440 | ||
539 | kfree(key); | 441 | mutex_unlock(&sdata->local->key_mtx); |
540 | } | 442 | } |
541 | 443 | ||
542 | static void __ieee80211_key_todo(void) | 444 | void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata) |
543 | { | 445 | { |
544 | struct ieee80211_key *key; | 446 | struct ieee80211_key *key; |
545 | bool work_done; | ||
546 | u32 todoflags; | ||
547 | 447 | ||
548 | /* | 448 | ASSERT_RTNL(); |
549 | * NB: sta_info_destroy relies on this! | ||
550 | */ | ||
551 | synchronize_rcu(); | ||
552 | |||
553 | spin_lock_bh(&todo_lock); | ||
554 | while (!list_empty(&todo_list)) { | ||
555 | key = list_first_entry(&todo_list, struct ieee80211_key, todo); | ||
556 | list_del_init(&key->todo); | ||
557 | todoflags = key->flags & (KEY_FLAG_TODO_ADD_DEBUGFS | | ||
558 | KEY_FLAG_TODO_DEFKEY | | ||
559 | KEY_FLAG_TODO_DEFMGMTKEY | | ||
560 | KEY_FLAG_TODO_HWACCEL_ADD | | ||
561 | KEY_FLAG_TODO_HWACCEL_REMOVE | | ||
562 | KEY_FLAG_TODO_DELETE); | ||
563 | key->flags &= ~todoflags; | ||
564 | spin_unlock_bh(&todo_lock); | ||
565 | |||
566 | work_done = false; | ||
567 | |||
568 | if (todoflags & KEY_FLAG_TODO_ADD_DEBUGFS) { | ||
569 | ieee80211_debugfs_key_add(key); | ||
570 | work_done = true; | ||
571 | } | ||
572 | if (todoflags & KEY_FLAG_TODO_DEFKEY) { | ||
573 | ieee80211_debugfs_key_remove_default(key->sdata); | ||
574 | ieee80211_debugfs_key_add_default(key->sdata); | ||
575 | work_done = true; | ||
576 | } | ||
577 | if (todoflags & KEY_FLAG_TODO_DEFMGMTKEY) { | ||
578 | ieee80211_debugfs_key_remove_mgmt_default(key->sdata); | ||
579 | ieee80211_debugfs_key_add_mgmt_default(key->sdata); | ||
580 | work_done = true; | ||
581 | } | ||
582 | if (todoflags & KEY_FLAG_TODO_HWACCEL_ADD) { | ||
583 | ieee80211_key_enable_hw_accel(key); | ||
584 | work_done = true; | ||
585 | } | ||
586 | if (todoflags & KEY_FLAG_TODO_HWACCEL_REMOVE) { | ||
587 | ieee80211_key_disable_hw_accel(key); | ||
588 | work_done = true; | ||
589 | } | ||
590 | if (todoflags & KEY_FLAG_TODO_DELETE) { | ||
591 | __ieee80211_key_destroy(key); | ||
592 | work_done = true; | ||
593 | } | ||
594 | 449 | ||
595 | WARN_ON(!work_done); | 450 | mutex_lock(&sdata->local->key_mtx); |
596 | 451 | ||
597 | spin_lock_bh(&todo_lock); | 452 | list_for_each_entry(key, &sdata->key_list, list) |
598 | } | 453 | ieee80211_key_disable_hw_accel(key); |
599 | spin_unlock_bh(&todo_lock); | ||
600 | } | ||
601 | 454 | ||
602 | void ieee80211_key_todo(void) | 455 | mutex_unlock(&sdata->local->key_mtx); |
603 | { | ||
604 | ieee80211_key_lock(); | ||
605 | __ieee80211_key_todo(); | ||
606 | ieee80211_key_unlock(); | ||
607 | } | 456 | } |
608 | 457 | ||
609 | void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata) | 458 | void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata) |
610 | { | 459 | { |
611 | struct ieee80211_key *key, *tmp; | 460 | struct ieee80211_key *key, *tmp; |
612 | unsigned long flags; | ||
613 | 461 | ||
614 | ieee80211_key_lock(); | 462 | mutex_lock(&sdata->local->key_mtx); |
615 | 463 | ||
616 | ieee80211_debugfs_key_remove_default(sdata); | 464 | ieee80211_debugfs_key_remove_default(sdata); |
617 | ieee80211_debugfs_key_remove_mgmt_default(sdata); | 465 | ieee80211_debugfs_key_remove_mgmt_default(sdata); |
618 | 466 | ||
619 | spin_lock_irqsave(&sdata->local->key_lock, flags); | ||
620 | list_for_each_entry_safe(key, tmp, &sdata->key_list, list) | 467 | list_for_each_entry_safe(key, tmp, &sdata->key_list, list) |
621 | __ieee80211_key_free(key); | 468 | __ieee80211_key_free(key); |
622 | spin_unlock_irqrestore(&sdata->local->key_lock, flags); | ||
623 | |||
624 | __ieee80211_key_todo(); | ||
625 | 469 | ||
626 | ieee80211_key_unlock(); | 470 | mutex_unlock(&sdata->local->key_mtx); |
627 | } | 471 | } |
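Editor's note on the key.c conversion above: a minimal sketch, not part of the patch, of the pattern the change enables. Because local->key_mtx is a mutex rather than the old IRQ-safe spinlock, key operations that may sleep (the hardware-acceleration helpers, which end up in the driver's set_key callback) can now run inline where the old code had to defer them to the todo list. The helper name below is hypothetical.

static void ieee80211_key_reupload_all(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_key *key;

	/* key_mtx may be held across sleeping driver callbacks */
	mutex_lock(&sdata->local->key_mtx);
	list_for_each_entry(key, &sdata->key_list, list) {
		ieee80211_key_disable_hw_accel(key);
		ieee80211_key_enable_hw_accel(key);
	}
	mutex_unlock(&sdata->local->key_mtx);
}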
diff --git a/net/mac80211/key.h b/net/mac80211/key.h index bdc2968c2bbe..a3849fa3fce8 100644 --- a/net/mac80211/key.h +++ b/net/mac80211/key.h | |||
@@ -38,25 +38,9 @@ struct sta_info; | |||
38 | * | 38 | * |
39 | * @KEY_FLAG_UPLOADED_TO_HARDWARE: Indicates that this key is present | 39 | * @KEY_FLAG_UPLOADED_TO_HARDWARE: Indicates that this key is present |
40 | * in the hardware for TX crypto hardware acceleration. | 40 | * in the hardware for TX crypto hardware acceleration. |
41 | * @KEY_FLAG_TODO_DELETE: Key is marked for deletion and will, after an | ||
42 | * RCU grace period, no longer be reachable other than from the | ||
43 | * todo list. | ||
44 | * @KEY_FLAG_TODO_HWACCEL_ADD: Key needs to be added to hardware acceleration. | ||
45 | * @KEY_FLAG_TODO_HWACCEL_REMOVE: Key needs to be removed from hardware | ||
46 | * acceleration. | ||
47 | * @KEY_FLAG_TODO_DEFKEY: Key is default key and debugfs needs to be updated. | ||
48 | * @KEY_FLAG_TODO_ADD_DEBUGFS: Key needs to be added to debugfs. | ||
49 | * @KEY_FLAG_TODO_DEFMGMTKEY: Key is default management key and debugfs needs | ||
50 | * to be updated. | ||
51 | */ | 41 | */ |
52 | enum ieee80211_internal_key_flags { | 42 | enum ieee80211_internal_key_flags { |
53 | KEY_FLAG_UPLOADED_TO_HARDWARE = BIT(0), | 43 | KEY_FLAG_UPLOADED_TO_HARDWARE = BIT(0), |
54 | KEY_FLAG_TODO_DELETE = BIT(1), | ||
55 | KEY_FLAG_TODO_HWACCEL_ADD = BIT(2), | ||
56 | KEY_FLAG_TODO_HWACCEL_REMOVE = BIT(3), | ||
57 | KEY_FLAG_TODO_DEFKEY = BIT(4), | ||
58 | KEY_FLAG_TODO_ADD_DEBUGFS = BIT(5), | ||
59 | KEY_FLAG_TODO_DEFMGMTKEY = BIT(6), | ||
60 | }; | 44 | }; |
61 | 45 | ||
62 | enum ieee80211_internal_tkip_state { | 46 | enum ieee80211_internal_tkip_state { |
@@ -79,10 +63,8 @@ struct ieee80211_key { | |||
79 | 63 | ||
80 | /* for sdata list */ | 64 | /* for sdata list */ |
81 | struct list_head list; | 65 | struct list_head list; |
82 | /* for todo list */ | ||
83 | struct list_head todo; | ||
84 | 66 | ||
85 | /* protected by todo lock! */ | 67 | /* protected by key mutex */ |
86 | unsigned int flags; | 68 | unsigned int flags; |
87 | 69 | ||
88 | union { | 70 | union { |
@@ -95,7 +77,13 @@ struct ieee80211_key { | |||
95 | } tkip; | 77 | } tkip; |
96 | struct { | 78 | struct { |
97 | u8 tx_pn[6]; | 79 | u8 tx_pn[6]; |
98 | u8 rx_pn[NUM_RX_DATA_QUEUES][6]; | 80 | /* |
81 | * Last received packet number. The first | ||
82 | * NUM_RX_DATA_QUEUES counters are used with Data | ||
83 | * frames and the last counter is used with Robust | ||
84 | * Management frames. | ||
85 | */ | ||
86 | u8 rx_pn[NUM_RX_DATA_QUEUES + 1][6]; | ||
99 | struct crypto_cipher *tfm; | 87 | struct crypto_cipher *tfm; |
100 | u32 replays; /* dot11RSNAStatsCCMPReplays */ | 88 | u32 replays; /* dot11RSNAStatsCCMPReplays */ |
101 | /* scratch buffers for virt_to_page() (crypto API) */ | 89 | /* scratch buffers for virt_to_page() (crypto API) */ |
@@ -155,6 +143,4 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata); | |||
155 | void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata); | 143 | void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata); |
156 | void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata); | 144 | void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata); |
157 | 145 | ||
158 | void ieee80211_key_todo(void); | ||
159 | |||
160 | #endif /* IEEE80211_KEY_H */ | 146 | #endif /* IEEE80211_KEY_H */ |
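Editor's note on the rx_pn change above: the new comment documents that the extra slot at index NUM_RX_DATA_QUEUES holds the packet number for robust management frames. A hypothetical helper (not in the patch) illustrating that indexing convention:

static u8 *ccmp_rx_pn_slot(struct ieee80211_key *key,
			   bool robust_mgmt, int queue)
{
	/* data frames use their RX queue; robust management
	 * frames use the extra, last counter */
	int i = robust_mgmt ? NUM_RX_DATA_QUEUES : queue;

	return key->u.ccmp.rx_pn[i];
}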
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 22a384dfab65..edf7aff93268 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/rtnetlink.h> | 20 | #include <linux/rtnetlink.h> |
21 | #include <linux/bitmap.h> | 21 | #include <linux/bitmap.h> |
22 | #include <linux/pm_qos_params.h> | 22 | #include <linux/pm_qos_params.h> |
23 | #include <linux/inetdevice.h> | ||
23 | #include <net/net_namespace.h> | 24 | #include <net/net_namespace.h> |
24 | #include <net/cfg80211.h> | 25 | #include <net/cfg80211.h> |
25 | 26 | ||
@@ -259,7 +260,6 @@ static void ieee80211_tasklet_handler(unsigned long data) | |||
259 | { | 260 | { |
260 | struct ieee80211_local *local = (struct ieee80211_local *) data; | 261 | struct ieee80211_local *local = (struct ieee80211_local *) data; |
261 | struct sk_buff *skb; | 262 | struct sk_buff *skb; |
262 | struct ieee80211_ra_tid *ra_tid; | ||
263 | 263 | ||
264 | while ((skb = skb_dequeue(&local->skb_queue)) || | 264 | while ((skb = skb_dequeue(&local->skb_queue)) || |
265 | (skb = skb_dequeue(&local->skb_queue_unreliable))) { | 265 | (skb = skb_dequeue(&local->skb_queue_unreliable))) { |
@@ -274,18 +274,6 @@ static void ieee80211_tasklet_handler(unsigned long data) | |||
274 | skb->pkt_type = 0; | 274 | skb->pkt_type = 0; |
275 | ieee80211_tx_status(local_to_hw(local), skb); | 275 | ieee80211_tx_status(local_to_hw(local), skb); |
276 | break; | 276 | break; |
277 | case IEEE80211_DELBA_MSG: | ||
278 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | ||
279 | ieee80211_stop_tx_ba_cb(ra_tid->vif, ra_tid->ra, | ||
280 | ra_tid->tid); | ||
281 | dev_kfree_skb(skb); | ||
282 | break; | ||
283 | case IEEE80211_ADDBA_MSG: | ||
284 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | ||
285 | ieee80211_start_tx_ba_cb(ra_tid->vif, ra_tid->ra, | ||
286 | ra_tid->tid); | ||
287 | dev_kfree_skb(skb); | ||
288 | break ; | ||
289 | default: | 277 | default: |
290 | WARN(1, "mac80211: Packet is of unknown type %d\n", | 278 | WARN(1, "mac80211: Packet is of unknown type %d\n", |
291 | skb->pkt_type); | 279 | skb->pkt_type); |
@@ -329,6 +317,76 @@ static void ieee80211_recalc_smps_work(struct work_struct *work) | |||
329 | mutex_unlock(&local->iflist_mtx); | 317 | mutex_unlock(&local->iflist_mtx); |
330 | } | 318 | } |
331 | 319 | ||
320 | #ifdef CONFIG_INET | ||
321 | static int ieee80211_ifa_changed(struct notifier_block *nb, | ||
322 | unsigned long data, void *arg) | ||
323 | { | ||
324 | struct in_ifaddr *ifa = arg; | ||
325 | struct ieee80211_local *local = | ||
326 | container_of(nb, struct ieee80211_local, | ||
327 | ifa_notifier); | ||
328 | struct net_device *ndev = ifa->ifa_dev->dev; | ||
329 | struct wireless_dev *wdev = ndev->ieee80211_ptr; | ||
330 | struct in_device *idev; | ||
331 | struct ieee80211_sub_if_data *sdata; | ||
332 | struct ieee80211_bss_conf *bss_conf; | ||
333 | struct ieee80211_if_managed *ifmgd; | ||
334 | int c = 0; | ||
335 | |||
336 | if (!netif_running(ndev)) | ||
337 | return NOTIFY_DONE; | ||
338 | |||
339 | /* Make sure it's our interface that got changed */ | ||
340 | if (!wdev) | ||
341 | return NOTIFY_DONE; | ||
342 | |||
343 | if (wdev->wiphy != local->hw.wiphy) | ||
344 | return NOTIFY_DONE; | ||
345 | |||
346 | sdata = IEEE80211_DEV_TO_SUB_IF(ndev); | ||
347 | bss_conf = &sdata->vif.bss_conf; | ||
348 | |||
349 | /* ARP filtering is only supported in managed mode */ | ||
350 | if (sdata->vif.type != NL80211_IFTYPE_STATION) | ||
351 | return NOTIFY_DONE; | ||
352 | |||
353 | idev = sdata->dev->ip_ptr; | ||
354 | if (!idev) | ||
355 | return NOTIFY_DONE; | ||
356 | |||
357 | ifmgd = &sdata->u.mgd; | ||
358 | mutex_lock(&ifmgd->mtx); | ||
359 | |||
360 | /* Copy the addresses to the bss_conf list */ | ||
361 | ifa = idev->ifa_list; | ||
362 | while (c < IEEE80211_BSS_ARP_ADDR_LIST_LEN && ifa) { | ||
363 | bss_conf->arp_addr_list[c] = ifa->ifa_address; | ||
364 | ifa = ifa->ifa_next; | ||
365 | c++; | ||
366 | } | ||
367 | |||
368 | /* If not all addresses fit the list, disable filtering */ | ||
369 | if (ifa) { | ||
370 | sdata->arp_filter_state = false; | ||
371 | c = 0; | ||
372 | } else { | ||
373 | sdata->arp_filter_state = true; | ||
374 | } | ||
375 | bss_conf->arp_addr_cnt = c; | ||
376 | |||
377 | /* Configure driver only if associated */ | ||
378 | if (ifmgd->associated) { | ||
379 | bss_conf->arp_filter_enabled = sdata->arp_filter_state; | ||
380 | ieee80211_bss_info_change_notify(sdata, | ||
381 | BSS_CHANGED_ARP_FILTER); | ||
382 | } | ||
383 | |||
384 | mutex_unlock(&ifmgd->mtx); | ||
385 | |||
386 | return NOTIFY_DONE; | ||
387 | } | ||
388 | #endif | ||
389 | |||
332 | struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | 390 | struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, |
333 | const struct ieee80211_ops *ops) | 391 | const struct ieee80211_ops *ops) |
334 | { | 392 | { |
@@ -396,7 +454,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
396 | mutex_init(&local->iflist_mtx); | 454 | mutex_init(&local->iflist_mtx); |
397 | mutex_init(&local->scan_mtx); | 455 | mutex_init(&local->scan_mtx); |
398 | 456 | ||
399 | spin_lock_init(&local->key_lock); | 457 | mutex_init(&local->key_mtx); |
400 | spin_lock_init(&local->filter_lock); | 458 | spin_lock_init(&local->filter_lock); |
401 | spin_lock_init(&local->queue_stop_reason_lock); | 459 | spin_lock_init(&local->queue_stop_reason_lock); |
402 | 460 | ||
@@ -419,8 +477,10 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
419 | 477 | ||
420 | sta_info_init(local); | 478 | sta_info_init(local); |
421 | 479 | ||
422 | for (i = 0; i < IEEE80211_MAX_QUEUES; i++) | 480 | for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { |
423 | skb_queue_head_init(&local->pending[i]); | 481 | skb_queue_head_init(&local->pending[i]); |
482 | atomic_set(&local->agg_queue_stop[i], 0); | ||
483 | } | ||
424 | tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending, | 484 | tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending, |
425 | (unsigned long)local); | 485 | (unsigned long)local); |
426 | 486 | ||
@@ -431,8 +491,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
431 | skb_queue_head_init(&local->skb_queue); | 491 | skb_queue_head_init(&local->skb_queue); |
432 | skb_queue_head_init(&local->skb_queue_unreliable); | 492 | skb_queue_head_init(&local->skb_queue_unreliable); |
433 | 493 | ||
434 | spin_lock_init(&local->ampdu_lock); | ||
435 | |||
436 | return local_to_hw(local); | 494 | return local_to_hw(local); |
437 | } | 495 | } |
438 | EXPORT_SYMBOL(ieee80211_alloc_hw); | 496 | EXPORT_SYMBOL(ieee80211_alloc_hw); |
@@ -572,7 +630,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
572 | 630 | ||
573 | local->hw.conf.listen_interval = local->hw.max_listen_interval; | 631 | local->hw.conf.listen_interval = local->hw.max_listen_interval; |
574 | 632 | ||
575 | local->hw.conf.dynamic_ps_forced_timeout = -1; | 633 | local->dynamic_ps_forced_timeout = -1; |
576 | 634 | ||
577 | result = sta_info_start(local); | 635 | result = sta_info_start(local); |
578 | if (result < 0) | 636 | if (result < 0) |
@@ -612,14 +670,24 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
612 | ieee80211_max_network_latency; | 670 | ieee80211_max_network_latency; |
613 | result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY, | 671 | result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY, |
614 | &local->network_latency_notifier); | 672 | &local->network_latency_notifier); |
615 | |||
616 | if (result) { | 673 | if (result) { |
617 | rtnl_lock(); | 674 | rtnl_lock(); |
618 | goto fail_pm_qos; | 675 | goto fail_pm_qos; |
619 | } | 676 | } |
620 | 677 | ||
678 | #ifdef CONFIG_INET | ||
679 | local->ifa_notifier.notifier_call = ieee80211_ifa_changed; | ||
680 | result = register_inetaddr_notifier(&local->ifa_notifier); | ||
681 | if (result) | ||
682 | goto fail_ifa; | ||
683 | #endif | ||
684 | |||
621 | return 0; | 685 | return 0; |
622 | 686 | ||
687 | fail_ifa: | ||
688 | pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY, | ||
689 | &local->network_latency_notifier); | ||
690 | rtnl_lock(); | ||
623 | fail_pm_qos: | 691 | fail_pm_qos: |
624 | ieee80211_led_exit(local); | 692 | ieee80211_led_exit(local); |
625 | ieee80211_remove_interfaces(local); | 693 | ieee80211_remove_interfaces(local); |
@@ -647,6 +715,9 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw) | |||
647 | 715 | ||
648 | pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY, | 716 | pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY, |
649 | &local->network_latency_notifier); | 717 | &local->network_latency_notifier); |
718 | #ifdef CONFIG_INET | ||
719 | unregister_inetaddr_notifier(&local->ifa_notifier); | ||
720 | #endif | ||
650 | 721 | ||
651 | rtnl_lock(); | 722 | rtnl_lock(); |
652 | 723 | ||
@@ -704,6 +775,10 @@ static int __init ieee80211_init(void) | |||
704 | if (ret) | 775 | if (ret) |
705 | return ret; | 776 | return ret; |
706 | 777 | ||
778 | ret = rc80211_minstrel_ht_init(); | ||
779 | if (ret) | ||
780 | goto err_minstrel; | ||
781 | |||
707 | ret = rc80211_pid_init(); | 782 | ret = rc80211_pid_init(); |
708 | if (ret) | 783 | if (ret) |
709 | goto err_pid; | 784 | goto err_pid; |
@@ -716,6 +791,8 @@ static int __init ieee80211_init(void) | |||
716 | err_netdev: | 791 | err_netdev: |
717 | rc80211_pid_exit(); | 792 | rc80211_pid_exit(); |
718 | err_pid: | 793 | err_pid: |
794 | rc80211_minstrel_ht_exit(); | ||
795 | err_minstrel: | ||
719 | rc80211_minstrel_exit(); | 796 | rc80211_minstrel_exit(); |
720 | 797 | ||
721 | return ret; | 798 | return ret; |
@@ -724,6 +801,7 @@ static int __init ieee80211_init(void) | |||
724 | static void __exit ieee80211_exit(void) | 801 | static void __exit ieee80211_exit(void) |
725 | { | 802 | { |
726 | rc80211_pid_exit(); | 803 | rc80211_pid_exit(); |
804 | rc80211_minstrel_ht_exit(); | ||
727 | rc80211_minstrel_exit(); | 805 | rc80211_minstrel_exit(); |
728 | 806 | ||
729 | /* | 807 | /* |
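Editor's note on the ARP-filter notifier added to main.c above: the notifier only has an effect if the driver reacts to BSS_CHANGED_ARP_FILTER in its bss_info_changed() callback. A hedged sketch of that consumer follows; it is not part of this patch, hw_arp_filter_cfg() is a made-up placeholder for a device-specific firmware command, and the types of arp_addr_list/arp_addr_cnt are assumed to match the __be32 addresses copied by the notifier.

/* placeholder for whatever the device firmware interface provides */
static void hw_arp_filter_cfg(void *priv, const __be32 *addrs, int count);

static void example_bss_info_changed(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif,
				     struct ieee80211_bss_conf *info,
				     u32 changed)
{
	if (!(changed & BSS_CHANGED_ARP_FILTER))
		return;

	if (info->arp_filter_enabled)
		/* program the addresses collected by the notifier */
		hw_arp_filter_cfg(hw->priv, info->arp_addr_list,
				  info->arp_addr_cnt);
	else
		/* filtering disabled: let all ARP frames through */
		hw_arp_filter_cfg(hw->priv, NULL, 0);
}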
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index bde81031727a..c8a4f19ed13b 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
@@ -54,7 +54,7 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data) | |||
54 | return; | 54 | return; |
55 | } | 55 | } |
56 | 56 | ||
57 | ieee80211_queue_work(&local->hw, &ifmsh->work); | 57 | ieee80211_queue_work(&local->hw, &sdata->work); |
58 | } | 58 | } |
59 | 59 | ||
60 | /** | 60 | /** |
@@ -345,7 +345,7 @@ static void ieee80211_mesh_path_timer(unsigned long data) | |||
345 | return; | 345 | return; |
346 | } | 346 | } |
347 | 347 | ||
348 | ieee80211_queue_work(&local->hw, &ifmsh->work); | 348 | ieee80211_queue_work(&local->hw, &sdata->work); |
349 | } | 349 | } |
350 | 350 | ||
351 | static void ieee80211_mesh_path_root_timer(unsigned long data) | 351 | static void ieee80211_mesh_path_root_timer(unsigned long data) |
@@ -362,7 +362,7 @@ static void ieee80211_mesh_path_root_timer(unsigned long data) | |||
362 | return; | 362 | return; |
363 | } | 363 | } |
364 | 364 | ||
365 | ieee80211_queue_work(&local->hw, &ifmsh->work); | 365 | ieee80211_queue_work(&local->hw, &sdata->work); |
366 | } | 366 | } |
367 | 367 | ||
368 | void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh) | 368 | void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh) |
@@ -484,9 +484,6 @@ void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata) | |||
484 | { | 484 | { |
485 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 485 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
486 | 486 | ||
487 | /* might restart the timer but that doesn't matter */ | ||
488 | cancel_work_sync(&ifmsh->work); | ||
489 | |||
490 | /* use atomic bitops in case both timers fire at the same time */ | 487 | /* use atomic bitops in case both timers fire at the same time */ |
491 | 488 | ||
492 | if (del_timer_sync(&ifmsh->housekeeping_timer)) | 489 | if (del_timer_sync(&ifmsh->housekeeping_timer)) |
@@ -518,7 +515,7 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) | |||
518 | 515 | ||
519 | set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); | 516 | set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); |
520 | ieee80211_mesh_root_setup(ifmsh); | 517 | ieee80211_mesh_root_setup(ifmsh); |
521 | ieee80211_queue_work(&local->hw, &ifmsh->work); | 518 | ieee80211_queue_work(&local->hw, &sdata->work); |
522 | sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL; | 519 | sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL; |
523 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | | 520 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | |
524 | BSS_CHANGED_BEACON_ENABLED | | 521 | BSS_CHANGED_BEACON_ENABLED | |
@@ -536,16 +533,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) | |||
536 | * whether the interface is running, which, at this point, | 533 | * whether the interface is running, which, at this point, |
537 | * it no longer is. | 534 | * it no longer is. |
538 | */ | 535 | */ |
539 | cancel_work_sync(&sdata->u.mesh.work); | 536 | cancel_work_sync(&sdata->work); |
540 | |||
541 | /* | ||
542 | * When we get here, the interface is marked down. | ||
543 | * Call synchronize_rcu() to wait for the RX path | ||
544 | * should it be using the interface and enqueuing | ||
545 | * frames at this very time on another CPU. | ||
546 | */ | ||
547 | rcu_barrier(); /* Wait for RX path and call_rcu()'s */ | ||
548 | skb_queue_purge(&sdata->u.mesh.skb_queue); | ||
549 | } | 537 | } |
550 | 538 | ||
551 | static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, | 539 | static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, |
@@ -608,8 +596,8 @@ static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata, | |||
608 | } | 596 | } |
609 | } | 597 | } |
610 | 598 | ||
611 | static void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | 599 | void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, |
612 | struct sk_buff *skb) | 600 | struct sk_buff *skb) |
613 | { | 601 | { |
614 | struct ieee80211_rx_status *rx_status; | 602 | struct ieee80211_rx_status *rx_status; |
615 | struct ieee80211_if_mesh *ifmsh; | 603 | struct ieee80211_if_mesh *ifmsh; |
@@ -632,26 +620,11 @@ static void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
632 | ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status); | 620 | ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status); |
633 | break; | 621 | break; |
634 | } | 622 | } |
635 | |||
636 | kfree_skb(skb); | ||
637 | } | 623 | } |
638 | 624 | ||
639 | static void ieee80211_mesh_work(struct work_struct *work) | 625 | void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata) |
640 | { | 626 | { |
641 | struct ieee80211_sub_if_data *sdata = | ||
642 | container_of(work, struct ieee80211_sub_if_data, u.mesh.work); | ||
643 | struct ieee80211_local *local = sdata->local; | ||
644 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 627 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
645 | struct sk_buff *skb; | ||
646 | |||
647 | if (!ieee80211_sdata_running(sdata)) | ||
648 | return; | ||
649 | |||
650 | if (local->scanning) | ||
651 | return; | ||
652 | |||
653 | while ((skb = skb_dequeue(&ifmsh->skb_queue))) | ||
654 | ieee80211_mesh_rx_queued_mgmt(sdata, skb); | ||
655 | 628 | ||
656 | if (ifmsh->preq_queue_len && | 629 | if (ifmsh->preq_queue_len && |
657 | time_after(jiffies, | 630 | time_after(jiffies, |
@@ -678,7 +651,7 @@ void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) | |||
678 | rcu_read_lock(); | 651 | rcu_read_lock(); |
679 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | 652 | list_for_each_entry_rcu(sdata, &local->interfaces, list) |
680 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 653 | if (ieee80211_vif_is_mesh(&sdata->vif)) |
681 | ieee80211_queue_work(&local->hw, &sdata->u.mesh.work); | 654 | ieee80211_queue_work(&local->hw, &sdata->work); |
682 | rcu_read_unlock(); | 655 | rcu_read_unlock(); |
683 | } | 656 | } |
684 | 657 | ||
@@ -686,11 +659,9 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) | |||
686 | { | 659 | { |
687 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 660 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
688 | 661 | ||
689 | INIT_WORK(&ifmsh->work, ieee80211_mesh_work); | ||
690 | setup_timer(&ifmsh->housekeeping_timer, | 662 | setup_timer(&ifmsh->housekeeping_timer, |
691 | ieee80211_mesh_housekeeping_timer, | 663 | ieee80211_mesh_housekeeping_timer, |
692 | (unsigned long) sdata); | 664 | (unsigned long) sdata); |
693 | skb_queue_head_init(&sdata->u.mesh.skb_queue); | ||
694 | 665 | ||
695 | ifmsh->mshcfg.dot11MeshRetryTimeout = MESH_RET_T; | 666 | ifmsh->mshcfg.dot11MeshRetryTimeout = MESH_RET_T; |
696 | ifmsh->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T; | 667 | ifmsh->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T; |
@@ -731,29 +702,3 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) | |||
731 | INIT_LIST_HEAD(&ifmsh->preq_queue.list); | 702 | INIT_LIST_HEAD(&ifmsh->preq_queue.list); |
732 | spin_lock_init(&ifmsh->mesh_preq_queue_lock); | 703 | spin_lock_init(&ifmsh->mesh_preq_queue_lock); |
733 | } | 704 | } |
734 | |||
735 | ieee80211_rx_result | ||
736 | ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) | ||
737 | { | ||
738 | struct ieee80211_local *local = sdata->local; | ||
739 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | ||
740 | struct ieee80211_mgmt *mgmt; | ||
741 | u16 fc; | ||
742 | |||
743 | if (skb->len < 24) | ||
744 | return RX_DROP_MONITOR; | ||
745 | |||
746 | mgmt = (struct ieee80211_mgmt *) skb->data; | ||
747 | fc = le16_to_cpu(mgmt->frame_control); | ||
748 | |||
749 | switch (fc & IEEE80211_FCTL_STYPE) { | ||
750 | case IEEE80211_STYPE_ACTION: | ||
751 | case IEEE80211_STYPE_PROBE_RESP: | ||
752 | case IEEE80211_STYPE_BEACON: | ||
753 | skb_queue_tail(&ifmsh->skb_queue, skb); | ||
754 | ieee80211_queue_work(&local->hw, &ifmsh->work); | ||
755 | return RX_QUEUED; | ||
756 | } | ||
757 | |||
758 | return RX_CONTINUE; | ||
759 | } | ||
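Editor's note on the mesh.c changes above: with ieee80211_mesh_rx_mgmt() removed, the mesh code no longer owns a private work struct or skb queue. The two handlers it now exports, ieee80211_mesh_rx_queued_mgmt() and ieee80211_mesh_work(), together with the station-mode equivalents exported from mlme.c below, are evidently meant to be driven from the shared per-interface work item (the same sdata->work the timers above now queue). The caller lives in iface.c, which is not part of this section, so the following is only a rough sketch under that assumption; sdata->skb_queue and the dispatch shape are guesses, and freeing the skb in the caller matches the kfree_skb() calls removed from the handlers.

static void example_iface_work(struct work_struct *work)
{
	struct ieee80211_sub_if_data *sdata =
		container_of(work, struct ieee80211_sub_if_data, work);
	struct sk_buff *skb;

	if (!ieee80211_sdata_running(sdata))
		return;

	/* queued management frames first ... */
	while ((skb = skb_dequeue(&sdata->skb_queue))) {
		switch (sdata->vif.type) {
		case NL80211_IFTYPE_STATION:
			ieee80211_sta_rx_queued_mgmt(sdata, skb);
			break;
		case NL80211_IFTYPE_MESH_POINT:
			ieee80211_mesh_rx_queued_mgmt(sdata, skb);
			break;
		default:
			break;
		}
		kfree_skb(skb);
	}

	/* ... then the type-specific periodic work */
	switch (sdata->vif.type) {
	case NL80211_IFTYPE_STATION:
		ieee80211_sta_work(sdata);
		break;
	case NL80211_IFTYPE_MESH_POINT:
		ieee80211_mesh_work(sdata);
		break;
	default:
		break;
	}
}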
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index c88087f1cd0f..ebd3f1d9d889 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h | |||
@@ -237,8 +237,6 @@ void ieee80211s_update_metric(struct ieee80211_local *local, | |||
237 | struct sta_info *stainfo, struct sk_buff *skb); | 237 | struct sta_info *stainfo, struct sk_buff *skb); |
238 | void ieee80211s_stop(void); | 238 | void ieee80211s_stop(void); |
239 | void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); | 239 | void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); |
240 | ieee80211_rx_result | ||
241 | ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); | ||
242 | void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); | 240 | void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); |
243 | void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); | 241 | void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); |
244 | void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); | 242 | void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); |
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 0705018d8d1e..829e08a657d0 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c | |||
@@ -805,14 +805,14 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags) | |||
805 | spin_unlock(&ifmsh->mesh_preq_queue_lock); | 805 | spin_unlock(&ifmsh->mesh_preq_queue_lock); |
806 | 806 | ||
807 | if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata))) | 807 | if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata))) |
808 | ieee80211_queue_work(&sdata->local->hw, &ifmsh->work); | 808 | ieee80211_queue_work(&sdata->local->hw, &sdata->work); |
809 | 809 | ||
810 | else if (time_before(jiffies, ifmsh->last_preq)) { | 810 | else if (time_before(jiffies, ifmsh->last_preq)) { |
811 | /* avoid long wait if did not send preqs for a long time | 811 | /* avoid long wait if did not send preqs for a long time |
812 | * and jiffies wrapped around | 812 | * and jiffies wrapped around |
813 | */ | 813 | */ |
814 | ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1; | 814 | ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1; |
815 | ieee80211_queue_work(&sdata->local->hw, &ifmsh->work); | 815 | ieee80211_queue_work(&sdata->local->hw, &sdata->work); |
816 | } else | 816 | } else |
817 | mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq + | 817 | mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq + |
818 | min_preq_int_jiff(sdata)); | 818 | min_preq_int_jiff(sdata)); |
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index 181ffd6efd81..349e466cf08b 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c | |||
@@ -315,7 +315,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) | |||
315 | read_unlock(&pathtbl_resize_lock); | 315 | read_unlock(&pathtbl_resize_lock); |
316 | if (grow) { | 316 | if (grow) { |
317 | set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); | 317 | set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); |
318 | ieee80211_queue_work(&local->hw, &ifmsh->work); | 318 | ieee80211_queue_work(&local->hw, &sdata->work); |
319 | } | 319 | } |
320 | return 0; | 320 | return 0; |
321 | 321 | ||
@@ -425,7 +425,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) | |||
425 | read_unlock(&pathtbl_resize_lock); | 425 | read_unlock(&pathtbl_resize_lock); |
426 | if (grow) { | 426 | if (grow) { |
427 | set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); | 427 | set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); |
428 | ieee80211_queue_work(&local->hw, &ifmsh->work); | 428 | ieee80211_queue_work(&local->hw, &sdata->work); |
429 | } | 429 | } |
430 | return 0; | 430 | return 0; |
431 | 431 | ||
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index f803f8b72a93..85c3ca33333e 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -561,23 +561,19 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency) | |||
561 | beaconint_us = ieee80211_tu_to_usec( | 561 | beaconint_us = ieee80211_tu_to_usec( |
562 | found->vif.bss_conf.beacon_int); | 562 | found->vif.bss_conf.beacon_int); |
563 | 563 | ||
564 | timeout = local->hw.conf.dynamic_ps_forced_timeout; | 564 | timeout = local->dynamic_ps_forced_timeout; |
565 | if (timeout < 0) { | 565 | if (timeout < 0) { |
566 | /* | 566 | /* |
567 | * Go to full PSM if the user configures a very low | ||
568 | * latency requirement. | ||
567 | * The 2 second value is there for compatibility until | 569 | * The 2 second value is there for compatibility until |
568 | * the PM_QOS_NETWORK_LATENCY is configured with real | 570 | * the PM_QOS_NETWORK_LATENCY is configured with real |
569 | * values. | 571 | * values. |
570 | */ | 572 | */ |
571 | if (latency == 2000000000) | 573 | if (latency > 1900000000 && latency != 2000000000) |
572 | timeout = 100; | ||
573 | else if (latency <= 50000) | ||
574 | timeout = 300; | ||
575 | else if (latency <= 100000) | ||
576 | timeout = 100; | ||
577 | else if (latency <= 500000) | ||
578 | timeout = 50; | ||
579 | else | ||
580 | timeout = 0; | 574 | timeout = 0; |
575 | else | ||
576 | timeout = 100; | ||
581 | } | 577 | } |
582 | local->hw.conf.dynamic_ps_timeout = timeout; | 578 | local->hw.conf.dynamic_ps_timeout = timeout; |
583 | 579 | ||
@@ -806,11 +802,12 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, | |||
806 | { | 802 | { |
807 | struct ieee80211_bss *bss = (void *)cbss->priv; | 803 | struct ieee80211_bss *bss = (void *)cbss->priv; |
808 | struct ieee80211_local *local = sdata->local; | 804 | struct ieee80211_local *local = sdata->local; |
805 | struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; | ||
809 | 806 | ||
810 | bss_info_changed |= BSS_CHANGED_ASSOC; | 807 | bss_info_changed |= BSS_CHANGED_ASSOC; |
811 | /* set timing information */ | 808 | /* set timing information */ |
812 | sdata->vif.bss_conf.beacon_int = cbss->beacon_interval; | 809 | bss_conf->beacon_int = cbss->beacon_interval; |
813 | sdata->vif.bss_conf.timestamp = cbss->tsf; | 810 | bss_conf->timestamp = cbss->tsf; |
814 | 811 | ||
815 | bss_info_changed |= BSS_CHANGED_BEACON_INT; | 812 | bss_info_changed |= BSS_CHANGED_BEACON_INT; |
816 | bss_info_changed |= ieee80211_handle_bss_capability(sdata, | 813 | bss_info_changed |= ieee80211_handle_bss_capability(sdata, |
@@ -835,7 +832,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, | |||
835 | 832 | ||
836 | ieee80211_led_assoc(local, 1); | 833 | ieee80211_led_assoc(local, 1); |
837 | 834 | ||
838 | sdata->vif.bss_conf.assoc = 1; | 835 | bss_conf->assoc = 1; |
839 | /* | 836 | /* |
840 | * For now just always ask the driver to update the basic rateset | 837 | * For now just always ask the driver to update the basic rateset |
841 | * when we have associated, we aren't checking whether it actually | 838 | * when we have associated, we aren't checking whether it actually |
@@ -848,9 +845,15 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, | |||
848 | 845 | ||
849 | /* Tell the driver to monitor connection quality (if supported) */ | 846 | /* Tell the driver to monitor connection quality (if supported) */ |
850 | if ((local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI) && | 847 | if ((local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI) && |
851 | sdata->vif.bss_conf.cqm_rssi_thold) | 848 | bss_conf->cqm_rssi_thold) |
852 | bss_info_changed |= BSS_CHANGED_CQM; | 849 | bss_info_changed |= BSS_CHANGED_CQM; |
853 | 850 | ||
851 | /* Enable ARP filtering */ | ||
852 | if (bss_conf->arp_filter_enabled != sdata->arp_filter_state) { | ||
853 | bss_conf->arp_filter_enabled = sdata->arp_filter_state; | ||
854 | bss_info_changed |= BSS_CHANGED_ARP_FILTER; | ||
855 | } | ||
856 | |||
854 | ieee80211_bss_info_change_notify(sdata, bss_info_changed); | 857 | ieee80211_bss_info_change_notify(sdata, bss_info_changed); |
855 | 858 | ||
856 | mutex_lock(&local->iflist_mtx); | 859 | mutex_lock(&local->iflist_mtx); |
@@ -898,13 +901,13 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, | |||
898 | netif_tx_stop_all_queues(sdata->dev); | 901 | netif_tx_stop_all_queues(sdata->dev); |
899 | netif_carrier_off(sdata->dev); | 902 | netif_carrier_off(sdata->dev); |
900 | 903 | ||
901 | rcu_read_lock(); | 904 | mutex_lock(&local->sta_mtx); |
902 | sta = sta_info_get(sdata, bssid); | 905 | sta = sta_info_get(sdata, bssid); |
903 | if (sta) { | 906 | if (sta) { |
904 | set_sta_flags(sta, WLAN_STA_DISASSOC); | 907 | set_sta_flags(sta, WLAN_STA_BLOCK_BA); |
905 | ieee80211_sta_tear_down_BA_sessions(sta); | 908 | ieee80211_sta_tear_down_BA_sessions(sta); |
906 | } | 909 | } |
907 | rcu_read_unlock(); | 910 | mutex_unlock(&local->sta_mtx); |
908 | 911 | ||
909 | changed |= ieee80211_reset_erp_info(sdata); | 912 | changed |= ieee80211_reset_erp_info(sdata); |
910 | 913 | ||
@@ -932,6 +935,12 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, | |||
932 | 935 | ||
933 | ieee80211_hw_config(local, config_changed); | 936 | ieee80211_hw_config(local, config_changed); |
934 | 937 | ||
938 | /* Disable ARP filtering */ | ||
939 | if (sdata->vif.bss_conf.arp_filter_enabled) { | ||
940 | sdata->vif.bss_conf.arp_filter_enabled = false; | ||
941 | changed |= BSS_CHANGED_ARP_FILTER; | ||
942 | } | ||
943 | |||
935 | /* The BSSID (not really interesting) and HT changed */ | 944 | /* The BSSID (not really interesting) and HT changed */ |
936 | changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT; | 945 | changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT; |
937 | ieee80211_bss_info_change_notify(sdata, changed); | 946 | ieee80211_bss_info_change_notify(sdata, changed); |
@@ -1633,35 +1642,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
1633 | ieee80211_bss_info_change_notify(sdata, changed); | 1642 | ieee80211_bss_info_change_notify(sdata, changed); |
1634 | } | 1643 | } |
1635 | 1644 | ||
1636 | ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, | 1645 | void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, |
1637 | struct sk_buff *skb) | 1646 | struct sk_buff *skb) |
1638 | { | ||
1639 | struct ieee80211_local *local = sdata->local; | ||
1640 | struct ieee80211_mgmt *mgmt; | ||
1641 | u16 fc; | ||
1642 | |||
1643 | if (skb->len < 24) | ||
1644 | return RX_DROP_MONITOR; | ||
1645 | |||
1646 | mgmt = (struct ieee80211_mgmt *) skb->data; | ||
1647 | fc = le16_to_cpu(mgmt->frame_control); | ||
1648 | |||
1649 | switch (fc & IEEE80211_FCTL_STYPE) { | ||
1650 | case IEEE80211_STYPE_PROBE_RESP: | ||
1651 | case IEEE80211_STYPE_BEACON: | ||
1652 | case IEEE80211_STYPE_DEAUTH: | ||
1653 | case IEEE80211_STYPE_DISASSOC: | ||
1654 | case IEEE80211_STYPE_ACTION: | ||
1655 | skb_queue_tail(&sdata->u.mgd.skb_queue, skb); | ||
1656 | ieee80211_queue_work(&local->hw, &sdata->u.mgd.work); | ||
1657 | return RX_QUEUED; | ||
1658 | } | ||
1659 | |||
1660 | return RX_DROP_MONITOR; | ||
1661 | } | ||
1662 | |||
1663 | static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | ||
1664 | struct sk_buff *skb) | ||
1665 | { | 1647 | { |
1666 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 1648 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
1667 | struct ieee80211_rx_status *rx_status; | 1649 | struct ieee80211_rx_status *rx_status; |
@@ -1693,44 +1675,6 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
1693 | break; | 1675 | break; |
1694 | case IEEE80211_STYPE_ACTION: | 1676 | case IEEE80211_STYPE_ACTION: |
1695 | switch (mgmt->u.action.category) { | 1677 | switch (mgmt->u.action.category) { |
1696 | case WLAN_CATEGORY_BACK: { | ||
1697 | struct ieee80211_local *local = sdata->local; | ||
1698 | int len = skb->len; | ||
1699 | struct sta_info *sta; | ||
1700 | |||
1701 | rcu_read_lock(); | ||
1702 | sta = sta_info_get(sdata, mgmt->sa); | ||
1703 | if (!sta) { | ||
1704 | rcu_read_unlock(); | ||
1705 | break; | ||
1706 | } | ||
1707 | |||
1708 | local_bh_disable(); | ||
1709 | |||
1710 | switch (mgmt->u.action.u.addba_req.action_code) { | ||
1711 | case WLAN_ACTION_ADDBA_REQ: | ||
1712 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1713 | sizeof(mgmt->u.action.u.addba_req))) | ||
1714 | break; | ||
1715 | ieee80211_process_addba_request(local, sta, mgmt, len); | ||
1716 | break; | ||
1717 | case WLAN_ACTION_ADDBA_RESP: | ||
1718 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1719 | sizeof(mgmt->u.action.u.addba_resp))) | ||
1720 | break; | ||
1721 | ieee80211_process_addba_resp(local, sta, mgmt, len); | ||
1722 | break; | ||
1723 | case WLAN_ACTION_DELBA: | ||
1724 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1725 | sizeof(mgmt->u.action.u.delba))) | ||
1726 | break; | ||
1727 | ieee80211_process_delba(sdata, sta, mgmt, len); | ||
1728 | break; | ||
1729 | } | ||
1730 | local_bh_enable(); | ||
1731 | rcu_read_unlock(); | ||
1732 | break; | ||
1733 | } | ||
1734 | case WLAN_CATEGORY_SPECTRUM_MGMT: | 1678 | case WLAN_CATEGORY_SPECTRUM_MGMT: |
1735 | ieee80211_sta_process_chanswitch(sdata, | 1679 | ieee80211_sta_process_chanswitch(sdata, |
1736 | &mgmt->u.action.u.chan_switch.sw_elem, | 1680 | &mgmt->u.action.u.chan_switch.sw_elem, |
@@ -1754,7 +1698,7 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
1754 | default: | 1698 | default: |
1755 | WARN(1, "unexpected: %d", rma); | 1699 | WARN(1, "unexpected: %d", rma); |
1756 | } | 1700 | } |
1757 | goto out; | 1701 | return; |
1758 | } | 1702 | } |
1759 | 1703 | ||
1760 | mutex_unlock(&ifmgd->mtx); | 1704 | mutex_unlock(&ifmgd->mtx); |
@@ -1799,8 +1743,6 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
1799 | 1743 | ||
1800 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); | 1744 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); |
1801 | } | 1745 | } |
1802 | out: | ||
1803 | kfree_skb(skb); | ||
1804 | } | 1746 | } |
1805 | 1747 | ||
1806 | static void ieee80211_sta_timer(unsigned long data) | 1748 | static void ieee80211_sta_timer(unsigned long data) |
@@ -1815,39 +1757,13 @@ static void ieee80211_sta_timer(unsigned long data) | |||
1815 | return; | 1757 | return; |
1816 | } | 1758 | } |
1817 | 1759 | ||
1818 | ieee80211_queue_work(&local->hw, &ifmgd->work); | 1760 | ieee80211_queue_work(&local->hw, &sdata->work); |
1819 | } | 1761 | } |
1820 | 1762 | ||
1821 | static void ieee80211_sta_work(struct work_struct *work) | 1763 | void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) |
1822 | { | 1764 | { |
1823 | struct ieee80211_sub_if_data *sdata = | ||
1824 | container_of(work, struct ieee80211_sub_if_data, u.mgd.work); | ||
1825 | struct ieee80211_local *local = sdata->local; | 1765 | struct ieee80211_local *local = sdata->local; |
1826 | struct ieee80211_if_managed *ifmgd; | 1766 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
1827 | struct sk_buff *skb; | ||
1828 | |||
1829 | if (!ieee80211_sdata_running(sdata)) | ||
1830 | return; | ||
1831 | |||
1832 | if (local->scanning) | ||
1833 | return; | ||
1834 | |||
1835 | if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) | ||
1836 | return; | ||
1837 | |||
1838 | /* | ||
1839 | * ieee80211_queue_work() should have picked up most cases, | ||
1840 | * here we'll pick the the rest. | ||
1841 | */ | ||
1842 | if (WARN(local->suspended, "STA MLME work scheduled while " | ||
1843 | "going to suspend\n")) | ||
1844 | return; | ||
1845 | |||
1846 | ifmgd = &sdata->u.mgd; | ||
1847 | |||
1848 | /* first process frames to avoid timing out while a frame is pending */ | ||
1849 | while ((skb = skb_dequeue(&ifmgd->skb_queue))) | ||
1850 | ieee80211_sta_rx_queued_mgmt(sdata, skb); | ||
1851 | 1767 | ||
1852 | /* then process the rest of the work */ | 1768 | /* then process the rest of the work */ |
1853 | mutex_lock(&ifmgd->mtx); | 1769 | mutex_lock(&ifmgd->mtx); |
@@ -1942,8 +1858,7 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) | |||
1942 | ieee80211_queue_work(&sdata->local->hw, | 1858 | ieee80211_queue_work(&sdata->local->hw, |
1943 | &sdata->u.mgd.monitor_work); | 1859 | &sdata->u.mgd.monitor_work); |
1944 | /* and do all the other regular work too */ | 1860 | /* and do all the other regular work too */ |
1945 | ieee80211_queue_work(&sdata->local->hw, | 1861 | ieee80211_queue_work(&sdata->local->hw, &sdata->work); |
1946 | &sdata->u.mgd.work); | ||
1947 | } | 1862 | } |
1948 | } | 1863 | } |
1949 | 1864 | ||
@@ -1958,7 +1873,6 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata) | |||
1958 | * time -- the code here is properly synchronised. | 1873 | * time -- the code here is properly synchronised. |
1959 | */ | 1874 | */ |
1960 | 1875 | ||
1961 | cancel_work_sync(&ifmgd->work); | ||
1962 | cancel_work_sync(&ifmgd->beacon_connection_loss_work); | 1876 | cancel_work_sync(&ifmgd->beacon_connection_loss_work); |
1963 | if (del_timer_sync(&ifmgd->timer)) | 1877 | if (del_timer_sync(&ifmgd->timer)) |
1964 | set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); | 1878 | set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); |
@@ -1990,7 +1904,6 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) | |||
1990 | struct ieee80211_if_managed *ifmgd; | 1904 | struct ieee80211_if_managed *ifmgd; |
1991 | 1905 | ||
1992 | ifmgd = &sdata->u.mgd; | 1906 | ifmgd = &sdata->u.mgd; |
1993 | INIT_WORK(&ifmgd->work, ieee80211_sta_work); | ||
1994 | INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work); | 1907 | INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work); |
1995 | INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work); | 1908 | INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work); |
1996 | INIT_WORK(&ifmgd->beacon_connection_loss_work, | 1909 | INIT_WORK(&ifmgd->beacon_connection_loss_work, |
@@ -2003,7 +1916,6 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) | |||
2003 | (unsigned long) sdata); | 1916 | (unsigned long) sdata); |
2004 | setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer, | 1917 | setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer, |
2005 | (unsigned long) sdata); | 1918 | (unsigned long) sdata); |
2006 | skb_queue_head_init(&ifmgd->skb_queue); | ||
2007 | 1919 | ||
2008 | ifmgd->flags = 0; | 1920 | ifmgd->flags = 0; |
2009 | 1921 | ||
@@ -2153,6 +2065,7 @@ static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk, | |||
2153 | wk->filter_ta); | 2065 | wk->filter_ta); |
2154 | return WORK_DONE_DESTROY; | 2066 | return WORK_DONE_DESTROY; |
2155 | } | 2067 | } |
2068 | |||
2156 | mutex_unlock(&wk->sdata->u.mgd.mtx); | 2069 | mutex_unlock(&wk->sdata->u.mgd.mtx); |
2157 | } | 2070 | } |
2158 | 2071 | ||
@@ -2282,14 +2195,16 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, | |||
2282 | struct ieee80211_local *local = sdata->local; | 2195 | struct ieee80211_local *local = sdata->local; |
2283 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 2196 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
2284 | struct ieee80211_work *wk; | 2197 | struct ieee80211_work *wk; |
2285 | const u8 *bssid = req->bss->bssid; | 2198 | u8 bssid[ETH_ALEN]; |
2199 | bool assoc_bss = false; | ||
2286 | 2200 | ||
2287 | mutex_lock(&ifmgd->mtx); | 2201 | mutex_lock(&ifmgd->mtx); |
2288 | 2202 | ||
2203 | memcpy(bssid, req->bss->bssid, ETH_ALEN); | ||
2289 | if (ifmgd->associated == req->bss) { | 2204 | if (ifmgd->associated == req->bss) { |
2290 | bssid = req->bss->bssid; | 2205 | ieee80211_set_disassoc(sdata, false); |
2291 | ieee80211_set_disassoc(sdata, true); | ||
2292 | mutex_unlock(&ifmgd->mtx); | 2206 | mutex_unlock(&ifmgd->mtx); |
2207 | assoc_bss = true; | ||
2293 | } else { | 2208 | } else { |
2294 | bool not_auth_yet = false; | 2209 | bool not_auth_yet = false; |
2295 | 2210 | ||
@@ -2335,6 +2250,8 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, | |||
2335 | ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH, | 2250 | ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH, |
2336 | req->reason_code, cookie, | 2251 | req->reason_code, cookie, |
2337 | !req->local_state_change); | 2252 | !req->local_state_change); |
2253 | if (assoc_bss) | ||
2254 | sta_info_destroy_addr(sdata, bssid); | ||
2338 | 2255 | ||
2339 | ieee80211_recalc_idle(sdata->local); | 2256 | ieee80211_recalc_idle(sdata->local); |
2340 | 2257 | ||
@@ -2379,41 +2296,6 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, | |||
2379 | return 0; | 2296 | return 0; |
2380 | } | 2297 | } |
2381 | 2298 | ||
2382 | int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata, | ||
2383 | struct ieee80211_channel *chan, | ||
2384 | enum nl80211_channel_type channel_type, | ||
2385 | const u8 *buf, size_t len, u64 *cookie) | ||
2386 | { | ||
2387 | struct ieee80211_local *local = sdata->local; | ||
2388 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | ||
2389 | struct sk_buff *skb; | ||
2390 | |||
2391 | /* Check that we are on the requested channel for transmission */ | ||
2392 | if ((chan != local->tmp_channel || | ||
2393 | channel_type != local->tmp_channel_type) && | ||
2394 | (chan != local->oper_channel || | ||
2395 | channel_type != local->_oper_channel_type)) | ||
2396 | return -EBUSY; | ||
2397 | |||
2398 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + len); | ||
2399 | if (!skb) | ||
2400 | return -ENOMEM; | ||
2401 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
2402 | |||
2403 | memcpy(skb_put(skb, len), buf, len); | ||
2404 | |||
2405 | if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED)) | ||
2406 | IEEE80211_SKB_CB(skb)->flags |= | ||
2407 | IEEE80211_TX_INTFL_DONT_ENCRYPT; | ||
2408 | IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_NL80211_FRAME_TX | | ||
2409 | IEEE80211_TX_CTL_REQ_TX_STATUS; | ||
2410 | skb->dev = sdata->dev; | ||
2411 | ieee80211_tx_skb(sdata, skb); | ||
2412 | |||
2413 | *cookie = (unsigned long) skb; | ||
2414 | return 0; | ||
2415 | } | ||
2416 | |||
2417 | void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, | 2299 | void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, |
2418 | enum nl80211_cqm_rssi_threshold_event rssi_event, | 2300 | enum nl80211_cqm_rssi_threshold_event rssi_event, |
2419 | gfp_t gfp) | 2301 | gfp_t gfp) |
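Editor's note on the reworked dynamic PS timeout in ieee80211_recalc_ps() above: restated outside the diff, a user-forced value always wins; otherwise the timeout is forced to 0 (full PSM) only when the PM_QOS latency allowance is very large and is not the 2 s compatibility placeholder, and every other requirement gets the 100 ms default. A small illustrative restatement, not part of the patch:

static int example_dynamic_ps_timeout(s32 forced_timeout, s32 latency)
{
	if (forced_timeout >= 0)
		return forced_timeout;		/* user override */

	if (latency > 1900000000 && latency != 2000000000)
		return 0;			/* go to full PSM */

	return 100;				/* default 100 ms timeout */
}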
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index 75202b295a4e..d287fde0431d 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c | |||
@@ -40,22 +40,14 @@ int __ieee80211_suspend(struct ieee80211_hw *hw) | |||
40 | list_for_each_entry(sdata, &local->interfaces, list) | 40 | list_for_each_entry(sdata, &local->interfaces, list) |
41 | ieee80211_disable_keys(sdata); | 41 | ieee80211_disable_keys(sdata); |
42 | 42 | ||
43 | /* Tear down aggregation sessions */ | 43 | /* tear down aggregation sessions and remove STAs */ |
44 | 44 | mutex_lock(&local->sta_mtx); | |
45 | rcu_read_lock(); | 45 | list_for_each_entry(sta, &local->sta_list, list) { |
46 | 46 | if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { | |
47 | if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { | ||
48 | list_for_each_entry_rcu(sta, &local->sta_list, list) { | ||
49 | set_sta_flags(sta, WLAN_STA_BLOCK_BA); | 47 | set_sta_flags(sta, WLAN_STA_BLOCK_BA); |
50 | ieee80211_sta_tear_down_BA_sessions(sta); | 48 | ieee80211_sta_tear_down_BA_sessions(sta); |
51 | } | 49 | } |
52 | } | ||
53 | 50 | ||
54 | rcu_read_unlock(); | ||
55 | |||
56 | /* remove STAs */ | ||
57 | mutex_lock(&local->sta_mtx); | ||
58 | list_for_each_entry(sta, &local->sta_list, list) { | ||
59 | if (sta->uploaded) { | 51 | if (sta->uploaded) { |
60 | sdata = sta->sdata; | 52 | sdata = sta->sdata; |
61 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) | 53 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) |
@@ -72,6 +64,8 @@ int __ieee80211_suspend(struct ieee80211_hw *hw) | |||
72 | 64 | ||
73 | /* remove all interfaces */ | 65 | /* remove all interfaces */ |
74 | list_for_each_entry(sdata, &local->interfaces, list) { | 66 | list_for_each_entry(sdata, &local->interfaces, list) { |
67 | cancel_work_sync(&sdata->work); | ||
68 | |||
75 | switch(sdata->vif.type) { | 69 | switch(sdata->vif.type) { |
76 | case NL80211_IFTYPE_STATION: | 70 | case NL80211_IFTYPE_STATION: |
77 | ieee80211_sta_quiesce(sdata); | 71 | ieee80211_sta_quiesce(sdata); |
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h index 065a96190e32..168427b0ffdc 100644 --- a/net/mac80211/rate.h +++ b/net/mac80211/rate.h | |||
@@ -147,5 +147,18 @@ static inline void rc80211_minstrel_exit(void) | |||
147 | } | 147 | } |
148 | #endif | 148 | #endif |
149 | 149 | ||
150 | #ifdef CONFIG_MAC80211_RC_MINSTREL_HT | ||
151 | extern int rc80211_minstrel_ht_init(void); | ||
152 | extern void rc80211_minstrel_ht_exit(void); | ||
153 | #else | ||
154 | static inline int rc80211_minstrel_ht_init(void) | ||
155 | { | ||
156 | return 0; | ||
157 | } | ||
158 | static inline void rc80211_minstrel_ht_exit(void) | ||
159 | { | ||
160 | } | ||
161 | #endif | ||
162 | |||
150 | 163 | ||
151 | #endif /* IEEE80211_RATE_H */ | 164 | #endif /* IEEE80211_RATE_H */ |
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c new file mode 100644 index 000000000000..7a04951fcb1f --- /dev/null +++ b/net/mac80211/rc80211_minstrel_ht.c | |||
@@ -0,0 +1,824 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | #include <linux/netdevice.h> | ||
9 | #include <linux/types.h> | ||
10 | #include <linux/skbuff.h> | ||
11 | #include <linux/debugfs.h> | ||
12 | #include <linux/random.h> | ||
13 | #include <linux/ieee80211.h> | ||
14 | #include <net/mac80211.h> | ||
15 | #include "rate.h" | ||
16 | #include "rc80211_minstrel.h" | ||
17 | #include "rc80211_minstrel_ht.h" | ||
18 | |||
19 | #define AVG_PKT_SIZE 1200 | ||
20 | #define SAMPLE_COLUMNS 10 | ||
21 | #define EWMA_LEVEL 75 | ||
22 | |||
23 | /* Number of bits for an average sized packet */ | ||
24 | #define MCS_NBITS (AVG_PKT_SIZE << 3) | ||
25 | |||
26 | /* Number of symbols for a packet with (bps) bits per symbol */ | ||
27 | #define MCS_NSYMS(bps) ((MCS_NBITS + (bps) - 1) / (bps)) | ||
28 | |||
29 | /* Transmission time for a packet containing (syms) symbols */ | ||
30 | #define MCS_SYMBOL_TIME(sgi, syms) \ | ||
31 | (sgi ? \ | ||
32 | ((syms) * 18 + 4) / 5 : /* syms * 3.6 us */ \ | ||
33 | (syms) << 2 /* syms * 4 us */ \ | ||
34 | ) | ||
35 | |||
36 | /* Transmit duration for the raw data part of an average sized packet */ | ||
37 | #define MCS_DURATION(streams, sgi, bps) MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps))) | ||
38 | |||
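/*
 * Illustrative standalone check of the timing macros above (editor's
 * sketch, not part of the patch): copying the same definitions into a
 * small userspace program and evaluating them for MCS0 (one stream,
 * 20 MHz, 26 bits per symbol) gives MCS_NSYMS(26) = (9600 + 25) / 26 =
 * 370 symbols, hence 370 * 4 us = 1480 us with a long GI and
 * (370 * 18 + 4) / 5 = 1332 us with a short GI.
 */
#include <stdio.h>

#define AVG_PKT_SIZE	1200
#define MCS_NBITS	(AVG_PKT_SIZE << 3)
#define MCS_NSYMS(bps)	((MCS_NBITS + (bps) - 1) / (bps))
#define MCS_SYMBOL_TIME(sgi, syms) \
	((sgi) ? ((syms) * 18 + 4) / 5 : (syms) << 2)
#define MCS_DURATION(streams, sgi, bps) \
	MCS_SYMBOL_TIME(sgi, MCS_NSYMS((streams) * (bps)))

int main(void)
{
	printf("MCS0 long GI:  %d us\n", MCS_DURATION(1, 0, 26)); /* 1480 */
	printf("MCS0 short GI: %d us\n", MCS_DURATION(1, 1, 26)); /* 1332 */
	return 0;
}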
39 | /* MCS rate information for an MCS group */ | ||
40 | #define MCS_GROUP(_streams, _sgi, _ht40) { \ | ||
41 | .streams = _streams, \ | ||
42 | .flags = \ | ||
43 | (_sgi ? IEEE80211_TX_RC_SHORT_GI : 0) | \ | ||
44 | (_ht40 ? IEEE80211_TX_RC_40_MHZ_WIDTH : 0), \ | ||
45 | .duration = { \ | ||
46 | MCS_DURATION(_streams, _sgi, _ht40 ? 54 : 26), \ | ||
47 | MCS_DURATION(_streams, _sgi, _ht40 ? 108 : 52), \ | ||
48 | MCS_DURATION(_streams, _sgi, _ht40 ? 162 : 78), \ | ||
49 | MCS_DURATION(_streams, _sgi, _ht40 ? 216 : 104), \ | ||
50 | MCS_DURATION(_streams, _sgi, _ht40 ? 324 : 156), \ | ||
51 | MCS_DURATION(_streams, _sgi, _ht40 ? 432 : 208), \ | ||
52 | MCS_DURATION(_streams, _sgi, _ht40 ? 486 : 234), \ | ||
53 | MCS_DURATION(_streams, _sgi, _ht40 ? 540 : 260) \ | ||
54 | } \ | ||
55 | } | ||
56 | |||
57 | /* | ||
58 | * To enable sufficiently targeted rate sampling, MCS rates are divided into | ||
59 | * groups, based on the number of streams and flags (HT40, SGI) that they | ||
60 | * use. | ||
61 | */ | ||
62 | const struct mcs_group minstrel_mcs_groups[] = { | ||
63 | MCS_GROUP(1, 0, 0), | ||
64 | MCS_GROUP(2, 0, 0), | ||
65 | #if MINSTREL_MAX_STREAMS >= 3 | ||
66 | MCS_GROUP(3, 0, 0), | ||
67 | #endif | ||
68 | |||
69 | MCS_GROUP(1, 1, 0), | ||
70 | MCS_GROUP(2, 1, 0), | ||
71 | #if MINSTREL_MAX_STREAMS >= 3 | ||
72 | MCS_GROUP(3, 1, 0), | ||
73 | #endif | ||
74 | |||
75 | MCS_GROUP(1, 0, 1), | ||
76 | MCS_GROUP(2, 0, 1), | ||
77 | #if MINSTREL_MAX_STREAMS >= 3 | ||
78 | MCS_GROUP(3, 0, 1), | ||
79 | #endif | ||
80 | |||
81 | MCS_GROUP(1, 1, 1), | ||
82 | MCS_GROUP(2, 1, 1), | ||
83 | #if MINSTREL_MAX_STREAMS >= 3 | ||
84 | MCS_GROUP(3, 1, 1), | ||
85 | #endif | ||
86 | }; | ||
87 | |||
88 | static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES]; | ||
89 | |||
90 | /* | ||
91 | * Perform EWMA (Exponentially Weighted Moving Average) calculation | ||
92 | */ | ||
93 | static int | ||
94 | minstrel_ewma(int old, int new, int weight) | ||
95 | { | ||
96 | return (new * (100 - weight) + old * weight) / 100; | ||
97 | } | ||
98 | |||
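For a quick feel for the weighting (illustrative numbers only): with EWMA_LEVEL set to 75, each update moves the average a quarter of the way towards the new sample.

/*
 * minstrel_ewma(80, 40, 75)
 *   = (40 * 25 + 80 * 75) / 100
 *   = (1000 + 6000) / 100
 *   = 70
 */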
99 | /* | ||
100 | * Look up an MCS group index based on mac80211 rate information | ||
101 | */ | ||
102 | static int | ||
103 | minstrel_ht_get_group_idx(struct ieee80211_tx_rate *rate) | ||
104 | { | ||
105 | int streams = (rate->idx / MCS_GROUP_RATES) + 1; | ||
106 | u32 flags = IEEE80211_TX_RC_SHORT_GI | IEEE80211_TX_RC_40_MHZ_WIDTH; | ||
107 | int i; | ||
108 | |||
109 | for (i = 0; i < ARRAY_SIZE(minstrel_mcs_groups); i++) { | ||
110 | if (minstrel_mcs_groups[i].streams != streams) | ||
111 | continue; | ||
112 | if (minstrel_mcs_groups[i].flags != (rate->flags & flags)) | ||
113 | continue; | ||
114 | |||
115 | return i; | ||
116 | } | ||
117 | |||
118 | WARN_ON(1); | ||
119 | return 0; | ||
120 | } | ||
121 | |||
122 | static inline struct minstrel_rate_stats * | ||
123 | minstrel_get_ratestats(struct minstrel_ht_sta *mi, int index) | ||
124 | { | ||
125 | return &mi->groups[index / MCS_GROUP_RATES].rates[index % MCS_GROUP_RATES]; | ||
126 | } | ||
127 | |||
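A worked example of the flat rate index used throughout this file, assuming MINSTREL_MAX_STREAMS is 3 so that the twelve groups appear in the order of the minstrel_mcs_groups[] table above:

/*
 * index 17: 17 / MCS_GROUP_RATES = group 2 (3 streams, long GI, HT20)
 *           17 % MCS_GROUP_RATES = rate 1 within that group
 * index 25: 25 / MCS_GROUP_RATES = group 3 (1 stream, short GI, HT20)
 *           25 % MCS_GROUP_RATES = rate 1 within that group
 */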
128 | |||
129 | /* | ||
130 | * Recalculate success probabilities and counters for a rate using EWMA | ||
131 | */ | ||
132 | static void | ||
133 | minstrel_calc_rate_ewma(struct minstrel_priv *mp, struct minstrel_rate_stats *mr) | ||
134 | { | ||
135 | if (unlikely(mr->attempts > 0)) { | ||
136 | mr->sample_skipped = 0; | ||
137 | mr->cur_prob = MINSTREL_FRAC(mr->success, mr->attempts); | ||
138 | if (!mr->att_hist) | ||
139 | mr->probability = mr->cur_prob; | ||
140 | else | ||
141 | mr->probability = minstrel_ewma(mr->probability, | ||
142 | mr->cur_prob, EWMA_LEVEL); | ||
143 | mr->att_hist += mr->attempts; | ||
144 | mr->succ_hist += mr->success; | ||
145 | } else { | ||
146 | mr->sample_skipped++; | ||
147 | } | ||
148 | mr->last_success = mr->success; | ||
149 | mr->last_attempts = mr->attempts; | ||
150 | mr->success = 0; | ||
151 | mr->attempts = 0; | ||
152 | } | ||
153 | |||
154 | /* | ||
155 | * Calculate throughput based on the average A-MPDU length, taking into account | ||
156 | * the expected number of retransmissions and their expected length | ||
157 | */ | ||
158 | static void | ||
159 | minstrel_ht_calc_tp(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, | ||
160 | int group, int rate) | ||
161 | { | ||
162 | struct minstrel_rate_stats *mr; | ||
163 | unsigned int usecs; | ||
164 | |||
165 | mr = &mi->groups[group].rates[rate]; | ||
166 | |||
167 | if (mr->probability < MINSTREL_FRAC(1, 10)) { | ||
168 | mr->cur_tp = 0; | ||
169 | return; | ||
170 | } | ||
171 | |||
172 | usecs = mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len); | ||
173 | usecs += minstrel_mcs_groups[group].duration[rate]; | ||
174 | mr->cur_tp = MINSTREL_TRUNC((1000000 / usecs) * mr->probability); | ||
175 | } | ||
176 | |||
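With some assumed (not measured) numbers -- 360 us of per-frame overhead amortized over an average A-MPDU of 8 subframes, a 148 us MCS duration and a 90% EWMA probability -- the calculation works out as:

/*
 * usecs  = 360 / 8 + 148 = 193
 * cur_tp = MINSTREL_TRUNC((1000000 / 193) * MINSTREL_FRAC(9, 10))
 *        = MINSTREL_TRUNC(5181 * 58982) = 4662   expected packets/sec
 *
 * With AVG_PKT_SIZE at 1200 bytes, cur_tp / 100 roughly corresponds to
 * Mbit/s, which is the scaling the debugfs output below relies on.
 */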
177 | /* | ||
178 | * Update rate statistics and select new primary rates | ||
179 | * | ||
180 | * Rules for rate selection: | ||
181 | * - max_prob_rate must use only one stream, as a tradeoff between delivery | ||
182 | * probability and throughput during strong fluctuations | ||
183 | * - as long as the max prob rate has a probability of more than 3/4, pick | ||
184 | * higher throughput rates, even if the probability is a bit lower | ||
185 | */ | ||
186 | static void | ||
187 | minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) | ||
188 | { | ||
189 | struct minstrel_mcs_group_data *mg; | ||
190 | struct minstrel_rate_stats *mr; | ||
191 | int cur_prob, cur_prob_tp, cur_tp, cur_tp2; | ||
192 | int group, i, index; | ||
193 | |||
194 | if (mi->ampdu_packets > 0) { | ||
195 | mi->avg_ampdu_len = minstrel_ewma(mi->avg_ampdu_len, | ||
196 | MINSTREL_FRAC(mi->ampdu_len, mi->ampdu_packets), EWMA_LEVEL); | ||
197 | mi->ampdu_len = 0; | ||
198 | mi->ampdu_packets = 0; | ||
199 | } | ||
200 | |||
201 | mi->sample_slow = 0; | ||
202 | mi->sample_count = 0; | ||
203 | mi->max_tp_rate = 0; | ||
204 | mi->max_tp_rate2 = 0; | ||
205 | mi->max_prob_rate = 0; | ||
206 | |||
207 | for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { | ||
208 | cur_prob = 0; | ||
209 | cur_prob_tp = 0; | ||
210 | cur_tp = 0; | ||
211 | cur_tp2 = 0; | ||
212 | |||
213 | mg = &mi->groups[group]; | ||
214 | if (!mg->supported) | ||
215 | continue; | ||
216 | |||
217 | mg->max_tp_rate = 0; | ||
218 | mg->max_tp_rate2 = 0; | ||
219 | mg->max_prob_rate = 0; | ||
220 | mi->sample_count++; | ||
221 | |||
222 | for (i = 0; i < MCS_GROUP_RATES; i++) { | ||
223 | if (!(mg->supported & BIT(i))) | ||
224 | continue; | ||
225 | |||
226 | mr = &mg->rates[i]; | ||
227 | mr->retry_updated = false; | ||
228 | index = MCS_GROUP_RATES * group + i; | ||
229 | minstrel_calc_rate_ewma(mp, mr); | ||
230 | minstrel_ht_calc_tp(mp, mi, group, i); | ||
231 | |||
232 | if (!mr->cur_tp) | ||
233 | continue; | ||
234 | |||
235 | /* ignore the lowest rate of each single-stream group */ | ||
236 | if (!i && minstrel_mcs_groups[group].streams == 1) | ||
237 | continue; | ||
238 | |||
239 | if ((mr->cur_tp > cur_prob_tp && mr->probability > | ||
240 | MINSTREL_FRAC(3, 4)) || mr->probability > cur_prob) { | ||
241 | mg->max_prob_rate = index; | ||
242 | cur_prob = mr->probability; | ||
243 | } | ||
244 | |||
245 | if (mr->cur_tp > cur_tp) { | ||
246 | swap(index, mg->max_tp_rate); | ||
247 | cur_tp = mr->cur_tp; | ||
248 | mr = minstrel_get_ratestats(mi, index); | ||
249 | } | ||
250 | |||
251 | if (index >= mg->max_tp_rate) | ||
252 | continue; | ||
253 | |||
254 | if (mr->cur_tp > cur_tp2) { | ||
255 | mg->max_tp_rate2 = index; | ||
256 | cur_tp2 = mr->cur_tp; | ||
257 | } | ||
258 | } | ||
259 | } | ||
260 | |||
261 | /* try to sample up to half of the available rates during each interval */ | ||
262 | mi->sample_count *= 4; | ||
263 | |||
264 | cur_prob = 0; | ||
265 | cur_prob_tp = 0; | ||
266 | cur_tp = 0; | ||
267 | cur_tp2 = 0; | ||
268 | for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) { | ||
269 | mg = &mi->groups[group]; | ||
270 | if (!mg->supported) | ||
271 | continue; | ||
272 | |||
273 | mr = minstrel_get_ratestats(mi, mg->max_prob_rate); | ||
274 | if (cur_prob_tp < mr->cur_tp && | ||
275 | minstrel_mcs_groups[group].streams == 1) { | ||
276 | mi->max_prob_rate = mg->max_prob_rate; | ||
277 | cur_prob = mr->cur_prob; cur_prob_tp = mr->cur_tp; | ||
278 | } | ||
279 | |||
280 | mr = minstrel_get_ratestats(mi, mg->max_tp_rate); | ||
281 | if (cur_tp < mr->cur_tp) { | ||
282 | mi->max_tp_rate = mg->max_tp_rate; | ||
283 | cur_tp = mr->cur_tp; | ||
284 | } | ||
285 | |||
286 | mr = minstrel_get_ratestats(mi, mg->max_tp_rate2); | ||
287 | if (cur_tp2 < mr->cur_tp) { | ||
288 | mi->max_tp_rate2 = mg->max_tp_rate2; | ||
289 | cur_tp2 = mr->cur_tp; | ||
290 | } | ||
291 | } | ||
292 | |||
293 | mi->stats_update = jiffies; | ||
294 | } | ||
295 | |||
296 | static bool | ||
297 | minstrel_ht_txstat_valid(struct ieee80211_tx_rate *rate) | ||
298 | { | ||
299 | if (!rate->count) | ||
300 | return false; | ||
301 | |||
302 | if (rate->idx < 0) | ||
303 | return false; | ||
304 | |||
305 | return !!(rate->flags & IEEE80211_TX_RC_MCS); | ||
306 | } | ||
307 | |||
308 | static void | ||
309 | minstrel_next_sample_idx(struct minstrel_ht_sta *mi) | ||
310 | { | ||
311 | struct minstrel_mcs_group_data *mg; | ||
312 | |||
313 | for (;;) { | ||
314 | mi->sample_group++; | ||
315 | mi->sample_group %= ARRAY_SIZE(minstrel_mcs_groups); | ||
316 | mg = &mi->groups[mi->sample_group]; | ||
317 | |||
318 | if (!mg->supported) | ||
319 | continue; | ||
320 | |||
321 | if (++mg->index >= MCS_GROUP_RATES) { | ||
322 | mg->index = 0; | ||
323 | if (++mg->column >= ARRAY_SIZE(sample_table)) | ||
324 | mg->column = 0; | ||
325 | } | ||
326 | break; | ||
327 | } | ||
328 | } | ||
329 | |||
330 | static void | ||
331 | minstrel_downgrade_rate(struct minstrel_ht_sta *mi, int *idx, bool primary) | ||
332 | { | ||
333 | int group, orig_group; | ||
334 | |||
335 | orig_group = group = *idx / MCS_GROUP_RATES; | ||
336 | while (group > 0) { | ||
337 | group--; | ||
338 | |||
339 | if (!mi->groups[group].supported) | ||
340 | continue; | ||
341 | |||
342 | if (minstrel_mcs_groups[group].streams > | ||
343 | minstrel_mcs_groups[orig_group].streams) | ||
344 | continue; | ||
345 | |||
346 | if (primary) | ||
347 | *idx = mi->groups[group].max_tp_rate; | ||
348 | else | ||
349 | *idx = mi->groups[group].max_tp_rate2; | ||
350 | break; | ||
351 | } | ||
352 | } | ||
353 | |||
354 | static void | ||
355 | minstrel_aggr_check(struct minstrel_priv *mp, struct ieee80211_sta *pubsta, struct sk_buff *skb) | ||
356 | { | ||
357 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
358 | struct sta_info *sta = container_of(pubsta, struct sta_info, sta); | ||
359 | u16 tid; | ||
360 | |||
361 | if (unlikely(!ieee80211_is_data_qos(hdr->frame_control))) | ||
362 | return; | ||
363 | |||
364 | if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE))) | ||
365 | return; | ||
366 | |||
367 | tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; | ||
368 | if (likely(sta->ampdu_mlme.tid_tx[tid])) | ||
369 | return; | ||
370 | |||
371 | ieee80211_start_tx_ba_session(pubsta, tid); | ||
372 | } | ||
373 | |||
374 | static void | ||
375 | minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband, | ||
376 | struct ieee80211_sta *sta, void *priv_sta, | ||
377 | struct sk_buff *skb) | ||
378 | { | ||
379 | struct minstrel_ht_sta_priv *msp = priv_sta; | ||
380 | struct minstrel_ht_sta *mi = &msp->ht; | ||
381 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
382 | struct ieee80211_tx_rate *ar = info->status.rates; | ||
383 | struct minstrel_rate_stats *rate, *rate2; | ||
384 | struct minstrel_priv *mp = priv; | ||
385 | bool last = false; | ||
386 | int group; | ||
387 | int i = 0; | ||
388 | |||
389 | if (!msp->is_ht) | ||
390 | return mac80211_minstrel.tx_status(priv, sband, sta, &msp->legacy, skb); | ||
391 | |||
392 | /* This packet was aggregated but doesn't carry status info */ | ||
393 | if ((info->flags & IEEE80211_TX_CTL_AMPDU) && | ||
394 | !(info->flags & IEEE80211_TX_STAT_AMPDU)) | ||
395 | return; | ||
396 | |||
397 | if (!info->status.ampdu_len) { | ||
398 | info->status.ampdu_ack_len = 1; | ||
399 | info->status.ampdu_len = 1; | ||
400 | } | ||
401 | |||
402 | mi->ampdu_packets++; | ||
403 | mi->ampdu_len += info->status.ampdu_len; | ||
404 | |||
405 | if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) { | ||
406 | mi->sample_wait = 4 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len); | ||
407 | mi->sample_tries = 3; | ||
408 | mi->sample_count--; | ||
409 | } | ||
410 | |||
411 | if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) { | ||
412 | mi->sample_packets += info->status.ampdu_len; | ||
413 | minstrel_next_sample_idx(mi); | ||
414 | } | ||
415 | |||
416 | for (i = 0; !last; i++) { | ||
417 | last = (i == IEEE80211_TX_MAX_RATES - 1) || | ||
418 | !minstrel_ht_txstat_valid(&ar[i + 1]); | ||
419 | |||
420 | if (!minstrel_ht_txstat_valid(&ar[i])) | ||
421 | break; | ||
422 | |||
423 | group = minstrel_ht_get_group_idx(&ar[i]); | ||
424 | rate = &mi->groups[group].rates[ar[i].idx % 8]; | ||
425 | |||
426 | if (last && (info->flags & IEEE80211_TX_STAT_ACK)) | ||
427 | rate->success += info->status.ampdu_ack_len; | ||
428 | |||
429 | rate->attempts += ar[i].count * info->status.ampdu_len; | ||
430 | } | ||
431 | |||
432 | /* | ||
433 | * check for sudden death of spatial multiplexing, | ||
434 | * downgrade to a lower number of streams if necessary. | ||
435 | */ | ||
436 | rate = minstrel_get_ratestats(mi, mi->max_tp_rate); | ||
437 | if (rate->attempts > 30 && | ||
438 | MINSTREL_FRAC(rate->success, rate->attempts) < | ||
439 | MINSTREL_FRAC(20, 100)) | ||
440 | minstrel_downgrade_rate(mi, &mi->max_tp_rate, true); | ||
441 | |||
442 | rate2 = minstrel_get_ratestats(mi, mi->max_tp_rate2); | ||
443 | if (rate2->attempts > 30 && | ||
444 | MINSTREL_FRAC(rate2->success, rate2->attempts) < | ||
445 | MINSTREL_FRAC(20, 100)) | ||
446 | minstrel_downgrade_rate(mi, &mi->max_tp_rate2, false); | ||
447 | |||
448 | if (time_after(jiffies, mi->stats_update + (mp->update_interval / 2 * HZ) / 1000)) { | ||
449 | minstrel_ht_update_stats(mp, mi); | ||
450 | minstrel_aggr_check(mp, sta, skb); | ||
451 | } | ||
452 | } | ||
453 | |||
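To illustrate the accounting above with a hypothetical status report -- an A-MPDU of 16 subframes, 12 of them acked, sent with a two-entry MRR chain:

/*
 *   ar[0].count = 4  (first rate, all attempts failed)
 *   ar[1].count = 2  (fallback rate, ACK finally received)
 *
 * first rate:    attempts += 4 * 16 = 64
 * fallback rate: attempts += 2 * 16 = 32, success += 12
 *
 * Only the last valid rate in the chain is credited with the successes.
 */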
454 | static void | ||
455 | minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, | ||
456 | int index) | ||
457 | { | ||
458 | struct minstrel_rate_stats *mr; | ||
459 | const struct mcs_group *group; | ||
460 | unsigned int tx_time, tx_time_rtscts, tx_time_data; | ||
461 | unsigned int cw = mp->cw_min; | ||
462 | unsigned int t_slot = 9; /* FIXME */ | ||
463 | unsigned int ampdu_len = MINSTREL_TRUNC(mi->avg_ampdu_len); | ||
464 | |||
465 | mr = minstrel_get_ratestats(mi, index); | ||
466 | if (mr->probability < MINSTREL_FRAC(1, 10)) { | ||
467 | mr->retry_count = 1; | ||
468 | mr->retry_count_rtscts = 1; | ||
469 | return; | ||
470 | } | ||
471 | |||
472 | mr->retry_count = 2; | ||
473 | mr->retry_count_rtscts = 2; | ||
474 | mr->retry_updated = true; | ||
475 | |||
476 | group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; | ||
477 | tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len; | ||
478 | tx_time = 2 * (t_slot + mi->overhead + tx_time_data); | ||
479 | tx_time_rtscts = 2 * (t_slot + mi->overhead_rtscts + tx_time_data); | ||
480 | do { | ||
481 | cw = (cw << 1) | 1; | ||
482 | cw = min(cw, mp->cw_max); | ||
483 | tx_time += cw + t_slot + mi->overhead; | ||
484 | tx_time_rtscts += cw + t_slot + mi->overhead_rtscts; | ||
485 | if (tx_time_rtscts < mp->segment_size) | ||
486 | mr->retry_count_rtscts++; | ||
487 | } while ((tx_time < mp->segment_size) && | ||
488 | (++mr->retry_count < mp->max_retry)); | ||
489 | } | ||
490 | |||
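A worked pass through the retry loop, using assumed values (cw_min 15, t_slot 9 us, overhead 360 us, segment_size 6000 us, single-subframe A-MPDU with a 148 us data duration):

/*
 * start:  tx_time = 2 * (9 + 360 + 148) = 1034 us, retry_count = 2
 * pass 1: cw = 31, tx_time = 1034 + (31 + 9 + 360) = 1434 us, retry_count = 3
 * pass 2: cw = 63, tx_time = 1434 + (63 + 9 + 360) = 1866 us, retry_count = 4
 * ...
 * The contention window keeps doubling and retries keep being added until
 * the cumulative airtime would exceed segment_size or max_retry is reached.
 */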
491 | |||
492 | static void | ||
493 | minstrel_ht_set_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi, | ||
494 | struct ieee80211_tx_rate *rate, int index, | ||
495 | struct ieee80211_tx_rate_control *txrc, | ||
496 | bool sample, bool rtscts) | ||
497 | { | ||
498 | const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; | ||
499 | struct minstrel_rate_stats *mr; | ||
500 | |||
501 | mr = minstrel_get_ratestats(mi, index); | ||
502 | if (!mr->retry_updated) | ||
503 | minstrel_calc_retransmit(mp, mi, index); | ||
504 | |||
505 | if (mr->probability < MINSTREL_FRAC(20, 100)) | ||
506 | rate->count = 2; | ||
507 | else if (rtscts) | ||
508 | rate->count = mr->retry_count_rtscts; | ||
509 | else | ||
510 | rate->count = mr->retry_count; | ||
511 | |||
512 | rate->flags = IEEE80211_TX_RC_MCS | group->flags; | ||
513 | if (txrc->short_preamble) | ||
514 | rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE; | ||
515 | if (txrc->rts || rtscts) | ||
516 | rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS; | ||
517 | rate->idx = index % MCS_GROUP_RATES + (group->streams - 1) * MCS_GROUP_RATES; | ||
518 | } | ||
519 | |||
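The last line converts the internal flat index back into the MCS number the driver sees. Continuing the index example from minstrel_get_ratestats() above:

/*
 * index 17 (group 2, rate 1, 3 streams, HT20, long GI):
 *   rate->idx = 1 + (3 - 1) * 8 = 17  -> MCS17, only IEEE80211_TX_RC_MCS set
 * index 25 (group 3, rate 1, 1 stream, HT20, short GI):
 *   rate->idx = 1 + (1 - 1) * 8 = 1   -> MCS1 with IEEE80211_TX_RC_SHORT_GI
 */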
520 | static inline int | ||
521 | minstrel_get_duration(int index) | ||
522 | { | ||
523 | const struct mcs_group *group = &minstrel_mcs_groups[index / MCS_GROUP_RATES]; | ||
524 | return group->duration[index % MCS_GROUP_RATES]; | ||
525 | } | ||
526 | |||
527 | static int | ||
528 | minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi) | ||
529 | { | ||
530 | struct minstrel_rate_stats *mr; | ||
531 | struct minstrel_mcs_group_data *mg; | ||
532 | int sample_idx = 0; | ||
533 | |||
534 | if (mi->sample_wait > 0) { | ||
535 | mi->sample_wait--; | ||
536 | return -1; | ||
537 | } | ||
538 | |||
539 | if (!mi->sample_tries) | ||
540 | return -1; | ||
541 | |||
542 | mi->sample_tries--; | ||
543 | mg = &mi->groups[mi->sample_group]; | ||
544 | sample_idx = sample_table[mg->column][mg->index]; | ||
545 | mr = &mg->rates[sample_idx]; | ||
546 | sample_idx += mi->sample_group * MCS_GROUP_RATES; | ||
547 | |||
548 | /* | ||
549 | * When not using MRR, do not sample if the probability is already | ||
550 | * higher than 95% to avoid wasting airtime | ||
551 | */ | ||
552 | if (!mp->has_mrr && (mr->probability > MINSTREL_FRAC(95, 100))) | ||
553 | goto next; | ||
554 | |||
555 | /* | ||
556 | * Make sure that lower rates get sampled only occasionally, | ||
557 | * if the link is working perfectly. | ||
558 | */ | ||
559 | if (minstrel_get_duration(sample_idx) > | ||
560 | minstrel_get_duration(mi->max_tp_rate)) { | ||
561 | if (mr->sample_skipped < 10) | ||
562 | goto next; | ||
563 | |||
564 | if (mi->sample_slow++ > 2) | ||
565 | goto next; | ||
566 | } | ||
567 | |||
568 | return sample_idx; | ||
569 | |||
570 | next: | ||
571 | minstrel_next_sample_idx(mi); | ||
572 | return -1; | ||
573 | } | ||
574 | |||
575 | static void | ||
576 | minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, | ||
577 | struct ieee80211_tx_rate_control *txrc) | ||
578 | { | ||
579 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb); | ||
580 | struct ieee80211_tx_rate *ar = info->status.rates; | ||
581 | struct minstrel_ht_sta_priv *msp = priv_sta; | ||
582 | struct minstrel_ht_sta *mi = &msp->ht; | ||
583 | struct minstrel_priv *mp = priv; | ||
584 | int sample_idx; | ||
585 | |||
586 | if (rate_control_send_low(sta, priv_sta, txrc)) | ||
587 | return; | ||
588 | |||
589 | if (!msp->is_ht) | ||
590 | return mac80211_minstrel.get_rate(priv, sta, &msp->legacy, txrc); | ||
591 | |||
592 | info->flags |= mi->tx_flags; | ||
593 | sample_idx = minstrel_get_sample_rate(mp, mi); | ||
594 | if (sample_idx >= 0) { | ||
595 | minstrel_ht_set_rate(mp, mi, &ar[0], sample_idx, | ||
596 | txrc, true, false); | ||
597 | minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate, | ||
598 | txrc, false, true); | ||
599 | info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; | ||
600 | } else { | ||
601 | minstrel_ht_set_rate(mp, mi, &ar[0], mi->max_tp_rate, | ||
602 | txrc, false, false); | ||
603 | minstrel_ht_set_rate(mp, mi, &ar[1], mi->max_tp_rate2, | ||
604 | txrc, false, true); | ||
605 | } | ||
606 | minstrel_ht_set_rate(mp, mi, &ar[2], mi->max_prob_rate, txrc, false, true); | ||
607 | |||
608 | ar[3].count = 0; | ||
609 | ar[3].idx = -1; | ||
610 | |||
611 | mi->total_packets++; | ||
612 | |||
613 | /* wraparound */ | ||
614 | if (mi->total_packets == ~0) { | ||
615 | mi->total_packets = 0; | ||
616 | mi->sample_packets = 0; | ||
617 | } | ||
618 | } | ||
619 | |||
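Putting the branches above together, the MRR chain handed back to the driver looks like this (the concrete rates depend on the current statistics):

/*
 * normal frame:    ar[0] = max_tp_rate
 *                  ar[1] = max_tp_rate2  (with RTS/CTS)
 *                  ar[2] = max_prob_rate (with RTS/CTS)
 *                  ar[3] = terminator    (idx = -1, count = 0)
 *
 * sampling frame:  ar[0] = sample rate   (marked RATE_CTRL_PROBE)
 *                  ar[1] = max_tp_rate   (with RTS/CTS)
 *                  ar[2] = max_prob_rate (with RTS/CTS)
 *                  ar[3] = terminator
 */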
620 | static void | ||
621 | minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, | ||
622 | struct ieee80211_sta *sta, void *priv_sta, | ||
623 | enum nl80211_channel_type oper_chan_type) | ||
624 | { | ||
625 | struct minstrel_priv *mp = priv; | ||
626 | struct minstrel_ht_sta_priv *msp = priv_sta; | ||
627 | struct minstrel_ht_sta *mi = &msp->ht; | ||
628 | struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs; | ||
629 | struct ieee80211_local *local = hw_to_local(mp->hw); | ||
630 | u16 sta_cap = sta->ht_cap.cap; | ||
631 | int ack_dur; | ||
632 | int stbc; | ||
633 | int i; | ||
634 | |||
635 | /* fall back to the old minstrel for legacy stations */ | ||
636 | if (sta && !sta->ht_cap.ht_supported) { | ||
637 | msp->is_ht = false; | ||
638 | memset(&msp->legacy, 0, sizeof(msp->legacy)); | ||
639 | msp->legacy.r = msp->ratelist; | ||
640 | msp->legacy.sample_table = msp->sample_table; | ||
641 | return mac80211_minstrel.rate_init(priv, sband, sta, &msp->legacy); | ||
642 | } | ||
643 | |||
644 | BUILD_BUG_ON(ARRAY_SIZE(minstrel_mcs_groups) != | ||
645 | MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS); | ||
646 | |||
647 | msp->is_ht = true; | ||
648 | memset(mi, 0, sizeof(*mi)); | ||
649 | mi->stats_update = jiffies; | ||
650 | |||
651 | ack_dur = ieee80211_frame_duration(local, 10, 60, 1, 1); | ||
652 | mi->overhead = ieee80211_frame_duration(local, 0, 60, 1, 1) + ack_dur; | ||
653 | mi->overhead_rtscts = mi->overhead + 2 * ack_dur; | ||
654 | |||
655 | mi->avg_ampdu_len = MINSTREL_FRAC(1, 1); | ||
656 | |||
657 | /* When using MRR, sample more on the first attempt, without delay */ | ||
658 | if (mp->has_mrr) { | ||
659 | mi->sample_count = 16; | ||
660 | mi->sample_wait = 0; | ||
661 | } else { | ||
662 | mi->sample_count = 8; | ||
663 | mi->sample_wait = 8; | ||
664 | } | ||
665 | mi->sample_tries = 4; | ||
666 | |||
667 | stbc = (sta_cap & IEEE80211_HT_CAP_RX_STBC) >> | ||
668 | IEEE80211_HT_CAP_RX_STBC_SHIFT; | ||
669 | mi->tx_flags |= stbc << IEEE80211_TX_CTL_STBC_SHIFT; | ||
670 | |||
671 | if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING) | ||
672 | mi->tx_flags |= IEEE80211_TX_CTL_LDPC; | ||
673 | |||
674 | if (oper_chan_type != NL80211_CHAN_HT40MINUS && | ||
675 | oper_chan_type != NL80211_CHAN_HT40PLUS) | ||
676 | sta_cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; | ||
677 | |||
678 | for (i = 0; i < ARRAY_SIZE(mi->groups); i++) { | ||
679 | u16 req = 0; | ||
680 | |||
681 | mi->groups[i].supported = 0; | ||
682 | if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI) { | ||
683 | if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) | ||
684 | req |= IEEE80211_HT_CAP_SGI_40; | ||
685 | else | ||
686 | req |= IEEE80211_HT_CAP_SGI_20; | ||
687 | } | ||
688 | |||
689 | if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) | ||
690 | req |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; | ||
691 | |||
692 | if ((sta_cap & req) != req) | ||
693 | continue; | ||
694 | |||
695 | mi->groups[i].supported = | ||
696 | mcs->rx_mask[minstrel_mcs_groups[i].streams - 1]; | ||
697 | } | ||
698 | } | ||
699 | |||
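As a concrete (hypothetical) example of the capability matching: a 2-stream, 20 MHz-only peer that advertises SGI-20 ends up with only the HT20 groups enabled:

/*
 *   group 0 (1 stream,  LGI, HT20): supported = mcs->rx_mask[0]
 *   group 1 (2 streams, LGI, HT20): supported = mcs->rx_mask[1]
 *   group 3 (1 stream,  SGI, HT20): supported = mcs->rx_mask[0]
 *   group 4 (2 streams, SGI, HT20): supported = mcs->rx_mask[1]
 *   3-stream groups: rx_mask[2] is 0, so nothing gets enabled
 *   40 MHz groups:   the capability check fails, supported stays 0
 */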
700 | static void | ||
701 | minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband, | ||
702 | struct ieee80211_sta *sta, void *priv_sta) | ||
703 | { | ||
704 | struct minstrel_priv *mp = priv; | ||
705 | |||
706 | minstrel_ht_update_caps(priv, sband, sta, priv_sta, mp->hw->conf.channel_type); | ||
707 | } | ||
708 | |||
709 | static void | ||
710 | minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband, | ||
711 | struct ieee80211_sta *sta, void *priv_sta, | ||
712 | u32 changed, enum nl80211_channel_type oper_chan_type) | ||
713 | { | ||
714 | minstrel_ht_update_caps(priv, sband, sta, priv_sta, oper_chan_type); | ||
715 | } | ||
716 | |||
717 | static void * | ||
718 | minstrel_ht_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp) | ||
719 | { | ||
720 | struct ieee80211_supported_band *sband; | ||
721 | struct minstrel_ht_sta_priv *msp; | ||
722 | struct minstrel_priv *mp = priv; | ||
723 | struct ieee80211_hw *hw = mp->hw; | ||
724 | int max_rates = 0; | ||
725 | int i; | ||
726 | |||
727 | for (i = 0; i < IEEE80211_NUM_BANDS; i++) { | ||
728 | sband = hw->wiphy->bands[i]; | ||
729 | if (sband && sband->n_bitrates > max_rates) | ||
730 | max_rates = sband->n_bitrates; | ||
731 | } | ||
732 | |||
733 | msp = kzalloc(sizeof(*msp), gfp); | ||
734 | if (!msp) | ||
735 | return NULL; | ||
736 | |||
737 | msp->ratelist = kzalloc(sizeof(struct minstrel_rate) * max_rates, gfp); | ||
738 | if (!msp->ratelist) | ||
739 | goto error; | ||
740 | |||
741 | msp->sample_table = kmalloc(SAMPLE_COLUMNS * max_rates, gfp); | ||
742 | if (!msp->sample_table) | ||
743 | goto error1; | ||
744 | |||
745 | return msp; | ||
746 | |||
747 | error1: | ||
748 | kfree(msp->ratelist); | ||
749 | error: | ||
750 | kfree(msp); | ||
751 | return NULL; | ||
752 | } | ||
753 | |||
754 | static void | ||
755 | minstrel_ht_free_sta(void *priv, struct ieee80211_sta *sta, void *priv_sta) | ||
756 | { | ||
757 | struct minstrel_ht_sta_priv *msp = priv_sta; | ||
758 | |||
759 | kfree(msp->sample_table); | ||
760 | kfree(msp->ratelist); | ||
761 | kfree(msp); | ||
762 | } | ||
763 | |||
764 | static void * | ||
765 | minstrel_ht_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) | ||
766 | { | ||
767 | return mac80211_minstrel.alloc(hw, debugfsdir); | ||
768 | } | ||
769 | |||
770 | static void | ||
771 | minstrel_ht_free(void *priv) | ||
772 | { | ||
773 | mac80211_minstrel.free(priv); | ||
774 | } | ||
775 | |||
776 | static struct rate_control_ops mac80211_minstrel_ht = { | ||
777 | .name = "minstrel_ht", | ||
778 | .tx_status = minstrel_ht_tx_status, | ||
779 | .get_rate = minstrel_ht_get_rate, | ||
780 | .rate_init = minstrel_ht_rate_init, | ||
781 | .rate_update = minstrel_ht_rate_update, | ||
782 | .alloc_sta = minstrel_ht_alloc_sta, | ||
783 | .free_sta = minstrel_ht_free_sta, | ||
784 | .alloc = minstrel_ht_alloc, | ||
785 | .free = minstrel_ht_free, | ||
786 | #ifdef CONFIG_MAC80211_DEBUGFS | ||
787 | .add_sta_debugfs = minstrel_ht_add_sta_debugfs, | ||
788 | .remove_sta_debugfs = minstrel_ht_remove_sta_debugfs, | ||
789 | #endif | ||
790 | }; | ||
791 | |||
792 | |||
793 | static void | ||
794 | init_sample_table(void) | ||
795 | { | ||
796 | int col, i, new_idx; | ||
797 | u8 rnd[MCS_GROUP_RATES]; | ||
798 | |||
799 | memset(sample_table, 0xff, sizeof(sample_table)); | ||
800 | for (col = 0; col < SAMPLE_COLUMNS; col++) { | ||
801 | for (i = 0; i < MCS_GROUP_RATES; i++) { | ||
802 | get_random_bytes(rnd, sizeof(rnd)); | ||
803 | new_idx = (i + rnd[i]) % MCS_GROUP_RATES; | ||
804 | |||
805 | while (sample_table[col][new_idx] != 0xff) | ||
806 | new_idx = (new_idx + 1) % MCS_GROUP_RATES; | ||
807 | |||
808 | sample_table[col][new_idx] = i; | ||
809 | } | ||
810 | } | ||
811 | } | ||
812 | |||
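A user-space sketch of the same shuffle (illustrative only; rand() stands in for get_random_bytes(), which is not available outside the kernel). Each column ends up holding a random permutation of 0..MCS_GROUP_RATES-1, so sampling walks the rates of a group in a different order per column:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#define MCS_GROUP_RATES 8
#define SAMPLE_COLUMNS  10

static unsigned char sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES];

static void init_sample_table(void)
{
	int col, i, new_idx;

	memset(sample_table, 0xff, sizeof(sample_table));
	for (col = 0; col < SAMPLE_COLUMNS; col++) {
		for (i = 0; i < MCS_GROUP_RATES; i++) {
			new_idx = (i + (rand() & 0xff)) % MCS_GROUP_RATES;
			/* linear probing keeps every column a permutation */
			while (sample_table[col][new_idx] != 0xff)
				new_idx = (new_idx + 1) % MCS_GROUP_RATES;
			sample_table[col][new_idx] = i;
		}
	}
}

int main(void)
{
	int i;

	srand((unsigned)time(NULL));
	init_sample_table();
	for (i = 0; i < MCS_GROUP_RATES; i++)
		printf("%d ", sample_table[0][i]);
	printf("\n");	/* e.g. "3 7 0 5 1 6 2 4" */
	return 0;
}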
813 | int __init | ||
814 | rc80211_minstrel_ht_init(void) | ||
815 | { | ||
816 | init_sample_table(); | ||
817 | return ieee80211_rate_control_register(&mac80211_minstrel_ht); | ||
818 | } | ||
819 | |||
820 | void | ||
821 | rc80211_minstrel_ht_exit(void) | ||
822 | { | ||
823 | ieee80211_rate_control_unregister(&mac80211_minstrel_ht); | ||
824 | } | ||
diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h new file mode 100644 index 000000000000..696c0fc6e0b7 --- /dev/null +++ b/net/mac80211/rc80211_minstrel_ht.h | |||
@@ -0,0 +1,128 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #ifndef __RC_MINSTREL_HT_H | ||
10 | #define __RC_MINSTREL_HT_H | ||
11 | |||
12 | /* | ||
13 | * The number of streams can be changed to 2 to reduce code | ||
14 | * size and memory footprint. | ||
15 | */ | ||
16 | #define MINSTREL_MAX_STREAMS 3 | ||
17 | #define MINSTREL_STREAM_GROUPS 4 | ||
18 | |||
19 | /* scaled fraction values */ | ||
20 | #define MINSTREL_SCALE 16 | ||
21 | #define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / (div)) | ||
22 | #define MINSTREL_TRUNC(val) ((val) >> MINSTREL_SCALE) | ||
23 | |||
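These are fixed-point helpers with a 16-bit fractional part; a couple of worked values (purely illustrative):

/*
 * MINSTREL_FRAC(3, 4)  = (3 << 16) / 4  = 49152   (~0.75)
 * MINSTREL_FRAC(9, 10) = (9 << 16) / 10 = 58982   (~0.90)
 * MINSTREL_TRUNC(MINSTREL_FRAC(3, 4) * 1000) = 49152000 >> 16 = 750,
 * i.e. the value the debugfs code prints as "75.0".
 */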
24 | #define MCS_GROUP_RATES 8 | ||
25 | |||
26 | struct mcs_group { | ||
27 | u32 flags; | ||
28 | unsigned int streams; | ||
29 | unsigned int duration[MCS_GROUP_RATES]; | ||
30 | }; | ||
31 | |||
32 | struct minstrel_rate_stats { | ||
33 | /* current / last sampling period attempts/success counters */ | ||
34 | unsigned int attempts, last_attempts; | ||
35 | unsigned int success, last_success; | ||
36 | |||
37 | /* total attempts/success counters */ | ||
38 | u64 att_hist, succ_hist; | ||
39 | |||
40 | /* current throughput */ | ||
41 | unsigned int cur_tp; | ||
42 | |||
43 | /* packet delivery probabilities */ | ||
44 | unsigned int cur_prob, probability; | ||
45 | |||
46 | /* maximum retry counts */ | ||
47 | unsigned int retry_count; | ||
48 | unsigned int retry_count_rtscts; | ||
49 | |||
50 | bool retry_updated; | ||
51 | u8 sample_skipped; | ||
52 | }; | ||
53 | |||
54 | struct minstrel_mcs_group_data { | ||
55 | u8 index; | ||
56 | u8 column; | ||
57 | |||
58 | /* bitfield of supported MCS rates of this group */ | ||
59 | u8 supported; | ||
60 | |||
61 | /* selected primary rates */ | ||
62 | unsigned int max_tp_rate; | ||
63 | unsigned int max_tp_rate2; | ||
64 | unsigned int max_prob_rate; | ||
65 | |||
66 | /* MCS rate statistics */ | ||
67 | struct minstrel_rate_stats rates[MCS_GROUP_RATES]; | ||
68 | }; | ||
69 | |||
70 | struct minstrel_ht_sta { | ||
71 | /* ampdu length (average, per sampling interval) */ | ||
72 | unsigned int ampdu_len; | ||
73 | unsigned int ampdu_packets; | ||
74 | |||
75 | /* ampdu length (EWMA) */ | ||
76 | unsigned int avg_ampdu_len; | ||
77 | |||
78 | /* best throughput rate */ | ||
79 | unsigned int max_tp_rate; | ||
80 | |||
81 | /* second best throughput rate */ | ||
82 | unsigned int max_tp_rate2; | ||
83 | |||
84 | /* best probability rate */ | ||
85 | unsigned int max_prob_rate; | ||
86 | |||
87 | /* time of last status update */ | ||
88 | unsigned long stats_update; | ||
89 | |||
90 | /* overhead time in usec for each frame */ | ||
91 | unsigned int overhead; | ||
92 | unsigned int overhead_rtscts; | ||
93 | |||
94 | unsigned int total_packets; | ||
95 | unsigned int sample_packets; | ||
96 | |||
97 | /* tx flags to add for frames for this sta */ | ||
98 | u32 tx_flags; | ||
99 | |||
100 | u8 sample_wait; | ||
101 | u8 sample_tries; | ||
102 | u8 sample_count; | ||
103 | u8 sample_slow; | ||
104 | |||
105 | /* current MCS group to be sampled */ | ||
106 | u8 sample_group; | ||
107 | |||
108 | /* MCS rate group info and statistics */ | ||
109 | struct minstrel_mcs_group_data groups[MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS]; | ||
110 | }; | ||
111 | |||
112 | struct minstrel_ht_sta_priv { | ||
113 | union { | ||
114 | struct minstrel_ht_sta ht; | ||
115 | struct minstrel_sta_info legacy; | ||
116 | }; | ||
117 | #ifdef CONFIG_MAC80211_DEBUGFS | ||
118 | struct dentry *dbg_stats; | ||
119 | #endif | ||
120 | void *ratelist; | ||
121 | void *sample_table; | ||
122 | bool is_ht; | ||
123 | }; | ||
124 | |||
125 | void minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir); | ||
126 | void minstrel_ht_remove_sta_debugfs(void *priv, void *priv_sta); | ||
127 | |||
128 | #endif | ||
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c new file mode 100644 index 000000000000..4fb3ccbd8b40 --- /dev/null +++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c | |||
@@ -0,0 +1,120 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 Felix Fietkau <nbd@openwrt.org> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | #include <linux/netdevice.h> | ||
9 | #include <linux/types.h> | ||
10 | #include <linux/skbuff.h> | ||
11 | #include <linux/debugfs.h> | ||
12 | #include <linux/ieee80211.h> | ||
13 | #include <net/mac80211.h> | ||
14 | #include "rc80211_minstrel.h" | ||
15 | #include "rc80211_minstrel_ht.h" | ||
16 | |||
17 | extern const struct mcs_group minstrel_mcs_groups[]; | ||
18 | |||
19 | static int | ||
20 | minstrel_ht_stats_open(struct inode *inode, struct file *file) | ||
21 | { | ||
22 | struct minstrel_ht_sta_priv *msp = inode->i_private; | ||
23 | struct minstrel_ht_sta *mi = &msp->ht; | ||
24 | struct minstrel_debugfs_info *ms; | ||
25 | unsigned int i, j, tp, prob, eprob; | ||
26 | char *p; | ||
27 | int ret; | ||
28 | |||
29 | if (!msp->is_ht) { | ||
30 | inode->i_private = &msp->legacy; | ||
31 | ret = minstrel_stats_open(inode, file); | ||
32 | inode->i_private = msp; | ||
33 | return ret; | ||
34 | } | ||
35 | |||
36 | ms = kmalloc(sizeof(*ms) + 8192, GFP_KERNEL); | ||
37 | if (!ms) | ||
38 | return -ENOMEM; | ||
39 | |||
40 | file->private_data = ms; | ||
41 | p = ms->buf; | ||
42 | p += sprintf(p, "type rate throughput ewma prob this prob " | ||
43 | "this succ/attempt success attempts\n"); | ||
44 | for (i = 0; i < MINSTREL_MAX_STREAMS * MINSTREL_STREAM_GROUPS; i++) { | ||
45 | char htmode = '2'; | ||
46 | char gimode = 'L'; | ||
47 | |||
48 | if (!mi->groups[i].supported) | ||
49 | continue; | ||
50 | |||
51 | if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) | ||
52 | htmode = '4'; | ||
53 | if (minstrel_mcs_groups[i].flags & IEEE80211_TX_RC_SHORT_GI) | ||
54 | gimode = 'S'; | ||
55 | |||
56 | for (j = 0; j < MCS_GROUP_RATES; j++) { | ||
57 | struct minstrel_rate_stats *mr = &mi->groups[i].rates[j]; | ||
58 | int idx = i * MCS_GROUP_RATES + j; | ||
59 | |||
60 | if (!(mi->groups[i].supported & BIT(j))) | ||
61 | continue; | ||
62 | |||
63 | p += sprintf(p, "HT%c0/%cGI ", htmode, gimode); | ||
64 | |||
65 | *(p++) = (idx == mi->max_tp_rate) ? 'T' : ' '; | ||
66 | *(p++) = (idx == mi->max_tp_rate2) ? 't' : ' '; | ||
67 | *(p++) = (idx == mi->max_prob_rate) ? 'P' : ' '; | ||
68 | p += sprintf(p, "MCS%-2u", (minstrel_mcs_groups[i].streams - 1) * | ||
69 | MCS_GROUP_RATES + j); | ||
70 | |||
71 | tp = mr->cur_tp / 10; | ||
72 | prob = MINSTREL_TRUNC(mr->cur_prob * 1000); | ||
73 | eprob = MINSTREL_TRUNC(mr->probability * 1000); | ||
74 | |||
75 | p += sprintf(p, " %6u.%1u %6u.%1u %6u.%1u " | ||
76 | "%3u(%3u) %8llu %8llu\n", | ||
77 | tp / 10, tp % 10, | ||
78 | eprob / 10, eprob % 10, | ||
79 | prob / 10, prob % 10, | ||
80 | mr->last_success, | ||
81 | mr->last_attempts, | ||
82 | (unsigned long long)mr->succ_hist, | ||
83 | (unsigned long long)mr->att_hist); | ||
84 | } | ||
85 | } | ||
86 | p += sprintf(p, "\nTotal packet count:: ideal %d " | ||
87 | "lookaround %d\n", | ||
88 | max(0, (int) mi->total_packets - (int) mi->sample_packets), | ||
89 | mi->sample_packets); | ||
90 | p += sprintf(p, "Average A-MPDU length: %d.%d\n", | ||
91 | MINSTREL_TRUNC(mi->avg_ampdu_len), | ||
92 | MINSTREL_TRUNC(mi->avg_ampdu_len * 10) % 10); | ||
93 | ms->len = p - ms->buf; | ||
94 | |||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | static const struct file_operations minstrel_ht_stat_fops = { | ||
99 | .owner = THIS_MODULE, | ||
100 | .open = minstrel_ht_stats_open, | ||
101 | .read = minstrel_stats_read, | ||
102 | .release = minstrel_stats_release, | ||
103 | }; | ||
104 | |||
105 | void | ||
106 | minstrel_ht_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir) | ||
107 | { | ||
108 | struct minstrel_ht_sta_priv *msp = priv_sta; | ||
109 | |||
110 | msp->dbg_stats = debugfs_create_file("rc_stats", S_IRUGO, dir, msp, | ||
111 | &minstrel_ht_stat_fops); | ||
112 | } | ||
113 | |||
114 | void | ||
115 | minstrel_ht_remove_sta_debugfs(void *priv, void *priv_sta) | ||
116 | { | ||
117 | struct minstrel_ht_sta_priv *msp = priv_sta; | ||
118 | |||
119 | debugfs_remove(msp->dbg_stats); | ||
120 | } | ||
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index be9abc2e6348..1f76352caa9e 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -719,16 +719,13 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx, | |||
719 | 719 | ||
720 | tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; | 720 | tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; |
721 | 721 | ||
722 | spin_lock(&sta->lock); | 722 | tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); |
723 | 723 | if (!tid_agg_rx) | |
724 | if (!sta->ampdu_mlme.tid_active_rx[tid]) | 724 | goto dont_reorder; |
725 | goto dont_reorder_unlock; | ||
726 | |||
727 | tid_agg_rx = sta->ampdu_mlme.tid_rx[tid]; | ||
728 | 725 | ||
729 | /* qos null data frames are excluded */ | 726 | /* qos null data frames are excluded */ |
730 | if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) | 727 | if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) |
731 | goto dont_reorder_unlock; | 728 | goto dont_reorder; |
732 | 729 | ||
733 | /* new, potentially un-ordered, ampdu frame - process it */ | 730 | /* new, potentially un-ordered, ampdu frame - process it */ |
734 | 731 | ||
@@ -740,20 +737,22 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx, | |||
740 | /* if this mpdu is fragmented - terminate rx aggregation session */ | 737 | /* if this mpdu is fragmented - terminate rx aggregation session */ |
741 | sc = le16_to_cpu(hdr->seq_ctrl); | 738 | sc = le16_to_cpu(hdr->seq_ctrl); |
742 | if (sc & IEEE80211_SCTL_FRAG) { | 739 | if (sc & IEEE80211_SCTL_FRAG) { |
743 | spin_unlock(&sta->lock); | 740 | skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; |
744 | __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, | 741 | skb_queue_tail(&rx->sdata->skb_queue, skb); |
745 | WLAN_REASON_QSTA_REQUIRE_SETUP); | 742 | ieee80211_queue_work(&local->hw, &rx->sdata->work); |
746 | dev_kfree_skb(skb); | ||
747 | return; | 743 | return; |
748 | } | 744 | } |
749 | 745 | ||
750 | if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames)) { | 746 | /* |
751 | spin_unlock(&sta->lock); | 747 | * No locking needed -- we will only ever process one |
748 | * RX packet at a time, and thus own tid_agg_rx. All | ||
749 | * other code manipulating it needs to (and does) make | ||
750 | * sure that we cannot get to it any more before doing | ||
751 | * anything with it. | ||
752 | */ | ||
753 | if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames)) | ||
752 | return; | 754 | return; |
753 | } | ||
754 | 755 | ||
755 | dont_reorder_unlock: | ||
756 | spin_unlock(&sta->lock); | ||
757 | dont_reorder: | 756 | dont_reorder: |
758 | __skb_queue_tail(frames, skb); | 757 | __skb_queue_tail(frames, skb); |
759 | } | 758 | } |
@@ -825,6 +824,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
825 | ieee80211_rx_result result = RX_DROP_UNUSABLE; | 824 | ieee80211_rx_result result = RX_DROP_UNUSABLE; |
826 | struct ieee80211_key *stakey = NULL; | 825 | struct ieee80211_key *stakey = NULL; |
827 | int mmie_keyidx = -1; | 826 | int mmie_keyidx = -1; |
827 | __le16 fc; | ||
828 | 828 | ||
829 | /* | 829 | /* |
830 | * Key selection 101 | 830 | * Key selection 101 |
@@ -866,13 +866,15 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
866 | if (rx->sta) | 866 | if (rx->sta) |
867 | stakey = rcu_dereference(rx->sta->key); | 867 | stakey = rcu_dereference(rx->sta->key); |
868 | 868 | ||
869 | if (!ieee80211_has_protected(hdr->frame_control)) | 869 | fc = hdr->frame_control; |
870 | |||
871 | if (!ieee80211_has_protected(fc)) | ||
870 | mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); | 872 | mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb); |
871 | 873 | ||
872 | if (!is_multicast_ether_addr(hdr->addr1) && stakey) { | 874 | if (!is_multicast_ether_addr(hdr->addr1) && stakey) { |
873 | rx->key = stakey; | 875 | rx->key = stakey; |
874 | /* Skip decryption if the frame is not protected. */ | 876 | /* Skip decryption if the frame is not protected. */ |
875 | if (!ieee80211_has_protected(hdr->frame_control)) | 877 | if (!ieee80211_has_protected(fc)) |
876 | return RX_CONTINUE; | 878 | return RX_CONTINUE; |
877 | } else if (mmie_keyidx >= 0) { | 879 | } else if (mmie_keyidx >= 0) { |
878 | /* Broadcast/multicast robust management frame / BIP */ | 880 | /* Broadcast/multicast robust management frame / BIP */ |
@@ -884,7 +886,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
884 | mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) | 886 | mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS) |
885 | return RX_DROP_MONITOR; /* unexpected BIP keyidx */ | 887 | return RX_DROP_MONITOR; /* unexpected BIP keyidx */ |
886 | rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); | 888 | rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]); |
887 | } else if (!ieee80211_has_protected(hdr->frame_control)) { | 889 | } else if (!ieee80211_has_protected(fc)) { |
888 | /* | 890 | /* |
889 | * The frame was not protected, so skip decryption. However, we | 891 | * The frame was not protected, so skip decryption. However, we |
890 | * need to set rx->key if there is a key that could have been | 892 | * need to set rx->key if there is a key that could have been |
@@ -892,7 +894,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
892 | * have been expected. | 894 | * have been expected. |
893 | */ | 895 | */ |
894 | struct ieee80211_key *key = NULL; | 896 | struct ieee80211_key *key = NULL; |
895 | if (ieee80211_is_mgmt(hdr->frame_control) && | 897 | if (ieee80211_is_mgmt(fc) && |
896 | is_multicast_ether_addr(hdr->addr1) && | 898 | is_multicast_ether_addr(hdr->addr1) && |
897 | (key = rcu_dereference(rx->sdata->default_mgmt_key))) | 899 | (key = rcu_dereference(rx->sdata->default_mgmt_key))) |
898 | rx->key = key; | 900 | rx->key = key; |
@@ -914,7 +916,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
914 | (status->flag & RX_FLAG_IV_STRIPPED)) | 916 | (status->flag & RX_FLAG_IV_STRIPPED)) |
915 | return RX_CONTINUE; | 917 | return RX_CONTINUE; |
916 | 918 | ||
917 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | 919 | hdrlen = ieee80211_hdrlen(fc); |
918 | 920 | ||
919 | if (rx->skb->len < 8 + hdrlen) | 921 | if (rx->skb->len < 8 + hdrlen) |
920 | return RX_DROP_UNUSABLE; /* TODO: count this? */ | 922 | return RX_DROP_UNUSABLE; /* TODO: count this? */ |
@@ -947,19 +949,17 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx) | |||
947 | 949 | ||
948 | if (skb_linearize(rx->skb)) | 950 | if (skb_linearize(rx->skb)) |
949 | return RX_DROP_UNUSABLE; | 951 | return RX_DROP_UNUSABLE; |
950 | 952 | /* the hdr variable is invalid now! */ | |
951 | hdr = (struct ieee80211_hdr *)rx->skb->data; | ||
952 | |||
953 | /* Check for weak IVs if possible */ | ||
954 | if (rx->sta && rx->key->conf.alg == ALG_WEP && | ||
955 | ieee80211_is_data(hdr->frame_control) && | ||
956 | (!(status->flag & RX_FLAG_IV_STRIPPED) || | ||
957 | !(status->flag & RX_FLAG_DECRYPTED)) && | ||
958 | ieee80211_wep_is_weak_iv(rx->skb, rx->key)) | ||
959 | rx->sta->wep_weak_iv_count++; | ||
960 | 953 | ||
961 | switch (rx->key->conf.alg) { | 954 | switch (rx->key->conf.alg) { |
962 | case ALG_WEP: | 955 | case ALG_WEP: |
956 | /* Check for weak IVs if possible */ | ||
957 | if (rx->sta && ieee80211_is_data(fc) && | ||
958 | (!(status->flag & RX_FLAG_IV_STRIPPED) || | ||
959 | !(status->flag & RX_FLAG_DECRYPTED)) && | ||
960 | ieee80211_wep_is_weak_iv(rx->skb, rx->key)) | ||
961 | rx->sta->wep_weak_iv_count++; | ||
962 | |||
963 | result = ieee80211_crypto_wep_decrypt(rx); | 963 | result = ieee80211_crypto_wep_decrypt(rx); |
964 | break; | 964 | break; |
965 | case ALG_TKIP: | 965 | case ALG_TKIP: |
@@ -1267,11 +1267,13 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
1267 | rx->queue, &(rx->skb)); | 1267 | rx->queue, &(rx->skb)); |
1268 | if (rx->key && rx->key->conf.alg == ALG_CCMP && | 1268 | if (rx->key && rx->key->conf.alg == ALG_CCMP && |
1269 | ieee80211_has_protected(fc)) { | 1269 | ieee80211_has_protected(fc)) { |
1270 | int queue = ieee80211_is_mgmt(fc) ? | ||
1271 | NUM_RX_DATA_QUEUES : rx->queue; | ||
1270 | /* Store CCMP PN so that we can verify that the next | 1272 | /* Store CCMP PN so that we can verify that the next |
1271 | * fragment has a sequential PN value. */ | 1273 | * fragment has a sequential PN value. */ |
1272 | entry->ccmp = 1; | 1274 | entry->ccmp = 1; |
1273 | memcpy(entry->last_pn, | 1275 | memcpy(entry->last_pn, |
1274 | rx->key->u.ccmp.rx_pn[rx->queue], | 1276 | rx->key->u.ccmp.rx_pn[queue], |
1275 | CCMP_PN_LEN); | 1277 | CCMP_PN_LEN); |
1276 | } | 1278 | } |
1277 | return RX_QUEUED; | 1279 | return RX_QUEUED; |
@@ -1291,6 +1293,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
1291 | if (entry->ccmp) { | 1293 | if (entry->ccmp) { |
1292 | int i; | 1294 | int i; |
1293 | u8 pn[CCMP_PN_LEN], *rpn; | 1295 | u8 pn[CCMP_PN_LEN], *rpn; |
1296 | int queue; | ||
1294 | if (!rx->key || rx->key->conf.alg != ALG_CCMP) | 1297 | if (!rx->key || rx->key->conf.alg != ALG_CCMP) |
1295 | return RX_DROP_UNUSABLE; | 1298 | return RX_DROP_UNUSABLE; |
1296 | memcpy(pn, entry->last_pn, CCMP_PN_LEN); | 1299 | memcpy(pn, entry->last_pn, CCMP_PN_LEN); |
@@ -1299,7 +1302,9 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
1299 | if (pn[i]) | 1302 | if (pn[i]) |
1300 | break; | 1303 | break; |
1301 | } | 1304 | } |
1302 | rpn = rx->key->u.ccmp.rx_pn[rx->queue]; | 1305 | queue = ieee80211_is_mgmt(fc) ? |
1306 | NUM_RX_DATA_QUEUES : rx->queue; | ||
1307 | rpn = rx->key->u.ccmp.rx_pn[queue]; | ||
1303 | if (memcmp(pn, rpn, CCMP_PN_LEN)) | 1308 | if (memcmp(pn, rpn, CCMP_PN_LEN)) |
1304 | return RX_DROP_UNUSABLE; | 1309 | return RX_DROP_UNUSABLE; |
1305 | memcpy(entry->last_pn, pn, CCMP_PN_LEN); | 1310 | memcpy(entry->last_pn, pn, CCMP_PN_LEN); |
@@ -1829,13 +1834,11 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) | |||
1829 | &bar_data, sizeof(bar_data))) | 1834 | &bar_data, sizeof(bar_data))) |
1830 | return RX_DROP_MONITOR; | 1835 | return RX_DROP_MONITOR; |
1831 | 1836 | ||
1832 | spin_lock(&rx->sta->lock); | ||
1833 | tid = le16_to_cpu(bar_data.control) >> 12; | 1837 | tid = le16_to_cpu(bar_data.control) >> 12; |
1834 | if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) { | 1838 | |
1835 | spin_unlock(&rx->sta->lock); | 1839 | tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); |
1840 | if (!tid_agg_rx) | ||
1836 | return RX_DROP_MONITOR; | 1841 | return RX_DROP_MONITOR; |
1837 | } | ||
1838 | tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid]; | ||
1839 | 1842 | ||
1840 | start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; | 1843 | start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; |
1841 | 1844 | ||
@@ -1848,11 +1851,15 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) | |||
1848 | ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num, | 1851 | ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num, |
1849 | frames); | 1852 | frames); |
1850 | kfree_skb(skb); | 1853 | kfree_skb(skb); |
1851 | spin_unlock(&rx->sta->lock); | ||
1852 | return RX_QUEUED; | 1854 | return RX_QUEUED; |
1853 | } | 1855 | } |
1854 | 1856 | ||
1855 | return RX_CONTINUE; | 1857 | /* |
1858 | * After this point, we only want management frames, | ||
1859 | * so we can drop all remaining control frames to | ||
1860 | * cooked monitor interfaces. | ||
1861 | */ | ||
1862 | return RX_DROP_MONITOR; | ||
1856 | } | 1863 | } |
1857 | 1864 | ||
1858 | static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, | 1865 | static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata, |
@@ -1944,30 +1951,27 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
1944 | if (len < IEEE80211_MIN_ACTION_SIZE + 1) | 1951 | if (len < IEEE80211_MIN_ACTION_SIZE + 1) |
1945 | break; | 1952 | break; |
1946 | 1953 | ||
1947 | if (sdata->vif.type == NL80211_IFTYPE_STATION) | ||
1948 | return ieee80211_sta_rx_mgmt(sdata, rx->skb); | ||
1949 | |||
1950 | switch (mgmt->u.action.u.addba_req.action_code) { | 1954 | switch (mgmt->u.action.u.addba_req.action_code) { |
1951 | case WLAN_ACTION_ADDBA_REQ: | 1955 | case WLAN_ACTION_ADDBA_REQ: |
1952 | if (len < (IEEE80211_MIN_ACTION_SIZE + | 1956 | if (len < (IEEE80211_MIN_ACTION_SIZE + |
1953 | sizeof(mgmt->u.action.u.addba_req))) | 1957 | sizeof(mgmt->u.action.u.addba_req))) |
1954 | return RX_DROP_MONITOR; | 1958 | goto invalid; |
1955 | ieee80211_process_addba_request(local, rx->sta, mgmt, len); | 1959 | break; |
1956 | goto handled; | ||
1957 | case WLAN_ACTION_ADDBA_RESP: | 1960 | case WLAN_ACTION_ADDBA_RESP: |
1958 | if (len < (IEEE80211_MIN_ACTION_SIZE + | 1961 | if (len < (IEEE80211_MIN_ACTION_SIZE + |
1959 | sizeof(mgmt->u.action.u.addba_resp))) | 1962 | sizeof(mgmt->u.action.u.addba_resp))) |
1960 | break; | 1963 | goto invalid; |
1961 | ieee80211_process_addba_resp(local, rx->sta, mgmt, len); | 1964 | break; |
1962 | goto handled; | ||
1963 | case WLAN_ACTION_DELBA: | 1965 | case WLAN_ACTION_DELBA: |
1964 | if (len < (IEEE80211_MIN_ACTION_SIZE + | 1966 | if (len < (IEEE80211_MIN_ACTION_SIZE + |
1965 | sizeof(mgmt->u.action.u.delba))) | 1967 | sizeof(mgmt->u.action.u.delba))) |
1966 | break; | 1968 | goto invalid; |
1967 | ieee80211_process_delba(sdata, rx->sta, mgmt, len); | 1969 | break; |
1968 | goto handled; | 1970 | default: |
1971 | goto invalid; | ||
1969 | } | 1972 | } |
1970 | break; | 1973 | |
1974 | goto queue; | ||
1971 | case WLAN_CATEGORY_SPECTRUM_MGMT: | 1975 | case WLAN_CATEGORY_SPECTRUM_MGMT: |
1972 | if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ) | 1976 | if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ) |
1973 | break; | 1977 | break; |
@@ -1997,7 +2001,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
1997 | if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN)) | 2001 | if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN)) |
1998 | break; | 2002 | break; |
1999 | 2003 | ||
2000 | return ieee80211_sta_rx_mgmt(sdata, rx->skb); | 2004 | goto queue; |
2001 | } | 2005 | } |
2002 | break; | 2006 | break; |
2003 | case WLAN_CATEGORY_SA_QUERY: | 2007 | case WLAN_CATEGORY_SA_QUERY: |
@@ -2015,11 +2019,12 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
2015 | break; | 2019 | break; |
2016 | case WLAN_CATEGORY_MESH_PLINK: | 2020 | case WLAN_CATEGORY_MESH_PLINK: |
2017 | case WLAN_CATEGORY_MESH_PATH_SEL: | 2021 | case WLAN_CATEGORY_MESH_PATH_SEL: |
2018 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 2022 | if (!ieee80211_vif_is_mesh(&sdata->vif)) |
2019 | return ieee80211_mesh_rx_mgmt(sdata, rx->skb); | 2023 | break; |
2020 | break; | 2024 | goto queue; |
2021 | } | 2025 | } |
2022 | 2026 | ||
2027 | invalid: | ||
2023 | /* | 2028 | /* |
2024 | * For AP mode, hostapd is responsible for handling any action | 2029 | * For AP mode, hostapd is responsible for handling any action |
2025 | * frames that we didn't handle, including returning unknown | 2030 | * frames that we didn't handle, including returning unknown |
@@ -2039,8 +2044,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
2039 | */ | 2044 | */ |
2040 | status = IEEE80211_SKB_RXCB(rx->skb); | 2045 | status = IEEE80211_SKB_RXCB(rx->skb); |
2041 | 2046 | ||
2042 | if (sdata->vif.type == NL80211_IFTYPE_STATION && | 2047 | if (cfg80211_rx_action(rx->sdata->dev, status->freq, |
2043 | cfg80211_rx_action(rx->sdata->dev, status->freq, | ||
2044 | rx->skb->data, rx->skb->len, | 2048 | rx->skb->data, rx->skb->len, |
2045 | GFP_ATOMIC)) | 2049 | GFP_ATOMIC)) |
2046 | goto handled; | 2050 | goto handled; |
@@ -2068,6 +2072,14 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
2068 | rx->sta->rx_packets++; | 2072 | rx->sta->rx_packets++; |
2069 | dev_kfree_skb(rx->skb); | 2073 | dev_kfree_skb(rx->skb); |
2070 | return RX_QUEUED; | 2074 | return RX_QUEUED; |
2075 | |||
2076 | queue: | ||
2077 | rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; | ||
2078 | skb_queue_tail(&sdata->skb_queue, rx->skb); | ||
2079 | ieee80211_queue_work(&local->hw, &sdata->work); | ||
2080 | if (rx->sta) | ||
2081 | rx->sta->rx_packets++; | ||
2082 | return RX_QUEUED; | ||
2071 | } | 2083 | } |
2072 | 2084 | ||
2073 | static ieee80211_rx_result debug_noinline | 2085 | static ieee80211_rx_result debug_noinline |
@@ -2075,10 +2087,15 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) | |||
2075 | { | 2087 | { |
2076 | struct ieee80211_sub_if_data *sdata = rx->sdata; | 2088 | struct ieee80211_sub_if_data *sdata = rx->sdata; |
2077 | ieee80211_rx_result rxs; | 2089 | ieee80211_rx_result rxs; |
2090 | struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; | ||
2091 | __le16 stype; | ||
2078 | 2092 | ||
2079 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) | 2093 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) |
2080 | return RX_DROP_MONITOR; | 2094 | return RX_DROP_MONITOR; |
2081 | 2095 | ||
2096 | if (rx->skb->len < 24) | ||
2097 | return RX_DROP_MONITOR; | ||
2098 | |||
2082 | if (ieee80211_drop_unencrypted_mgmt(rx)) | 2099 | if (ieee80211_drop_unencrypted_mgmt(rx)) |
2083 | return RX_DROP_UNUSABLE; | 2100 | return RX_DROP_UNUSABLE; |
2084 | 2101 | ||
@@ -2086,16 +2103,42 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) | |||
2086 | if (rxs != RX_CONTINUE) | 2103 | if (rxs != RX_CONTINUE) |
2087 | return rxs; | 2104 | return rxs; |
2088 | 2105 | ||
2089 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 2106 | stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); |
2090 | return ieee80211_mesh_rx_mgmt(sdata, rx->skb); | ||
2091 | 2107 | ||
2092 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC) | 2108 | if (!ieee80211_vif_is_mesh(&sdata->vif) && |
2093 | return ieee80211_ibss_rx_mgmt(sdata, rx->skb); | 2109 | sdata->vif.type != NL80211_IFTYPE_ADHOC && |
2110 | sdata->vif.type != NL80211_IFTYPE_STATION) | ||
2111 | return RX_DROP_MONITOR; | ||
2112 | |||
2113 | switch (stype) { | ||
2114 | case cpu_to_le16(IEEE80211_STYPE_BEACON): | ||
2115 | case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): | ||
2116 | /* process for all: mesh, mlme, ibss */ | ||
2117 | break; | ||
2118 | case cpu_to_le16(IEEE80211_STYPE_DEAUTH): | ||
2119 | case cpu_to_le16(IEEE80211_STYPE_DISASSOC): | ||
2120 | /* process only for station */ | ||
2121 | if (sdata->vif.type != NL80211_IFTYPE_STATION) | ||
2122 | return RX_DROP_MONITOR; | ||
2123 | break; | ||
2124 | case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): | ||
2125 | case cpu_to_le16(IEEE80211_STYPE_AUTH): | ||
2126 | /* process only for ibss */ | ||
2127 | if (sdata->vif.type != NL80211_IFTYPE_ADHOC) | ||
2128 | return RX_DROP_MONITOR; | ||
2129 | break; | ||
2130 | default: | ||
2131 | return RX_DROP_MONITOR; | ||
2132 | } | ||
2094 | 2133 | ||
2095 | if (sdata->vif.type == NL80211_IFTYPE_STATION) | 2134 | /* queue up frame and kick off work to process it */ |
2096 | return ieee80211_sta_rx_mgmt(sdata, rx->skb); | 2135 | rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; |
2136 | skb_queue_tail(&sdata->skb_queue, rx->skb); | ||
2137 | ieee80211_queue_work(&rx->local->hw, &sdata->work); | ||
2138 | if (rx->sta) | ||
2139 | rx->sta->rx_packets++; | ||
2097 | 2140 | ||
2098 | return RX_DROP_MONITOR; | 2141 | return RX_QUEUED; |
2099 | } | 2142 | } |
2100 | 2143 | ||
2101 | static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr, | 2144 | static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr, |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 730197591ab5..f54d8ba7d788 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -235,6 +235,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, | |||
235 | spin_lock_init(&sta->lock); | 235 | spin_lock_init(&sta->lock); |
236 | spin_lock_init(&sta->flaglock); | 236 | spin_lock_init(&sta->flaglock); |
237 | INIT_WORK(&sta->drv_unblock_wk, sta_unblock); | 237 | INIT_WORK(&sta->drv_unblock_wk, sta_unblock); |
238 | INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); | ||
239 | mutex_init(&sta->ampdu_mlme.mtx); | ||
238 | 240 | ||
239 | memcpy(sta->sta.addr, addr, ETH_ALEN); | 241 | memcpy(sta->sta.addr, addr, ETH_ALEN); |
240 | sta->local = local; | 242 | sta->local = local; |
@@ -246,14 +248,12 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, | |||
246 | } | 248 | } |
247 | 249 | ||
248 | for (i = 0; i < STA_TID_NUM; i++) { | 250 | for (i = 0; i < STA_TID_NUM; i++) { |
249 | /* timer_to_tid must be initialized with identity mapping to | 251 | /* |
250 | * enable session_timer's data differentiation. refer to | 252 | * timer_to_tid must be initialized with identity mapping |
251 | * sta_rx_agg_session_timer_expired for useage */ | 253 | * to enable session_timer's data differentiation. See |
254 | * sta_rx_agg_session_timer_expired for usage. | ||
255 | */ | ||
252 | sta->timer_to_tid[i] = i; | 256 | sta->timer_to_tid[i] = i; |
253 | /* tx */ | ||
254 | sta->ampdu_mlme.tid_state_tx[i] = HT_AGG_STATE_IDLE; | ||
255 | sta->ampdu_mlme.tid_tx[i] = NULL; | ||
256 | sta->ampdu_mlme.addba_req_num[i] = 0; | ||
257 | } | 257 | } |
258 | skb_queue_head_init(&sta->ps_tx_buf); | 258 | skb_queue_head_init(&sta->ps_tx_buf); |
259 | skb_queue_head_init(&sta->tx_filtered); | 259 | skb_queue_head_init(&sta->tx_filtered); |
@@ -648,14 +648,6 @@ static int __must_check __sta_info_destroy(struct sta_info *sta) | |||
648 | 648 | ||
649 | if (sta->key) { | 649 | if (sta->key) { |
650 | ieee80211_key_free(sta->key); | 650 | ieee80211_key_free(sta->key); |
651 | /* | ||
652 | * We have only unlinked the key, and actually destroying it | ||
653 | * may mean it is removed from hardware which requires that | ||
654 | * the key->sta pointer is still valid, so flush the key todo | ||
655 | * list here. | ||
656 | */ | ||
657 | ieee80211_key_todo(); | ||
658 | |||
659 | WARN_ON(sta->key); | 651 | WARN_ON(sta->key); |
660 | } | 652 | } |
661 | 653 | ||
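The sta_info.c hunk drops the explicit per-TID TX state initialisation (presumably relying on the station structure being zero-allocated, which is not shown in this excerpt) and keeps only the timer_to_tid identity mapping plus the new ampdu_mlme work/mutex setup. The identity mapping exists so a session timer, which receives a single data pointer, can recover both the station and the TID. A hedged sketch of the trick referenced by the comment (the real code is sta_rx_agg_session_timer_expired; names here are illustrative):

	/* Illustrative sketch: the timer data is &sta->timer_to_tid[tid].
	 * Because the array holds the identity mapping, *ptid is the TID,
	 * and stepping back *ptid entries yields &timer_to_tid[0], from
	 * which the sta_info can be recovered with container_of(). */
	static void example_session_timer_expired(unsigned long data)
	{
		u8 *ptid = (u8 *)data;
		u8 *timer_to_id = ptid - *ptid;		/* &timer_to_tid[0] */
		struct sta_info *sta = container_of(timer_to_id,
						    struct sta_info,
						    timer_to_tid[0]);
		u16 tid = *ptid;

		/* ... tear down or refresh the RX BA session for (sta, tid) ... */
	}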
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index df9d45544ca5..10d0fcb417ae 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
@@ -42,9 +42,6 @@ | |||
42 | * be in the queues | 42 | * be in the queues |
43 | * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping | 43 | * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping |
44 | * station in power-save mode, reply when the driver unblocks. | 44 | * station in power-save mode, reply when the driver unblocks. |
45 | * @WLAN_STA_DISASSOC: Disassociation in progress. | ||
46 | * This is used to reject TX BA session requests when disassociation | ||
47 | * is in progress. | ||
48 | */ | 45 | */ |
49 | enum ieee80211_sta_info_flags { | 46 | enum ieee80211_sta_info_flags { |
50 | WLAN_STA_AUTH = 1<<0, | 47 | WLAN_STA_AUTH = 1<<0, |
@@ -60,38 +57,44 @@ enum ieee80211_sta_info_flags { | |||
60 | WLAN_STA_BLOCK_BA = 1<<11, | 57 | WLAN_STA_BLOCK_BA = 1<<11, |
61 | WLAN_STA_PS_DRIVER = 1<<12, | 58 | WLAN_STA_PS_DRIVER = 1<<12, |
62 | WLAN_STA_PSPOLL = 1<<13, | 59 | WLAN_STA_PSPOLL = 1<<13, |
63 | WLAN_STA_DISASSOC = 1<<14, | ||
64 | }; | 60 | }; |
65 | 61 | ||
66 | #define STA_TID_NUM 16 | 62 | #define STA_TID_NUM 16 |
67 | #define ADDBA_RESP_INTERVAL HZ | 63 | #define ADDBA_RESP_INTERVAL HZ |
68 | #define HT_AGG_MAX_RETRIES (0x3) | 64 | #define HT_AGG_MAX_RETRIES 0x3 |
69 | 65 | ||
70 | #define HT_AGG_STATE_INITIATOR_SHIFT (4) | 66 | #define HT_AGG_STATE_DRV_READY 0 |
71 | 67 | #define HT_AGG_STATE_RESPONSE_RECEIVED 1 | |
72 | #define HT_ADDBA_REQUESTED_MSK BIT(0) | 68 | #define HT_AGG_STATE_OPERATIONAL 2 |
73 | #define HT_ADDBA_DRV_READY_MSK BIT(1) | 69 | #define HT_AGG_STATE_STOPPING 3 |
74 | #define HT_ADDBA_RECEIVED_MSK BIT(2) | 70 | #define HT_AGG_STATE_WANT_START 4 |
75 | #define HT_AGG_STATE_REQ_STOP_BA_MSK BIT(3) | 71 | #define HT_AGG_STATE_WANT_STOP 5 |
76 | #define HT_AGG_STATE_INITIATOR_MSK BIT(HT_AGG_STATE_INITIATOR_SHIFT) | ||
77 | #define HT_AGG_STATE_IDLE (0x0) | ||
78 | #define HT_AGG_STATE_OPERATIONAL (HT_ADDBA_REQUESTED_MSK | \ | ||
79 | HT_ADDBA_DRV_READY_MSK | \ | ||
80 | HT_ADDBA_RECEIVED_MSK) | ||
81 | 72 | ||
82 | /** | 73 | /** |
83 | * struct tid_ampdu_tx - TID aggregation information (Tx). | 74 | * struct tid_ampdu_tx - TID aggregation information (Tx). |
84 | * | 75 | * |
76 | * @rcu_head: rcu head for freeing structure | ||
85 | * @addba_resp_timer: timer for peer's response to addba request | 77 | * @addba_resp_timer: timer for peer's response to addba request |
86 | * @pending: pending frames queue -- use sta's spinlock to protect | 78 | * @pending: pending frames queue -- use sta's spinlock to protect |
87 | * @ssn: Starting Sequence Number expected to be aggregated. | ||
88 | * @dialog_token: dialog token for aggregation session | 79 | * @dialog_token: dialog token for aggregation session |
80 | * @state: session state (see above) | ||
81 | * @stop_initiator: initiator of a session stop | ||
82 | * | ||
83 | * This structure is protected by RCU and the per-station | ||
84 | * spinlock. Assignments to the array holding it must hold | ||
85 | * the spinlock, only the TX path can access it under RCU | ||
86 | * lock-free if, and only if, the state has the flag | ||
87 | * %HT_AGG_STATE_OPERATIONAL set. Otherwise, the TX path | ||
88 | * must also acquire the spinlock and re-check the state, | ||
89 | * see comments in the tx code touching it. | ||
89 | */ | 90 | */ |
90 | struct tid_ampdu_tx { | 91 | struct tid_ampdu_tx { |
92 | struct rcu_head rcu_head; | ||
91 | struct timer_list addba_resp_timer; | 93 | struct timer_list addba_resp_timer; |
92 | struct sk_buff_head pending; | 94 | struct sk_buff_head pending; |
93 | u16 ssn; | 95 | unsigned long state; |
94 | u8 dialog_token; | 96 | u8 dialog_token; |
97 | u8 stop_initiator; | ||
95 | }; | 98 | }; |
96 | 99 | ||
97 | /** | 100 | /** |
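The TX aggregation state moves from a set of HT_ADDBA_*_MSK masks, previously combined under the station spinlock, to plain bit numbers in tid_ampdu_tx->state that are suitable for atomic bitops on the unsigned long. A rough sketch of how such bits are typically combined (illustrative; the actual state transitions live in agg-tx.c, outside this excerpt):

	/* Illustrative: driver-ready and peer-response events are recorded
	 * with atomic bitops; the session becomes usable by the TX fast
	 * path only once the OPERATIONAL bit is set. */
	set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state);
	/* ... later, when the addBA response arrives ... */
	set_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state);

	if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state) &&
	    test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
		set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);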
@@ -106,8 +109,18 @@ struct tid_ampdu_tx { | |||
106 | * @buf_size: buffer size for incoming A-MPDUs | 109 | * @buf_size: buffer size for incoming A-MPDUs |
107 | * @timeout: reset timer value (in TUs). | 110 | * @timeout: reset timer value (in TUs). |
108 | * @dialog_token: dialog token for aggregation session | 111 | * @dialog_token: dialog token for aggregation session |
112 | * @rcu_head: RCU head used for freeing this struct | ||
113 | * | ||
114 | * This structure is protected by RCU and the per-station | ||
115 | * spinlock. Assignments to the array holding it must hold | ||
116 | * the spinlock, only the RX path can access it under RCU | ||
117 | * lock-free. The RX path, since it is single-threaded, | ||
118 | * can even modify the structure without locking since the | ||
119 | * only other modifications to it are done when the struct | ||
120 | * can not yet or no longer be found by the RX path. | ||
109 | */ | 121 | */ |
110 | struct tid_ampdu_rx { | 122 | struct tid_ampdu_rx { |
123 | struct rcu_head rcu_head; | ||
111 | struct sk_buff **reorder_buf; | 124 | struct sk_buff **reorder_buf; |
112 | unsigned long *reorder_time; | 125 | unsigned long *reorder_time; |
113 | struct timer_list session_timer; | 126 | struct timer_list session_timer; |
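The new kerneldoc for struct tid_ampdu_rx spells out its locking rules: the RX path reads it under RCU only, while assignments to the per-TID array take the per-station spinlock and the old structure is freed after a grace period via the embedded rcu_head. A hedged sketch of both sides, consistent with those rules (the free callback name is illustrative):

	/* Illustrative RX-side reader: lock-free under rcu_read_lock(). */
	rcu_read_lock();
	tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
	if (tid_agg_rx) {
		/* reorder/release frames; the single-threaded RX path may
		 * even modify the struct without further locking */
	}
	rcu_read_unlock();

	/* Illustrative teardown: unpublish under the station spinlock,
	 * then free only after all RCU readers have finished. */
	spin_lock_bh(&sta->lock);
	rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], NULL);
	spin_unlock_bh(&sta->lock);
	call_rcu(&tid_agg_rx->rcu_head, example_free_tid_rx);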
@@ -120,6 +133,32 @@ struct tid_ampdu_rx { | |||
120 | }; | 133 | }; |
121 | 134 | ||
122 | /** | 135 | /** |
136 | * struct sta_ampdu_mlme - STA aggregation information. | ||
137 | * | ||
138 | * @tid_rx: aggregation info for Rx per TID -- RCU protected | ||
139 | * @tid_tx: aggregation info for Tx per TID | ||
140 | * @addba_req_num: number of times addBA request has been sent. | ||
141 | * @dialog_token_allocator: dialog token enumerator for each new session; | ||
142 | * @work: work struct for starting/stopping aggregation | ||
143 | * @tid_rx_timer_expired: bitmap indicating on which TIDs the | ||
144 | * RX timer expired until the work for it runs | ||
145 | * @mtx: mutex to protect all TX data (except non-NULL assignments | ||
146 | * to tid_tx[idx], which are protected by the sta spinlock) | ||
147 | */ | ||
148 | struct sta_ampdu_mlme { | ||
149 | struct mutex mtx; | ||
150 | /* rx */ | ||
151 | struct tid_ampdu_rx *tid_rx[STA_TID_NUM]; | ||
152 | unsigned long tid_rx_timer_expired[BITS_TO_LONGS(STA_TID_NUM)]; | ||
153 | /* tx */ | ||
154 | struct work_struct work; | ||
155 | struct tid_ampdu_tx *tid_tx[STA_TID_NUM]; | ||
156 | u8 addba_req_num[STA_TID_NUM]; | ||
157 | u8 dialog_token_allocator; | ||
158 | }; | ||
159 | |||
160 | |||
161 | /** | ||
123 | * enum plink_state - state of a mesh peer link finite state machine | 162 | * enum plink_state - state of a mesh peer link finite state machine |
124 | * | 163 | * |
125 | * @PLINK_LISTEN: initial state, considered the implicit state of non existant | 164 | * @PLINK_LISTEN: initial state, considered the implicit state of non existant |
@@ -143,28 +182,6 @@ enum plink_state { | |||
143 | }; | 182 | }; |
144 | 183 | ||
145 | /** | 184 | /** |
146 | * struct sta_ampdu_mlme - STA aggregation information. | ||
147 | * | ||
148 | * @tid_active_rx: TID's state in Rx session state machine. | ||
149 | * @tid_rx: aggregation info for Rx per TID | ||
150 | * @tid_state_tx: TID's state in Tx session state machine. | ||
151 | * @tid_tx: aggregation info for Tx per TID | ||
152 | * @addba_req_num: number of times addBA request has been sent. | ||
153 | * @dialog_token_allocator: dialog token enumerator for each new session; | ||
154 | */ | ||
155 | struct sta_ampdu_mlme { | ||
156 | /* rx */ | ||
157 | bool tid_active_rx[STA_TID_NUM]; | ||
158 | struct tid_ampdu_rx *tid_rx[STA_TID_NUM]; | ||
159 | /* tx */ | ||
160 | u8 tid_state_tx[STA_TID_NUM]; | ||
161 | struct tid_ampdu_tx *tid_tx[STA_TID_NUM]; | ||
162 | u8 addba_req_num[STA_TID_NUM]; | ||
163 | u8 dialog_token_allocator; | ||
164 | }; | ||
165 | |||
166 | |||
167 | /** | ||
168 | * struct sta_info - STA information | 185 | * struct sta_info - STA information |
169 | * | 186 | * |
170 | * This structure collects information about a station that | 187 | * This structure collects information about a station that |
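On the TX side, the relocated struct sta_ampdu_mlme documents the split: non-NULL assignments to tid_tx[] take the station spinlock, everything else in the TX aggregation machinery is serialised by the new ampdu_mlme.mtx mutex, and the TX hotpath trusts an RCU-read pointer without the lock only while HT_AGG_STATE_OPERATIONAL is set. A plausible install sequence under those rules, sketched with illustrative names (the real one is in agg-tx.c, not part of this excerpt):

	mutex_lock(&sta->ampdu_mlme.mtx);

	tid_tx = kzalloc(sizeof(*tid_tx), GFP_KERNEL);
	if (tid_tx) {
		skb_queue_head_init(&tid_tx->pending);
		__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

		/* publish: non-NULL assignment under the sta spinlock */
		spin_lock_bh(&sta->lock);
		rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
		spin_unlock_bh(&sta->lock);

		/* kick the BA session work to negotiate with driver/peer */
		ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
	}

	mutex_unlock(&sta->ampdu_mlme.mtx);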
diff --git a/net/mac80211/status.c b/net/mac80211/status.c index 94613af009f3..34da67995d94 100644 --- a/net/mac80211/status.c +++ b/net/mac80211/status.c | |||
@@ -47,7 +47,7 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local, | |||
47 | /* | 47 | /* |
48 | * This skb 'survived' a round-trip through the driver, and | 48 | * This skb 'survived' a round-trip through the driver, and |
49 | * hopefully the driver didn't mangle it too badly. However, | 49 | * hopefully the driver didn't mangle it too badly. However, |
50 | * we can definitely not rely on the the control information | 50 | * we can definitely not rely on the control information |
51 | * being correct. Clear it so we don't get junk there, and | 51 | * being correct. Clear it so we don't get junk there, and |
52 | * indicate that it needs new processing, but must not be | 52 | * indicate that it needs new processing, but must not be |
53 | * modified/encrypted again. | 53 | * modified/encrypted again. |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 680bcb7093db..698d4718b1a4 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -1092,6 +1092,59 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx, | |||
1092 | return true; | 1092 | return true; |
1093 | } | 1093 | } |
1094 | 1094 | ||
1095 | static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx, | ||
1096 | struct sk_buff *skb, | ||
1097 | struct ieee80211_tx_info *info, | ||
1098 | struct tid_ampdu_tx *tid_tx, | ||
1099 | int tid) | ||
1100 | { | ||
1101 | bool queued = false; | ||
1102 | |||
1103 | if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) { | ||
1104 | info->flags |= IEEE80211_TX_CTL_AMPDU; | ||
1105 | } else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) { | ||
1106 | /* | ||
1107 | * nothing -- this aggregation session is being started | ||
1108 | * but that might still fail with the driver | ||
1109 | */ | ||
1110 | } else { | ||
1111 | spin_lock(&tx->sta->lock); | ||
1112 | /* | ||
1113 | * Need to re-check now, because we may get here | ||
1114 | * | ||
1115 | * 1) in the window during which the setup is actually | ||
1116 | * already done, but not marked yet because not all | ||
1117 | * packets are spliced over to the driver pending | ||
1118 | * queue yet -- if this happened we acquire the lock | ||
1119 | * either before or after the splice happens, but | ||
1120 | * need to recheck which of these cases happened. | ||
1121 | * | ||
1122 | * 2) during session teardown, if the OPERATIONAL bit | ||
1123 | * was cleared due to the teardown but the pointer | ||
1124 | * hasn't been assigned NULL yet (or we loaded it | ||
1125 | * before it was assigned) -- in this case it may | ||
1126 | * now be NULL which means we should just let the | ||
1127 | * packet pass through because splicing the frames | ||
1128 | * back is already done. | ||
1129 | */ | ||
1130 | tid_tx = tx->sta->ampdu_mlme.tid_tx[tid]; | ||
1131 | |||
1132 | if (!tid_tx) { | ||
1133 | /* do nothing, let packet pass through */ | ||
1134 | } else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) { | ||
1135 | info->flags |= IEEE80211_TX_CTL_AMPDU; | ||
1136 | } else { | ||
1137 | queued = true; | ||
1138 | info->control.vif = &tx->sdata->vif; | ||
1139 | info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; | ||
1140 | __skb_queue_tail(&tid_tx->pending, skb); | ||
1141 | } | ||
1142 | spin_unlock(&tx->sta->lock); | ||
1143 | } | ||
1144 | |||
1145 | return queued; | ||
1146 | } | ||
1147 | |||
1095 | /* | 1148 | /* |
1096 | * initialises @tx | 1149 | * initialises @tx |
1097 | */ | 1150 | */ |
@@ -1104,8 +1157,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, | |||
1104 | struct ieee80211_hdr *hdr; | 1157 | struct ieee80211_hdr *hdr; |
1105 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 1158 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1106 | int hdrlen, tid; | 1159 | int hdrlen, tid; |
1107 | u8 *qc, *state; | 1160 | u8 *qc; |
1108 | bool queued = false; | ||
1109 | 1161 | ||
1110 | memset(tx, 0, sizeof(*tx)); | 1162 | memset(tx, 0, sizeof(*tx)); |
1111 | tx->skb = skb; | 1163 | tx->skb = skb; |
@@ -1157,35 +1209,16 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, | |||
1157 | qc = ieee80211_get_qos_ctl(hdr); | 1209 | qc = ieee80211_get_qos_ctl(hdr); |
1158 | tid = *qc & IEEE80211_QOS_CTL_TID_MASK; | 1210 | tid = *qc & IEEE80211_QOS_CTL_TID_MASK; |
1159 | 1211 | ||
1160 | spin_lock(&tx->sta->lock); | 1212 | tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]); |
1161 | /* | 1213 | if (tid_tx) { |
1162 | * XXX: This spinlock could be fairly expensive, but see the | 1214 | bool queued; |
1163 | * comment in agg-tx.c:ieee80211_agg_tx_operational(). | ||
1164 | * One way to solve this would be to do something RCU-like | ||
1165 | * for managing the tid_tx struct and using atomic bitops | ||
1166 | * for the actual state -- by introducing an actual | ||
1167 | * 'operational' bit that would be possible. It would | ||
1168 | * require changing ieee80211_agg_tx_operational() to | ||
1169 | * set that bit, and changing the way tid_tx is managed | ||
1170 | * everywhere, including races between that bit and | ||
1171 | * tid_tx going away (tid_tx being added can be easily | ||
1172 | * committed to memory before the 'operational' bit). | ||
1173 | */ | ||
1174 | tid_tx = tx->sta->ampdu_mlme.tid_tx[tid]; | ||
1175 | state = &tx->sta->ampdu_mlme.tid_state_tx[tid]; | ||
1176 | if (*state == HT_AGG_STATE_OPERATIONAL) { | ||
1177 | info->flags |= IEEE80211_TX_CTL_AMPDU; | ||
1178 | } else if (*state != HT_AGG_STATE_IDLE) { | ||
1179 | /* in progress */ | ||
1180 | queued = true; | ||
1181 | info->control.vif = &sdata->vif; | ||
1182 | info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; | ||
1183 | __skb_queue_tail(&tid_tx->pending, skb); | ||
1184 | } | ||
1185 | spin_unlock(&tx->sta->lock); | ||
1186 | 1215 | ||
1187 | if (unlikely(queued)) | 1216 | queued = ieee80211_tx_prep_agg(tx, skb, info, |
1188 | return TX_QUEUED; | 1217 | tid_tx, tid); |
1218 | |||
1219 | if (unlikely(queued)) | ||
1220 | return TX_QUEUED; | ||
1221 | } | ||
1189 | } | 1222 | } |
1190 | 1223 | ||
1191 | if (is_multicast_ether_addr(hdr->addr1)) { | 1224 | if (is_multicast_ether_addr(hdr->addr1)) { |
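The re-check logic in ieee80211_tx_prep_agg only makes sense against the teardown ordering it races with: the stopping side clears the OPERATIONAL bit first, splices any frames pending on the session queue back, and only then clears the array entry and frees the structure after a grace period. A hedged sketch of that ordering (illustrative; the real teardown is in agg-tx.c and is not shown here):

	/* 1) stop the fast path: TX now falls into the locked slow path */
	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
	set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);

	spin_lock_bh(&sta->lock);
	/* 2) splice &tid_tx->pending back so no frame is stranded */

	/* 3) unpublish: a racing TX path either sees the cleared
	 *    OPERATIONAL bit or re-reads a NULL pointer under the lock */
	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
	spin_unlock_bh(&sta->lock);

	/* 4) free after all RCU readers are done (illustrative callback) */
	call_rcu(&tid_tx->rcu_head, example_free_tid_tx);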
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 5b79d552780a..a54cf146ed50 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -1138,18 +1138,6 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1138 | } | 1138 | } |
1139 | mutex_unlock(&local->sta_mtx); | 1139 | mutex_unlock(&local->sta_mtx); |
1140 | 1140 | ||
1141 | /* Clear Suspend state so that ADDBA requests can be processed */ | ||
1142 | |||
1143 | rcu_read_lock(); | ||
1144 | |||
1145 | if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { | ||
1146 | list_for_each_entry_rcu(sta, &local->sta_list, list) { | ||
1147 | clear_sta_flags(sta, WLAN_STA_BLOCK_BA); | ||
1148 | } | ||
1149 | } | ||
1150 | |||
1151 | rcu_read_unlock(); | ||
1152 | |||
1153 | /* setup RTS threshold */ | 1141 | /* setup RTS threshold */ |
1154 | drv_set_rts_threshold(local, hw->wiphy->rts_threshold); | 1142 | drv_set_rts_threshold(local, hw->wiphy->rts_threshold); |
1155 | 1143 | ||
@@ -1202,13 +1190,26 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1202 | } | 1190 | } |
1203 | } | 1191 | } |
1204 | 1192 | ||
1205 | rcu_read_lock(); | 1193 | /* |
1194 | * Clear the WLAN_STA_BLOCK_BA flag so new aggregation | ||
1195 | * sessions can be established after a resume. | ||
1196 | * | ||
1197 | * Also tear down aggregation sessions since reconfiguring | ||
1198 | * them in a hardware restart scenario is not easily done | ||
1199 | * right now, and the hardware will have lost information | ||
1200 | * about the sessions, but we and the AP still think they | ||
1201 | * are active. This is really a workaround though. | ||
1202 | */ | ||
1206 | if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { | 1203 | if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { |
1207 | list_for_each_entry_rcu(sta, &local->sta_list, list) { | 1204 | mutex_lock(&local->sta_mtx); |
1205 | |||
1206 | list_for_each_entry(sta, &local->sta_list, list) { | ||
1208 | ieee80211_sta_tear_down_BA_sessions(sta); | 1207 | ieee80211_sta_tear_down_BA_sessions(sta); |
1208 | clear_sta_flags(sta, WLAN_STA_BLOCK_BA); | ||
1209 | } | 1209 | } |
1210 | |||
1211 | mutex_unlock(&local->sta_mtx); | ||
1210 | } | 1212 | } |
1211 | rcu_read_unlock(); | ||
1212 | 1213 | ||
1213 | /* add back keys */ | 1214 | /* add back keys */ |
1214 | list_for_each_entry(sdata, &local->interfaces, list) | 1215 | list_for_each_entry(sdata, &local->interfaces, list) |
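The reconfig path now walks the station list under sta_mtx instead of rcu_read_lock(), and the BLOCK_BA clearing is folded into the same loop as the BA teardown. The likely reason is that, with the new locking, tearing down BA sessions takes ampdu_mlme.mtx and may sleep, which is not permitted inside an RCU read-side critical section. The pattern being avoided, sketched for illustration:

	/* Not allowed: rcu_read_lock() opens an RCU read-side critical
	 * section, so a teardown that acquires a mutex (and may sleep)
	 * cannot be called from inside it. */
	rcu_read_lock();
	list_for_each_entry_rcu(sta, &local->sta_list, list)
		ieee80211_sta_tear_down_BA_sessions(sta);	/* may sleep */
	rcu_read_unlock();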
diff --git a/net/mac80211/work.c b/net/mac80211/work.c index b025dc7bb0fd..c22a71c5cb45 100644 --- a/net/mac80211/work.c +++ b/net/mac80211/work.c | |||
@@ -840,7 +840,7 @@ static void ieee80211_work_work(struct work_struct *work) | |||
840 | 840 | ||
841 | /* | 841 | /* |
842 | * ieee80211_queue_work() should have picked up most cases, | 842 | * ieee80211_queue_work() should have picked up most cases, |
843 | * here we'll pick the the rest. | 843 | * here we'll pick the rest. |
844 | */ | 844 | */ |
845 | if (WARN(local->suspended, "work scheduled while going to suspend\n")) | 845 | if (WARN(local->suspended, "work scheduled while going to suspend\n")) |
846 | return; | 846 | return; |
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 0adbcc941ac9..a14e67707476 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c | |||
@@ -436,6 +436,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) | |||
436 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | 436 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); |
437 | u8 pn[CCMP_PN_LEN]; | 437 | u8 pn[CCMP_PN_LEN]; |
438 | int data_len; | 438 | int data_len; |
439 | int queue; | ||
439 | 440 | ||
440 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | 441 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
441 | 442 | ||
@@ -453,7 +454,10 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) | |||
453 | 454 | ||
454 | ccmp_hdr2pn(pn, skb->data + hdrlen); | 455 | ccmp_hdr2pn(pn, skb->data + hdrlen); |
455 | 456 | ||
456 | if (memcmp(pn, key->u.ccmp.rx_pn[rx->queue], CCMP_PN_LEN) <= 0) { | 457 | queue = ieee80211_is_mgmt(hdr->frame_control) ? |
458 | NUM_RX_DATA_QUEUES : rx->queue; | ||
459 | |||
460 | if (memcmp(pn, key->u.ccmp.rx_pn[queue], CCMP_PN_LEN) <= 0) { | ||
457 | key->u.ccmp.replays++; | 461 | key->u.ccmp.replays++; |
458 | return RX_DROP_UNUSABLE; | 462 | return RX_DROP_UNUSABLE; |
459 | } | 463 | } |
@@ -470,7 +474,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) | |||
470 | return RX_DROP_UNUSABLE; | 474 | return RX_DROP_UNUSABLE; |
471 | } | 475 | } |
472 | 476 | ||
473 | memcpy(key->u.ccmp.rx_pn[rx->queue], pn, CCMP_PN_LEN); | 477 | memcpy(key->u.ccmp.rx_pn[queue], pn, CCMP_PN_LEN); |
474 | 478 | ||
475 | /* Remove CCMP header and MIC */ | 479 | /* Remove CCMP header and MIC */ |
476 | skb_trim(skb, skb->len - CCMP_MIC_LEN); | 480 | skb_trim(skb, skb->len - CCMP_MIC_LEN); |
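The CCMP change gives received management frames their own replay-counter slot (indexed at NUM_RX_DATA_QUEUES) so robust management frames do not share a PN space with the data queues. The replay check itself relies on the 48-bit PN being stored most-significant byte first, so a plain memcmp() orders the counters numerically. A small self-contained demo of that comparison, using made-up PN values:

	#include <stdio.h>
	#include <string.h>

	#define CCMP_PN_LEN 6

	/* Returns 1 if the incoming PN is strictly greater than the last
	 * accepted PN (i.e. not a replay); both are big-endian arrays. */
	static int pn_is_fresh(const unsigned char *pn,
			       const unsigned char *last_pn)
	{
		return memcmp(pn, last_pn, CCMP_PN_LEN) > 0;
	}

	int main(void)
	{
		unsigned char last[CCMP_PN_LEN]     = { 0, 0, 0, 0, 0x01, 0xff };
		unsigned char replayed[CCMP_PN_LEN] = { 0, 0, 0, 0, 0x01, 0xfe };
		unsigned char fresh[CCMP_PN_LEN]    = { 0, 0, 0, 0, 0x02, 0x00 };

		printf("replayed: %s\n", pn_is_fresh(replayed, last) ? "accept" : "drop");
		printf("fresh:    %s\n", pn_is_fresh(fresh, last) ? "accept" : "drop");
		return 0;
	}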
diff --git a/net/wireless/chan.c b/net/wireless/chan.c index b01a6f6397d7..d0c92dddb26b 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c | |||
@@ -35,8 +35,9 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev, | |||
35 | if (!ht_cap->ht_supported) | 35 | if (!ht_cap->ht_supported) |
36 | return NULL; | 36 | return NULL; |
37 | 37 | ||
38 | if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) || | 38 | if (channel_type != NL80211_CHAN_HT20 && |
39 | ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT) | 39 | (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) || |
40 | ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT)) | ||
40 | return NULL; | 41 | return NULL; |
41 | } | 42 | } |
42 | 43 | ||
diff --git a/net/wireless/core.h b/net/wireless/core.h index ae930acf75e9..63d57ae399c3 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
@@ -339,6 +339,7 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev, | |||
339 | struct net_device *dev, | 339 | struct net_device *dev, |
340 | struct ieee80211_channel *chan, | 340 | struct ieee80211_channel *chan, |
341 | enum nl80211_channel_type channel_type, | 341 | enum nl80211_channel_type channel_type, |
342 | bool channel_type_valid, | ||
342 | const u8 *buf, size_t len, u64 *cookie); | 343 | const u8 *buf, size_t len, u64 *cookie); |
343 | 344 | ||
344 | /* SME */ | 345 | /* SME */ |
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index 48ead6f0426d..9f95354f859f 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c | |||
@@ -827,6 +827,7 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev, | |||
827 | struct net_device *dev, | 827 | struct net_device *dev, |
828 | struct ieee80211_channel *chan, | 828 | struct ieee80211_channel *chan, |
829 | enum nl80211_channel_type channel_type, | 829 | enum nl80211_channel_type channel_type, |
830 | bool channel_type_valid, | ||
830 | const u8 *buf, size_t len, u64 *cookie) | 831 | const u8 *buf, size_t len, u64 *cookie) |
831 | { | 832 | { |
832 | struct wireless_dev *wdev = dev->ieee80211_ptr; | 833 | struct wireless_dev *wdev = dev->ieee80211_ptr; |
@@ -845,8 +846,9 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev, | |||
845 | if (!wdev->current_bss || | 846 | if (!wdev->current_bss || |
846 | memcmp(wdev->current_bss->pub.bssid, mgmt->bssid, | 847 | memcmp(wdev->current_bss->pub.bssid, mgmt->bssid, |
847 | ETH_ALEN) != 0 || | 848 | ETH_ALEN) != 0 || |
848 | memcmp(wdev->current_bss->pub.bssid, mgmt->da, | 849 | (wdev->iftype == NL80211_IFTYPE_STATION && |
849 | ETH_ALEN) != 0) | 850 | memcmp(wdev->current_bss->pub.bssid, mgmt->da, |
851 | ETH_ALEN) != 0)) | ||
850 | return -ENOTCONN; | 852 | return -ENOTCONN; |
851 | } | 853 | } |
852 | 854 | ||
@@ -855,7 +857,7 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev, | |||
855 | 857 | ||
856 | /* Transmit the Action frame as requested by user space */ | 858 | /* Transmit the Action frame as requested by user space */ |
857 | return rdev->ops->action(&rdev->wiphy, dev, chan, channel_type, | 859 | return rdev->ops->action(&rdev->wiphy, dev, chan, channel_type, |
858 | buf, len, cookie); | 860 | channel_type_valid, buf, len, cookie); |
859 | } | 861 | } |
860 | 862 | ||
861 | bool cfg80211_rx_action(struct net_device *dev, int freq, const u8 *buf, | 863 | bool cfg80211_rx_action(struct net_device *dev, int freq, const u8 *buf, |
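cfg80211_mlme_action now forwards a channel_type_valid flag to the driver's action callback, letting a driver distinguish "userspace explicitly requested this HT channel type" from "the default NL80211_CHAN_NO_HT was merely filled in". A hedged sketch of how a driver-side handler might use it; the prototype is inferred from the call site above and the function name and body are illustrative:

	/* Illustrative driver callback: honour the HT channel type only
	 * when the caller marked it as explicitly provided. */
	static int example_action(struct wiphy *wiphy, struct net_device *dev,
				  struct ieee80211_channel *chan,
				  enum nl80211_channel_type channel_type,
				  bool channel_type_valid,
				  const u8 *buf, size_t len, u64 *cookie)
	{
		if (channel_type_valid) {
			/* tune/validate to (chan, channel_type) before TX */
		} else {
			/* only the frequency matters; keep current HT mode */
		}

		/* ... queue the Action frame for TX and set *cookie ... */
		return 0;
	}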
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index db71150b8040..41529aca794c 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -3955,6 +3955,55 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) | |||
3955 | } | 3955 | } |
3956 | } | 3956 | } |
3957 | 3957 | ||
3958 | if (info->attrs[NL80211_ATTR_BSS_BASIC_RATES]) { | ||
3959 | u8 *rates = | ||
3960 | nla_data(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); | ||
3961 | int n_rates = | ||
3962 | nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); | ||
3963 | struct ieee80211_supported_band *sband = | ||
3964 | wiphy->bands[ibss.channel->band]; | ||
3965 | int i, j; | ||
3966 | |||
3967 | if (n_rates == 0) { | ||
3968 | err = -EINVAL; | ||
3969 | goto out; | ||
3970 | } | ||
3971 | |||
3972 | for (i = 0; i < n_rates; i++) { | ||
3973 | int rate = (rates[i] & 0x7f) * 5; | ||
3974 | bool found = false; | ||
3975 | |||
3976 | for (j = 0; j < sband->n_bitrates; j++) { | ||
3977 | if (sband->bitrates[j].bitrate == rate) { | ||
3978 | found = true; | ||
3979 | ibss.basic_rates |= BIT(j); | ||
3980 | break; | ||
3981 | } | ||
3982 | } | ||
3983 | if (!found) { | ||
3984 | err = -EINVAL; | ||
3985 | goto out; | ||
3986 | } | ||
3987 | } | ||
3988 | } else { | ||
3989 | /* | ||
3990 | * If no rates were explicitly configured, | ||
3991 | * use the mandatory rate set for 11b or | ||
3992 | * 11a for maximum compatibility. | ||
3993 | */ | ||
3994 | struct ieee80211_supported_band *sband = | ||
3995 | wiphy->bands[ibss.channel->band]; | ||
3996 | int j; | ||
3997 | u32 flag = ibss.channel->band == IEEE80211_BAND_5GHZ ? | ||
3998 | IEEE80211_RATE_MANDATORY_A : | ||
3999 | IEEE80211_RATE_MANDATORY_B; | ||
4000 | |||
4001 | for (j = 0; j < sband->n_bitrates; j++) { | ||
4002 | if (sband->bitrates[j].flags & flag) | ||
4003 | ibss.basic_rates |= BIT(j); | ||
4004 | } | ||
4005 | } | ||
4006 | |||
3958 | err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys); | 4007 | err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys); |
3959 | 4008 | ||
3960 | out: | 4009 | out: |
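The NL80211_ATTR_BSS_BASIC_RATES attribute carries raw 802.11 rate octets, so each value is masked with 0x7f (dropping the "basic rate" flag bit) and multiplied by 5 to convert 500 kbps units into the 100 kbps units used by the bitrate table; any rate that fails to match rejects the request. A self-contained demo of that conversion and matching (the table values are just examples):

	#include <stdio.h>

	/* Example bitrate table in units of 100 kbps (11b rates). */
	static const int bitrates[] = { 10, 20, 55, 110 };

	int main(void)
	{
		/* Rate octets as carried in the attribute: bit 0x80 marks a
		 * basic rate; the low 7 bits are the rate in 500 kbps units. */
		const unsigned char rates[] = { 0x82, 0x84, 0x0b, 0x16 };
		unsigned int basic_rates = 0;
		unsigned int i, j;

		for (i = 0; i < sizeof(rates); i++) {
			int rate = (rates[i] & 0x7f) * 5;	/* 100 kbps units */
			int found = 0;

			for (j = 0; j < sizeof(bitrates) / sizeof(bitrates[0]); j++) {
				if (bitrates[j] == rate) {
					basic_rates |= 1u << j;
					found = 1;
					break;
				}
			}
			if (!found)
				return 1;	/* would map to -EINVAL */
		}

		printf("basic_rates bitmap: 0x%x\n", basic_rates);
		return 0;
	}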
@@ -4653,7 +4702,8 @@ static int nl80211_register_action(struct sk_buff *skb, struct genl_info *info) | |||
4653 | if (err) | 4702 | if (err) |
4654 | goto unlock_rtnl; | 4703 | goto unlock_rtnl; |
4655 | 4704 | ||
4656 | if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { | 4705 | if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && |
4706 | dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) { | ||
4657 | err = -EOPNOTSUPP; | 4707 | err = -EOPNOTSUPP; |
4658 | goto out; | 4708 | goto out; |
4659 | } | 4709 | } |
@@ -4681,6 +4731,7 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info) | |||
4681 | struct net_device *dev; | 4731 | struct net_device *dev; |
4682 | struct ieee80211_channel *chan; | 4732 | struct ieee80211_channel *chan; |
4683 | enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; | 4733 | enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; |
4734 | bool channel_type_valid = false; | ||
4684 | u32 freq; | 4735 | u32 freq; |
4685 | int err; | 4736 | int err; |
4686 | void *hdr; | 4737 | void *hdr; |
@@ -4702,7 +4753,8 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info) | |||
4702 | goto out; | 4753 | goto out; |
4703 | } | 4754 | } |
4704 | 4755 | ||
4705 | if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { | 4756 | if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && |
4757 | dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) { | ||
4706 | err = -EOPNOTSUPP; | 4758 | err = -EOPNOTSUPP; |
4707 | goto out; | 4759 | goto out; |
4708 | } | 4760 | } |
@@ -4722,6 +4774,7 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info) | |||
4722 | err = -EINVAL; | 4774 | err = -EINVAL; |
4723 | goto out; | 4775 | goto out; |
4724 | } | 4776 | } |
4777 | channel_type_valid = true; | ||
4725 | } | 4778 | } |
4726 | 4779 | ||
4727 | freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); | 4780 | freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); |
@@ -4745,6 +4798,7 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info) | |||
4745 | goto free_msg; | 4798 | goto free_msg; |
4746 | } | 4799 | } |
4747 | err = cfg80211_mlme_action(rdev, dev, chan, channel_type, | 4800 | err = cfg80211_mlme_action(rdev, dev, chan, channel_type, |
4801 | channel_type_valid, | ||
4748 | nla_data(info->attrs[NL80211_ATTR_FRAME]), | 4802 | nla_data(info->attrs[NL80211_ATTR_FRAME]), |
4749 | nla_len(info->attrs[NL80211_ATTR_FRAME]), | 4803 | nla_len(info->attrs[NL80211_ATTR_FRAME]), |
4750 | &cookie); | 4804 | &cookie); |