author:    David S. Miller <davem@davemloft.net>  2010-06-17 17:19:06 -0400
committer: David S. Miller <davem@davemloft.net>  2010-06-17 17:19:06 -0400
commit:    bb9c03d8a6893517737b16fdbeb54be3c73b3023 (patch)
tree:      35fa0d1defaaf94641963a49126d7bb475ffa4c6 /net
parent:    4de57826810fd2cfeb2ab5c7d003ff9116b8f7ee (diff)
parent:    abf52f86aa0a49a7377350cafa8f218c4cd227e7 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next-2.6
Diffstat (limited to 'net')
30 files changed, 1242 insertions, 1013 deletions
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 6bb9a9a94960..965b272499fd 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -6,39 +6,70 @@ | |||
6 | * Copyright 2005-2006, Devicescape Software, Inc. | 6 | * Copyright 2005-2006, Devicescape Software, Inc. |
7 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> | 7 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> |
8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> | 8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> |
9 | * Copyright 2007-2008, Intel Corporation | 9 | * Copyright 2007-2010, Intel Corporation |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
12 | * it under the terms of the GNU General Public License version 2 as | 12 | * it under the terms of the GNU General Public License version 2 as |
13 | * published by the Free Software Foundation. | 13 | * published by the Free Software Foundation. |
14 | */ | 14 | */ |
15 | 15 | ||
16 | /** | ||
17 | * DOC: RX A-MPDU aggregation | ||
18 | * | ||
19 | * Aggregation on the RX side requires only implementing the | ||
20 | * @ampdu_action callback that is invoked to start/stop any | ||
21 | * block-ack sessions for RX aggregation. | ||
22 | * | ||
23 | * When RX aggregation is started by the peer, the driver is | ||
24 | * notified via @ampdu_action function, with the | ||
25 | * %IEEE80211_AMPDU_RX_START action, and may reject the request | ||
26 | * in which case a negative response is sent to the peer, if it | ||
27 | * accepts it a positive response is sent. | ||
28 | * | ||
29 | * While the session is active, the device/driver are required | ||
30 | * to de-aggregate frames and pass them up one by one to mac80211, | ||
31 | * which will handle the reorder buffer. | ||
32 | * | ||
33 | * When the aggregation session is stopped again by the peer or | ||
34 | * ourselves, the driver's @ampdu_action function will be called | ||
35 | * with the action %IEEE80211_AMPDU_RX_STOP. In this case, the | ||
36 | * call must not fail. | ||
37 | */ | ||
38 | |||
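The DOC block added above summarizes the RX-side contract: the driver only has to service two @ampdu_action actions and let mac80211 handle the reorder buffer. Purely as an illustration, and not part of this patch, a driver's handler might look roughly like the sketch below; the my_hw_*() helpers are hypothetical, and the ampdu_action prototype shown is the one used in this kernel generation (it has changed in later releases).

    static int my_ampdu_action(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif,
                               enum ieee80211_ampdu_action action,
                               struct ieee80211_sta *sta, u16 tid, u16 *ssn)
    {
            switch (action) {
            case IEEE80211_AMPDU_RX_START:
                    /* A non-zero return makes mac80211 send a negative
                     * ADDBA response to the peer; returning 0 accepts
                     * the session.  my_hw_enable_rx_ba() is hypothetical. */
                    return my_hw_enable_rx_ba(hw, sta, tid, *ssn);
            case IEEE80211_AMPDU_RX_STOP:
                    /* Must not fail: tear down and report success. */
                    my_hw_disable_rx_ba(hw, sta, tid);
                    return 0;
            default:
                    /* TX-side actions would be handled here as well. */
                    return -EOPNOTSUPP;
            }
    }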
16 | #include <linux/ieee80211.h> | 39 | #include <linux/ieee80211.h> |
17 | #include <linux/slab.h> | 40 | #include <linux/slab.h> |
18 | #include <net/mac80211.h> | 41 | #include <net/mac80211.h> |
19 | #include "ieee80211_i.h" | 42 | #include "ieee80211_i.h" |
20 | #include "driver-ops.h" | 43 | #include "driver-ops.h" |
21 | 44 | ||
22 | static void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, | 45 | static void ieee80211_free_tid_rx(struct rcu_head *h) |
23 | u16 initiator, u16 reason, | ||
24 | bool from_timer) | ||
25 | { | 46 | { |
26 | struct ieee80211_local *local = sta->local; | 47 | struct tid_ampdu_rx *tid_rx = |
27 | struct tid_ampdu_rx *tid_rx; | 48 | container_of(h, struct tid_ampdu_rx, rcu_head); |
28 | int i; | 49 | int i; |
29 | 50 | ||
30 | spin_lock_bh(&sta->lock); | 51 | for (i = 0; i < tid_rx->buf_size; i++) |
52 | dev_kfree_skb(tid_rx->reorder_buf[i]); | ||
53 | kfree(tid_rx->reorder_buf); | ||
54 | kfree(tid_rx->reorder_time); | ||
55 | kfree(tid_rx); | ||
56 | } | ||
31 | 57 | ||
32 | /* check if TID is in operational state */ | 58 | void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, |
33 | if (!sta->ampdu_mlme.tid_active_rx[tid]) { | 59 | u16 initiator, u16 reason) |
34 | spin_unlock_bh(&sta->lock); | 60 | { |
35 | return; | 61 | struct ieee80211_local *local = sta->local; |
36 | } | 62 | struct tid_ampdu_rx *tid_rx; |
37 | 63 | ||
38 | sta->ampdu_mlme.tid_active_rx[tid] = false; | 64 | lockdep_assert_held(&sta->ampdu_mlme.mtx); |
39 | 65 | ||
40 | tid_rx = sta->ampdu_mlme.tid_rx[tid]; | 66 | tid_rx = sta->ampdu_mlme.tid_rx[tid]; |
41 | 67 | ||
68 | if (!tid_rx) | ||
69 | return; | ||
70 | |||
71 | rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], NULL); | ||
72 | |||
42 | #ifdef CONFIG_MAC80211_HT_DEBUG | 73 | #ifdef CONFIG_MAC80211_HT_DEBUG |
43 | printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n", | 74 | printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n", |
44 | sta->sta.addr, tid); | 75 | sta->sta.addr, tid); |
@@ -54,32 +85,17 @@ static void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, | |||
54 | ieee80211_send_delba(sta->sdata, sta->sta.addr, | 85 | ieee80211_send_delba(sta->sdata, sta->sta.addr, |
55 | tid, 0, reason); | 86 | tid, 0, reason); |
56 | 87 | ||
57 | /* free the reordering buffer */ | 88 | del_timer_sync(&tid_rx->session_timer); |
58 | for (i = 0; i < tid_rx->buf_size; i++) { | ||
59 | if (tid_rx->reorder_buf[i]) { | ||
60 | /* release the reordered frames */ | ||
61 | dev_kfree_skb(tid_rx->reorder_buf[i]); | ||
62 | tid_rx->stored_mpdu_num--; | ||
63 | tid_rx->reorder_buf[i] = NULL; | ||
64 | } | ||
65 | } | ||
66 | |||
67 | /* free resources */ | ||
68 | kfree(tid_rx->reorder_buf); | ||
69 | kfree(tid_rx->reorder_time); | ||
70 | sta->ampdu_mlme.tid_rx[tid] = NULL; | ||
71 | |||
72 | spin_unlock_bh(&sta->lock); | ||
73 | 89 | ||
74 | if (!from_timer) | 90 | call_rcu(&tid_rx->rcu_head, ieee80211_free_tid_rx); |
75 | del_timer_sync(&tid_rx->session_timer); | ||
76 | kfree(tid_rx); | ||
77 | } | 91 | } |
78 | 92 | ||
79 | void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, | 93 | void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, |
80 | u16 initiator, u16 reason) | 94 | u16 initiator, u16 reason) |
81 | { | 95 | { |
82 | ___ieee80211_stop_rx_ba_session(sta, tid, initiator, reason, false); | 96 | mutex_lock(&sta->ampdu_mlme.mtx); |
97 | ___ieee80211_stop_rx_ba_session(sta, tid, initiator, reason); | ||
98 | mutex_unlock(&sta->ampdu_mlme.mtx); | ||
83 | } | 99 | } |
84 | 100 | ||
85 | /* | 101 | /* |
@@ -100,8 +116,8 @@ static void sta_rx_agg_session_timer_expired(unsigned long data) | |||
100 | #ifdef CONFIG_MAC80211_HT_DEBUG | 116 | #ifdef CONFIG_MAC80211_HT_DEBUG |
101 | printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); | 117 | printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); |
102 | #endif | 118 | #endif |
103 | ___ieee80211_stop_rx_ba_session(sta, *ptid, WLAN_BACK_RECIPIENT, | 119 | set_bit(*ptid, sta->ampdu_mlme.tid_rx_timer_expired); |
104 | WLAN_REASON_QSTA_TIMEOUT, true); | 120 | ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work); |
105 | } | 121 | } |
106 | 122 | ||
107 | static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid, | 123 | static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid, |
@@ -212,9 +228,9 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, | |||
212 | 228 | ||
213 | 229 | ||
214 | /* examine state machine */ | 230 | /* examine state machine */ |
215 | spin_lock_bh(&sta->lock); | 231 | mutex_lock(&sta->ampdu_mlme.mtx); |
216 | 232 | ||
217 | if (sta->ampdu_mlme.tid_active_rx[tid]) { | 233 | if (sta->ampdu_mlme.tid_rx[tid]) { |
218 | #ifdef CONFIG_MAC80211_HT_DEBUG | 234 | #ifdef CONFIG_MAC80211_HT_DEBUG |
219 | if (net_ratelimit()) | 235 | if (net_ratelimit()) |
220 | printk(KERN_DEBUG "unexpected AddBA Req from " | 236 | printk(KERN_DEBUG "unexpected AddBA Req from " |
@@ -225,9 +241,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, | |||
225 | } | 241 | } |
226 | 242 | ||
227 | /* prepare A-MPDU MLME for Rx aggregation */ | 243 | /* prepare A-MPDU MLME for Rx aggregation */ |
228 | sta->ampdu_mlme.tid_rx[tid] = | 244 | tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC); |
229 | kmalloc(sizeof(struct tid_ampdu_rx), GFP_ATOMIC); | 245 | if (!tid_agg_rx) { |
230 | if (!sta->ampdu_mlme.tid_rx[tid]) { | ||
231 | #ifdef CONFIG_MAC80211_HT_DEBUG | 246 | #ifdef CONFIG_MAC80211_HT_DEBUG |
232 | if (net_ratelimit()) | 247 | if (net_ratelimit()) |
233 | printk(KERN_ERR "allocate rx mlme to tid %d failed\n", | 248 | printk(KERN_ERR "allocate rx mlme to tid %d failed\n", |
@@ -235,14 +250,11 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, | |||
235 | #endif | 250 | #endif |
236 | goto end; | 251 | goto end; |
237 | } | 252 | } |
238 | /* rx timer */ | ||
239 | sta->ampdu_mlme.tid_rx[tid]->session_timer.function = | ||
240 | sta_rx_agg_session_timer_expired; | ||
241 | sta->ampdu_mlme.tid_rx[tid]->session_timer.data = | ||
242 | (unsigned long)&sta->timer_to_tid[tid]; | ||
243 | init_timer(&sta->ampdu_mlme.tid_rx[tid]->session_timer); | ||
244 | 253 | ||
245 | tid_agg_rx = sta->ampdu_mlme.tid_rx[tid]; | 254 | /* rx timer */ |
255 | tid_agg_rx->session_timer.function = sta_rx_agg_session_timer_expired; | ||
256 | tid_agg_rx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid]; | ||
257 | init_timer(&tid_agg_rx->session_timer); | ||
246 | 258 | ||
247 | /* prepare reordering buffer */ | 259 | /* prepare reordering buffer */ |
248 | tid_agg_rx->reorder_buf = | 260 | tid_agg_rx->reorder_buf = |
@@ -257,8 +269,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, | |||
257 | #endif | 269 | #endif |
258 | kfree(tid_agg_rx->reorder_buf); | 270 | kfree(tid_agg_rx->reorder_buf); |
259 | kfree(tid_agg_rx->reorder_time); | 271 | kfree(tid_agg_rx->reorder_time); |
260 | kfree(sta->ampdu_mlme.tid_rx[tid]); | 272 | kfree(tid_agg_rx); |
261 | sta->ampdu_mlme.tid_rx[tid] = NULL; | ||
262 | goto end; | 273 | goto end; |
263 | } | 274 | } |
264 | 275 | ||
@@ -270,13 +281,12 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, | |||
270 | 281 | ||
271 | if (ret) { | 282 | if (ret) { |
272 | kfree(tid_agg_rx->reorder_buf); | 283 | kfree(tid_agg_rx->reorder_buf); |
284 | kfree(tid_agg_rx->reorder_time); | ||
273 | kfree(tid_agg_rx); | 285 | kfree(tid_agg_rx); |
274 | sta->ampdu_mlme.tid_rx[tid] = NULL; | ||
275 | goto end; | 286 | goto end; |
276 | } | 287 | } |
277 | 288 | ||
278 | /* change state and send addba resp */ | 289 | /* update data */ |
279 | sta->ampdu_mlme.tid_active_rx[tid] = true; | ||
280 | tid_agg_rx->dialog_token = dialog_token; | 290 | tid_agg_rx->dialog_token = dialog_token; |
281 | tid_agg_rx->ssn = start_seq_num; | 291 | tid_agg_rx->ssn = start_seq_num; |
282 | tid_agg_rx->head_seq_num = start_seq_num; | 292 | tid_agg_rx->head_seq_num = start_seq_num; |
@@ -284,8 +294,15 @@ void ieee80211_process_addba_request(struct ieee80211_local *local, | |||
284 | tid_agg_rx->timeout = timeout; | 294 | tid_agg_rx->timeout = timeout; |
285 | tid_agg_rx->stored_mpdu_num = 0; | 295 | tid_agg_rx->stored_mpdu_num = 0; |
286 | status = WLAN_STATUS_SUCCESS; | 296 | status = WLAN_STATUS_SUCCESS; |
297 | |||
298 | /* activate it for RX */ | ||
299 | rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx); | ||
300 | |||
301 | if (timeout) | ||
302 | mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout)); | ||
303 | |||
287 | end: | 304 | end: |
288 | spin_unlock_bh(&sta->lock); | 305 | mutex_unlock(&sta->ampdu_mlme.mtx); |
289 | 306 | ||
290 | end_no_lock: | 307 | end_no_lock: |
291 | ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid, | 308 | ieee80211_send_addba_resp(sta->sdata, sta->sta.addr, tid, |
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index d1b6664a2532..c893f236acea 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -6,7 +6,7 @@ | |||
6 | * Copyright 2005-2006, Devicescape Software, Inc. | 6 | * Copyright 2005-2006, Devicescape Software, Inc. |
7 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> | 7 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> |
8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> | 8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> |
9 | * Copyright 2007-2009, Intel Corporation | 9 | * Copyright 2007-2010, Intel Corporation |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
12 | * it under the terms of the GNU General Public License version 2 as | 12 | * it under the terms of the GNU General Public License version 2 as |
@@ -21,28 +21,39 @@ | |||
21 | #include "wme.h" | 21 | #include "wme.h" |
22 | 22 | ||
23 | /** | 23 | /** |
24 | * DOC: TX aggregation | 24 | * DOC: TX A-MPDU aggregation |
25 | * | 25 | * |
26 | * Aggregation on the TX side requires setting the hardware flag | 26 | * Aggregation on the TX side requires setting the hardware flag |
27 | * %IEEE80211_HW_AMPDU_AGGREGATION as well as, if present, the @ampdu_queues | 27 | * %IEEE80211_HW_AMPDU_AGGREGATION. The driver will then be handed |
28 | * hardware parameter to the number of hardware AMPDU queues. If there are no | 28 | * packets with a flag indicating A-MPDU aggregation. The driver |
29 | * hardware queues then the driver will (currently) have to do all frame | 29 | * or device is responsible for actually aggregating the frames, |
30 | * buffering. | 30 | * as well as deciding how many and which to aggregate. |
31 | * | 31 | * |
32 | * When TX aggregation is started by some subsystem (usually the rate control | 32 | * When TX aggregation is started by some subsystem (usually the rate |
33 | * algorithm would be appropriate) by calling the | 33 | * control algorithm would be appropriate) by calling the |
34 | * ieee80211_start_tx_ba_session() function, the driver will be notified via | 34 | * ieee80211_start_tx_ba_session() function, the driver will be |
35 | * its @ampdu_action function, with the %IEEE80211_AMPDU_TX_START action. | 35 | * notified via its @ampdu_action function, with the |
36 | * %IEEE80211_AMPDU_TX_START action. | ||
36 | * | 37 | * |
37 | * In response to that, the driver is later required to call the | 38 | * In response to that, the driver is later required to call the |
38 | * ieee80211_start_tx_ba_cb() (or ieee80211_start_tx_ba_cb_irqsafe()) | 39 | * ieee80211_start_tx_ba_cb_irqsafe() function, which will really |
39 | * function, which will start the aggregation session. | 40 | * start the aggregation session after the peer has also responded. |
41 | * If the peer responds negatively, the session will be stopped | ||
42 | * again right away. Note that it is possible for the aggregation | ||
43 | * session to be stopped before the driver has indicated that it | ||
44 | * is done setting it up, in which case it must not indicate the | ||
45 | * setup completion. | ||
40 | * | 46 | * |
41 | * Similarly, when the aggregation session is stopped by | 47 | * Also note that, since we also need to wait for a response from |
42 | * ieee80211_stop_tx_ba_session(), the driver's @ampdu_action function will | 48 | * the peer, the driver is notified of the completion of the |
43 | * be called with the action %IEEE80211_AMPDU_TX_STOP. In this case, the | 49 | * handshake by the %IEEE80211_AMPDU_TX_OPERATIONAL action to the |
44 | * call must not fail, and the driver must later call ieee80211_stop_tx_ba_cb() | 50 | * @ampdu_action callback. |
45 | * (or ieee80211_stop_tx_ba_cb_irqsafe()). | 51 | * |
52 | * Similarly, when the aggregation session is stopped by the peer | ||
53 | * or something calling ieee80211_stop_tx_ba_session(), the driver's | ||
54 | * @ampdu_action function will be called with the action | ||
55 | * %IEEE80211_AMPDU_TX_STOP. In this case, the call must not fail, | ||
56 | * and the driver must later call ieee80211_stop_tx_ba_cb_irqsafe(). | ||
46 | */ | 57 | */ |
47 | 58 | ||
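Analogously to the RX case, the TX-side handshake described above can be sketched from the driver's point of view. This is illustration only, not code from this patch: the my_hw_*() helpers are invented, and only the action values and the ieee80211_start_tx_ba_cb_irqsafe() / ieee80211_stop_tx_ba_cb_irqsafe() completion calls are the real mac80211 API of this era.

    static int my_ampdu_action(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif,
                               enum ieee80211_ampdu_action action,
                               struct ieee80211_sta *sta, u16 tid, u16 *ssn)
    {
            switch (action) {
            case IEEE80211_AMPDU_TX_START:
                    /* Set up hardware state (the driver may adjust *ssn),
                     * then signal completion so mac80211 can send the
                     * ADDBA request; completion may also be signalled
                     * later, asynchronously. */
                    my_hw_prepare_tx_agg(hw, sta, tid);
                    ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                    return 0;
            case IEEE80211_AMPDU_TX_OPERATIONAL:
                    /* Peer accepted: frames for this TID may now actually
                     * be aggregated. */
                    return my_hw_enable_tx_agg(hw, sta, tid);
            case IEEE80211_AMPDU_TX_STOP:
                    /* Must not fail; acknowledge the teardown. */
                    my_hw_disable_tx_agg(hw, sta, tid);
                    ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                    return 0;
            default:
                    return -EOPNOTSUPP;
            }
    }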
48 | static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, | 59 | static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata, |
@@ -125,25 +136,53 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1 | |||
125 | ieee80211_tx_skb(sdata, skb); | 136 | ieee80211_tx_skb(sdata, skb); |
126 | } | 137 | } |
127 | 138 | ||
139 | static void kfree_tid_tx(struct rcu_head *rcu_head) | ||
140 | { | ||
141 | struct tid_ampdu_tx *tid_tx = | ||
142 | container_of(rcu_head, struct tid_ampdu_tx, rcu_head); | ||
143 | |||
144 | kfree(tid_tx); | ||
145 | } | ||
146 | |||
128 | int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | 147 | int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, |
129 | enum ieee80211_back_parties initiator) | 148 | enum ieee80211_back_parties initiator) |
130 | { | 149 | { |
131 | struct ieee80211_local *local = sta->local; | 150 | struct ieee80211_local *local = sta->local; |
151 | struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid]; | ||
132 | int ret; | 152 | int ret; |
133 | u8 *state; | 153 | |
154 | lockdep_assert_held(&sta->ampdu_mlme.mtx); | ||
155 | |||
156 | if (!tid_tx) | ||
157 | return -ENOENT; | ||
158 | |||
159 | spin_lock_bh(&sta->lock); | ||
160 | |||
161 | if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) { | ||
162 | /* not even started yet! */ | ||
163 | rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL); | ||
164 | spin_unlock_bh(&sta->lock); | ||
165 | call_rcu(&tid_tx->rcu_head, kfree_tid_tx); | ||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | spin_unlock_bh(&sta->lock); | ||
134 | 170 | ||
135 | #ifdef CONFIG_MAC80211_HT_DEBUG | 171 | #ifdef CONFIG_MAC80211_HT_DEBUG |
136 | printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n", | 172 | printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n", |
137 | sta->sta.addr, tid); | 173 | sta->sta.addr, tid); |
138 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | 174 | #endif /* CONFIG_MAC80211_HT_DEBUG */ |
139 | 175 | ||
140 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | 176 | set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state); |
141 | 177 | ||
142 | if (*state == HT_AGG_STATE_OPERATIONAL) | 178 | /* |
143 | sta->ampdu_mlme.addba_req_num[tid] = 0; | 179 | * After this packets are no longer handed right through |
180 | * to the driver but are put onto tid_tx->pending instead, | ||
181 | * with locking to ensure proper access. | ||
182 | */ | ||
183 | clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state); | ||
144 | 184 | ||
145 | *state = HT_AGG_STATE_REQ_STOP_BA_MSK | | 185 | tid_tx->stop_initiator = initiator; |
146 | (initiator << HT_AGG_STATE_INITIATOR_SHIFT); | ||
147 | 186 | ||
148 | ret = drv_ampdu_action(local, sta->sdata, | 187 | ret = drv_ampdu_action(local, sta->sdata, |
149 | IEEE80211_AMPDU_TX_STOP, | 188 | IEEE80211_AMPDU_TX_STOP, |
@@ -174,16 +213,14 @@ static void sta_addba_resp_timer_expired(unsigned long data) | |||
174 | u16 tid = *(u8 *)data; | 213 | u16 tid = *(u8 *)data; |
175 | struct sta_info *sta = container_of((void *)data, | 214 | struct sta_info *sta = container_of((void *)data, |
176 | struct sta_info, timer_to_tid[tid]); | 215 | struct sta_info, timer_to_tid[tid]); |
177 | u8 *state; | 216 | struct tid_ampdu_tx *tid_tx; |
178 | |||
179 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
180 | 217 | ||
181 | /* check if the TID waits for addBA response */ | 218 | /* check if the TID waits for addBA response */ |
182 | spin_lock_bh(&sta->lock); | 219 | rcu_read_lock(); |
183 | if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK | | 220 | tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]); |
184 | HT_AGG_STATE_REQ_STOP_BA_MSK)) != | 221 | if (!tid_tx || |
185 | HT_ADDBA_REQUESTED_MSK) { | 222 | test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) { |
186 | spin_unlock_bh(&sta->lock); | 223 | rcu_read_unlock(); |
187 | #ifdef CONFIG_MAC80211_HT_DEBUG | 224 | #ifdef CONFIG_MAC80211_HT_DEBUG |
188 | printk(KERN_DEBUG "timer expired on tid %d but we are not " | 225 | printk(KERN_DEBUG "timer expired on tid %d but we are not " |
189 | "(or no longer) expecting addBA response there\n", | 226 | "(or no longer) expecting addBA response there\n", |
@@ -196,8 +233,8 @@ static void sta_addba_resp_timer_expired(unsigned long data) | |||
196 | printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid); | 233 | printk(KERN_DEBUG "addBA response timer expired on tid %d\n", tid); |
197 | #endif | 234 | #endif |
198 | 235 | ||
199 | ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR); | 236 | ieee80211_stop_tx_ba_session(&sta->sta, tid); |
200 | spin_unlock_bh(&sta->lock); | 237 | rcu_read_unlock(); |
201 | } | 238 | } |
202 | 239 | ||
203 | static inline int ieee80211_ac_from_tid(int tid) | 240 | static inline int ieee80211_ac_from_tid(int tid) |
@@ -205,14 +242,112 @@ static inline int ieee80211_ac_from_tid(int tid) | |||
205 | return ieee802_1d_to_ac[tid & 7]; | 242 | return ieee802_1d_to_ac[tid & 7]; |
206 | } | 243 | } |
207 | 244 | ||
245 | /* | ||
246 | * When multiple aggregation sessions on multiple stations | ||
247 | * are being created/destroyed simultaneously, we need to | ||
248 | * refcount the global queue stop caused by that in order | ||
249 | * to not get into a situation where one of the aggregation | ||
250 | * setup or teardown re-enables queues before the other is | ||
251 | * ready to handle that. | ||
252 | * | ||
253 | * These two functions take care of this issue by keeping | ||
254 | * a global "agg_queue_stop" refcount. | ||
255 | */ | ||
256 | static void __acquires(agg_queue) | ||
257 | ieee80211_stop_queue_agg(struct ieee80211_local *local, int tid) | ||
258 | { | ||
259 | int queue = ieee80211_ac_from_tid(tid); | ||
260 | |||
261 | if (atomic_inc_return(&local->agg_queue_stop[queue]) == 1) | ||
262 | ieee80211_stop_queue_by_reason( | ||
263 | &local->hw, queue, | ||
264 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION); | ||
265 | __acquire(agg_queue); | ||
266 | } | ||
267 | |||
268 | static void __releases(agg_queue) | ||
269 | ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid) | ||
270 | { | ||
271 | int queue = ieee80211_ac_from_tid(tid); | ||
272 | |||
273 | if (atomic_dec_return(&local->agg_queue_stop[queue]) == 0) | ||
274 | ieee80211_wake_queue_by_reason( | ||
275 | &local->hw, queue, | ||
276 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION); | ||
277 | __release(agg_queue); | ||
278 | } | ||
279 | |||
280 | void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) | ||
281 | { | ||
282 | struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid]; | ||
283 | struct ieee80211_local *local = sta->local; | ||
284 | struct ieee80211_sub_if_data *sdata = sta->sdata; | ||
285 | u16 start_seq_num; | ||
286 | int ret; | ||
287 | |||
288 | lockdep_assert_held(&sta->ampdu_mlme.mtx); | ||
289 | |||
290 | /* | ||
291 | * While we're asking the driver about the aggregation, | ||
292 | * stop the AC queue so that we don't have to worry | ||
293 | * about frames that came in while we were doing that, | ||
294 | * which would require us to put them to the AC pending | ||
295 | * afterwards which just makes the code more complex. | ||
296 | */ | ||
297 | ieee80211_stop_queue_agg(local, tid); | ||
298 | |||
299 | clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state); | ||
300 | |||
301 | /* | ||
302 | * make sure no packets are being processed to get | ||
303 | * valid starting sequence number | ||
304 | */ | ||
305 | synchronize_net(); | ||
306 | |||
307 | start_seq_num = sta->tid_seq[tid] >> 4; | ||
308 | |||
309 | ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START, | ||
310 | &sta->sta, tid, &start_seq_num); | ||
311 | if (ret) { | ||
312 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
313 | printk(KERN_DEBUG "BA request denied - HW unavailable for" | ||
314 | " tid %d\n", tid); | ||
315 | #endif | ||
316 | spin_lock_bh(&sta->lock); | ||
317 | rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL); | ||
318 | spin_unlock_bh(&sta->lock); | ||
319 | |||
320 | ieee80211_wake_queue_agg(local, tid); | ||
321 | call_rcu(&tid_tx->rcu_head, kfree_tid_tx); | ||
322 | return; | ||
323 | } | ||
324 | |||
325 | /* we can take packets again now */ | ||
326 | ieee80211_wake_queue_agg(local, tid); | ||
327 | |||
328 | /* activate the timer for the recipient's addBA response */ | ||
329 | mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL); | ||
330 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
331 | printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid); | ||
332 | #endif | ||
333 | |||
334 | spin_lock_bh(&sta->lock); | ||
335 | sta->ampdu_mlme.addba_req_num[tid]++; | ||
336 | spin_unlock_bh(&sta->lock); | ||
337 | |||
338 | /* send AddBA request */ | ||
339 | ieee80211_send_addba_request(sdata, sta->sta.addr, tid, | ||
340 | tid_tx->dialog_token, start_seq_num, | ||
341 | 0x40, 5000); | ||
342 | } | ||
343 | |||
208 | int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) | 344 | int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) |
209 | { | 345 | { |
210 | struct sta_info *sta = container_of(pubsta, struct sta_info, sta); | 346 | struct sta_info *sta = container_of(pubsta, struct sta_info, sta); |
211 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 347 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
212 | struct ieee80211_local *local = sdata->local; | 348 | struct ieee80211_local *local = sdata->local; |
213 | u8 *state; | 349 | struct tid_ampdu_tx *tid_tx; |
214 | int ret = 0; | 350 | int ret = 0; |
215 | u16 start_seq_num; | ||
216 | 351 | ||
217 | trace_api_start_tx_ba_session(pubsta, tid); | 352 | trace_api_start_tx_ba_session(pubsta, tid); |
218 | 353 | ||
@@ -239,24 +374,15 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) | |||
239 | sdata->vif.type != NL80211_IFTYPE_AP) | 374 | sdata->vif.type != NL80211_IFTYPE_AP) |
240 | return -EINVAL; | 375 | return -EINVAL; |
241 | 376 | ||
242 | if (test_sta_flags(sta, WLAN_STA_DISASSOC)) { | ||
243 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
244 | printk(KERN_DEBUG "Disassociation is in progress. " | ||
245 | "Denying BA session request\n"); | ||
246 | #endif | ||
247 | return -EINVAL; | ||
248 | } | ||
249 | |||
250 | if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) { | 377 | if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) { |
251 | #ifdef CONFIG_MAC80211_HT_DEBUG | 378 | #ifdef CONFIG_MAC80211_HT_DEBUG |
252 | printk(KERN_DEBUG "Suspend in progress. " | 379 | printk(KERN_DEBUG "BA sessions blocked. " |
253 | "Denying BA session request\n"); | 380 | "Denying BA session request\n"); |
254 | #endif | 381 | #endif |
255 | return -EINVAL; | 382 | return -EINVAL; |
256 | } | 383 | } |
257 | 384 | ||
258 | spin_lock_bh(&sta->lock); | 385 | spin_lock_bh(&sta->lock); |
259 | spin_lock(&local->ampdu_lock); | ||
260 | 386 | ||
261 | /* we have tried too many times, receiver does not want A-MPDU */ | 387 | /* we have tried too many times, receiver does not want A-MPDU */ |
262 | if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) { | 388 | if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) { |
@@ -264,9 +390,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) | |||
264 | goto err_unlock_sta; | 390 | goto err_unlock_sta; |
265 | } | 391 | } |
266 | 392 | ||
267 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | 393 | tid_tx = sta->ampdu_mlme.tid_tx[tid]; |
268 | /* check if the TID is not in aggregation flow already */ | 394 | /* check if the TID is not in aggregation flow already */ |
269 | if (*state != HT_AGG_STATE_IDLE) { | 395 | if (tid_tx) { |
270 | #ifdef CONFIG_MAC80211_HT_DEBUG | 396 | #ifdef CONFIG_MAC80211_HT_DEBUG |
271 | printk(KERN_DEBUG "BA request denied - session is not " | 397 | printk(KERN_DEBUG "BA request denied - session is not " |
272 | "idle on tid %u\n", tid); | 398 | "idle on tid %u\n", tid); |
@@ -275,96 +401,37 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) | |||
275 | goto err_unlock_sta; | 401 | goto err_unlock_sta; |
276 | } | 402 | } |
277 | 403 | ||
278 | /* | ||
279 | * While we're asking the driver about the aggregation, | ||
280 | * stop the AC queue so that we don't have to worry | ||
281 | * about frames that came in while we were doing that, | ||
282 | * which would require us to put them to the AC pending | ||
283 | * afterwards which just makes the code more complex. | ||
284 | */ | ||
285 | ieee80211_stop_queue_by_reason( | ||
286 | &local->hw, ieee80211_ac_from_tid(tid), | ||
287 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION); | ||
288 | |||
289 | /* prepare A-MPDU MLME for Tx aggregation */ | 404 | /* prepare A-MPDU MLME for Tx aggregation */ |
290 | sta->ampdu_mlme.tid_tx[tid] = | 405 | tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); |
291 | kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC); | 406 | if (!tid_tx) { |
292 | if (!sta->ampdu_mlme.tid_tx[tid]) { | ||
293 | #ifdef CONFIG_MAC80211_HT_DEBUG | 407 | #ifdef CONFIG_MAC80211_HT_DEBUG |
294 | if (net_ratelimit()) | 408 | if (net_ratelimit()) |
295 | printk(KERN_ERR "allocate tx mlme to tid %d failed\n", | 409 | printk(KERN_ERR "allocate tx mlme to tid %d failed\n", |
296 | tid); | 410 | tid); |
297 | #endif | 411 | #endif |
298 | ret = -ENOMEM; | 412 | ret = -ENOMEM; |
299 | goto err_wake_queue; | 413 | goto err_unlock_sta; |
300 | } | 414 | } |
301 | 415 | ||
302 | skb_queue_head_init(&sta->ampdu_mlme.tid_tx[tid]->pending); | 416 | skb_queue_head_init(&tid_tx->pending); |
417 | __set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state); | ||
303 | 418 | ||
304 | /* Tx timer */ | 419 | /* Tx timer */ |
305 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function = | 420 | tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired; |
306 | sta_addba_resp_timer_expired; | 421 | tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid]; |
307 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data = | 422 | init_timer(&tid_tx->addba_resp_timer); |
308 | (unsigned long)&sta->timer_to_tid[tid]; | ||
309 | init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); | ||
310 | |||
311 | /* Ok, the Addba frame hasn't been sent yet, but if the driver calls the | ||
312 | * call back right away, it must see that the flow has begun */ | ||
313 | *state |= HT_ADDBA_REQUESTED_MSK; | ||
314 | |||
315 | start_seq_num = sta->tid_seq[tid] >> 4; | ||
316 | |||
317 | ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START, | ||
318 | pubsta, tid, &start_seq_num); | ||
319 | 423 | ||
320 | if (ret) { | 424 | /* assign a dialog token */ |
321 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
322 | printk(KERN_DEBUG "BA request denied - HW unavailable for" | ||
323 | " tid %d\n", tid); | ||
324 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | ||
325 | *state = HT_AGG_STATE_IDLE; | ||
326 | goto err_free; | ||
327 | } | ||
328 | |||
329 | /* Driver vetoed or OKed, but we can take packets again now */ | ||
330 | ieee80211_wake_queue_by_reason( | ||
331 | &local->hw, ieee80211_ac_from_tid(tid), | ||
332 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION); | ||
333 | |||
334 | spin_unlock(&local->ampdu_lock); | ||
335 | |||
336 | /* prepare tid data */ | ||
337 | sta->ampdu_mlme.dialog_token_allocator++; | 425 | sta->ampdu_mlme.dialog_token_allocator++; |
338 | sta->ampdu_mlme.tid_tx[tid]->dialog_token = | 426 | tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator; |
339 | sta->ampdu_mlme.dialog_token_allocator; | ||
340 | sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num; | ||
341 | 427 | ||
342 | spin_unlock_bh(&sta->lock); | 428 | /* finally, assign it to the array */ |
429 | rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx); | ||
343 | 430 | ||
344 | /* send AddBA request */ | 431 | ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); |
345 | ieee80211_send_addba_request(sdata, pubsta->addr, tid, | 432 | |
346 | sta->ampdu_mlme.tid_tx[tid]->dialog_token, | 433 | /* this flow continues off the work */ |
347 | sta->ampdu_mlme.tid_tx[tid]->ssn, | ||
348 | 0x40, 5000); | ||
349 | sta->ampdu_mlme.addba_req_num[tid]++; | ||
350 | /* activate the timer for the recipient's addBA response */ | ||
351 | sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires = | ||
352 | jiffies + ADDBA_RESP_INTERVAL; | ||
353 | add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); | ||
354 | #ifdef CONFIG_MAC80211_HT_DEBUG | ||
355 | printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid); | ||
356 | #endif | ||
357 | return 0; | ||
358 | |||
359 | err_free: | ||
360 | kfree(sta->ampdu_mlme.tid_tx[tid]); | ||
361 | sta->ampdu_mlme.tid_tx[tid] = NULL; | ||
362 | err_wake_queue: | ||
363 | ieee80211_wake_queue_by_reason( | ||
364 | &local->hw, ieee80211_ac_from_tid(tid), | ||
365 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION); | ||
366 | err_unlock_sta: | 434 | err_unlock_sta: |
367 | spin_unlock(&local->ampdu_lock); | ||
368 | spin_unlock_bh(&sta->lock); | 435 | spin_unlock_bh(&sta->lock); |
369 | return ret; | 436 | return ret; |
370 | } | 437 | } |
@@ -372,69 +439,65 @@ EXPORT_SYMBOL(ieee80211_start_tx_ba_session); | |||
372 | 439 | ||
373 | /* | 440 | /* |
374 | * splice packets from the STA's pending to the local pending, | 441 | * splice packets from the STA's pending to the local pending, |
375 | * requires a call to ieee80211_agg_splice_finish and holding | 442 | * requires a call to ieee80211_agg_splice_finish later |
376 | * local->ampdu_lock across both calls. | ||
377 | */ | 443 | */ |
378 | static void ieee80211_agg_splice_packets(struct ieee80211_local *local, | 444 | static void __acquires(agg_queue) |
379 | struct sta_info *sta, u16 tid) | 445 | ieee80211_agg_splice_packets(struct ieee80211_local *local, |
446 | struct tid_ampdu_tx *tid_tx, u16 tid) | ||
380 | { | 447 | { |
448 | int queue = ieee80211_ac_from_tid(tid); | ||
381 | unsigned long flags; | 449 | unsigned long flags; |
382 | u16 queue = ieee80211_ac_from_tid(tid); | ||
383 | |||
384 | ieee80211_stop_queue_by_reason( | ||
385 | &local->hw, queue, | ||
386 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION); | ||
387 | 450 | ||
388 | if (!(sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK)) | 451 | ieee80211_stop_queue_agg(local, tid); |
389 | return; | ||
390 | 452 | ||
391 | if (WARN(!sta->ampdu_mlme.tid_tx[tid], | 453 | if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates" |
392 | "TID %d gone but expected when splicing aggregates from" | 454 | " from the pending queue\n", tid)) |
393 | "the pending queue\n", tid)) | ||
394 | return; | 455 | return; |
395 | 456 | ||
396 | if (!skb_queue_empty(&sta->ampdu_mlme.tid_tx[tid]->pending)) { | 457 | if (!skb_queue_empty(&tid_tx->pending)) { |
397 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); | 458 | spin_lock_irqsave(&local->queue_stop_reason_lock, flags); |
398 | /* copy over remaining packets */ | 459 | /* copy over remaining packets */ |
399 | skb_queue_splice_tail_init( | 460 | skb_queue_splice_tail_init(&tid_tx->pending, |
400 | &sta->ampdu_mlme.tid_tx[tid]->pending, | 461 | &local->pending[queue]); |
401 | &local->pending[queue]); | ||
402 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); | 462 | spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); |
403 | } | 463 | } |
404 | } | 464 | } |
405 | 465 | ||
406 | static void ieee80211_agg_splice_finish(struct ieee80211_local *local, | 466 | static void __releases(agg_queue) |
407 | struct sta_info *sta, u16 tid) | 467 | ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid) |
408 | { | 468 | { |
409 | u16 queue = ieee80211_ac_from_tid(tid); | 469 | ieee80211_wake_queue_agg(local, tid); |
410 | |||
411 | ieee80211_wake_queue_by_reason( | ||
412 | &local->hw, queue, | ||
413 | IEEE80211_QUEUE_STOP_REASON_AGGREGATION); | ||
414 | } | 470 | } |
415 | 471 | ||
416 | /* caller must hold sta->lock */ | ||
417 | static void ieee80211_agg_tx_operational(struct ieee80211_local *local, | 472 | static void ieee80211_agg_tx_operational(struct ieee80211_local *local, |
418 | struct sta_info *sta, u16 tid) | 473 | struct sta_info *sta, u16 tid) |
419 | { | 474 | { |
475 | lockdep_assert_held(&sta->ampdu_mlme.mtx); | ||
476 | |||
420 | #ifdef CONFIG_MAC80211_HT_DEBUG | 477 | #ifdef CONFIG_MAC80211_HT_DEBUG |
421 | printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid); | 478 | printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid); |
422 | #endif | 479 | #endif |
423 | 480 | ||
424 | spin_lock(&local->ampdu_lock); | ||
425 | ieee80211_agg_splice_packets(local, sta, tid); | ||
426 | /* | ||
427 | * NB: we rely on sta->lock being taken in the TX | ||
428 | * processing here when adding to the pending queue, | ||
429 | * otherwise we could only change the state of the | ||
430 | * session to OPERATIONAL _here_. | ||
431 | */ | ||
432 | ieee80211_agg_splice_finish(local, sta, tid); | ||
433 | spin_unlock(&local->ampdu_lock); | ||
434 | |||
435 | drv_ampdu_action(local, sta->sdata, | 481 | drv_ampdu_action(local, sta->sdata, |
436 | IEEE80211_AMPDU_TX_OPERATIONAL, | 482 | IEEE80211_AMPDU_TX_OPERATIONAL, |
437 | &sta->sta, tid, NULL); | 483 | &sta->sta, tid, NULL); |
484 | |||
485 | /* | ||
486 | * synchronize with TX path, while splicing the TX path | ||
487 | * should block so it won't put more packets onto pending. | ||
488 | */ | ||
489 | spin_lock_bh(&sta->lock); | ||
490 | |||
491 | ieee80211_agg_splice_packets(local, sta->ampdu_mlme.tid_tx[tid], tid); | ||
492 | /* | ||
493 | * Now mark as operational. This will be visible | ||
494 | * in the TX path, and lets it go lock-free in | ||
495 | * the common case. | ||
496 | */ | ||
497 | set_bit(HT_AGG_STATE_OPERATIONAL, &sta->ampdu_mlme.tid_tx[tid]->state); | ||
498 | ieee80211_agg_splice_finish(local, tid); | ||
499 | |||
500 | spin_unlock_bh(&sta->lock); | ||
438 | } | 501 | } |
439 | 502 | ||
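The two comments above spell out the design this hunk introduces: the OPERATIONAL bit is flipped under sta->lock while the pending frames are spliced, and from then on the transmit path can check it without taking the lock. The transmit-path side of this is not shown in the hunks here; as a rough, hypothetical illustration (example_hand_to_driver() is a made-up stand-in, and this is not the actual net/mac80211/tx.c code), the lock-free fast path works like this:

    static void example_tx_path(struct sta_info *sta, struct sk_buff *skb,
                                int tid)
    {
            struct tid_ampdu_tx *tid_tx;

            rcu_read_lock();
            tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
            if (!tid_tx || test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
                    /* Common case: no session, or a fully operational one.
                     * The frame goes straight on, no sta->lock taken. */
                    example_hand_to_driver(skb);
            } else {
                    /* Session being set up or torn down: park the frame on
                     * the pending queue so the splice code above can move
                     * it once the state settles. */
                    spin_lock_bh(&sta->lock);
                    __skb_queue_tail(&tid_tx->pending, skb);
                    spin_unlock_bh(&sta->lock);
            }
            rcu_read_unlock();
    }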
440 | void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid) | 503 | void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid) |
@@ -442,7 +505,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid) | |||
442 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); | 505 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); |
443 | struct ieee80211_local *local = sdata->local; | 506 | struct ieee80211_local *local = sdata->local; |
444 | struct sta_info *sta; | 507 | struct sta_info *sta; |
445 | u8 *state; | 508 | struct tid_ampdu_tx *tid_tx; |
446 | 509 | ||
447 | trace_api_start_tx_ba_cb(sdata, ra, tid); | 510 | trace_api_start_tx_ba_cb(sdata, ra, tid); |
448 | 511 | ||
@@ -454,42 +517,36 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid) | |||
454 | return; | 517 | return; |
455 | } | 518 | } |
456 | 519 | ||
457 | rcu_read_lock(); | 520 | mutex_lock(&local->sta_mtx); |
458 | sta = sta_info_get(sdata, ra); | 521 | sta = sta_info_get(sdata, ra); |
459 | if (!sta) { | 522 | if (!sta) { |
460 | rcu_read_unlock(); | 523 | mutex_unlock(&local->sta_mtx); |
461 | #ifdef CONFIG_MAC80211_HT_DEBUG | 524 | #ifdef CONFIG_MAC80211_HT_DEBUG |
462 | printk(KERN_DEBUG "Could not find station: %pM\n", ra); | 525 | printk(KERN_DEBUG "Could not find station: %pM\n", ra); |
463 | #endif | 526 | #endif |
464 | return; | 527 | return; |
465 | } | 528 | } |
466 | 529 | ||
467 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | 530 | mutex_lock(&sta->ampdu_mlme.mtx); |
468 | spin_lock_bh(&sta->lock); | 531 | tid_tx = sta->ampdu_mlme.tid_tx[tid]; |
469 | 532 | ||
470 | if (WARN_ON(!(*state & HT_ADDBA_REQUESTED_MSK))) { | 533 | if (WARN_ON(!tid_tx)) { |
471 | #ifdef CONFIG_MAC80211_HT_DEBUG | 534 | #ifdef CONFIG_MAC80211_HT_DEBUG |
472 | printk(KERN_DEBUG "addBA was not requested yet, state is %d\n", | 535 | printk(KERN_DEBUG "addBA was not requested!\n"); |
473 | *state); | ||
474 | #endif | 536 | #endif |
475 | spin_unlock_bh(&sta->lock); | 537 | goto unlock; |
476 | rcu_read_unlock(); | ||
477 | return; | ||
478 | } | 538 | } |
479 | 539 | ||
480 | if (WARN_ON(*state & HT_ADDBA_DRV_READY_MSK)) | 540 | if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))) |
481 | goto out; | 541 | goto unlock; |
482 | |||
483 | *state |= HT_ADDBA_DRV_READY_MSK; | ||
484 | 542 | ||
485 | if (*state == HT_AGG_STATE_OPERATIONAL) | 543 | if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) |
486 | ieee80211_agg_tx_operational(local, sta, tid); | 544 | ieee80211_agg_tx_operational(local, sta, tid); |
487 | 545 | ||
488 | out: | 546 | unlock: |
489 | spin_unlock_bh(&sta->lock); | 547 | mutex_unlock(&sta->ampdu_mlme.mtx); |
490 | rcu_read_unlock(); | 548 | mutex_unlock(&local->sta_mtx); |
491 | } | 549 | } |
492 | EXPORT_SYMBOL(ieee80211_start_tx_ba_cb); | ||
493 | 550 | ||
494 | void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, | 551 | void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, |
495 | const u8 *ra, u16 tid) | 552 | const u8 *ra, u16 tid) |
@@ -510,33 +567,24 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, | |||
510 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | 567 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; |
511 | memcpy(&ra_tid->ra, ra, ETH_ALEN); | 568 | memcpy(&ra_tid->ra, ra, ETH_ALEN); |
512 | ra_tid->tid = tid; | 569 | ra_tid->tid = tid; |
513 | ra_tid->vif = vif; | ||
514 | 570 | ||
515 | skb->pkt_type = IEEE80211_ADDBA_MSG; | 571 | skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START; |
516 | skb_queue_tail(&local->skb_queue, skb); | 572 | skb_queue_tail(&sdata->skb_queue, skb); |
517 | tasklet_schedule(&local->tasklet); | 573 | ieee80211_queue_work(&local->hw, &sdata->work); |
518 | } | 574 | } |
519 | EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe); | 575 | EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe); |
520 | 576 | ||
521 | int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | 577 | int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, |
522 | enum ieee80211_back_parties initiator) | 578 | enum ieee80211_back_parties initiator) |
523 | { | 579 | { |
524 | u8 *state; | ||
525 | int ret; | 580 | int ret; |
526 | 581 | ||
527 | /* check if the TID is in aggregation */ | 582 | mutex_lock(&sta->ampdu_mlme.mtx); |
528 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
529 | spin_lock_bh(&sta->lock); | ||
530 | |||
531 | if (*state != HT_AGG_STATE_OPERATIONAL) { | ||
532 | ret = -ENOENT; | ||
533 | goto unlock; | ||
534 | } | ||
535 | 583 | ||
536 | ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator); | 584 | ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator); |
537 | 585 | ||
538 | unlock: | 586 | mutex_unlock(&sta->ampdu_mlme.mtx); |
539 | spin_unlock_bh(&sta->lock); | 587 | |
540 | return ret; | 588 | return ret; |
541 | } | 589 | } |
542 | 590 | ||
@@ -545,6 +593,8 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) | |||
545 | struct sta_info *sta = container_of(pubsta, struct sta_info, sta); | 593 | struct sta_info *sta = container_of(pubsta, struct sta_info, sta); |
546 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 594 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
547 | struct ieee80211_local *local = sdata->local; | 595 | struct ieee80211_local *local = sdata->local; |
596 | struct tid_ampdu_tx *tid_tx; | ||
597 | int ret = 0; | ||
548 | 598 | ||
549 | trace_api_stop_tx_ba_session(pubsta, tid); | 599 | trace_api_stop_tx_ba_session(pubsta, tid); |
550 | 600 | ||
@@ -554,7 +604,26 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) | |||
554 | if (tid >= STA_TID_NUM) | 604 | if (tid >= STA_TID_NUM) |
555 | return -EINVAL; | 605 | return -EINVAL; |
556 | 606 | ||
557 | return __ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR); | 607 | spin_lock_bh(&sta->lock); |
608 | tid_tx = sta->ampdu_mlme.tid_tx[tid]; | ||
609 | |||
610 | if (!tid_tx) { | ||
611 | ret = -ENOENT; | ||
612 | goto unlock; | ||
613 | } | ||
614 | |||
615 | if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { | ||
616 | /* already in progress stopping it */ | ||
617 | ret = 0; | ||
618 | goto unlock; | ||
619 | } | ||
620 | |||
621 | set_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state); | ||
622 | ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); | ||
623 | |||
624 | unlock: | ||
625 | spin_unlock_bh(&sta->lock); | ||
626 | return ret; | ||
558 | } | 627 | } |
559 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); | 628 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); |
560 | 629 | ||
@@ -563,7 +632,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid) | |||
563 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); | 632 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); |
564 | struct ieee80211_local *local = sdata->local; | 633 | struct ieee80211_local *local = sdata->local; |
565 | struct sta_info *sta; | 634 | struct sta_info *sta; |
566 | u8 *state; | 635 | struct tid_ampdu_tx *tid_tx; |
567 | 636 | ||
568 | trace_api_stop_tx_ba_cb(sdata, ra, tid); | 637 | trace_api_stop_tx_ba_cb(sdata, ra, tid); |
569 | 638 | ||
@@ -580,51 +649,56 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid) | |||
580 | ra, tid); | 649 | ra, tid); |
581 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | 650 | #endif /* CONFIG_MAC80211_HT_DEBUG */ |
582 | 651 | ||
583 | rcu_read_lock(); | 652 | mutex_lock(&local->sta_mtx); |
653 | |||
584 | sta = sta_info_get(sdata, ra); | 654 | sta = sta_info_get(sdata, ra); |
585 | if (!sta) { | 655 | if (!sta) { |
586 | #ifdef CONFIG_MAC80211_HT_DEBUG | 656 | #ifdef CONFIG_MAC80211_HT_DEBUG |
587 | printk(KERN_DEBUG "Could not find station: %pM\n", ra); | 657 | printk(KERN_DEBUG "Could not find station: %pM\n", ra); |
588 | #endif | 658 | #endif |
589 | rcu_read_unlock(); | 659 | goto unlock; |
590 | return; | ||
591 | } | 660 | } |
592 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | ||
593 | 661 | ||
594 | /* NOTE: no need to use sta->lock in this state check, as | 662 | mutex_lock(&sta->ampdu_mlme.mtx); |
595 | * ieee80211_stop_tx_ba_session will let only one stop call to | 663 | spin_lock_bh(&sta->lock); |
596 | * pass through per sta/tid | 664 | tid_tx = sta->ampdu_mlme.tid_tx[tid]; |
597 | */ | 665 | |
598 | if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) { | 666 | if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { |
599 | #ifdef CONFIG_MAC80211_HT_DEBUG | 667 | #ifdef CONFIG_MAC80211_HT_DEBUG |
600 | printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n"); | 668 | printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n"); |
601 | #endif | 669 | #endif |
602 | rcu_read_unlock(); | 670 | goto unlock_sta; |
603 | return; | ||
604 | } | 671 | } |
605 | 672 | ||
606 | if (*state & HT_AGG_STATE_INITIATOR_MSK) | 673 | if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR) |
607 | ieee80211_send_delba(sta->sdata, ra, tid, | 674 | ieee80211_send_delba(sta->sdata, ra, tid, |
608 | WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); | 675 | WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); |
609 | 676 | ||
610 | spin_lock_bh(&sta->lock); | 677 | /* |
611 | spin_lock(&local->ampdu_lock); | 678 | * When we get here, the TX path will not be lockless any more wrt. |
679 | * aggregation, since the OPERATIONAL bit has long been cleared. | ||
680 | * Thus it will block on getting the lock, if it occurs. So if we | ||
681 | * stop the queue now, we will not get any more packets, and any | ||
682 | * that might be being processed will wait for us here, thereby | ||
683 | * guaranteeing that no packets go to the tid_tx pending queue any | ||
684 | * more. | ||
685 | */ | ||
612 | 686 | ||
613 | ieee80211_agg_splice_packets(local, sta, tid); | 687 | ieee80211_agg_splice_packets(local, tid_tx, tid); |
614 | 688 | ||
615 | *state = HT_AGG_STATE_IDLE; | 689 | /* future packets must not find the tid_tx struct any more */ |
616 | /* from now on packets are no longer put onto sta->pending */ | 690 | rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL); |
617 | kfree(sta->ampdu_mlme.tid_tx[tid]); | ||
618 | sta->ampdu_mlme.tid_tx[tid] = NULL; | ||
619 | 691 | ||
620 | ieee80211_agg_splice_finish(local, sta, tid); | 692 | ieee80211_agg_splice_finish(local, tid); |
621 | 693 | ||
622 | spin_unlock(&local->ampdu_lock); | 694 | call_rcu(&tid_tx->rcu_head, kfree_tid_tx); |
623 | spin_unlock_bh(&sta->lock); | ||
624 | 695 | ||
625 | rcu_read_unlock(); | 696 | unlock_sta: |
697 | spin_unlock_bh(&sta->lock); | ||
698 | mutex_unlock(&sta->ampdu_mlme.mtx); | ||
699 | unlock: | ||
700 | mutex_unlock(&local->sta_mtx); | ||
626 | } | 701 | } |
627 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb); | ||
628 | 702 | ||
629 | void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, | 703 | void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, |
630 | const u8 *ra, u16 tid) | 704 | const u8 *ra, u16 tid) |
@@ -645,11 +719,10 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, | |||
645 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | 719 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; |
646 | memcpy(&ra_tid->ra, ra, ETH_ALEN); | 720 | memcpy(&ra_tid->ra, ra, ETH_ALEN); |
647 | ra_tid->tid = tid; | 721 | ra_tid->tid = tid; |
648 | ra_tid->vif = vif; | ||
649 | 722 | ||
650 | skb->pkt_type = IEEE80211_DELBA_MSG; | 723 | skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP; |
651 | skb_queue_tail(&local->skb_queue, skb); | 724 | skb_queue_tail(&sdata->skb_queue, skb); |
652 | tasklet_schedule(&local->tasklet); | 725 | ieee80211_queue_work(&local->hw, &sdata->work); |
653 | } | 726 | } |
654 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe); | 727 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe); |
655 | 728 | ||
@@ -659,40 +732,40 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, | |||
659 | struct ieee80211_mgmt *mgmt, | 732 | struct ieee80211_mgmt *mgmt, |
660 | size_t len) | 733 | size_t len) |
661 | { | 734 | { |
735 | struct tid_ampdu_tx *tid_tx; | ||
662 | u16 capab, tid; | 736 | u16 capab, tid; |
663 | u8 *state; | ||
664 | 737 | ||
665 | capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); | 738 | capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); |
666 | tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; | 739 | tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2; |
667 | 740 | ||
668 | state = &sta->ampdu_mlme.tid_state_tx[tid]; | 741 | mutex_lock(&sta->ampdu_mlme.mtx); |
669 | |||
670 | spin_lock_bh(&sta->lock); | ||
671 | 742 | ||
672 | if (!(*state & HT_ADDBA_REQUESTED_MSK)) | 743 | tid_tx = sta->ampdu_mlme.tid_tx[tid]; |
744 | if (!tid_tx) | ||
673 | goto out; | 745 | goto out; |
674 | 746 | ||
675 | if (mgmt->u.action.u.addba_resp.dialog_token != | 747 | if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) { |
676 | sta->ampdu_mlme.tid_tx[tid]->dialog_token) { | ||
677 | #ifdef CONFIG_MAC80211_HT_DEBUG | 748 | #ifdef CONFIG_MAC80211_HT_DEBUG |
678 | printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid); | 749 | printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid); |
679 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | 750 | #endif |
680 | goto out; | 751 | goto out; |
681 | } | 752 | } |
682 | 753 | ||
683 | del_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); | 754 | del_timer(&tid_tx->addba_resp_timer); |
684 | 755 | ||
685 | #ifdef CONFIG_MAC80211_HT_DEBUG | 756 | #ifdef CONFIG_MAC80211_HT_DEBUG |
686 | printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid); | 757 | printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid); |
687 | #endif /* CONFIG_MAC80211_HT_DEBUG */ | 758 | #endif |
688 | 759 | ||
689 | if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) | 760 | if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) |
690 | == WLAN_STATUS_SUCCESS) { | 761 | == WLAN_STATUS_SUCCESS) { |
691 | u8 curstate = *state; | 762 | if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED, |
692 | 763 | &tid_tx->state)) { | |
693 | *state |= HT_ADDBA_RECEIVED_MSK; | 764 | /* ignore duplicate response */ |
765 | goto out; | ||
766 | } | ||
694 | 767 | ||
695 | if (*state != curstate && *state == HT_AGG_STATE_OPERATIONAL) | 768 | if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)) |
696 | ieee80211_agg_tx_operational(local, sta, tid); | 769 | ieee80211_agg_tx_operational(local, sta, tid); |
697 | 770 | ||
698 | sta->ampdu_mlme.addba_req_num[tid] = 0; | 771 | sta->ampdu_mlme.addba_req_num[tid] = 0; |
@@ -701,5 +774,5 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local, | |||
701 | } | 774 | } |
702 | 775 | ||
703 | out: | 776 | out: |
704 | spin_unlock_bh(&sta->lock); | 777 | mutex_unlock(&sta->ampdu_mlme.mtx); |
705 | } | 778 | } |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 1f76d048388b..ed8c9f5be94f 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1446,7 +1446,6 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, | |||
1446 | { | 1446 | { |
1447 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 1447 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
1448 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 1448 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
1449 | struct ieee80211_conf *conf = &local->hw.conf; | ||
1450 | 1449 | ||
1451 | if (sdata->vif.type != NL80211_IFTYPE_STATION) | 1450 | if (sdata->vif.type != NL80211_IFTYPE_STATION) |
1452 | return -EOPNOTSUPP; | 1451 | return -EOPNOTSUPP; |
@@ -1455,11 +1454,11 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, | |||
1455 | return -EOPNOTSUPP; | 1454 | return -EOPNOTSUPP; |
1456 | 1455 | ||
1457 | if (enabled == sdata->u.mgd.powersave && | 1456 | if (enabled == sdata->u.mgd.powersave && |
1458 | timeout == conf->dynamic_ps_forced_timeout) | 1457 | timeout == local->dynamic_ps_forced_timeout) |
1459 | return 0; | 1458 | return 0; |
1460 | 1459 | ||
1461 | sdata->u.mgd.powersave = enabled; | 1460 | sdata->u.mgd.powersave = enabled; |
1462 | conf->dynamic_ps_forced_timeout = timeout; | 1461 | local->dynamic_ps_forced_timeout = timeout; |
1463 | 1462 | ||
1464 | /* no change, but if automatic follow powersave */ | 1463 | /* no change, but if automatic follow powersave */ |
1465 | mutex_lock(&sdata->u.mgd.mtx); | 1464 | mutex_lock(&sdata->u.mgd.mtx); |
@@ -1555,9 +1554,55 @@ static int ieee80211_action(struct wiphy *wiphy, struct net_device *dev, | |||
1555 | bool channel_type_valid, | 1554 | bool channel_type_valid, |
1556 | const u8 *buf, size_t len, u64 *cookie) | 1555 | const u8 *buf, size_t len, u64 *cookie) |
1557 | { | 1556 | { |
1558 | return ieee80211_mgd_action(IEEE80211_DEV_TO_SUB_IF(dev), chan, | 1557 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
1559 | channel_type, channel_type_valid, | 1558 | struct ieee80211_local *local = sdata->local; |
1560 | buf, len, cookie); | 1559 | struct sk_buff *skb; |
1560 | struct sta_info *sta; | ||
1561 | const struct ieee80211_mgmt *mgmt = (void *)buf; | ||
1562 | u32 flags = IEEE80211_TX_INTFL_NL80211_FRAME_TX | | ||
1563 | IEEE80211_TX_CTL_REQ_TX_STATUS; | ||
1564 | |||
1565 | /* Check that we are on the requested channel for transmission */ | ||
1566 | if (chan != local->tmp_channel && | ||
1567 | chan != local->oper_channel) | ||
1568 | return -EBUSY; | ||
1569 | if (channel_type_valid && | ||
1570 | (channel_type != local->tmp_channel_type && | ||
1571 | channel_type != local->_oper_channel_type)) | ||
1572 | return -EBUSY; | ||
1573 | |||
1574 | switch (sdata->vif.type) { | ||
1575 | case NL80211_IFTYPE_ADHOC: | ||
1576 | if (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC) | ||
1577 | break; | ||
1578 | rcu_read_lock(); | ||
1579 | sta = sta_info_get(sdata, mgmt->da); | ||
1580 | rcu_read_unlock(); | ||
1581 | if (!sta) | ||
1582 | return -ENOLINK; | ||
1583 | break; | ||
1584 | case NL80211_IFTYPE_STATION: | ||
1585 | if (!(sdata->u.mgd.flags & IEEE80211_STA_MFP_ENABLED)) | ||
1586 | flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; | ||
1587 | break; | ||
1588 | default: | ||
1589 | return -EOPNOTSUPP; | ||
1590 | } | ||
1591 | |||
1592 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + len); | ||
1593 | if (!skb) | ||
1594 | return -ENOMEM; | ||
1595 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
1596 | |||
1597 | memcpy(skb_put(skb, len), buf, len); | ||
1598 | |||
1599 | IEEE80211_SKB_CB(skb)->flags = flags; | ||
1600 | |||
1601 | skb->dev = sdata->dev; | ||
1602 | ieee80211_tx_skb(sdata, skb); | ||
1603 | |||
1604 | *cookie = (unsigned long) skb; | ||
1605 | return 0; | ||
1561 | } | 1606 | } |
1562 | 1607 | ||
1563 | struct cfg80211_ops mac80211_config_ops = { | 1608 | struct cfg80211_ops mac80211_config_ops = { |
diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
index 97c9e46e859e..fa5e76e658ef 100644
--- a/net/mac80211/debugfs_key.c
+++ b/net/mac80211/debugfs_key.c
@@ -143,7 +143,7 @@ static ssize_t key_rx_spec_read(struct file *file, char __user *userbuf, | |||
143 | len = p - buf; | 143 | len = p - buf; |
144 | break; | 144 | break; |
145 | case ALG_CCMP: | 145 | case ALG_CCMP: |
146 | for (i = 0; i < NUM_RX_DATA_QUEUES; i++) { | 146 | for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++) { |
147 | rpn = key->u.ccmp.rx_pn[i]; | 147 | rpn = key->u.ccmp.rx_pn[i]; |
148 | p += scnprintf(p, sizeof(buf)+buf-p, | 148 | p += scnprintf(p, sizeof(buf)+buf-p, |
149 | "%02x%02x%02x%02x%02x%02x\n", | 149 | "%02x%02x%02x%02x%02x%02x\n", |
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 576e024715e3..76839d4dfaac 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c | |||
@@ -121,28 +121,25 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, | |||
121 | p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n", | 121 | p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n", |
122 | sta->ampdu_mlme.dialog_token_allocator + 1); | 122 | sta->ampdu_mlme.dialog_token_allocator + 1); |
123 | p += scnprintf(p, sizeof(buf) + buf - p, | 123 | p += scnprintf(p, sizeof(buf) + buf - p, |
124 | "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tSSN\tpending\n"); | 124 | "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tpending\n"); |
125 | for (i = 0; i < STA_TID_NUM; i++) { | 125 | for (i = 0; i < STA_TID_NUM; i++) { |
126 | p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i); | 126 | p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i); |
127 | p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", | 127 | p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", |
128 | sta->ampdu_mlme.tid_active_rx[i]); | 128 | !!sta->ampdu_mlme.tid_rx[i]); |
129 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", | 129 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", |
130 | sta->ampdu_mlme.tid_active_rx[i] ? | 130 | sta->ampdu_mlme.tid_rx[i] ? |
131 | sta->ampdu_mlme.tid_rx[i]->dialog_token : 0); | 131 | sta->ampdu_mlme.tid_rx[i]->dialog_token : 0); |
132 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x", | 132 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x", |
133 | sta->ampdu_mlme.tid_active_rx[i] ? | 133 | sta->ampdu_mlme.tid_rx[i] ? |
134 | sta->ampdu_mlme.tid_rx[i]->ssn : 0); | 134 | sta->ampdu_mlme.tid_rx[i]->ssn : 0); |
135 | 135 | ||
136 | p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", | 136 | p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", |
137 | sta->ampdu_mlme.tid_state_tx[i]); | 137 | !!sta->ampdu_mlme.tid_tx[i]); |
138 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", | 138 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", |
139 | sta->ampdu_mlme.tid_state_tx[i] ? | 139 | sta->ampdu_mlme.tid_tx[i] ? |
140 | sta->ampdu_mlme.tid_tx[i]->dialog_token : 0); | 140 | sta->ampdu_mlme.tid_tx[i]->dialog_token : 0); |
141 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x", | ||
142 | sta->ampdu_mlme.tid_state_tx[i] ? | ||
143 | sta->ampdu_mlme.tid_tx[i]->ssn : 0); | ||
144 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d", | 141 | p += scnprintf(p, sizeof(buf) + buf - p, "\t%03d", |
145 | sta->ampdu_mlme.tid_state_tx[i] ? | 142 | sta->ampdu_mlme.tid_tx[i] ? |
146 | skb_queue_len(&sta->ampdu_mlme.tid_tx[i]->pending) : 0); | 143 | skb_queue_len(&sta->ampdu_mlme.tid_tx[i]->pending) : 0); |
147 | p += scnprintf(p, sizeof(buf) + buf - p, "\n"); | 144 | p += scnprintf(p, sizeof(buf) + buf - p, "\n"); |
148 | } | 145 | } |
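The debugfs change above drops the separate per-TID state arrays and instead treats a non-NULL tid_rx[]/tid_tx[] pointer as "session active", using double negation to turn the pointer into 0 or 1 for printing. A small stand-alone illustration of that idiom (plain userspace C, not taken from the patch):

	#include <stdio.h>

	struct session { int dummy; };

	static void print_active(const struct session *s)
	{
		/* !!s collapses any non-NULL pointer to 1 and NULL to 0 */
		printf("active: %x\n", !!s);
	}

	int main(void)
	{
		struct session s;
		print_active(&s);   /* prints "active: 1" */
		print_active(NULL); /* prints "active: 0" */
		return 0;
	}
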
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 7d18a3245e3d..c33317320eee 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h | |||
@@ -16,10 +16,11 @@ static inline int drv_start(struct ieee80211_local *local) | |||
16 | 16 | ||
17 | might_sleep(); | 17 | might_sleep(); |
18 | 18 | ||
19 | trace_drv_start(local); | ||
19 | local->started = true; | 20 | local->started = true; |
20 | smp_mb(); | 21 | smp_mb(); |
21 | ret = local->ops->start(&local->hw); | 22 | ret = local->ops->start(&local->hw); |
22 | trace_drv_start(local, ret); | 23 | trace_drv_return_int(local, ret); |
23 | return ret; | 24 | return ret; |
24 | } | 25 | } |
25 | 26 | ||
@@ -27,8 +28,9 @@ static inline void drv_stop(struct ieee80211_local *local) | |||
27 | { | 28 | { |
28 | might_sleep(); | 29 | might_sleep(); |
29 | 30 | ||
30 | local->ops->stop(&local->hw); | ||
31 | trace_drv_stop(local); | 31 | trace_drv_stop(local); |
32 | local->ops->stop(&local->hw); | ||
33 | trace_drv_return_void(local); | ||
32 | 34 | ||
33 | /* sync away all work on the tasklet before clearing started */ | 35 | /* sync away all work on the tasklet before clearing started */ |
34 | tasklet_disable(&local->tasklet); | 36 | tasklet_disable(&local->tasklet); |
@@ -46,8 +48,9 @@ static inline int drv_add_interface(struct ieee80211_local *local, | |||
46 | 48 | ||
47 | might_sleep(); | 49 | might_sleep(); |
48 | 50 | ||
51 | trace_drv_add_interface(local, vif_to_sdata(vif)); | ||
49 | ret = local->ops->add_interface(&local->hw, vif); | 52 | ret = local->ops->add_interface(&local->hw, vif); |
50 | trace_drv_add_interface(local, vif_to_sdata(vif), ret); | 53 | trace_drv_return_int(local, ret); |
51 | return ret; | 54 | return ret; |
52 | } | 55 | } |
53 | 56 | ||
@@ -56,8 +59,9 @@ static inline void drv_remove_interface(struct ieee80211_local *local, | |||
56 | { | 59 | { |
57 | might_sleep(); | 60 | might_sleep(); |
58 | 61 | ||
59 | local->ops->remove_interface(&local->hw, vif); | ||
60 | trace_drv_remove_interface(local, vif_to_sdata(vif)); | 62 | trace_drv_remove_interface(local, vif_to_sdata(vif)); |
63 | local->ops->remove_interface(&local->hw, vif); | ||
64 | trace_drv_return_void(local); | ||
61 | } | 65 | } |
62 | 66 | ||
63 | static inline int drv_config(struct ieee80211_local *local, u32 changed) | 67 | static inline int drv_config(struct ieee80211_local *local, u32 changed) |
@@ -66,8 +70,9 @@ static inline int drv_config(struct ieee80211_local *local, u32 changed) | |||
66 | 70 | ||
67 | might_sleep(); | 71 | might_sleep(); |
68 | 72 | ||
73 | trace_drv_config(local, changed); | ||
69 | ret = local->ops->config(&local->hw, changed); | 74 | ret = local->ops->config(&local->hw, changed); |
70 | trace_drv_config(local, changed, ret); | 75 | trace_drv_return_int(local, ret); |
71 | return ret; | 76 | return ret; |
72 | } | 77 | } |
73 | 78 | ||
@@ -78,26 +83,10 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local, | |||
78 | { | 83 | { |
79 | might_sleep(); | 84 | might_sleep(); |
80 | 85 | ||
86 | trace_drv_bss_info_changed(local, sdata, info, changed); | ||
81 | if (local->ops->bss_info_changed) | 87 | if (local->ops->bss_info_changed) |
82 | local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed); | 88 | local->ops->bss_info_changed(&local->hw, &sdata->vif, info, changed); |
83 | trace_drv_bss_info_changed(local, sdata, info, changed); | 89 | trace_drv_return_void(local); |
84 | } | ||
85 | |||
86 | struct in_ifaddr; | ||
87 | static inline int drv_configure_arp_filter(struct ieee80211_local *local, | ||
88 | struct ieee80211_vif *vif, | ||
89 | struct in_ifaddr *ifa_list) | ||
90 | { | ||
91 | int ret = 0; | ||
92 | |||
93 | might_sleep(); | ||
94 | |||
95 | if (local->ops->configure_arp_filter) | ||
96 | ret = local->ops->configure_arp_filter(&local->hw, vif, | ||
97 | ifa_list); | ||
98 | |||
99 | trace_drv_configure_arp_filter(local, vif_to_sdata(vif), ifa_list, ret); | ||
100 | return ret; | ||
101 | } | 90 | } |
102 | 91 | ||
103 | static inline u64 drv_prepare_multicast(struct ieee80211_local *local, | 92 | static inline u64 drv_prepare_multicast(struct ieee80211_local *local, |
@@ -105,10 +94,12 @@ static inline u64 drv_prepare_multicast(struct ieee80211_local *local, | |||
105 | { | 94 | { |
106 | u64 ret = 0; | 95 | u64 ret = 0; |
107 | 96 | ||
97 | trace_drv_prepare_multicast(local, mc_list->count); | ||
98 | |||
108 | if (local->ops->prepare_multicast) | 99 | if (local->ops->prepare_multicast) |
109 | ret = local->ops->prepare_multicast(&local->hw, mc_list); | 100 | ret = local->ops->prepare_multicast(&local->hw, mc_list); |
110 | 101 | ||
111 | trace_drv_prepare_multicast(local, mc_list->count, ret); | 102 | trace_drv_return_u64(local, ret); |
112 | 103 | ||
113 | return ret; | 104 | return ret; |
114 | } | 105 | } |
@@ -120,19 +111,21 @@ static inline void drv_configure_filter(struct ieee80211_local *local, | |||
120 | { | 111 | { |
121 | might_sleep(); | 112 | might_sleep(); |
122 | 113 | ||
123 | local->ops->configure_filter(&local->hw, changed_flags, total_flags, | ||
124 | multicast); | ||
125 | trace_drv_configure_filter(local, changed_flags, total_flags, | 114 | trace_drv_configure_filter(local, changed_flags, total_flags, |
126 | multicast); | 115 | multicast); |
116 | local->ops->configure_filter(&local->hw, changed_flags, total_flags, | ||
117 | multicast); | ||
118 | trace_drv_return_void(local); | ||
127 | } | 119 | } |
128 | 120 | ||
129 | static inline int drv_set_tim(struct ieee80211_local *local, | 121 | static inline int drv_set_tim(struct ieee80211_local *local, |
130 | struct ieee80211_sta *sta, bool set) | 122 | struct ieee80211_sta *sta, bool set) |
131 | { | 123 | { |
132 | int ret = 0; | 124 | int ret = 0; |
125 | trace_drv_set_tim(local, sta, set); | ||
133 | if (local->ops->set_tim) | 126 | if (local->ops->set_tim) |
134 | ret = local->ops->set_tim(&local->hw, sta, set); | 127 | ret = local->ops->set_tim(&local->hw, sta, set); |
135 | trace_drv_set_tim(local, sta, set, ret); | 128 | trace_drv_return_int(local, ret); |
136 | return ret; | 129 | return ret; |
137 | } | 130 | } |
138 | 131 | ||
@@ -146,8 +139,9 @@ static inline int drv_set_key(struct ieee80211_local *local, | |||
146 | 139 | ||
147 | might_sleep(); | 140 | might_sleep(); |
148 | 141 | ||
142 | trace_drv_set_key(local, cmd, sdata, sta, key); | ||
149 | ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key); | 143 | ret = local->ops->set_key(&local->hw, cmd, &sdata->vif, sta, key); |
150 | trace_drv_set_key(local, cmd, sdata, sta, key, ret); | 144 | trace_drv_return_int(local, ret); |
151 | return ret; | 145 | return ret; |
152 | } | 146 | } |
153 | 147 | ||
@@ -162,10 +156,11 @@ static inline void drv_update_tkip_key(struct ieee80211_local *local, | |||
162 | if (sta) | 156 | if (sta) |
163 | ista = &sta->sta; | 157 | ista = &sta->sta; |
164 | 158 | ||
159 | trace_drv_update_tkip_key(local, sdata, conf, ista, iv32); | ||
165 | if (local->ops->update_tkip_key) | 160 | if (local->ops->update_tkip_key) |
166 | local->ops->update_tkip_key(&local->hw, &sdata->vif, conf, | 161 | local->ops->update_tkip_key(&local->hw, &sdata->vif, conf, |
167 | ista, iv32, phase1key); | 162 | ista, iv32, phase1key); |
168 | trace_drv_update_tkip_key(local, sdata, conf, ista, iv32); | 163 | trace_drv_return_void(local); |
169 | } | 164 | } |
170 | 165 | ||
171 | static inline int drv_hw_scan(struct ieee80211_local *local, | 166 | static inline int drv_hw_scan(struct ieee80211_local *local, |
@@ -176,8 +171,9 @@ static inline int drv_hw_scan(struct ieee80211_local *local, | |||
176 | 171 | ||
177 | might_sleep(); | 172 | might_sleep(); |
178 | 173 | ||
174 | trace_drv_hw_scan(local, sdata, req); | ||
179 | ret = local->ops->hw_scan(&local->hw, &sdata->vif, req); | 175 | ret = local->ops->hw_scan(&local->hw, &sdata->vif, req); |
180 | trace_drv_hw_scan(local, sdata, req, ret); | 176 | trace_drv_return_int(local, ret); |
181 | return ret; | 177 | return ret; |
182 | } | 178 | } |
183 | 179 | ||
@@ -185,18 +181,20 @@ static inline void drv_sw_scan_start(struct ieee80211_local *local) | |||
185 | { | 181 | { |
186 | might_sleep(); | 182 | might_sleep(); |
187 | 183 | ||
184 | trace_drv_sw_scan_start(local); | ||
188 | if (local->ops->sw_scan_start) | 185 | if (local->ops->sw_scan_start) |
189 | local->ops->sw_scan_start(&local->hw); | 186 | local->ops->sw_scan_start(&local->hw); |
190 | trace_drv_sw_scan_start(local); | 187 | trace_drv_return_void(local); |
191 | } | 188 | } |
192 | 189 | ||
193 | static inline void drv_sw_scan_complete(struct ieee80211_local *local) | 190 | static inline void drv_sw_scan_complete(struct ieee80211_local *local) |
194 | { | 191 | { |
195 | might_sleep(); | 192 | might_sleep(); |
196 | 193 | ||
194 | trace_drv_sw_scan_complete(local); | ||
197 | if (local->ops->sw_scan_complete) | 195 | if (local->ops->sw_scan_complete) |
198 | local->ops->sw_scan_complete(&local->hw); | 196 | local->ops->sw_scan_complete(&local->hw); |
199 | trace_drv_sw_scan_complete(local); | 197 | trace_drv_return_void(local); |
200 | } | 198 | } |
201 | 199 | ||
202 | static inline int drv_get_stats(struct ieee80211_local *local, | 200 | static inline int drv_get_stats(struct ieee80211_local *local, |
@@ -228,9 +226,10 @@ static inline int drv_set_rts_threshold(struct ieee80211_local *local, | |||
228 | 226 | ||
229 | might_sleep(); | 227 | might_sleep(); |
230 | 228 | ||
229 | trace_drv_set_rts_threshold(local, value); | ||
231 | if (local->ops->set_rts_threshold) | 230 | if (local->ops->set_rts_threshold) |
232 | ret = local->ops->set_rts_threshold(&local->hw, value); | 231 | ret = local->ops->set_rts_threshold(&local->hw, value); |
233 | trace_drv_set_rts_threshold(local, value, ret); | 232 | trace_drv_return_int(local, ret); |
234 | return ret; | 233 | return ret; |
235 | } | 234 | } |
236 | 235 | ||
@@ -240,12 +239,13 @@ static inline int drv_set_coverage_class(struct ieee80211_local *local, | |||
240 | int ret = 0; | 239 | int ret = 0; |
241 | might_sleep(); | 240 | might_sleep(); |
242 | 241 | ||
242 | trace_drv_set_coverage_class(local, value); | ||
243 | if (local->ops->set_coverage_class) | 243 | if (local->ops->set_coverage_class) |
244 | local->ops->set_coverage_class(&local->hw, value); | 244 | local->ops->set_coverage_class(&local->hw, value); |
245 | else | 245 | else |
246 | ret = -EOPNOTSUPP; | 246 | ret = -EOPNOTSUPP; |
247 | 247 | ||
248 | trace_drv_set_coverage_class(local, value, ret); | 248 | trace_drv_return_int(local, ret); |
249 | return ret; | 249 | return ret; |
250 | } | 250 | } |
251 | 251 | ||
@@ -254,9 +254,10 @@ static inline void drv_sta_notify(struct ieee80211_local *local, | |||
254 | enum sta_notify_cmd cmd, | 254 | enum sta_notify_cmd cmd, |
255 | struct ieee80211_sta *sta) | 255 | struct ieee80211_sta *sta) |
256 | { | 256 | { |
257 | trace_drv_sta_notify(local, sdata, cmd, sta); | ||
257 | if (local->ops->sta_notify) | 258 | if (local->ops->sta_notify) |
258 | local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta); | 259 | local->ops->sta_notify(&local->hw, &sdata->vif, cmd, sta); |
259 | trace_drv_sta_notify(local, sdata, cmd, sta); | 260 | trace_drv_return_void(local); |
260 | } | 261 | } |
261 | 262 | ||
262 | static inline int drv_sta_add(struct ieee80211_local *local, | 263 | static inline int drv_sta_add(struct ieee80211_local *local, |
@@ -267,10 +268,11 @@ static inline int drv_sta_add(struct ieee80211_local *local, | |||
267 | 268 | ||
268 | might_sleep(); | 269 | might_sleep(); |
269 | 270 | ||
271 | trace_drv_sta_add(local, sdata, sta); | ||
270 | if (local->ops->sta_add) | 272 | if (local->ops->sta_add) |
271 | ret = local->ops->sta_add(&local->hw, &sdata->vif, sta); | 273 | ret = local->ops->sta_add(&local->hw, &sdata->vif, sta); |
272 | 274 | ||
273 | trace_drv_sta_add(local, sdata, sta, ret); | 275 | trace_drv_return_int(local, ret); |
274 | 276 | ||
275 | return ret; | 277 | return ret; |
276 | } | 278 | } |
@@ -281,10 +283,11 @@ static inline void drv_sta_remove(struct ieee80211_local *local, | |||
281 | { | 283 | { |
282 | might_sleep(); | 284 | might_sleep(); |
283 | 285 | ||
286 | trace_drv_sta_remove(local, sdata, sta); | ||
284 | if (local->ops->sta_remove) | 287 | if (local->ops->sta_remove) |
285 | local->ops->sta_remove(&local->hw, &sdata->vif, sta); | 288 | local->ops->sta_remove(&local->hw, &sdata->vif, sta); |
286 | 289 | ||
287 | trace_drv_sta_remove(local, sdata, sta); | 290 | trace_drv_return_void(local); |
288 | } | 291 | } |
289 | 292 | ||
290 | static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue, | 293 | static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue, |
@@ -294,9 +297,10 @@ static inline int drv_conf_tx(struct ieee80211_local *local, u16 queue, | |||
294 | 297 | ||
295 | might_sleep(); | 298 | might_sleep(); |
296 | 299 | ||
300 | trace_drv_conf_tx(local, queue, params); | ||
297 | if (local->ops->conf_tx) | 301 | if (local->ops->conf_tx) |
298 | ret = local->ops->conf_tx(&local->hw, queue, params); | 302 | ret = local->ops->conf_tx(&local->hw, queue, params); |
299 | trace_drv_conf_tx(local, queue, params, ret); | 303 | trace_drv_return_int(local, ret); |
300 | return ret; | 304 | return ret; |
301 | } | 305 | } |
302 | 306 | ||
@@ -306,9 +310,10 @@ static inline u64 drv_get_tsf(struct ieee80211_local *local) | |||
306 | 310 | ||
307 | might_sleep(); | 311 | might_sleep(); |
308 | 312 | ||
313 | trace_drv_get_tsf(local); | ||
309 | if (local->ops->get_tsf) | 314 | if (local->ops->get_tsf) |
310 | ret = local->ops->get_tsf(&local->hw); | 315 | ret = local->ops->get_tsf(&local->hw); |
311 | trace_drv_get_tsf(local, ret); | 316 | trace_drv_return_u64(local, ret); |
312 | return ret; | 317 | return ret; |
313 | } | 318 | } |
314 | 319 | ||
@@ -316,18 +321,20 @@ static inline void drv_set_tsf(struct ieee80211_local *local, u64 tsf) | |||
316 | { | 321 | { |
317 | might_sleep(); | 322 | might_sleep(); |
318 | 323 | ||
324 | trace_drv_set_tsf(local, tsf); | ||
319 | if (local->ops->set_tsf) | 325 | if (local->ops->set_tsf) |
320 | local->ops->set_tsf(&local->hw, tsf); | 326 | local->ops->set_tsf(&local->hw, tsf); |
321 | trace_drv_set_tsf(local, tsf); | 327 | trace_drv_return_void(local); |
322 | } | 328 | } |
323 | 329 | ||
324 | static inline void drv_reset_tsf(struct ieee80211_local *local) | 330 | static inline void drv_reset_tsf(struct ieee80211_local *local) |
325 | { | 331 | { |
326 | might_sleep(); | 332 | might_sleep(); |
327 | 333 | ||
334 | trace_drv_reset_tsf(local); | ||
328 | if (local->ops->reset_tsf) | 335 | if (local->ops->reset_tsf) |
329 | local->ops->reset_tsf(&local->hw); | 336 | local->ops->reset_tsf(&local->hw); |
330 | trace_drv_reset_tsf(local); | 337 | trace_drv_return_void(local); |
331 | } | 338 | } |
332 | 339 | ||
333 | static inline int drv_tx_last_beacon(struct ieee80211_local *local) | 340 | static inline int drv_tx_last_beacon(struct ieee80211_local *local) |
@@ -336,9 +343,10 @@ static inline int drv_tx_last_beacon(struct ieee80211_local *local) | |||
336 | 343 | ||
337 | might_sleep(); | 344 | might_sleep(); |
338 | 345 | ||
346 | trace_drv_tx_last_beacon(local); | ||
339 | if (local->ops->tx_last_beacon) | 347 | if (local->ops->tx_last_beacon) |
340 | ret = local->ops->tx_last_beacon(&local->hw); | 348 | ret = local->ops->tx_last_beacon(&local->hw); |
341 | trace_drv_tx_last_beacon(local, ret); | 349 | trace_drv_return_int(local, ret); |
342 | return ret; | 350 | return ret; |
343 | } | 351 | } |
344 | 352 | ||
@@ -349,10 +357,17 @@ static inline int drv_ampdu_action(struct ieee80211_local *local, | |||
349 | u16 *ssn) | 357 | u16 *ssn) |
350 | { | 358 | { |
351 | int ret = -EOPNOTSUPP; | 359 | int ret = -EOPNOTSUPP; |
360 | |||
361 | might_sleep(); | ||
362 | |||
363 | trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn); | ||
364 | |||
352 | if (local->ops->ampdu_action) | 365 | if (local->ops->ampdu_action) |
353 | ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action, | 366 | ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action, |
354 | sta, tid, ssn); | 367 | sta, tid, ssn); |
355 | trace_drv_ampdu_action(local, sdata, action, sta, tid, ssn, ret); | 368 | |
369 | trace_drv_return_int(local, ret); | ||
370 | |||
356 | return ret; | 371 | return ret; |
357 | } | 372 | } |
358 | 373 | ||
@@ -381,6 +396,7 @@ static inline void drv_flush(struct ieee80211_local *local, bool drop) | |||
381 | trace_drv_flush(local, drop); | 396 | trace_drv_flush(local, drop); |
382 | if (local->ops->flush) | 397 | if (local->ops->flush) |
383 | local->ops->flush(&local->hw, drop); | 398 | local->ops->flush(&local->hw, drop); |
399 | trace_drv_return_void(local); | ||
384 | } | 400 | } |
385 | 401 | ||
386 | static inline void drv_channel_switch(struct ieee80211_local *local, | 402 | static inline void drv_channel_switch(struct ieee80211_local *local, |
@@ -388,9 +404,9 @@ static inline void drv_channel_switch(struct ieee80211_local *local, | |||
388 | { | 404 | { |
389 | might_sleep(); | 405 | might_sleep(); |
390 | 406 | ||
391 | local->ops->channel_switch(&local->hw, ch_switch); | ||
392 | |||
393 | trace_drv_channel_switch(local, ch_switch); | 407 | trace_drv_channel_switch(local, ch_switch); |
408 | local->ops->channel_switch(&local->hw, ch_switch); | ||
409 | trace_drv_return_void(local); | ||
394 | } | 410 | } |
395 | 411 | ||
396 | #endif /* __MAC80211_DRIVER_OPS */ | 412 | #endif /* __MAC80211_DRIVER_OPS */ |
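All of the driver-ops.h hunks above follow the same refactor: the per-callback tracepoint now fires before the driver op is invoked (so the arguments are logged even if the driver sleeps or misbehaves), and the return value is recorded afterwards through one of the shared drv_return_void/drv_return_int/drv_return_u64 events. A hedged template of the resulting shape, with a made-up example_op standing in for any real callback:

	/* Sketch only: example_op and trace_drv_example_op are hypothetical
	 * stand-ins for the real callbacks touched in this patch. */
	static inline int drv_example_op(struct ieee80211_local *local, u32 arg)
	{
		int ret = -EOPNOTSUPP;

		might_sleep();

		trace_drv_example_op(local, arg);         /* log the call and its arguments */
		if (local->ops->example_op)
			ret = local->ops->example_op(&local->hw, arg);
		trace_drv_return_int(local, ret);         /* shared return-value event */
		return ret;
	}
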
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h index 6b90630151ab..8da31caff931 100644 --- a/net/mac80211/driver-trace.h +++ b/net/mac80211/driver-trace.h | |||
@@ -36,20 +36,58 @@ static inline void trace_ ## name(proto) {} | |||
36 | * Tracing for driver callbacks. | 36 | * Tracing for driver callbacks. |
37 | */ | 37 | */ |
38 | 38 | ||
39 | TRACE_EVENT(drv_start, | 39 | TRACE_EVENT(drv_return_void, |
40 | TP_PROTO(struct ieee80211_local *local, int ret), | 40 | TP_PROTO(struct ieee80211_local *local), |
41 | TP_ARGS(local), | ||
42 | TP_STRUCT__entry( | ||
43 | LOCAL_ENTRY | ||
44 | ), | ||
45 | TP_fast_assign( | ||
46 | LOCAL_ASSIGN; | ||
47 | ), | ||
48 | TP_printk(LOCAL_PR_FMT, LOCAL_PR_ARG) | ||
49 | ); | ||
41 | 50 | ||
51 | TRACE_EVENT(drv_return_int, | ||
52 | TP_PROTO(struct ieee80211_local *local, int ret), | ||
42 | TP_ARGS(local, ret), | 53 | TP_ARGS(local, ret), |
43 | |||
44 | TP_STRUCT__entry( | 54 | TP_STRUCT__entry( |
45 | LOCAL_ENTRY | 55 | LOCAL_ENTRY |
46 | __field(int, ret) | 56 | __field(int, ret) |
47 | ), | 57 | ), |
58 | TP_fast_assign( | ||
59 | LOCAL_ASSIGN; | ||
60 | __entry->ret = ret; | ||
61 | ), | ||
62 | TP_printk(LOCAL_PR_FMT " - %d", LOCAL_PR_ARG, __entry->ret) | ||
63 | ); | ||
48 | 64 | ||
65 | TRACE_EVENT(drv_return_u64, | ||
66 | TP_PROTO(struct ieee80211_local *local, u64 ret), | ||
67 | TP_ARGS(local, ret), | ||
68 | TP_STRUCT__entry( | ||
69 | LOCAL_ENTRY | ||
70 | __field(u64, ret) | ||
71 | ), | ||
49 | TP_fast_assign( | 72 | TP_fast_assign( |
50 | LOCAL_ASSIGN; | 73 | LOCAL_ASSIGN; |
51 | __entry->ret = ret; | 74 | __entry->ret = ret; |
52 | ), | 75 | ), |
76 | TP_printk(LOCAL_PR_FMT " - %llu", LOCAL_PR_ARG, __entry->ret) | ||
77 | ); | ||
78 | |||
79 | TRACE_EVENT(drv_start, | ||
80 | TP_PROTO(struct ieee80211_local *local), | ||
81 | |||
82 | TP_ARGS(local), | ||
83 | |||
84 | TP_STRUCT__entry( | ||
85 | LOCAL_ENTRY | ||
86 | ), | ||
87 | |||
88 | TP_fast_assign( | ||
89 | LOCAL_ASSIGN; | ||
90 | ), | ||
53 | 91 | ||
54 | TP_printk( | 92 | TP_printk( |
55 | LOCAL_PR_FMT, LOCAL_PR_ARG | 93 | LOCAL_PR_FMT, LOCAL_PR_ARG |
@@ -76,28 +114,25 @@ TRACE_EVENT(drv_stop, | |||
76 | 114 | ||
77 | TRACE_EVENT(drv_add_interface, | 115 | TRACE_EVENT(drv_add_interface, |
78 | TP_PROTO(struct ieee80211_local *local, | 116 | TP_PROTO(struct ieee80211_local *local, |
79 | struct ieee80211_sub_if_data *sdata, | 117 | struct ieee80211_sub_if_data *sdata), |
80 | int ret), | ||
81 | 118 | ||
82 | TP_ARGS(local, sdata, ret), | 119 | TP_ARGS(local, sdata), |
83 | 120 | ||
84 | TP_STRUCT__entry( | 121 | TP_STRUCT__entry( |
85 | LOCAL_ENTRY | 122 | LOCAL_ENTRY |
86 | VIF_ENTRY | 123 | VIF_ENTRY |
87 | __array(char, addr, 6) | 124 | __array(char, addr, 6) |
88 | __field(int, ret) | ||
89 | ), | 125 | ), |
90 | 126 | ||
91 | TP_fast_assign( | 127 | TP_fast_assign( |
92 | LOCAL_ASSIGN; | 128 | LOCAL_ASSIGN; |
93 | VIF_ASSIGN; | 129 | VIF_ASSIGN; |
94 | memcpy(__entry->addr, sdata->vif.addr, 6); | 130 | memcpy(__entry->addr, sdata->vif.addr, 6); |
95 | __entry->ret = ret; | ||
96 | ), | 131 | ), |
97 | 132 | ||
98 | TP_printk( | 133 | TP_printk( |
99 | LOCAL_PR_FMT VIF_PR_FMT " addr:%pM ret:%d", | 134 | LOCAL_PR_FMT VIF_PR_FMT " addr:%pM", |
100 | LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr, __entry->ret | 135 | LOCAL_PR_ARG, VIF_PR_ARG, __entry->addr |
101 | ) | 136 | ) |
102 | ); | 137 | ); |
103 | 138 | ||
@@ -126,15 +161,13 @@ TRACE_EVENT(drv_remove_interface, | |||
126 | 161 | ||
127 | TRACE_EVENT(drv_config, | 162 | TRACE_EVENT(drv_config, |
128 | TP_PROTO(struct ieee80211_local *local, | 163 | TP_PROTO(struct ieee80211_local *local, |
129 | u32 changed, | 164 | u32 changed), |
130 | int ret), | ||
131 | 165 | ||
132 | TP_ARGS(local, changed, ret), | 166 | TP_ARGS(local, changed), |
133 | 167 | ||
134 | TP_STRUCT__entry( | 168 | TP_STRUCT__entry( |
135 | LOCAL_ENTRY | 169 | LOCAL_ENTRY |
136 | __field(u32, changed) | 170 | __field(u32, changed) |
137 | __field(int, ret) | ||
138 | __field(u32, flags) | 171 | __field(u32, flags) |
139 | __field(int, power_level) | 172 | __field(int, power_level) |
140 | __field(int, dynamic_ps_timeout) | 173 | __field(int, dynamic_ps_timeout) |
@@ -150,7 +183,6 @@ TRACE_EVENT(drv_config, | |||
150 | TP_fast_assign( | 183 | TP_fast_assign( |
151 | LOCAL_ASSIGN; | 184 | LOCAL_ASSIGN; |
152 | __entry->changed = changed; | 185 | __entry->changed = changed; |
153 | __entry->ret = ret; | ||
154 | __entry->flags = local->hw.conf.flags; | 186 | __entry->flags = local->hw.conf.flags; |
155 | __entry->power_level = local->hw.conf.power_level; | 187 | __entry->power_level = local->hw.conf.power_level; |
156 | __entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout; | 188 | __entry->dynamic_ps_timeout = local->hw.conf.dynamic_ps_timeout; |
@@ -164,8 +196,8 @@ TRACE_EVENT(drv_config, | |||
164 | ), | 196 | ), |
165 | 197 | ||
166 | TP_printk( | 198 | TP_printk( |
167 | LOCAL_PR_FMT " ch:%#x freq:%d ret:%d", | 199 | LOCAL_PR_FMT " ch:%#x freq:%d", |
168 | LOCAL_PR_ARG, __entry->changed, __entry->center_freq, __entry->ret | 200 | LOCAL_PR_ARG, __entry->changed, __entry->center_freq |
169 | ) | 201 | ) |
170 | ); | 202 | ); |
171 | 203 | ||
@@ -219,52 +251,24 @@ TRACE_EVENT(drv_bss_info_changed, | |||
219 | ) | 251 | ) |
220 | ); | 252 | ); |
221 | 253 | ||
222 | TRACE_EVENT(drv_configure_arp_filter, | ||
223 | TP_PROTO(struct ieee80211_local *local, | ||
224 | struct ieee80211_sub_if_data *sdata, | ||
225 | struct in_ifaddr *ifa_list, int ret), | ||
226 | |||
227 | TP_ARGS(local, sdata, ifa_list, ret), | ||
228 | |||
229 | TP_STRUCT__entry( | ||
230 | LOCAL_ENTRY | ||
231 | VIF_ENTRY | ||
232 | __field(int, ret) | ||
233 | ), | ||
234 | |||
235 | TP_fast_assign( | ||
236 | LOCAL_ASSIGN; | ||
237 | VIF_ASSIGN; | ||
238 | __entry->ret = ret; | ||
239 | ), | ||
240 | |||
241 | TP_printk( | ||
242 | VIF_PR_FMT LOCAL_PR_FMT " ret:%d", | ||
243 | VIF_PR_ARG, LOCAL_PR_ARG, __entry->ret | ||
244 | ) | ||
245 | ); | ||
246 | |||
247 | TRACE_EVENT(drv_prepare_multicast, | 254 | TRACE_EVENT(drv_prepare_multicast, |
248 | TP_PROTO(struct ieee80211_local *local, int mc_count, u64 ret), | 255 | TP_PROTO(struct ieee80211_local *local, int mc_count), |
249 | 256 | ||
250 | TP_ARGS(local, mc_count, ret), | 257 | TP_ARGS(local, mc_count), |
251 | 258 | ||
252 | TP_STRUCT__entry( | 259 | TP_STRUCT__entry( |
253 | LOCAL_ENTRY | 260 | LOCAL_ENTRY |
254 | __field(int, mc_count) | 261 | __field(int, mc_count) |
255 | __field(u64, ret) | ||
256 | ), | 262 | ), |
257 | 263 | ||
258 | TP_fast_assign( | 264 | TP_fast_assign( |
259 | LOCAL_ASSIGN; | 265 | LOCAL_ASSIGN; |
260 | __entry->mc_count = mc_count; | 266 | __entry->mc_count = mc_count; |
261 | __entry->ret = ret; | ||
262 | ), | 267 | ), |
263 | 268 | ||
264 | TP_printk( | 269 | TP_printk( |
265 | LOCAL_PR_FMT " prepare mc (%d): %llx", | 270 | LOCAL_PR_FMT " prepare mc (%d)", |
266 | LOCAL_PR_ARG, __entry->mc_count, | 271 | LOCAL_PR_ARG, __entry->mc_count |
267 | (unsigned long long) __entry->ret | ||
268 | ) | 272 | ) |
269 | ); | 273 | ); |
270 | 274 | ||
@@ -298,27 +302,25 @@ TRACE_EVENT(drv_configure_filter, | |||
298 | 302 | ||
299 | TRACE_EVENT(drv_set_tim, | 303 | TRACE_EVENT(drv_set_tim, |
300 | TP_PROTO(struct ieee80211_local *local, | 304 | TP_PROTO(struct ieee80211_local *local, |
301 | struct ieee80211_sta *sta, bool set, int ret), | 305 | struct ieee80211_sta *sta, bool set), |
302 | 306 | ||
303 | TP_ARGS(local, sta, set, ret), | 307 | TP_ARGS(local, sta, set), |
304 | 308 | ||
305 | TP_STRUCT__entry( | 309 | TP_STRUCT__entry( |
306 | LOCAL_ENTRY | 310 | LOCAL_ENTRY |
307 | STA_ENTRY | 311 | STA_ENTRY |
308 | __field(bool, set) | 312 | __field(bool, set) |
309 | __field(int, ret) | ||
310 | ), | 313 | ), |
311 | 314 | ||
312 | TP_fast_assign( | 315 | TP_fast_assign( |
313 | LOCAL_ASSIGN; | 316 | LOCAL_ASSIGN; |
314 | STA_ASSIGN; | 317 | STA_ASSIGN; |
315 | __entry->set = set; | 318 | __entry->set = set; |
316 | __entry->ret = ret; | ||
317 | ), | 319 | ), |
318 | 320 | ||
319 | TP_printk( | 321 | TP_printk( |
320 | LOCAL_PR_FMT STA_PR_FMT " set:%d ret:%d", | 322 | LOCAL_PR_FMT STA_PR_FMT " set:%d", |
321 | LOCAL_PR_ARG, STA_PR_FMT, __entry->set, __entry->ret | 323 | LOCAL_PR_ARG, STA_PR_FMT, __entry->set |
322 | ) | 324 | ) |
323 | ); | 325 | ); |
324 | 326 | ||
@@ -326,9 +328,9 @@ TRACE_EVENT(drv_set_key, | |||
326 | TP_PROTO(struct ieee80211_local *local, | 328 | TP_PROTO(struct ieee80211_local *local, |
327 | enum set_key_cmd cmd, struct ieee80211_sub_if_data *sdata, | 329 | enum set_key_cmd cmd, struct ieee80211_sub_if_data *sdata, |
328 | struct ieee80211_sta *sta, | 330 | struct ieee80211_sta *sta, |
329 | struct ieee80211_key_conf *key, int ret), | 331 | struct ieee80211_key_conf *key), |
330 | 332 | ||
331 | TP_ARGS(local, cmd, sdata, sta, key, ret), | 333 | TP_ARGS(local, cmd, sdata, sta, key), |
332 | 334 | ||
333 | TP_STRUCT__entry( | 335 | TP_STRUCT__entry( |
334 | LOCAL_ENTRY | 336 | LOCAL_ENTRY |
@@ -338,7 +340,6 @@ TRACE_EVENT(drv_set_key, | |||
338 | __field(u8, hw_key_idx) | 340 | __field(u8, hw_key_idx) |
339 | __field(u8, flags) | 341 | __field(u8, flags) |
340 | __field(s8, keyidx) | 342 | __field(s8, keyidx) |
341 | __field(int, ret) | ||
342 | ), | 343 | ), |
343 | 344 | ||
344 | TP_fast_assign( | 345 | TP_fast_assign( |
@@ -349,12 +350,11 @@ TRACE_EVENT(drv_set_key, | |||
349 | __entry->flags = key->flags; | 350 | __entry->flags = key->flags; |
350 | __entry->keyidx = key->keyidx; | 351 | __entry->keyidx = key->keyidx; |
351 | __entry->hw_key_idx = key->hw_key_idx; | 352 | __entry->hw_key_idx = key->hw_key_idx; |
352 | __entry->ret = ret; | ||
353 | ), | 353 | ), |
354 | 354 | ||
355 | TP_printk( | 355 | TP_printk( |
356 | LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ret:%d", | 356 | LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT, |
357 | LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ret | 357 | LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG |
358 | ) | 358 | ) |
359 | ); | 359 | ); |
360 | 360 | ||
@@ -389,25 +389,23 @@ TRACE_EVENT(drv_update_tkip_key, | |||
389 | TRACE_EVENT(drv_hw_scan, | 389 | TRACE_EVENT(drv_hw_scan, |
390 | TP_PROTO(struct ieee80211_local *local, | 390 | TP_PROTO(struct ieee80211_local *local, |
391 | struct ieee80211_sub_if_data *sdata, | 391 | struct ieee80211_sub_if_data *sdata, |
392 | struct cfg80211_scan_request *req, int ret), | 392 | struct cfg80211_scan_request *req), |
393 | 393 | ||
394 | TP_ARGS(local, sdata, req, ret), | 394 | TP_ARGS(local, sdata, req), |
395 | 395 | ||
396 | TP_STRUCT__entry( | 396 | TP_STRUCT__entry( |
397 | LOCAL_ENTRY | 397 | LOCAL_ENTRY |
398 | VIF_ENTRY | 398 | VIF_ENTRY |
399 | __field(int, ret) | ||
400 | ), | 399 | ), |
401 | 400 | ||
402 | TP_fast_assign( | 401 | TP_fast_assign( |
403 | LOCAL_ASSIGN; | 402 | LOCAL_ASSIGN; |
404 | VIF_ASSIGN; | 403 | VIF_ASSIGN; |
405 | __entry->ret = ret; | ||
406 | ), | 404 | ), |
407 | 405 | ||
408 | TP_printk( | 406 | TP_printk( |
409 | LOCAL_PR_FMT VIF_PR_FMT " ret:%d", | 407 | LOCAL_PR_FMT VIF_PR_FMT, |
410 | LOCAL_PR_ARG,VIF_PR_ARG, __entry->ret | 408 | LOCAL_PR_ARG,VIF_PR_ARG |
411 | ) | 409 | ) |
412 | ); | 410 | ); |
413 | 411 | ||
@@ -504,48 +502,44 @@ TRACE_EVENT(drv_get_tkip_seq, | |||
504 | ); | 502 | ); |
505 | 503 | ||
506 | TRACE_EVENT(drv_set_rts_threshold, | 504 | TRACE_EVENT(drv_set_rts_threshold, |
507 | TP_PROTO(struct ieee80211_local *local, u32 value, int ret), | 505 | TP_PROTO(struct ieee80211_local *local, u32 value), |
508 | 506 | ||
509 | TP_ARGS(local, value, ret), | 507 | TP_ARGS(local, value), |
510 | 508 | ||
511 | TP_STRUCT__entry( | 509 | TP_STRUCT__entry( |
512 | LOCAL_ENTRY | 510 | LOCAL_ENTRY |
513 | __field(u32, value) | 511 | __field(u32, value) |
514 | __field(int, ret) | ||
515 | ), | 512 | ), |
516 | 513 | ||
517 | TP_fast_assign( | 514 | TP_fast_assign( |
518 | LOCAL_ASSIGN; | 515 | LOCAL_ASSIGN; |
519 | __entry->ret = ret; | ||
520 | __entry->value = value; | 516 | __entry->value = value; |
521 | ), | 517 | ), |
522 | 518 | ||
523 | TP_printk( | 519 | TP_printk( |
524 | LOCAL_PR_FMT " value:%d ret:%d", | 520 | LOCAL_PR_FMT " value:%d", |
525 | LOCAL_PR_ARG, __entry->value, __entry->ret | 521 | LOCAL_PR_ARG, __entry->value |
526 | ) | 522 | ) |
527 | ); | 523 | ); |
528 | 524 | ||
529 | TRACE_EVENT(drv_set_coverage_class, | 525 | TRACE_EVENT(drv_set_coverage_class, |
530 | TP_PROTO(struct ieee80211_local *local, u8 value, int ret), | 526 | TP_PROTO(struct ieee80211_local *local, u8 value), |
531 | 527 | ||
532 | TP_ARGS(local, value, ret), | 528 | TP_ARGS(local, value), |
533 | 529 | ||
534 | TP_STRUCT__entry( | 530 | TP_STRUCT__entry( |
535 | LOCAL_ENTRY | 531 | LOCAL_ENTRY |
536 | __field(u8, value) | 532 | __field(u8, value) |
537 | __field(int, ret) | ||
538 | ), | 533 | ), |
539 | 534 | ||
540 | TP_fast_assign( | 535 | TP_fast_assign( |
541 | LOCAL_ASSIGN; | 536 | LOCAL_ASSIGN; |
542 | __entry->ret = ret; | ||
543 | __entry->value = value; | 537 | __entry->value = value; |
544 | ), | 538 | ), |
545 | 539 | ||
546 | TP_printk( | 540 | TP_printk( |
547 | LOCAL_PR_FMT " value:%d ret:%d", | 541 | LOCAL_PR_FMT " value:%d", |
548 | LOCAL_PR_ARG, __entry->value, __entry->ret | 542 | LOCAL_PR_ARG, __entry->value |
549 | ) | 543 | ) |
550 | ); | 544 | ); |
551 | 545 | ||
@@ -580,27 +574,25 @@ TRACE_EVENT(drv_sta_notify, | |||
580 | TRACE_EVENT(drv_sta_add, | 574 | TRACE_EVENT(drv_sta_add, |
581 | TP_PROTO(struct ieee80211_local *local, | 575 | TP_PROTO(struct ieee80211_local *local, |
582 | struct ieee80211_sub_if_data *sdata, | 576 | struct ieee80211_sub_if_data *sdata, |
583 | struct ieee80211_sta *sta, int ret), | 577 | struct ieee80211_sta *sta), |
584 | 578 | ||
585 | TP_ARGS(local, sdata, sta, ret), | 579 | TP_ARGS(local, sdata, sta), |
586 | 580 | ||
587 | TP_STRUCT__entry( | 581 | TP_STRUCT__entry( |
588 | LOCAL_ENTRY | 582 | LOCAL_ENTRY |
589 | VIF_ENTRY | 583 | VIF_ENTRY |
590 | STA_ENTRY | 584 | STA_ENTRY |
591 | __field(int, ret) | ||
592 | ), | 585 | ), |
593 | 586 | ||
594 | TP_fast_assign( | 587 | TP_fast_assign( |
595 | LOCAL_ASSIGN; | 588 | LOCAL_ASSIGN; |
596 | VIF_ASSIGN; | 589 | VIF_ASSIGN; |
597 | STA_ASSIGN; | 590 | STA_ASSIGN; |
598 | __entry->ret = ret; | ||
599 | ), | 591 | ), |
600 | 592 | ||
601 | TP_printk( | 593 | TP_printk( |
602 | LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " ret:%d", | 594 | LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT, |
603 | LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->ret | 595 | LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG |
604 | ) | 596 | ) |
605 | ); | 597 | ); |
606 | 598 | ||
@@ -631,10 +623,9 @@ TRACE_EVENT(drv_sta_remove, | |||
631 | 623 | ||
632 | TRACE_EVENT(drv_conf_tx, | 624 | TRACE_EVENT(drv_conf_tx, |
633 | TP_PROTO(struct ieee80211_local *local, u16 queue, | 625 | TP_PROTO(struct ieee80211_local *local, u16 queue, |
634 | const struct ieee80211_tx_queue_params *params, | 626 | const struct ieee80211_tx_queue_params *params), |
635 | int ret), | ||
636 | 627 | ||
637 | TP_ARGS(local, queue, params, ret), | 628 | TP_ARGS(local, queue, params), |
638 | 629 | ||
639 | TP_STRUCT__entry( | 630 | TP_STRUCT__entry( |
640 | LOCAL_ENTRY | 631 | LOCAL_ENTRY |
@@ -643,13 +634,11 @@ TRACE_EVENT(drv_conf_tx, | |||
643 | __field(u16, cw_min) | 634 | __field(u16, cw_min) |
644 | __field(u16, cw_max) | 635 | __field(u16, cw_max) |
645 | __field(u8, aifs) | 636 | __field(u8, aifs) |
646 | __field(int, ret) | ||
647 | ), | 637 | ), |
648 | 638 | ||
649 | TP_fast_assign( | 639 | TP_fast_assign( |
650 | LOCAL_ASSIGN; | 640 | LOCAL_ASSIGN; |
651 | __entry->queue = queue; | 641 | __entry->queue = queue; |
652 | __entry->ret = ret; | ||
653 | __entry->txop = params->txop; | 642 | __entry->txop = params->txop; |
654 | __entry->cw_max = params->cw_max; | 643 | __entry->cw_max = params->cw_max; |
655 | __entry->cw_min = params->cw_min; | 644 | __entry->cw_min = params->cw_min; |
@@ -657,29 +646,27 @@ TRACE_EVENT(drv_conf_tx, | |||
657 | ), | 646 | ), |
658 | 647 | ||
659 | TP_printk( | 648 | TP_printk( |
660 | LOCAL_PR_FMT " queue:%d ret:%d", | 649 | LOCAL_PR_FMT " queue:%d", |
661 | LOCAL_PR_ARG, __entry->queue, __entry->ret | 650 | LOCAL_PR_ARG, __entry->queue |
662 | ) | 651 | ) |
663 | ); | 652 | ); |
664 | 653 | ||
665 | TRACE_EVENT(drv_get_tsf, | 654 | TRACE_EVENT(drv_get_tsf, |
666 | TP_PROTO(struct ieee80211_local *local, u64 ret), | 655 | TP_PROTO(struct ieee80211_local *local), |
667 | 656 | ||
668 | TP_ARGS(local, ret), | 657 | TP_ARGS(local), |
669 | 658 | ||
670 | TP_STRUCT__entry( | 659 | TP_STRUCT__entry( |
671 | LOCAL_ENTRY | 660 | LOCAL_ENTRY |
672 | __field(u64, ret) | ||
673 | ), | 661 | ), |
674 | 662 | ||
675 | TP_fast_assign( | 663 | TP_fast_assign( |
676 | LOCAL_ASSIGN; | 664 | LOCAL_ASSIGN; |
677 | __entry->ret = ret; | ||
678 | ), | 665 | ), |
679 | 666 | ||
680 | TP_printk( | 667 | TP_printk( |
681 | LOCAL_PR_FMT " ret:%llu", | 668 | LOCAL_PR_FMT, |
682 | LOCAL_PR_ARG, (unsigned long long)__entry->ret | 669 | LOCAL_PR_ARG |
683 | ) | 670 | ) |
684 | ); | 671 | ); |
685 | 672 | ||
@@ -723,23 +710,21 @@ TRACE_EVENT(drv_reset_tsf, | |||
723 | ); | 710 | ); |
724 | 711 | ||
725 | TRACE_EVENT(drv_tx_last_beacon, | 712 | TRACE_EVENT(drv_tx_last_beacon, |
726 | TP_PROTO(struct ieee80211_local *local, int ret), | 713 | TP_PROTO(struct ieee80211_local *local), |
727 | 714 | ||
728 | TP_ARGS(local, ret), | 715 | TP_ARGS(local), |
729 | 716 | ||
730 | TP_STRUCT__entry( | 717 | TP_STRUCT__entry( |
731 | LOCAL_ENTRY | 718 | LOCAL_ENTRY |
732 | __field(int, ret) | ||
733 | ), | 719 | ), |
734 | 720 | ||
735 | TP_fast_assign( | 721 | TP_fast_assign( |
736 | LOCAL_ASSIGN; | 722 | LOCAL_ASSIGN; |
737 | __entry->ret = ret; | ||
738 | ), | 723 | ), |
739 | 724 | ||
740 | TP_printk( | 725 | TP_printk( |
741 | LOCAL_PR_FMT " ret:%d", | 726 | LOCAL_PR_FMT, |
742 | LOCAL_PR_ARG, __entry->ret | 727 | LOCAL_PR_ARG |
743 | ) | 728 | ) |
744 | ); | 729 | ); |
745 | 730 | ||
@@ -748,9 +733,9 @@ TRACE_EVENT(drv_ampdu_action, | |||
748 | struct ieee80211_sub_if_data *sdata, | 733 | struct ieee80211_sub_if_data *sdata, |
749 | enum ieee80211_ampdu_mlme_action action, | 734 | enum ieee80211_ampdu_mlme_action action, |
750 | struct ieee80211_sta *sta, u16 tid, | 735 | struct ieee80211_sta *sta, u16 tid, |
751 | u16 *ssn, int ret), | 736 | u16 *ssn), |
752 | 737 | ||
753 | TP_ARGS(local, sdata, action, sta, tid, ssn, ret), | 738 | TP_ARGS(local, sdata, action, sta, tid, ssn), |
754 | 739 | ||
755 | TP_STRUCT__entry( | 740 | TP_STRUCT__entry( |
756 | LOCAL_ENTRY | 741 | LOCAL_ENTRY |
@@ -758,7 +743,6 @@ TRACE_EVENT(drv_ampdu_action, | |||
758 | __field(u32, action) | 743 | __field(u32, action) |
759 | __field(u16, tid) | 744 | __field(u16, tid) |
760 | __field(u16, ssn) | 745 | __field(u16, ssn) |
761 | __field(int, ret) | ||
762 | VIF_ENTRY | 746 | VIF_ENTRY |
763 | ), | 747 | ), |
764 | 748 | ||
@@ -766,15 +750,14 @@ TRACE_EVENT(drv_ampdu_action, | |||
766 | LOCAL_ASSIGN; | 750 | LOCAL_ASSIGN; |
767 | VIF_ASSIGN; | 751 | VIF_ASSIGN; |
768 | STA_ASSIGN; | 752 | STA_ASSIGN; |
769 | __entry->ret = ret; | ||
770 | __entry->action = action; | 753 | __entry->action = action; |
771 | __entry->tid = tid; | 754 | __entry->tid = tid; |
772 | __entry->ssn = ssn ? *ssn : 0; | 755 | __entry->ssn = ssn ? *ssn : 0; |
773 | ), | 756 | ), |
774 | 757 | ||
775 | TP_printk( | 758 | TP_printk( |
776 | LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d ret:%d", | 759 | LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d", |
777 | LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid, __entry->ret | 760 | LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action, __entry->tid |
778 | ) | 761 | ) |
779 | ); | 762 | ); |
780 | 763 | ||
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index 2ab106a0a491..be928ef7ef51 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c | |||
@@ -6,7 +6,7 @@ | |||
6 | * Copyright 2005-2006, Devicescape Software, Inc. | 6 | * Copyright 2005-2006, Devicescape Software, Inc. |
7 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> | 7 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> |
8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> | 8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> |
9 | * Copyright 2007-2008, Intel Corporation | 9 | * Copyright 2007-2010, Intel Corporation |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
12 | * it under the terms of the GNU General Public License version 2 as | 12 | * it under the terms of the GNU General Public License version 2 as |
@@ -105,6 +105,8 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta) | |||
105 | { | 105 | { |
106 | int i; | 106 | int i; |
107 | 107 | ||
108 | cancel_work_sync(&sta->ampdu_mlme.work); | ||
109 | |||
108 | for (i = 0; i < STA_TID_NUM; i++) { | 110 | for (i = 0; i < STA_TID_NUM; i++) { |
109 | __ieee80211_stop_tx_ba_session(sta, i, WLAN_BACK_INITIATOR); | 111 | __ieee80211_stop_tx_ba_session(sta, i, WLAN_BACK_INITIATOR); |
110 | __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT, | 112 | __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT, |
@@ -112,6 +114,43 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta) | |||
112 | } | 114 | } |
113 | } | 115 | } |
114 | 116 | ||
117 | void ieee80211_ba_session_work(struct work_struct *work) | ||
118 | { | ||
119 | struct sta_info *sta = | ||
120 | container_of(work, struct sta_info, ampdu_mlme.work); | ||
121 | struct tid_ampdu_tx *tid_tx; | ||
122 | int tid; | ||
123 | |||
124 | /* | ||
125 | * When this flag is set, new sessions should be | ||
126 | * blocked, and existing sessions will be torn | ||
127 | * down by the code that set the flag, so this | ||
128 | * need not run. | ||
129 | */ | ||
130 | if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) | ||
131 | return; | ||
132 | |||
133 | mutex_lock(&sta->ampdu_mlme.mtx); | ||
134 | for (tid = 0; tid < STA_TID_NUM; tid++) { | ||
135 | if (test_and_clear_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired)) | ||
136 | ___ieee80211_stop_rx_ba_session( | ||
137 | sta, tid, WLAN_BACK_RECIPIENT, | ||
138 | WLAN_REASON_QSTA_TIMEOUT); | ||
139 | |||
140 | tid_tx = sta->ampdu_mlme.tid_tx[tid]; | ||
141 | if (!tid_tx) | ||
142 | continue; | ||
143 | |||
144 | if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) | ||
145 | ieee80211_tx_ba_session_handle_start(sta, tid); | ||
146 | else if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP, | ||
147 | &tid_tx->state)) | ||
148 | ___ieee80211_stop_tx_ba_session(sta, tid, | ||
149 | WLAN_BACK_INITIATOR); | ||
150 | } | ||
151 | mutex_unlock(&sta->ampdu_mlme.mtx); | ||
152 | } | ||
153 | |||
115 | void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, | 154 | void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, |
116 | const u8 *da, u16 tid, | 155 | const u8 *da, u16 tid, |
117 | u16 initiator, u16 reason_code) | 156 | u16 initiator, u16 reason_code) |
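The new ieee80211_ba_session_work() above runs in process context and is driven by other contexts marking work to do and queueing the per-station work item; an RX reorder timer, for instance, would only flag the TID as expired rather than tearing the session down in timer context. A rough sketch of that producer side (the timer callback and TID lookup are assumptions; only the tid_rx_timer_expired bitmap and the work item appear in this patch):

	/* Hedged sketch of handing session teardown off to the work item. */
	static void rx_agg_session_timer_expired(unsigned long data)   /* hypothetical */
	{
		struct sta_info *sta = (struct sta_info *) data;
		u16 tid = tid_for_this_timer(sta);                     /* hypothetical helper */

		/* defer the actual teardown to process context */
		set_bit(tid, sta->ampdu_mlme.tid_rx_timer_expired);
		ieee80211_queue_work(&sta->local->hw, &sta->ampdu_mlme.work);
	}
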
@@ -176,13 +215,8 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata, | |||
176 | 215 | ||
177 | if (initiator == WLAN_BACK_INITIATOR) | 216 | if (initiator == WLAN_BACK_INITIATOR) |
178 | __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0); | 217 | __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0); |
179 | else { /* WLAN_BACK_RECIPIENT */ | 218 | else |
180 | spin_lock_bh(&sta->lock); | 219 | __ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_RECIPIENT); |
181 | if (sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK) | ||
182 | ___ieee80211_stop_tx_ba_session(sta, tid, | ||
183 | WLAN_BACK_RECIPIENT); | ||
184 | spin_unlock_bh(&sta->lock); | ||
185 | } | ||
186 | } | 220 | } |
187 | 221 | ||
188 | int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata, | 222 | int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata, |
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index d7a96ced2c83..d4e84b22a66d 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c | |||
@@ -172,11 +172,13 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata, | |||
172 | rcu_assign_pointer(ifibss->presp, skb); | 172 | rcu_assign_pointer(ifibss->presp, skb); |
173 | 173 | ||
174 | sdata->vif.bss_conf.beacon_int = beacon_int; | 174 | sdata->vif.bss_conf.beacon_int = beacon_int; |
175 | sdata->vif.bss_conf.basic_rates = basic_rates; | ||
175 | bss_change = BSS_CHANGED_BEACON_INT; | 176 | bss_change = BSS_CHANGED_BEACON_INT; |
176 | bss_change |= ieee80211_reset_erp_info(sdata); | 177 | bss_change |= ieee80211_reset_erp_info(sdata); |
177 | bss_change |= BSS_CHANGED_BSSID; | 178 | bss_change |= BSS_CHANGED_BSSID; |
178 | bss_change |= BSS_CHANGED_BEACON; | 179 | bss_change |= BSS_CHANGED_BEACON; |
179 | bss_change |= BSS_CHANGED_BEACON_ENABLED; | 180 | bss_change |= BSS_CHANGED_BEACON_ENABLED; |
181 | bss_change |= BSS_CHANGED_BASIC_RATES; | ||
180 | bss_change |= BSS_CHANGED_IBSS; | 182 | bss_change |= BSS_CHANGED_IBSS; |
181 | sdata->vif.bss_conf.ibss_joined = true; | 183 | sdata->vif.bss_conf.ibss_joined = true; |
182 | ieee80211_bss_info_change_notify(sdata, bss_change); | 184 | ieee80211_bss_info_change_notify(sdata, bss_change); |
@@ -529,7 +531,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata) | |||
529 | sdata->drop_unencrypted = 0; | 531 | sdata->drop_unencrypted = 0; |
530 | 532 | ||
531 | __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int, | 533 | __ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int, |
532 | ifibss->channel, 3, /* first two are basic */ | 534 | ifibss->channel, ifibss->basic_rates, |
533 | capability, 0); | 535 | capability, 0); |
534 | } | 536 | } |
535 | 537 | ||
@@ -727,8 +729,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
727 | ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, true); | 729 | ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems, true); |
728 | } | 730 | } |
729 | 731 | ||
730 | static void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | 732 | void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, |
731 | struct sk_buff *skb) | 733 | struct sk_buff *skb) |
732 | { | 734 | { |
733 | struct ieee80211_rx_status *rx_status; | 735 | struct ieee80211_rx_status *rx_status; |
734 | struct ieee80211_mgmt *mgmt; | 736 | struct ieee80211_mgmt *mgmt; |
@@ -754,33 +756,11 @@ static void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
754 | ieee80211_rx_mgmt_auth_ibss(sdata, mgmt, skb->len); | 756 | ieee80211_rx_mgmt_auth_ibss(sdata, mgmt, skb->len); |
755 | break; | 757 | break; |
756 | } | 758 | } |
757 | |||
758 | kfree_skb(skb); | ||
759 | } | 759 | } |
760 | 760 | ||
761 | static void ieee80211_ibss_work(struct work_struct *work) | 761 | void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata) |
762 | { | 762 | { |
763 | struct ieee80211_sub_if_data *sdata = | 763 | struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; |
764 | container_of(work, struct ieee80211_sub_if_data, u.ibss.work); | ||
765 | struct ieee80211_local *local = sdata->local; | ||
766 | struct ieee80211_if_ibss *ifibss; | ||
767 | struct sk_buff *skb; | ||
768 | |||
769 | if (WARN_ON(local->suspended)) | ||
770 | return; | ||
771 | |||
772 | if (!ieee80211_sdata_running(sdata)) | ||
773 | return; | ||
774 | |||
775 | if (local->scanning) | ||
776 | return; | ||
777 | |||
778 | if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_ADHOC)) | ||
779 | return; | ||
780 | ifibss = &sdata->u.ibss; | ||
781 | |||
782 | while ((skb = skb_dequeue(&ifibss->skb_queue))) | ||
783 | ieee80211_ibss_rx_queued_mgmt(sdata, skb); | ||
784 | 764 | ||
785 | if (!test_and_clear_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request)) | 765 | if (!test_and_clear_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request)) |
786 | return; | 766 | return; |
@@ -804,7 +784,7 @@ static void ieee80211_queue_ibss_work(struct ieee80211_sub_if_data *sdata) | |||
804 | struct ieee80211_local *local = sdata->local; | 784 | struct ieee80211_local *local = sdata->local; |
805 | 785 | ||
806 | set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request); | 786 | set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request); |
807 | ieee80211_queue_work(&local->hw, &ifibss->work); | 787 | ieee80211_queue_work(&local->hw, &sdata->work); |
808 | } | 788 | } |
809 | 789 | ||
810 | static void ieee80211_ibss_timer(unsigned long data) | 790 | static void ieee80211_ibss_timer(unsigned long data) |
@@ -827,7 +807,6 @@ void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata) | |||
827 | { | 807 | { |
828 | struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; | 808 | struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; |
829 | 809 | ||
830 | cancel_work_sync(&ifibss->work); | ||
831 | if (del_timer_sync(&ifibss->timer)) | 810 | if (del_timer_sync(&ifibss->timer)) |
832 | ifibss->timer_running = true; | 811 | ifibss->timer_running = true; |
833 | } | 812 | } |
@@ -847,10 +826,8 @@ void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata) | |||
847 | { | 826 | { |
848 | struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; | 827 | struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; |
849 | 828 | ||
850 | INIT_WORK(&ifibss->work, ieee80211_ibss_work); | ||
851 | setup_timer(&ifibss->timer, ieee80211_ibss_timer, | 829 | setup_timer(&ifibss->timer, ieee80211_ibss_timer, |
852 | (unsigned long) sdata); | 830 | (unsigned long) sdata); |
853 | skb_queue_head_init(&ifibss->skb_queue); | ||
854 | } | 831 | } |
855 | 832 | ||
856 | /* scan finished notification */ | 833 | /* scan finished notification */ |
@@ -872,32 +849,6 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local) | |||
872 | mutex_unlock(&local->iflist_mtx); | 849 | mutex_unlock(&local->iflist_mtx); |
873 | } | 850 | } |
874 | 851 | ||
875 | ieee80211_rx_result | ||
876 | ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) | ||
877 | { | ||
878 | struct ieee80211_local *local = sdata->local; | ||
879 | struct ieee80211_mgmt *mgmt; | ||
880 | u16 fc; | ||
881 | |||
882 | if (skb->len < 24) | ||
883 | return RX_DROP_MONITOR; | ||
884 | |||
885 | mgmt = (struct ieee80211_mgmt *) skb->data; | ||
886 | fc = le16_to_cpu(mgmt->frame_control); | ||
887 | |||
888 | switch (fc & IEEE80211_FCTL_STYPE) { | ||
889 | case IEEE80211_STYPE_PROBE_RESP: | ||
890 | case IEEE80211_STYPE_BEACON: | ||
891 | case IEEE80211_STYPE_PROBE_REQ: | ||
892 | case IEEE80211_STYPE_AUTH: | ||
893 | skb_queue_tail(&sdata->u.ibss.skb_queue, skb); | ||
894 | ieee80211_queue_work(&local->hw, &sdata->u.ibss.work); | ||
895 | return RX_QUEUED; | ||
896 | } | ||
897 | |||
898 | return RX_DROP_MONITOR; | ||
899 | } | ||
900 | |||
901 | int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, | 852 | int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, |
902 | struct cfg80211_ibss_params *params) | 853 | struct cfg80211_ibss_params *params) |
903 | { | 854 | { |
@@ -910,6 +861,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, | |||
910 | sdata->u.ibss.fixed_bssid = false; | 861 | sdata->u.ibss.fixed_bssid = false; |
911 | 862 | ||
912 | sdata->u.ibss.privacy = params->privacy; | 863 | sdata->u.ibss.privacy = params->privacy; |
864 | sdata->u.ibss.basic_rates = params->basic_rates; | ||
913 | 865 | ||
914 | sdata->vif.bss_conf.beacon_int = params->beacon_interval; | 866 | sdata->vif.bss_conf.beacon_int = params->beacon_interval; |
915 | 867 | ||
@@ -957,7 +909,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, | |||
957 | ieee80211_recalc_idle(sdata->local); | 909 | ieee80211_recalc_idle(sdata->local); |
958 | 910 | ||
959 | set_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request); | 911 | set_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request); |
960 | ieee80211_queue_work(&sdata->local->hw, &sdata->u.ibss.work); | 912 | ieee80211_queue_work(&sdata->local->hw, &sdata->work); |
961 | 913 | ||
962 | return 0; | 914 | return 0; |
963 | } | 915 | } |
@@ -965,10 +917,35 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, | |||
965 | int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) | 917 | int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) |
966 | { | 918 | { |
967 | struct sk_buff *skb; | 919 | struct sk_buff *skb; |
920 | struct ieee80211_if_ibss *ifibss = &sdata->u.ibss; | ||
921 | struct ieee80211_local *local = sdata->local; | ||
922 | struct cfg80211_bss *cbss; | ||
923 | u16 capability; | ||
924 | int active_ibss = 0; | ||
925 | |||
926 | active_ibss = ieee80211_sta_active_ibss(sdata); | ||
927 | |||
928 | if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) { | ||
929 | capability = WLAN_CAPABILITY_IBSS; | ||
930 | |||
931 | if (ifibss->privacy) | ||
932 | capability |= WLAN_CAPABILITY_PRIVACY; | ||
933 | |||
934 | cbss = cfg80211_get_bss(local->hw.wiphy, ifibss->channel, | ||
935 | ifibss->bssid, ifibss->ssid, | ||
936 | ifibss->ssid_len, WLAN_CAPABILITY_IBSS | | ||
937 | WLAN_CAPABILITY_PRIVACY, | ||
938 | capability); | ||
939 | |||
940 | if (cbss) { | ||
941 | cfg80211_unlink_bss(local->hw.wiphy, cbss); | ||
942 | cfg80211_put_bss(cbss); | ||
943 | } | ||
944 | } | ||
968 | 945 | ||
969 | del_timer_sync(&sdata->u.ibss.timer); | 946 | del_timer_sync(&sdata->u.ibss.timer); |
970 | clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request); | 947 | clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request); |
971 | cancel_work_sync(&sdata->u.ibss.work); | 948 | cancel_work_sync(&sdata->work); |
972 | clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request); | 949 | clear_bit(IEEE80211_IBSS_REQ_RUN, &sdata->u.ibss.request); |
973 | 950 | ||
974 | sta_info_flush(sdata->local, sdata); | 951 | sta_info_flush(sdata->local, sdata); |
@@ -983,7 +960,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) | |||
983 | synchronize_rcu(); | 960 | synchronize_rcu(); |
984 | kfree_skb(skb); | 961 | kfree_skb(skb); |
985 | 962 | ||
986 | skb_queue_purge(&sdata->u.ibss.skb_queue); | 963 | skb_queue_purge(&sdata->skb_queue); |
987 | memset(sdata->u.ibss.bssid, 0, ETH_ALEN); | 964 | memset(sdata->u.ibss.bssid, 0, ETH_ALEN); |
988 | sdata->u.ibss.ssid_len = 0; | 965 | sdata->u.ibss.ssid_len = 0; |
989 | 966 | ||
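The IBSS changes above stop using a per-mode work struct and skb queue: queued management frames and the periodic IBSS work now go through the interface-wide sdata->skb_queue and sdata->work (added in the ieee80211_i.h hunks that follow), which is also why ieee80211_ibss_rx_queued_mgmt() and ieee80211_ibss_work() become externally callable and no longer free the skb themselves. A hedged sketch of what a common per-interface dispatcher could look like; the function name and exact dispatch are assumptions, not taken from this diff:

	/* Illustrative only: a per-interface worker draining the shared queue and
	 * then calling into the mode-specific handler. */
	static void iface_work_sketch(struct work_struct *work)       /* hypothetical */
	{
		struct ieee80211_sub_if_data *sdata =
			container_of(work, struct ieee80211_sub_if_data, work);
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&sdata->skb_queue))) {
			/* mode-specific handling, e.g. for IBSS interfaces */
			if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
				ieee80211_ibss_rx_queued_mgmt(sdata, skb);
			kfree_skb(skb);
		}

		if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
			ieee80211_ibss_work(sdata);
	}
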
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 1e779e833473..6f905f153ed7 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -325,7 +325,6 @@ struct ieee80211_if_managed { | |||
325 | struct timer_list conn_mon_timer; | 325 | struct timer_list conn_mon_timer; |
326 | struct timer_list bcn_mon_timer; | 326 | struct timer_list bcn_mon_timer; |
327 | struct timer_list chswitch_timer; | 327 | struct timer_list chswitch_timer; |
328 | struct work_struct work; | ||
329 | struct work_struct monitor_work; | 328 | struct work_struct monitor_work; |
330 | struct work_struct chswitch_work; | 329 | struct work_struct chswitch_work; |
331 | struct work_struct beacon_connection_loss_work; | 330 | struct work_struct beacon_connection_loss_work; |
@@ -340,8 +339,6 @@ struct ieee80211_if_managed { | |||
340 | 339 | ||
341 | u16 aid; | 340 | u16 aid; |
342 | 341 | ||
343 | struct sk_buff_head skb_queue; | ||
344 | |||
345 | unsigned long timers_running; /* used for quiesce/restart */ | 342 | unsigned long timers_running; /* used for quiesce/restart */ |
346 | bool powersave; /* powersave requested for this iface */ | 343 | bool powersave; /* powersave requested for this iface */ |
347 | enum ieee80211_smps_mode req_smps, /* requested smps mode */ | 344 | enum ieee80211_smps_mode req_smps, /* requested smps mode */ |
@@ -386,13 +383,12 @@ enum ieee80211_ibss_request { | |||
386 | 383 | ||
387 | struct ieee80211_if_ibss { | 384 | struct ieee80211_if_ibss { |
388 | struct timer_list timer; | 385 | struct timer_list timer; |
389 | struct work_struct work; | ||
390 | |||
391 | struct sk_buff_head skb_queue; | ||
392 | 386 | ||
393 | unsigned long request; | 387 | unsigned long request; |
394 | unsigned long last_scan_completed; | 388 | unsigned long last_scan_completed; |
395 | 389 | ||
390 | u32 basic_rates; | ||
391 | |||
396 | bool timer_running; | 392 | bool timer_running; |
397 | 393 | ||
398 | bool fixed_bssid; | 394 | bool fixed_bssid; |
@@ -416,11 +412,9 @@ struct ieee80211_if_ibss { | |||
416 | }; | 412 | }; |
417 | 413 | ||
418 | struct ieee80211_if_mesh { | 414 | struct ieee80211_if_mesh { |
419 | struct work_struct work; | ||
420 | struct timer_list housekeeping_timer; | 415 | struct timer_list housekeeping_timer; |
421 | struct timer_list mesh_path_timer; | 416 | struct timer_list mesh_path_timer; |
422 | struct timer_list mesh_path_root_timer; | 417 | struct timer_list mesh_path_root_timer; |
423 | struct sk_buff_head skb_queue; | ||
424 | 418 | ||
425 | unsigned long timers_running; | 419 | unsigned long timers_running; |
426 | 420 | ||
@@ -517,6 +511,11 @@ struct ieee80211_sub_if_data { | |||
517 | 511 | ||
518 | u16 sequence_number; | 512 | u16 sequence_number; |
519 | 513 | ||
514 | struct work_struct work; | ||
515 | struct sk_buff_head skb_queue; | ||
516 | |||
517 | bool arp_filter_state; | ||
518 | |||
520 | /* | 519 | /* |
521 | * AP this belongs to: self in AP mode and | 520 | * AP this belongs to: self in AP mode and |
522 | * corresponding AP in VLAN mode, NULL for | 521 | * corresponding AP in VLAN mode, NULL for |
@@ -569,11 +568,15 @@ ieee80211_sdata_set_mesh_id(struct ieee80211_sub_if_data *sdata, | |||
569 | #endif | 568 | #endif |
570 | } | 569 | } |
571 | 570 | ||
571 | enum sdata_queue_type { | ||
572 | IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0, | ||
573 | IEEE80211_SDATA_QUEUE_AGG_START = 1, | ||
574 | IEEE80211_SDATA_QUEUE_AGG_STOP = 2, | ||
575 | }; | ||
576 | |||
572 | enum { | 577 | enum { |
573 | IEEE80211_RX_MSG = 1, | 578 | IEEE80211_RX_MSG = 1, |
574 | IEEE80211_TX_STATUS_MSG = 2, | 579 | IEEE80211_TX_STATUS_MSG = 2, |
575 | IEEE80211_DELBA_MSG = 3, | ||
576 | IEEE80211_ADDBA_MSG = 4, | ||
577 | }; | 580 | }; |
578 | 581 | ||
579 | enum queue_stop_reason { | 582 | enum queue_stop_reason { |
@@ -724,13 +727,7 @@ struct ieee80211_local { | |||
724 | struct sk_buff_head pending[IEEE80211_MAX_QUEUES]; | 727 | struct sk_buff_head pending[IEEE80211_MAX_QUEUES]; |
725 | struct tasklet_struct tx_pending_tasklet; | 728 | struct tasklet_struct tx_pending_tasklet; |
726 | 729 | ||
727 | /* | 730 | atomic_t agg_queue_stop[IEEE80211_MAX_QUEUES]; |
728 | * This lock is used to prevent concurrent A-MPDU | ||
729 | * session start/stop processing, this thus also | ||
730 | * synchronises the ->ampdu_action() callback to | ||
731 | * drivers and limits it to one at a time. | ||
732 | */ | ||
733 | spinlock_t ampdu_lock; | ||
734 | 731 | ||
735 | /* number of interfaces with corresponding IFF_ flags */ | 732 | /* number of interfaces with corresponding IFF_ flags */ |
736 | atomic_t iff_allmultis, iff_promiscs; | 733 | atomic_t iff_allmultis, iff_promiscs; |
@@ -853,6 +850,12 @@ struct ieee80211_local { | |||
853 | struct notifier_block network_latency_notifier; | 850 | struct notifier_block network_latency_notifier; |
854 | struct notifier_block ifa_notifier; | 851 | struct notifier_block ifa_notifier; |
855 | 852 | ||
853 | /* | ||
854 | * The dynamic ps timeout configured from user space via WEXT - | ||
855 | * this will override whatever chosen by mac80211 internally. | ||
856 | */ | ||
857 | int dynamic_ps_forced_timeout; | ||
858 | |||
856 | int user_power_level; /* in dBm */ | 859 | int user_power_level; /* in dBm */ |
857 | int power_constr_level; /* in dBm */ | 860 | int power_constr_level; /* in dBm */ |
858 | 861 | ||
@@ -875,9 +878,8 @@ IEEE80211_DEV_TO_SUB_IF(struct net_device *dev) | |||
875 | return netdev_priv(dev); | 878 | return netdev_priv(dev); |
876 | } | 879 | } |
877 | 880 | ||
878 | /* this struct represents 802.11n's RA/TID combination along with our vif */ | 881 | /* this struct represents 802.11n's RA/TID combination */ |
879 | struct ieee80211_ra_tid { | 882 | struct ieee80211_ra_tid { |
880 | struct ieee80211_vif *vif; | ||
881 | u8 ra[ETH_ALEN]; | 883 | u8 ra[ETH_ALEN]; |
882 | u16 tid; | 884 | u16 tid; |
883 | }; | 885 | }; |
@@ -986,13 +988,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, | |||
986 | int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, | 988 | int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, |
987 | struct cfg80211_disassoc_request *req, | 989 | struct cfg80211_disassoc_request *req, |
988 | void *cookie); | 990 | void *cookie); |
989 | int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata, | ||
990 | struct ieee80211_channel *chan, | ||
991 | enum nl80211_channel_type channel_type, | ||
992 | bool channel_type_valid, | ||
993 | const u8 *buf, size_t len, u64 *cookie); | ||
994 | ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, | ||
995 | struct sk_buff *skb); | ||
996 | void ieee80211_send_pspoll(struct ieee80211_local *local, | 991 | void ieee80211_send_pspoll(struct ieee80211_local *local, |
997 | struct ieee80211_sub_if_data *sdata); | 992 | struct ieee80211_sub_if_data *sdata); |
998 | void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency); | 993 | void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency); |
@@ -1005,12 +1000,13 @@ void ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, | |||
1005 | u64 timestamp); | 1000 | u64 timestamp); |
1006 | void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata); | 1001 | void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata); |
1007 | void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata); | 1002 | void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata); |
1003 | void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata); | ||
1004 | void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | ||
1005 | struct sk_buff *skb); | ||
1008 | 1006 | ||
1009 | /* IBSS code */ | 1007 | /* IBSS code */ |
1010 | void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); | 1008 | void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); |
1011 | void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata); | 1009 | void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata); |
1012 | ieee80211_rx_result | ||
1013 | ieee80211_ibss_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); | ||
1014 | struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, | 1010 | struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, |
1015 | u8 *bssid, u8 *addr, u32 supp_rates, | 1011 | u8 *bssid, u8 *addr, u32 supp_rates, |
1016 | gfp_t gfp); | 1012 | gfp_t gfp); |
@@ -1019,6 +1015,14 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, | |||
1019 | int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata); | 1015 | int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata); |
1020 | void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata); | 1016 | void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata); |
1021 | void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata); | 1017 | void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata); |
1018 | void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata); | ||
1019 | void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | ||
1020 | struct sk_buff *skb); | ||
1021 | |||
1022 | /* mesh code */ | ||
1023 | void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata); | ||
1024 | void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | ||
1025 | struct sk_buff *skb); | ||
1022 | 1026 | ||
1023 | /* scan/BSS handling */ | 1027 | /* scan/BSS handling */ |
1024 | void ieee80211_scan_work(struct work_struct *work); | 1028 | void ieee80211_scan_work(struct work_struct *work); |
@@ -1102,6 +1106,8 @@ int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata, | |||
1102 | enum ieee80211_smps_mode smps, const u8 *da, | 1106 | enum ieee80211_smps_mode smps, const u8 *da, |
1103 | const u8 *bssid); | 1107 | const u8 *bssid); |
1104 | 1108 | ||
1109 | void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, | ||
1110 | u16 initiator, u16 reason); | ||
1105 | void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, | 1111 | void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, |
1106 | u16 initiator, u16 reason); | 1112 | u16 initiator, u16 reason); |
1107 | void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta); | 1113 | void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta); |
@@ -1121,6 +1127,10 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | |||
1121 | enum ieee80211_back_parties initiator); | 1127 | enum ieee80211_back_parties initiator); |
1122 | int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | 1128 | int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, |
1123 | enum ieee80211_back_parties initiator); | 1129 | enum ieee80211_back_parties initiator); |
1130 | void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid); | ||
1131 | void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid); | ||
1132 | void ieee80211_ba_session_work(struct work_struct *work); | ||
1133 | void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid); | ||
1124 | 1134 | ||
1125 | /* Spectrum management */ | 1135 | /* Spectrum management */ |
1126 | void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, | 1136 | void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, |
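Note on the header changes above: the per-mode work structs and skb queues are replaced by a single sdata->work / sdata->skb_queue pair, and enum sdata_queue_type lets aggregation start/stop requests share that queue with queued management frames — the request is tagged via the skb's pkt_type and the RA/TID pair is stashed in the control buffer (which is why struct ieee80211_ra_tid no longer needs the vif pointer). The sketch below is a plain userspace model of that tagging scheme, using a fixed-size cb array in place of a real sk_buff; all names are illustrative.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

enum queue_type { QUEUE_TYPE_FRAME = 0, QUEUE_AGG_START = 1, QUEUE_AGG_STOP = 2 };

struct ra_tid { uint8_t ra[6]; uint16_t tid; };

/* stand-in for struct sk_buff: only the fields the scheme relies on */
struct fake_skb {
	int pkt_type;      /* reused as the sdata queue type */
	char cb[48];       /* control buffer, large enough for struct ra_tid */
};

static void queue_agg_start(struct fake_skb *skb, const uint8_t *ra, uint16_t tid)
{
	struct ra_tid *rt = (struct ra_tid *)skb->cb;

	skb->pkt_type = QUEUE_AGG_START;
	memcpy(rt->ra, ra, 6);
	rt->tid = tid;
}

int main(void)
{
	const uint8_t peer[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	struct fake_skb skb = { 0 };

	queue_agg_start(&skb, peer, 5);
	if (skb.pkt_type == QUEUE_AGG_START) {
		const struct ra_tid *rt = (const struct ra_tid *)skb.cb;
		printf("start aggregation with %02x:...:%02x, tid %u\n",
		       rt->ra[0], rt->ra[5], (unsigned)rt->tid);
	}
	return 0;
}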
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 1afa9ec81fe8..910729fc18cd 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -339,7 +339,6 @@ static int ieee80211_stop(struct net_device *dev) | |||
339 | { | 339 | { |
340 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); | 340 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
341 | struct ieee80211_local *local = sdata->local; | 341 | struct ieee80211_local *local = sdata->local; |
342 | struct sta_info *sta; | ||
343 | unsigned long flags; | 342 | unsigned long flags; |
344 | struct sk_buff *skb, *tmp; | 343 | struct sk_buff *skb, *tmp; |
345 | u32 hw_reconf_flags = 0; | 344 | u32 hw_reconf_flags = 0; |
@@ -356,18 +355,6 @@ static int ieee80211_stop(struct net_device *dev) | |||
356 | ieee80211_work_purge(sdata); | 355 | ieee80211_work_purge(sdata); |
357 | 356 | ||
358 | /* | 357 | /* |
359 | * Now delete all active aggregation sessions. | ||
360 | */ | ||
361 | rcu_read_lock(); | ||
362 | |||
363 | list_for_each_entry_rcu(sta, &local->sta_list, list) { | ||
364 | if (sta->sdata == sdata) | ||
365 | ieee80211_sta_tear_down_BA_sessions(sta); | ||
366 | } | ||
367 | |||
368 | rcu_read_unlock(); | ||
369 | |||
370 | /* | ||
371 | * Remove all stations associated with this interface. | 358 | * Remove all stations associated with this interface. |
372 | * | 359 | * |
373 | * This must be done before calling ops->remove_interface() | 360 | * This must be done before calling ops->remove_interface() |
@@ -473,27 +460,14 @@ static int ieee80211_stop(struct net_device *dev) | |||
473 | * whether the interface is running, which, at this point, | 460 | * whether the interface is running, which, at this point, |
474 | * it no longer is. | 461 | * it no longer is. |
475 | */ | 462 | */ |
476 | cancel_work_sync(&sdata->u.mgd.work); | ||
477 | cancel_work_sync(&sdata->u.mgd.chswitch_work); | 463 | cancel_work_sync(&sdata->u.mgd.chswitch_work); |
478 | cancel_work_sync(&sdata->u.mgd.monitor_work); | 464 | cancel_work_sync(&sdata->u.mgd.monitor_work); |
479 | cancel_work_sync(&sdata->u.mgd.beacon_connection_loss_work); | 465 | cancel_work_sync(&sdata->u.mgd.beacon_connection_loss_work); |
480 | 466 | ||
481 | /* | ||
482 | * When we get here, the interface is marked down. | ||
483 | * Call synchronize_rcu() to wait for the RX path | ||
484 | * should it be using the interface and enqueuing | ||
485 | * frames at this very time on another CPU. | ||
486 | */ | ||
487 | synchronize_rcu(); | ||
488 | skb_queue_purge(&sdata->u.mgd.skb_queue); | ||
489 | /* fall through */ | 467 | /* fall through */ |
490 | case NL80211_IFTYPE_ADHOC: | 468 | case NL80211_IFTYPE_ADHOC: |
491 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { | 469 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC) |
492 | del_timer_sync(&sdata->u.ibss.timer); | 470 | del_timer_sync(&sdata->u.ibss.timer); |
493 | cancel_work_sync(&sdata->u.ibss.work); | ||
494 | synchronize_rcu(); | ||
495 | skb_queue_purge(&sdata->u.ibss.skb_queue); | ||
496 | } | ||
497 | /* fall through */ | 471 | /* fall through */ |
498 | case NL80211_IFTYPE_MESH_POINT: | 472 | case NL80211_IFTYPE_MESH_POINT: |
499 | if (ieee80211_vif_is_mesh(&sdata->vif)) { | 473 | if (ieee80211_vif_is_mesh(&sdata->vif)) { |
@@ -508,6 +482,16 @@ static int ieee80211_stop(struct net_device *dev) | |||
508 | } | 482 | } |
509 | /* fall through */ | 483 | /* fall through */ |
510 | default: | 484 | default: |
485 | flush_work(&sdata->work); | ||
486 | /* | ||
487 | * When we get here, the interface is marked down. | ||
488 | * Call synchronize_rcu() to wait for the RX path | ||
489 | * should it be using the interface and enqueuing | ||
490 | * frames at this very time on another CPU. | ||
491 | */ | ||
492 | synchronize_rcu(); | ||
493 | skb_queue_purge(&sdata->skb_queue); | ||
494 | |||
511 | if (local->scan_sdata == sdata) | 495 | if (local->scan_sdata == sdata) |
512 | ieee80211_scan_cancel(local); | 496 | ieee80211_scan_cancel(local); |
513 | 497 | ||
@@ -717,6 +701,136 @@ static void ieee80211_if_setup(struct net_device *dev) | |||
717 | dev->destructor = free_netdev; | 701 | dev->destructor = free_netdev; |
718 | } | 702 | } |
719 | 703 | ||
704 | static void ieee80211_iface_work(struct work_struct *work) | ||
705 | { | ||
706 | struct ieee80211_sub_if_data *sdata = | ||
707 | container_of(work, struct ieee80211_sub_if_data, work); | ||
708 | struct ieee80211_local *local = sdata->local; | ||
709 | struct sk_buff *skb; | ||
710 | struct sta_info *sta; | ||
711 | struct ieee80211_ra_tid *ra_tid; | ||
712 | |||
713 | if (!ieee80211_sdata_running(sdata)) | ||
714 | return; | ||
715 | |||
716 | if (local->scanning) | ||
717 | return; | ||
718 | |||
719 | /* | ||
720 | * ieee80211_queue_work() should have picked up most cases, | ||
721 | * here we'll pick the rest. | ||
722 | */ | ||
723 | if (WARN(local->suspended, | ||
724 | "interface work scheduled while going to suspend\n")) | ||
725 | return; | ||
726 | |||
727 | /* first process frames */ | ||
728 | while ((skb = skb_dequeue(&sdata->skb_queue))) { | ||
729 | struct ieee80211_mgmt *mgmt = (void *)skb->data; | ||
730 | |||
731 | if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) { | ||
732 | ra_tid = (void *)&skb->cb; | ||
733 | ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra, | ||
734 | ra_tid->tid); | ||
735 | } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) { | ||
736 | ra_tid = (void *)&skb->cb; | ||
737 | ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra, | ||
738 | ra_tid->tid); | ||
739 | } else if (ieee80211_is_action(mgmt->frame_control) && | ||
740 | mgmt->u.action.category == WLAN_CATEGORY_BACK) { | ||
741 | int len = skb->len; | ||
742 | |||
743 | mutex_lock(&local->sta_mtx); | ||
744 | sta = sta_info_get(sdata, mgmt->sa); | ||
745 | if (sta) { | ||
746 | switch (mgmt->u.action.u.addba_req.action_code) { | ||
747 | case WLAN_ACTION_ADDBA_REQ: | ||
748 | ieee80211_process_addba_request( | ||
749 | local, sta, mgmt, len); | ||
750 | break; | ||
751 | case WLAN_ACTION_ADDBA_RESP: | ||
752 | ieee80211_process_addba_resp(local, sta, | ||
753 | mgmt, len); | ||
754 | break; | ||
755 | case WLAN_ACTION_DELBA: | ||
756 | ieee80211_process_delba(sdata, sta, | ||
757 | mgmt, len); | ||
758 | break; | ||
759 | default: | ||
760 | WARN_ON(1); | ||
761 | break; | ||
762 | } | ||
763 | } | ||
764 | mutex_unlock(&local->sta_mtx); | ||
765 | } else if (ieee80211_is_data_qos(mgmt->frame_control)) { | ||
766 | struct ieee80211_hdr *hdr = (void *)mgmt; | ||
767 | /* | ||
768 | * So the frame isn't mgmt, but frame_control | ||
769 | * is at the right place anyway, of course, so | ||
770 | * the if statement is correct. | ||
771 | * | ||
772 | * Warn if we have other data frame types here, | ||
773 | * they must not get here. | ||
774 | */ | ||
775 | WARN_ON(hdr->frame_control & | ||
776 | cpu_to_le16(IEEE80211_STYPE_NULLFUNC)); | ||
777 | WARN_ON(!(hdr->seq_ctrl & | ||
778 | cpu_to_le16(IEEE80211_SCTL_FRAG))); | ||
779 | /* | ||
780 | * This was a fragment of a frame, received while | ||
781 | * a block-ack session was active. That cannot be | ||
782 | * right, so terminate the session. | ||
783 | */ | ||
784 | mutex_lock(&local->sta_mtx); | ||
785 | sta = sta_info_get(sdata, mgmt->sa); | ||
786 | if (sta) { | ||
787 | u16 tid = *ieee80211_get_qos_ctl(hdr) & | ||
788 | IEEE80211_QOS_CTL_TID_MASK; | ||
789 | |||
790 | __ieee80211_stop_rx_ba_session( | ||
791 | sta, tid, WLAN_BACK_RECIPIENT, | ||
792 | WLAN_REASON_QSTA_REQUIRE_SETUP); | ||
793 | } | ||
794 | mutex_unlock(&local->sta_mtx); | ||
795 | } else switch (sdata->vif.type) { | ||
796 | case NL80211_IFTYPE_STATION: | ||
797 | ieee80211_sta_rx_queued_mgmt(sdata, skb); | ||
798 | break; | ||
799 | case NL80211_IFTYPE_ADHOC: | ||
800 | ieee80211_ibss_rx_queued_mgmt(sdata, skb); | ||
801 | break; | ||
802 | case NL80211_IFTYPE_MESH_POINT: | ||
803 | if (!ieee80211_vif_is_mesh(&sdata->vif)) | ||
804 | break; | ||
805 | ieee80211_mesh_rx_queued_mgmt(sdata, skb); | ||
806 | break; | ||
807 | default: | ||
808 | WARN(1, "frame for unexpected interface type"); | ||
809 | break; | ||
810 | } | ||
811 | |||
812 | kfree_skb(skb); | ||
813 | } | ||
814 | |||
815 | /* then other type-dependent work */ | ||
816 | switch (sdata->vif.type) { | ||
817 | case NL80211_IFTYPE_STATION: | ||
818 | ieee80211_sta_work(sdata); | ||
819 | break; | ||
820 | case NL80211_IFTYPE_ADHOC: | ||
821 | ieee80211_ibss_work(sdata); | ||
822 | break; | ||
823 | case NL80211_IFTYPE_MESH_POINT: | ||
824 | if (!ieee80211_vif_is_mesh(&sdata->vif)) | ||
825 | break; | ||
826 | ieee80211_mesh_work(sdata); | ||
827 | break; | ||
828 | default: | ||
829 | break; | ||
830 | } | ||
831 | } | ||
832 | |||
833 | |||
720 | /* | 834 | /* |
721 | * Helper function to initialise an interface to a specific type. | 835 | * Helper function to initialise an interface to a specific type. |
722 | */ | 836 | */ |
@@ -734,6 +848,9 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, | |||
734 | /* only monitor differs */ | 848 | /* only monitor differs */ |
735 | sdata->dev->type = ARPHRD_ETHER; | 849 | sdata->dev->type = ARPHRD_ETHER; |
736 | 850 | ||
851 | skb_queue_head_init(&sdata->skb_queue); | ||
852 | INIT_WORK(&sdata->work, ieee80211_iface_work); | ||
853 | |||
737 | switch (type) { | 854 | switch (type) { |
738 | case NL80211_IFTYPE_AP: | 855 | case NL80211_IFTYPE_AP: |
739 | skb_queue_head_init(&sdata->u.ap.ps_bc_buf); | 856 | skb_queue_head_init(&sdata->u.ap.ps_bc_buf); |
@@ -959,6 +1076,9 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, | |||
959 | sdata->wdev.wiphy = local->hw.wiphy; | 1076 | sdata->wdev.wiphy = local->hw.wiphy; |
960 | sdata->local = local; | 1077 | sdata->local = local; |
961 | sdata->dev = ndev; | 1078 | sdata->dev = ndev; |
1079 | #ifdef CONFIG_INET | ||
1080 | sdata->arp_filter_state = true; | ||
1081 | #endif | ||
962 | 1082 | ||
963 | for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) | 1083 | for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) |
964 | skb_queue_head_init(&sdata->fragments[i].skb_list); | 1084 | skb_queue_head_init(&sdata->fragments[i].skb_list); |
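Note on the iface.c changes above: ieee80211_iface_work() becomes the single work handler for every interface. It first drains sdata->skb_queue, branching on the queue type and on frame contents (aggregation start/stop requests, block-ack action frames, stray QoS fragments, queued management frames dispatched by interface type), and only then runs the per-mode housekeeping. The compressed userspace model below shows that two-stage dispatch under the same assumed queue-type constants as the previous sketch; it is not the kernel code path.

#include <stdio.h>

enum queue_type { QUEUE_TYPE_FRAME = 0, QUEUE_AGG_START = 1, QUEUE_AGG_STOP = 2 };
enum iface_type { IFACE_STATION, IFACE_ADHOC, IFACE_MESH };

struct queued_item { int type; };  /* stand-in for a queued sk_buff */

static void handle_item(enum iface_type vif, const struct queued_item *it)
{
	/* stage 1: queue-type specific handling */
	switch (it->type) {
	case QUEUE_AGG_START:
		printf("run start-TX-aggregation callback\n");
		return;
	case QUEUE_AGG_STOP:
		printf("run stop-TX-aggregation callback\n");
		return;
	default:
		break;
	}

	/* stage 2: ordinary management frames go to the per-mode handler */
	switch (vif) {
	case IFACE_STATION: printf("station mgmt frame\n"); break;
	case IFACE_ADHOC:   printf("IBSS mgmt frame\n");    break;
	case IFACE_MESH:    printf("mesh mgmt frame\n");    break;
	}
}

int main(void)
{
	struct queued_item queue[] = {
		{ QUEUE_AGG_START }, { QUEUE_TYPE_FRAME }, { QUEUE_AGG_STOP },
	};

	for (unsigned i = 0; i < sizeof(queue) / sizeof(queue[0]); i++)
		handle_item(IFACE_STATION, &queue[i]);
	return 0;
}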
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index d0d9001a4a6a..50d1cff23d8e 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -273,7 +273,7 @@ struct ieee80211_key *ieee80211_key_alloc(enum ieee80211_key_alg alg, | |||
273 | key->conf.iv_len = CCMP_HDR_LEN; | 273 | key->conf.iv_len = CCMP_HDR_LEN; |
274 | key->conf.icv_len = CCMP_MIC_LEN; | 274 | key->conf.icv_len = CCMP_MIC_LEN; |
275 | if (seq) { | 275 | if (seq) { |
276 | for (i = 0; i < NUM_RX_DATA_QUEUES; i++) | 276 | for (i = 0; i < NUM_RX_DATA_QUEUES + 1; i++) |
277 | for (j = 0; j < CCMP_PN_LEN; j++) | 277 | for (j = 0; j < CCMP_PN_LEN; j++) |
278 | key->u.ccmp.rx_pn[i][j] = | 278 | key->u.ccmp.rx_pn[i][j] = |
279 | seq[CCMP_PN_LEN - j - 1]; | 279 | seq[CCMP_PN_LEN - j - 1]; |
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 9996e3be6e63..a3849fa3fce8 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -77,7 +77,13 @@ struct ieee80211_key { | |||
77 | } tkip; | 77 | } tkip; |
78 | struct { | 78 | struct { |
79 | u8 tx_pn[6]; | 79 | u8 tx_pn[6]; |
80 | u8 rx_pn[NUM_RX_DATA_QUEUES][6]; | 80 | /* |
81 | * Last received packet number. The first | ||
82 | * NUM_RX_DATA_QUEUES counters are used with Data | ||
83 | * frames and the last counter is used with Robust | ||
84 | * Management frames. | ||
85 | */ | ||
86 | u8 rx_pn[NUM_RX_DATA_QUEUES + 1][6]; | ||
81 | struct crypto_cipher *tfm; | 87 | struct crypto_cipher *tfm; |
82 | u32 replays; /* dot11RSNAStatsCCMPReplays */ | 88 | u32 replays; /* dot11RSNAStatsCCMPReplays */ |
83 | /* scratch buffers for virt_to_page() (crypto API) */ | 89 | /* scratch buffers for virt_to_page() (crypto API) */ |
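Note on the key.c/key.h hunks above: the CCMP receive PN array grows by one slot — the first NUM_RX_DATA_QUEUES counters still track Data frames per queue, and the extra last slot tracks robust management frames. The initialisation loop also shows that a supplied sequence is stored byte-reversed. The fragment below models both details in plain C with a small made-up queue count, purely to illustrate the indexing and byte order.

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define NUM_RX_DATA_QUEUES 4   /* illustrative value, not the kernel constant */
#define CCMP_PN_LEN 6

static uint8_t rx_pn[NUM_RX_DATA_QUEUES + 1][CCMP_PN_LEN];

/* Data frames index by their queue/TID; robust mgmt frames use the last slot */
static int pn_queue_index(bool is_robust_mgmt, int data_queue)
{
	return is_robust_mgmt ? NUM_RX_DATA_QUEUES : data_queue;
}

/* store a supplied PN byte-reversed, as the initialisation loop in key.c does */
static void load_pn(int queue, const uint8_t seq[CCMP_PN_LEN])
{
	for (int j = 0; j < CCMP_PN_LEN; j++)
		rx_pn[queue][j] = seq[CCMP_PN_LEN - j - 1];
}

int main(void)
{
	const uint8_t seq[CCMP_PN_LEN] = { 1, 2, 3, 4, 5, 6 };

	load_pn(pn_queue_index(true, 0), seq);
	printf("mgmt PN slot %d, first byte %u\n",
	       pn_queue_index(true, 0), rx_pn[NUM_RX_DATA_QUEUES][0]);
	return 0;
}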
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 88b671a16a41..edf7aff93268 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/rtnetlink.h> | 20 | #include <linux/rtnetlink.h> |
21 | #include <linux/bitmap.h> | 21 | #include <linux/bitmap.h> |
22 | #include <linux/pm_qos_params.h> | 22 | #include <linux/pm_qos_params.h> |
23 | #include <linux/inetdevice.h> | ||
23 | #include <net/net_namespace.h> | 24 | #include <net/net_namespace.h> |
24 | #include <net/cfg80211.h> | 25 | #include <net/cfg80211.h> |
25 | 26 | ||
@@ -259,7 +260,6 @@ static void ieee80211_tasklet_handler(unsigned long data) | |||
259 | { | 260 | { |
260 | struct ieee80211_local *local = (struct ieee80211_local *) data; | 261 | struct ieee80211_local *local = (struct ieee80211_local *) data; |
261 | struct sk_buff *skb; | 262 | struct sk_buff *skb; |
262 | struct ieee80211_ra_tid *ra_tid; | ||
263 | 263 | ||
264 | while ((skb = skb_dequeue(&local->skb_queue)) || | 264 | while ((skb = skb_dequeue(&local->skb_queue)) || |
265 | (skb = skb_dequeue(&local->skb_queue_unreliable))) { | 265 | (skb = skb_dequeue(&local->skb_queue_unreliable))) { |
@@ -274,18 +274,6 @@ static void ieee80211_tasklet_handler(unsigned long data) | |||
274 | skb->pkt_type = 0; | 274 | skb->pkt_type = 0; |
275 | ieee80211_tx_status(local_to_hw(local), skb); | 275 | ieee80211_tx_status(local_to_hw(local), skb); |
276 | break; | 276 | break; |
277 | case IEEE80211_DELBA_MSG: | ||
278 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | ||
279 | ieee80211_stop_tx_ba_cb(ra_tid->vif, ra_tid->ra, | ||
280 | ra_tid->tid); | ||
281 | dev_kfree_skb(skb); | ||
282 | break; | ||
283 | case IEEE80211_ADDBA_MSG: | ||
284 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | ||
285 | ieee80211_start_tx_ba_cb(ra_tid->vif, ra_tid->ra, | ||
286 | ra_tid->tid); | ||
287 | dev_kfree_skb(skb); | ||
288 | break ; | ||
289 | default: | 277 | default: |
290 | WARN(1, "mac80211: Packet is of unknown type %d\n", | 278 | WARN(1, "mac80211: Packet is of unknown type %d\n", |
291 | skb->pkt_type); | 279 | skb->pkt_type); |
@@ -330,23 +318,6 @@ static void ieee80211_recalc_smps_work(struct work_struct *work) | |||
330 | } | 318 | } |
331 | 319 | ||
332 | #ifdef CONFIG_INET | 320 | #ifdef CONFIG_INET |
333 | int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata) | ||
334 | { | ||
335 | struct in_device *idev; | ||
336 | int ret = 0; | ||
337 | |||
338 | BUG_ON(!sdata); | ||
339 | ASSERT_RTNL(); | ||
340 | |||
341 | idev = sdata->dev->ip_ptr; | ||
342 | if (!idev) | ||
343 | return 0; | ||
344 | |||
345 | ret = drv_configure_arp_filter(sdata->local, &sdata->vif, | ||
346 | idev->ifa_list); | ||
347 | return ret; | ||
348 | } | ||
349 | |||
350 | static int ieee80211_ifa_changed(struct notifier_block *nb, | 321 | static int ieee80211_ifa_changed(struct notifier_block *nb, |
351 | unsigned long data, void *arg) | 322 | unsigned long data, void *arg) |
352 | { | 323 | { |
@@ -356,8 +327,11 @@ static int ieee80211_ifa_changed(struct notifier_block *nb, | |||
356 | ifa_notifier); | 327 | ifa_notifier); |
357 | struct net_device *ndev = ifa->ifa_dev->dev; | 328 | struct net_device *ndev = ifa->ifa_dev->dev; |
358 | struct wireless_dev *wdev = ndev->ieee80211_ptr; | 329 | struct wireless_dev *wdev = ndev->ieee80211_ptr; |
330 | struct in_device *idev; | ||
359 | struct ieee80211_sub_if_data *sdata; | 331 | struct ieee80211_sub_if_data *sdata; |
332 | struct ieee80211_bss_conf *bss_conf; | ||
360 | struct ieee80211_if_managed *ifmgd; | 333 | struct ieee80211_if_managed *ifmgd; |
334 | int c = 0; | ||
361 | 335 | ||
362 | if (!netif_running(ndev)) | 336 | if (!netif_running(ndev)) |
363 | return NOTIFY_DONE; | 337 | return NOTIFY_DONE; |
@@ -369,17 +343,44 @@ static int ieee80211_ifa_changed(struct notifier_block *nb, | |||
369 | if (wdev->wiphy != local->hw.wiphy) | 343 | if (wdev->wiphy != local->hw.wiphy) |
370 | return NOTIFY_DONE; | 344 | return NOTIFY_DONE; |
371 | 345 | ||
372 | /* We are concerned about IP addresses only when associated */ | ||
373 | sdata = IEEE80211_DEV_TO_SUB_IF(ndev); | 346 | sdata = IEEE80211_DEV_TO_SUB_IF(ndev); |
347 | bss_conf = &sdata->vif.bss_conf; | ||
374 | 348 | ||
375 | /* ARP filtering is only supported in managed mode */ | 349 | /* ARP filtering is only supported in managed mode */ |
376 | if (sdata->vif.type != NL80211_IFTYPE_STATION) | 350 | if (sdata->vif.type != NL80211_IFTYPE_STATION) |
377 | return NOTIFY_DONE; | 351 | return NOTIFY_DONE; |
378 | 352 | ||
353 | idev = sdata->dev->ip_ptr; | ||
354 | if (!idev) | ||
355 | return NOTIFY_DONE; | ||
356 | |||
379 | ifmgd = &sdata->u.mgd; | 357 | ifmgd = &sdata->u.mgd; |
380 | mutex_lock(&ifmgd->mtx); | 358 | mutex_lock(&ifmgd->mtx); |
381 | if (ifmgd->associated) | 359 | |
382 | ieee80211_set_arp_filter(sdata); | 360 | /* Copy the addresses to the bss_conf list */ |
361 | ifa = idev->ifa_list; | ||
362 | while (c < IEEE80211_BSS_ARP_ADDR_LIST_LEN && ifa) { | ||
363 | bss_conf->arp_addr_list[c] = ifa->ifa_address; | ||
364 | ifa = ifa->ifa_next; | ||
365 | c++; | ||
366 | } | ||
367 | |||
368 | /* If not all addresses fit the list, disable filtering */ | ||
369 | if (ifa) { | ||
370 | sdata->arp_filter_state = false; | ||
371 | c = 0; | ||
372 | } else { | ||
373 | sdata->arp_filter_state = true; | ||
374 | } | ||
375 | bss_conf->arp_addr_cnt = c; | ||
376 | |||
377 | /* Configure driver only if associated */ | ||
378 | if (ifmgd->associated) { | ||
379 | bss_conf->arp_filter_enabled = sdata->arp_filter_state; | ||
380 | ieee80211_bss_info_change_notify(sdata, | ||
381 | BSS_CHANGED_ARP_FILTER); | ||
382 | } | ||
383 | |||
383 | mutex_unlock(&ifmgd->mtx); | 384 | mutex_unlock(&ifmgd->mtx); |
384 | 385 | ||
385 | return NOTIFY_DONE; | 386 | return NOTIFY_DONE; |
@@ -476,8 +477,10 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
476 | 477 | ||
477 | sta_info_init(local); | 478 | sta_info_init(local); |
478 | 479 | ||
479 | for (i = 0; i < IEEE80211_MAX_QUEUES; i++) | 480 | for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { |
480 | skb_queue_head_init(&local->pending[i]); | 481 | skb_queue_head_init(&local->pending[i]); |
482 | atomic_set(&local->agg_queue_stop[i], 0); | ||
483 | } | ||
481 | tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending, | 484 | tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending, |
482 | (unsigned long)local); | 485 | (unsigned long)local); |
483 | 486 | ||
@@ -488,8 +491,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len, | |||
488 | skb_queue_head_init(&local->skb_queue); | 491 | skb_queue_head_init(&local->skb_queue); |
489 | skb_queue_head_init(&local->skb_queue_unreliable); | 492 | skb_queue_head_init(&local->skb_queue_unreliable); |
490 | 493 | ||
491 | spin_lock_init(&local->ampdu_lock); | ||
492 | |||
493 | return local_to_hw(local); | 494 | return local_to_hw(local); |
494 | } | 495 | } |
495 | EXPORT_SYMBOL(ieee80211_alloc_hw); | 496 | EXPORT_SYMBOL(ieee80211_alloc_hw); |
@@ -629,7 +630,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) | |||
629 | 630 | ||
630 | local->hw.conf.listen_interval = local->hw.max_listen_interval; | 631 | local->hw.conf.listen_interval = local->hw.max_listen_interval; |
631 | 632 | ||
632 | local->hw.conf.dynamic_ps_forced_timeout = -1; | 633 | local->dynamic_ps_forced_timeout = -1; |
633 | 634 | ||
634 | result = sta_info_start(local); | 635 | result = sta_info_start(local); |
635 | if (result < 0) | 636 | if (result < 0) |
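Note on the notifier rework above: the interface's IPv4 addresses are now copied straight into bss_conf->arp_addr_list so the driver can be handed the whole list through BSS_CHANGED_ARP_FILTER; if not all addresses fit, filtering is disabled and the count reset to zero. The helper below reproduces just that copy-or-disable policy over a plain array; the list length and types are stand-ins for the kernel definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ARP_ADDR_LIST_LEN 4   /* stand-in for IEEE80211_BSS_ARP_ADDR_LIST_LEN */

/*
 * Copy up to ARP_ADDR_LIST_LEN addresses into 'list'. Returns the count to
 * program and sets *filter_enabled; overflow disables filtering entirely.
 */
static int build_arp_addr_list(const uint32_t *addrs, int n_addrs,
			       uint32_t *list, bool *filter_enabled)
{
	int c = 0;

	while (c < ARP_ADDR_LIST_LEN && c < n_addrs) {
		list[c] = addrs[c];
		c++;
	}

	if (c < n_addrs) {          /* not everything fit */
		*filter_enabled = false;
		return 0;
	}
	*filter_enabled = true;
	return c;
}

int main(void)
{
	uint32_t have[6] = { 1, 2, 3, 4, 5, 6 }, list[ARP_ADDR_LIST_LEN];
	bool enabled;
	int cnt;

	cnt = build_arp_addr_list(have, 2, list, &enabled);
	printf("2 addrs: count %d, filtering enabled %d\n", cnt, enabled);

	cnt = build_arp_addr_list(have, 6, list, &enabled);
	printf("6 addrs: count %d, filtering enabled %d\n", cnt, enabled);
	return 0;
}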
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index bde81031727a..c8a4f19ed13b 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -54,7 +54,7 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data) | |||
54 | return; | 54 | return; |
55 | } | 55 | } |
56 | 56 | ||
57 | ieee80211_queue_work(&local->hw, &ifmsh->work); | 57 | ieee80211_queue_work(&local->hw, &sdata->work); |
58 | } | 58 | } |
59 | 59 | ||
60 | /** | 60 | /** |
@@ -345,7 +345,7 @@ static void ieee80211_mesh_path_timer(unsigned long data) | |||
345 | return; | 345 | return; |
346 | } | 346 | } |
347 | 347 | ||
348 | ieee80211_queue_work(&local->hw, &ifmsh->work); | 348 | ieee80211_queue_work(&local->hw, &sdata->work); |
349 | } | 349 | } |
350 | 350 | ||
351 | static void ieee80211_mesh_path_root_timer(unsigned long data) | 351 | static void ieee80211_mesh_path_root_timer(unsigned long data) |
@@ -362,7 +362,7 @@ static void ieee80211_mesh_path_root_timer(unsigned long data) | |||
362 | return; | 362 | return; |
363 | } | 363 | } |
364 | 364 | ||
365 | ieee80211_queue_work(&local->hw, &ifmsh->work); | 365 | ieee80211_queue_work(&local->hw, &sdata->work); |
366 | } | 366 | } |
367 | 367 | ||
368 | void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh) | 368 | void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh) |
@@ -484,9 +484,6 @@ void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata) | |||
484 | { | 484 | { |
485 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 485 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
486 | 486 | ||
487 | /* might restart the timer but that doesn't matter */ | ||
488 | cancel_work_sync(&ifmsh->work); | ||
489 | |||
490 | /* use atomic bitops in case both timers fire at the same time */ | 487 | /* use atomic bitops in case both timers fire at the same time */ |
491 | 488 | ||
492 | if (del_timer_sync(&ifmsh->housekeeping_timer)) | 489 | if (del_timer_sync(&ifmsh->housekeeping_timer)) |
@@ -518,7 +515,7 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata) | |||
518 | 515 | ||
519 | set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); | 516 | set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); |
520 | ieee80211_mesh_root_setup(ifmsh); | 517 | ieee80211_mesh_root_setup(ifmsh); |
521 | ieee80211_queue_work(&local->hw, &ifmsh->work); | 518 | ieee80211_queue_work(&local->hw, &sdata->work); |
522 | sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL; | 519 | sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL; |
523 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | | 520 | ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | |
524 | BSS_CHANGED_BEACON_ENABLED | | 521 | BSS_CHANGED_BEACON_ENABLED | |
@@ -536,16 +533,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) | |||
536 | * whether the interface is running, which, at this point, | 533 | * whether the interface is running, which, at this point, |
537 | * it no longer is. | 534 | * it no longer is. |
538 | */ | 535 | */ |
539 | cancel_work_sync(&sdata->u.mesh.work); | 536 | cancel_work_sync(&sdata->work); |
540 | |||
541 | /* | ||
542 | * When we get here, the interface is marked down. | ||
543 | * Call synchronize_rcu() to wait for the RX path | ||
544 | * should it be using the interface and enqueuing | ||
545 | * frames at this very time on another CPU. | ||
546 | */ | ||
547 | rcu_barrier(); /* Wait for RX path and call_rcu()'s */ | ||
548 | skb_queue_purge(&sdata->u.mesh.skb_queue); | ||
549 | } | 537 | } |
550 | 538 | ||
551 | static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, | 539 | static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, |
@@ -608,8 +596,8 @@ static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata, | |||
608 | } | 596 | } |
609 | } | 597 | } |
610 | 598 | ||
611 | static void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | 599 | void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, |
612 | struct sk_buff *skb) | 600 | struct sk_buff *skb) |
613 | { | 601 | { |
614 | struct ieee80211_rx_status *rx_status; | 602 | struct ieee80211_rx_status *rx_status; |
615 | struct ieee80211_if_mesh *ifmsh; | 603 | struct ieee80211_if_mesh *ifmsh; |
@@ -632,26 +620,11 @@ static void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
632 | ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status); | 620 | ieee80211_mesh_rx_mgmt_action(sdata, mgmt, skb->len, rx_status); |
633 | break; | 621 | break; |
634 | } | 622 | } |
635 | |||
636 | kfree_skb(skb); | ||
637 | } | 623 | } |
638 | 624 | ||
639 | static void ieee80211_mesh_work(struct work_struct *work) | 625 | void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata) |
640 | { | 626 | { |
641 | struct ieee80211_sub_if_data *sdata = | ||
642 | container_of(work, struct ieee80211_sub_if_data, u.mesh.work); | ||
643 | struct ieee80211_local *local = sdata->local; | ||
644 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 627 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
645 | struct sk_buff *skb; | ||
646 | |||
647 | if (!ieee80211_sdata_running(sdata)) | ||
648 | return; | ||
649 | |||
650 | if (local->scanning) | ||
651 | return; | ||
652 | |||
653 | while ((skb = skb_dequeue(&ifmsh->skb_queue))) | ||
654 | ieee80211_mesh_rx_queued_mgmt(sdata, skb); | ||
655 | 628 | ||
656 | if (ifmsh->preq_queue_len && | 629 | if (ifmsh->preq_queue_len && |
657 | time_after(jiffies, | 630 | time_after(jiffies, |
@@ -678,7 +651,7 @@ void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) | |||
678 | rcu_read_lock(); | 651 | rcu_read_lock(); |
679 | list_for_each_entry_rcu(sdata, &local->interfaces, list) | 652 | list_for_each_entry_rcu(sdata, &local->interfaces, list) |
680 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 653 | if (ieee80211_vif_is_mesh(&sdata->vif)) |
681 | ieee80211_queue_work(&local->hw, &sdata->u.mesh.work); | 654 | ieee80211_queue_work(&local->hw, &sdata->work); |
682 | rcu_read_unlock(); | 655 | rcu_read_unlock(); |
683 | } | 656 | } |
684 | 657 | ||
@@ -686,11 +659,9 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) | |||
686 | { | 659 | { |
687 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 660 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
688 | 661 | ||
689 | INIT_WORK(&ifmsh->work, ieee80211_mesh_work); | ||
690 | setup_timer(&ifmsh->housekeeping_timer, | 662 | setup_timer(&ifmsh->housekeeping_timer, |
691 | ieee80211_mesh_housekeeping_timer, | 663 | ieee80211_mesh_housekeeping_timer, |
692 | (unsigned long) sdata); | 664 | (unsigned long) sdata); |
693 | skb_queue_head_init(&sdata->u.mesh.skb_queue); | ||
694 | 665 | ||
695 | ifmsh->mshcfg.dot11MeshRetryTimeout = MESH_RET_T; | 666 | ifmsh->mshcfg.dot11MeshRetryTimeout = MESH_RET_T; |
696 | ifmsh->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T; | 667 | ifmsh->mshcfg.dot11MeshConfirmTimeout = MESH_CONF_T; |
@@ -731,29 +702,3 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata) | |||
731 | INIT_LIST_HEAD(&ifmsh->preq_queue.list); | 702 | INIT_LIST_HEAD(&ifmsh->preq_queue.list); |
732 | spin_lock_init(&ifmsh->mesh_preq_queue_lock); | 703 | spin_lock_init(&ifmsh->mesh_preq_queue_lock); |
733 | } | 704 | } |
734 | |||
735 | ieee80211_rx_result | ||
736 | ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb) | ||
737 | { | ||
738 | struct ieee80211_local *local = sdata->local; | ||
739 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | ||
740 | struct ieee80211_mgmt *mgmt; | ||
741 | u16 fc; | ||
742 | |||
743 | if (skb->len < 24) | ||
744 | return RX_DROP_MONITOR; | ||
745 | |||
746 | mgmt = (struct ieee80211_mgmt *) skb->data; | ||
747 | fc = le16_to_cpu(mgmt->frame_control); | ||
748 | |||
749 | switch (fc & IEEE80211_FCTL_STYPE) { | ||
750 | case IEEE80211_STYPE_ACTION: | ||
751 | case IEEE80211_STYPE_PROBE_RESP: | ||
752 | case IEEE80211_STYPE_BEACON: | ||
753 | skb_queue_tail(&ifmsh->skb_queue, skb); | ||
754 | ieee80211_queue_work(&local->hw, &ifmsh->work); | ||
755 | return RX_QUEUED; | ||
756 | } | ||
757 | |||
758 | return RX_CONTINUE; | ||
759 | } | ||
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index c88087f1cd0f..ebd3f1d9d889 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -237,8 +237,6 @@ void ieee80211s_update_metric(struct ieee80211_local *local, | |||
237 | struct sta_info *stainfo, struct sk_buff *skb); | 237 | struct sta_info *stainfo, struct sk_buff *skb); |
238 | void ieee80211s_stop(void); | 238 | void ieee80211s_stop(void); |
239 | void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); | 239 | void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata); |
240 | ieee80211_rx_result | ||
241 | ieee80211_mesh_rx_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); | ||
242 | void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); | 240 | void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); |
243 | void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); | 241 | void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); |
244 | void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); | 242 | void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); |
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 0705018d8d1e..829e08a657d0 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -805,14 +805,14 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags) | |||
805 | spin_unlock(&ifmsh->mesh_preq_queue_lock); | 805 | spin_unlock(&ifmsh->mesh_preq_queue_lock); |
806 | 806 | ||
807 | if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata))) | 807 | if (time_after(jiffies, ifmsh->last_preq + min_preq_int_jiff(sdata))) |
808 | ieee80211_queue_work(&sdata->local->hw, &ifmsh->work); | 808 | ieee80211_queue_work(&sdata->local->hw, &sdata->work); |
809 | 809 | ||
810 | else if (time_before(jiffies, ifmsh->last_preq)) { | 810 | else if (time_before(jiffies, ifmsh->last_preq)) { |
811 | /* avoid long wait if did not send preqs for a long time | 811 | /* avoid long wait if did not send preqs for a long time |
812 | * and jiffies wrapped around | 812 | * and jiffies wrapped around |
813 | */ | 813 | */ |
814 | ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1; | 814 | ifmsh->last_preq = jiffies - min_preq_int_jiff(sdata) - 1; |
815 | ieee80211_queue_work(&sdata->local->hw, &ifmsh->work); | 815 | ieee80211_queue_work(&sdata->local->hw, &sdata->work); |
816 | } else | 816 | } else |
817 | mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq + | 817 | mod_timer(&ifmsh->mesh_path_timer, ifmsh->last_preq + |
818 | min_preq_int_jiff(sdata)); | 818 | min_preq_int_jiff(sdata)); |
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 181ffd6efd81..349e466cf08b 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -315,7 +315,7 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata) | |||
315 | read_unlock(&pathtbl_resize_lock); | 315 | read_unlock(&pathtbl_resize_lock); |
316 | if (grow) { | 316 | if (grow) { |
317 | set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); | 317 | set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); |
318 | ieee80211_queue_work(&local->hw, &ifmsh->work); | 318 | ieee80211_queue_work(&local->hw, &sdata->work); |
319 | } | 319 | } |
320 | return 0; | 320 | return 0; |
321 | 321 | ||
@@ -425,7 +425,7 @@ int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata) | |||
425 | read_unlock(&pathtbl_resize_lock); | 425 | read_unlock(&pathtbl_resize_lock); |
426 | if (grow) { | 426 | if (grow) { |
427 | set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); | 427 | set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); |
428 | ieee80211_queue_work(&local->hw, &ifmsh->work); | 428 | ieee80211_queue_work(&local->hw, &sdata->work); |
429 | } | 429 | } |
430 | return 0; | 430 | return 0; |
431 | 431 | ||
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 8fb85c3a043d..85c3ca33333e 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -561,23 +561,19 @@ void ieee80211_recalc_ps(struct ieee80211_local *local, s32 latency) | |||
561 | beaconint_us = ieee80211_tu_to_usec( | 561 | beaconint_us = ieee80211_tu_to_usec( |
562 | found->vif.bss_conf.beacon_int); | 562 | found->vif.bss_conf.beacon_int); |
563 | 563 | ||
564 | timeout = local->hw.conf.dynamic_ps_forced_timeout; | 564 | timeout = local->dynamic_ps_forced_timeout; |
565 | if (timeout < 0) { | 565 | if (timeout < 0) { |
566 | /* | 566 | /* |
567 | * Go to full PSM if the user configures a very low | ||
568 | * latency requirement. | ||
567 | * The 2 second value is there for compatibility until | 569 | * The 2 second value is there for compatibility until |
568 | * the PM_QOS_NETWORK_LATENCY is configured with real | 570 | * the PM_QOS_NETWORK_LATENCY is configured with real |
569 | * values. | 571 | * values. |
570 | */ | 572 | */ |
571 | if (latency == 2000000000) | 573 | if (latency > 1900000000 && latency != 2000000000) |
572 | timeout = 100; | ||
573 | else if (latency <= 50000) | ||
574 | timeout = 300; | ||
575 | else if (latency <= 100000) | ||
576 | timeout = 100; | ||
577 | else if (latency <= 500000) | ||
578 | timeout = 50; | ||
579 | else | ||
580 | timeout = 0; | 574 | timeout = 0; |
575 | else | ||
576 | timeout = 100; | ||
581 | } | 577 | } |
582 | local->hw.conf.dynamic_ps_timeout = timeout; | 578 | local->hw.conf.dynamic_ps_timeout = timeout; |
583 | 579 | ||
@@ -806,11 +802,12 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, | |||
806 | { | 802 | { |
807 | struct ieee80211_bss *bss = (void *)cbss->priv; | 803 | struct ieee80211_bss *bss = (void *)cbss->priv; |
808 | struct ieee80211_local *local = sdata->local; | 804 | struct ieee80211_local *local = sdata->local; |
805 | struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; | ||
809 | 806 | ||
810 | bss_info_changed |= BSS_CHANGED_ASSOC; | 807 | bss_info_changed |= BSS_CHANGED_ASSOC; |
811 | /* set timing information */ | 808 | /* set timing information */ |
812 | sdata->vif.bss_conf.beacon_int = cbss->beacon_interval; | 809 | bss_conf->beacon_int = cbss->beacon_interval; |
813 | sdata->vif.bss_conf.timestamp = cbss->tsf; | 810 | bss_conf->timestamp = cbss->tsf; |
814 | 811 | ||
815 | bss_info_changed |= BSS_CHANGED_BEACON_INT; | 812 | bss_info_changed |= BSS_CHANGED_BEACON_INT; |
816 | bss_info_changed |= ieee80211_handle_bss_capability(sdata, | 813 | bss_info_changed |= ieee80211_handle_bss_capability(sdata, |
@@ -835,7 +832,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, | |||
835 | 832 | ||
836 | ieee80211_led_assoc(local, 1); | 833 | ieee80211_led_assoc(local, 1); |
837 | 834 | ||
838 | sdata->vif.bss_conf.assoc = 1; | 835 | bss_conf->assoc = 1; |
839 | /* | 836 | /* |
840 | * For now just always ask the driver to update the basic rateset | 837 | * For now just always ask the driver to update the basic rateset |
841 | * when we have associated, we aren't checking whether it actually | 838 | * when we have associated, we aren't checking whether it actually |
@@ -848,9 +845,15 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata, | |||
848 | 845 | ||
849 | /* Tell the driver to monitor connection quality (if supported) */ | 846 | /* Tell the driver to monitor connection quality (if supported) */ |
850 | if ((local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI) && | 847 | if ((local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI) && |
851 | sdata->vif.bss_conf.cqm_rssi_thold) | 848 | bss_conf->cqm_rssi_thold) |
852 | bss_info_changed |= BSS_CHANGED_CQM; | 849 | bss_info_changed |= BSS_CHANGED_CQM; |
853 | 850 | ||
851 | /* Enable ARP filtering */ | ||
852 | if (bss_conf->arp_filter_enabled != sdata->arp_filter_state) { | ||
853 | bss_conf->arp_filter_enabled = sdata->arp_filter_state; | ||
854 | bss_info_changed |= BSS_CHANGED_ARP_FILTER; | ||
855 | } | ||
856 | |||
854 | ieee80211_bss_info_change_notify(sdata, bss_info_changed); | 857 | ieee80211_bss_info_change_notify(sdata, bss_info_changed); |
855 | 858 | ||
856 | mutex_lock(&local->iflist_mtx); | 859 | mutex_lock(&local->iflist_mtx); |
@@ -898,13 +901,13 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, | |||
898 | netif_tx_stop_all_queues(sdata->dev); | 901 | netif_tx_stop_all_queues(sdata->dev); |
899 | netif_carrier_off(sdata->dev); | 902 | netif_carrier_off(sdata->dev); |
900 | 903 | ||
901 | rcu_read_lock(); | 904 | mutex_lock(&local->sta_mtx); |
902 | sta = sta_info_get(sdata, bssid); | 905 | sta = sta_info_get(sdata, bssid); |
903 | if (sta) { | 906 | if (sta) { |
904 | set_sta_flags(sta, WLAN_STA_DISASSOC); | 907 | set_sta_flags(sta, WLAN_STA_BLOCK_BA); |
905 | ieee80211_sta_tear_down_BA_sessions(sta); | 908 | ieee80211_sta_tear_down_BA_sessions(sta); |
906 | } | 909 | } |
907 | rcu_read_unlock(); | 910 | mutex_unlock(&local->sta_mtx); |
908 | 911 | ||
909 | changed |= ieee80211_reset_erp_info(sdata); | 912 | changed |= ieee80211_reset_erp_info(sdata); |
910 | 913 | ||
@@ -932,6 +935,12 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, | |||
932 | 935 | ||
933 | ieee80211_hw_config(local, config_changed); | 936 | ieee80211_hw_config(local, config_changed); |
934 | 937 | ||
938 | /* Disable ARP filtering */ | ||
939 | if (sdata->vif.bss_conf.arp_filter_enabled) { | ||
940 | sdata->vif.bss_conf.arp_filter_enabled = false; | ||
941 | changed |= BSS_CHANGED_ARP_FILTER; | ||
942 | } | ||
943 | |||
935 | /* The BSSID (not really interesting) and HT changed */ | 944 | /* The BSSID (not really interesting) and HT changed */ |
936 | changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT; | 945 | changed |= BSS_CHANGED_BSSID | BSS_CHANGED_HT; |
937 | ieee80211_bss_info_change_notify(sdata, changed); | 946 | ieee80211_bss_info_change_notify(sdata, changed); |
@@ -1633,35 +1642,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, | |||
1633 | ieee80211_bss_info_change_notify(sdata, changed); | 1642 | ieee80211_bss_info_change_notify(sdata, changed); |
1634 | } | 1643 | } |
1635 | 1644 | ||
1636 | ieee80211_rx_result ieee80211_sta_rx_mgmt(struct ieee80211_sub_if_data *sdata, | 1645 | void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, |
1637 | struct sk_buff *skb) | 1646 | struct sk_buff *skb) |
1638 | { | ||
1639 | struct ieee80211_local *local = sdata->local; | ||
1640 | struct ieee80211_mgmt *mgmt; | ||
1641 | u16 fc; | ||
1642 | |||
1643 | if (skb->len < 24) | ||
1644 | return RX_DROP_MONITOR; | ||
1645 | |||
1646 | mgmt = (struct ieee80211_mgmt *) skb->data; | ||
1647 | fc = le16_to_cpu(mgmt->frame_control); | ||
1648 | |||
1649 | switch (fc & IEEE80211_FCTL_STYPE) { | ||
1650 | case IEEE80211_STYPE_PROBE_RESP: | ||
1651 | case IEEE80211_STYPE_BEACON: | ||
1652 | case IEEE80211_STYPE_DEAUTH: | ||
1653 | case IEEE80211_STYPE_DISASSOC: | ||
1654 | case IEEE80211_STYPE_ACTION: | ||
1655 | skb_queue_tail(&sdata->u.mgd.skb_queue, skb); | ||
1656 | ieee80211_queue_work(&local->hw, &sdata->u.mgd.work); | ||
1657 | return RX_QUEUED; | ||
1658 | } | ||
1659 | |||
1660 | return RX_DROP_MONITOR; | ||
1661 | } | ||
1662 | |||
1663 | static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | ||
1664 | struct sk_buff *skb) | ||
1665 | { | 1647 | { |
1666 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 1648 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
1667 | struct ieee80211_rx_status *rx_status; | 1649 | struct ieee80211_rx_status *rx_status; |
@@ -1693,44 +1675,6 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
1693 | break; | 1675 | break; |
1694 | case IEEE80211_STYPE_ACTION: | 1676 | case IEEE80211_STYPE_ACTION: |
1695 | switch (mgmt->u.action.category) { | 1677 | switch (mgmt->u.action.category) { |
1696 | case WLAN_CATEGORY_BACK: { | ||
1697 | struct ieee80211_local *local = sdata->local; | ||
1698 | int len = skb->len; | ||
1699 | struct sta_info *sta; | ||
1700 | |||
1701 | rcu_read_lock(); | ||
1702 | sta = sta_info_get(sdata, mgmt->sa); | ||
1703 | if (!sta) { | ||
1704 | rcu_read_unlock(); | ||
1705 | break; | ||
1706 | } | ||
1707 | |||
1708 | local_bh_disable(); | ||
1709 | |||
1710 | switch (mgmt->u.action.u.addba_req.action_code) { | ||
1711 | case WLAN_ACTION_ADDBA_REQ: | ||
1712 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1713 | sizeof(mgmt->u.action.u.addba_req))) | ||
1714 | break; | ||
1715 | ieee80211_process_addba_request(local, sta, mgmt, len); | ||
1716 | break; | ||
1717 | case WLAN_ACTION_ADDBA_RESP: | ||
1718 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1719 | sizeof(mgmt->u.action.u.addba_resp))) | ||
1720 | break; | ||
1721 | ieee80211_process_addba_resp(local, sta, mgmt, len); | ||
1722 | break; | ||
1723 | case WLAN_ACTION_DELBA: | ||
1724 | if (len < (IEEE80211_MIN_ACTION_SIZE + | ||
1725 | sizeof(mgmt->u.action.u.delba))) | ||
1726 | break; | ||
1727 | ieee80211_process_delba(sdata, sta, mgmt, len); | ||
1728 | break; | ||
1729 | } | ||
1730 | local_bh_enable(); | ||
1731 | rcu_read_unlock(); | ||
1732 | break; | ||
1733 | } | ||
1734 | case WLAN_CATEGORY_SPECTRUM_MGMT: | 1678 | case WLAN_CATEGORY_SPECTRUM_MGMT: |
1735 | ieee80211_sta_process_chanswitch(sdata, | 1679 | ieee80211_sta_process_chanswitch(sdata, |
1736 | &mgmt->u.action.u.chan_switch.sw_elem, | 1680 | &mgmt->u.action.u.chan_switch.sw_elem, |
@@ -1754,7 +1698,7 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
1754 | default: | 1698 | default: |
1755 | WARN(1, "unexpected: %d", rma); | 1699 | WARN(1, "unexpected: %d", rma); |
1756 | } | 1700 | } |
1757 | goto out; | 1701 | return; |
1758 | } | 1702 | } |
1759 | 1703 | ||
1760 | mutex_unlock(&ifmgd->mtx); | 1704 | mutex_unlock(&ifmgd->mtx); |
@@ -1799,8 +1743,6 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, | |||
1799 | 1743 | ||
1800 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); | 1744 | cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); |
1801 | } | 1745 | } |
1802 | out: | ||
1803 | kfree_skb(skb); | ||
1804 | } | 1746 | } |
1805 | 1747 | ||
1806 | static void ieee80211_sta_timer(unsigned long data) | 1748 | static void ieee80211_sta_timer(unsigned long data) |
@@ -1815,39 +1757,13 @@ static void ieee80211_sta_timer(unsigned long data) | |||
1815 | return; | 1757 | return; |
1816 | } | 1758 | } |
1817 | 1759 | ||
1818 | ieee80211_queue_work(&local->hw, &ifmgd->work); | 1760 | ieee80211_queue_work(&local->hw, &sdata->work); |
1819 | } | 1761 | } |
1820 | 1762 | ||
1821 | static void ieee80211_sta_work(struct work_struct *work) | 1763 | void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata) |
1822 | { | 1764 | { |
1823 | struct ieee80211_sub_if_data *sdata = | ||
1824 | container_of(work, struct ieee80211_sub_if_data, u.mgd.work); | ||
1825 | struct ieee80211_local *local = sdata->local; | 1765 | struct ieee80211_local *local = sdata->local; |
1826 | struct ieee80211_if_managed *ifmgd; | 1766 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
1827 | struct sk_buff *skb; | ||
1828 | |||
1829 | if (!ieee80211_sdata_running(sdata)) | ||
1830 | return; | ||
1831 | |||
1832 | if (local->scanning) | ||
1833 | return; | ||
1834 | |||
1835 | if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) | ||
1836 | return; | ||
1837 | |||
1838 | /* | ||
1839 | * ieee80211_queue_work() should have picked up most cases, | ||
1840 | * here we'll pick the rest. | ||
1841 | */ | ||
1842 | if (WARN(local->suspended, "STA MLME work scheduled while " | ||
1843 | "going to suspend\n")) | ||
1844 | return; | ||
1845 | |||
1846 | ifmgd = &sdata->u.mgd; | ||
1847 | |||
1848 | /* first process frames to avoid timing out while a frame is pending */ | ||
1849 | while ((skb = skb_dequeue(&ifmgd->skb_queue))) | ||
1850 | ieee80211_sta_rx_queued_mgmt(sdata, skb); | ||
1851 | 1767 | ||
1852 | /* then process the rest of the work */ | 1768 | /* then process the rest of the work */ |
1853 | mutex_lock(&ifmgd->mtx); | 1769 | mutex_lock(&ifmgd->mtx); |
@@ -1942,8 +1858,7 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata) | |||
1942 | ieee80211_queue_work(&sdata->local->hw, | 1858 | ieee80211_queue_work(&sdata->local->hw, |
1943 | &sdata->u.mgd.monitor_work); | 1859 | &sdata->u.mgd.monitor_work); |
1944 | /* and do all the other regular work too */ | 1860 | /* and do all the other regular work too */ |
1945 | ieee80211_queue_work(&sdata->local->hw, | 1861 | ieee80211_queue_work(&sdata->local->hw, &sdata->work); |
1946 | &sdata->u.mgd.work); | ||
1947 | } | 1862 | } |
1948 | } | 1863 | } |
1949 | 1864 | ||
@@ -1958,7 +1873,6 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata) | |||
1958 | * time -- the code here is properly synchronised. | 1873 | * time -- the code here is properly synchronised. |
1959 | */ | 1874 | */ |
1960 | 1875 | ||
1961 | cancel_work_sync(&ifmgd->work); | ||
1962 | cancel_work_sync(&ifmgd->beacon_connection_loss_work); | 1876 | cancel_work_sync(&ifmgd->beacon_connection_loss_work); |
1963 | if (del_timer_sync(&ifmgd->timer)) | 1877 | if (del_timer_sync(&ifmgd->timer)) |
1964 | set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); | 1878 | set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); |
@@ -1990,7 +1904,6 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) | |||
1990 | struct ieee80211_if_managed *ifmgd; | 1904 | struct ieee80211_if_managed *ifmgd; |
1991 | 1905 | ||
1992 | ifmgd = &sdata->u.mgd; | 1906 | ifmgd = &sdata->u.mgd; |
1993 | INIT_WORK(&ifmgd->work, ieee80211_sta_work); | ||
1994 | INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work); | 1907 | INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work); |
1995 | INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work); | 1908 | INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work); |
1996 | INIT_WORK(&ifmgd->beacon_connection_loss_work, | 1909 | INIT_WORK(&ifmgd->beacon_connection_loss_work, |
@@ -2003,7 +1916,6 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) | |||
2003 | (unsigned long) sdata); | 1916 | (unsigned long) sdata); |
2004 | setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer, | 1917 | setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer, |
2005 | (unsigned long) sdata); | 1918 | (unsigned long) sdata); |
2006 | skb_queue_head_init(&ifmgd->skb_queue); | ||
2007 | 1919 | ||
2008 | ifmgd->flags = 0; | 1920 | ifmgd->flags = 0; |
2009 | 1921 | ||
@@ -2152,18 +2064,9 @@ static enum work_done_result ieee80211_assoc_done(struct ieee80211_work *wk, | |||
2152 | cfg80211_send_assoc_timeout(wk->sdata->dev, | 2064 | cfg80211_send_assoc_timeout(wk->sdata->dev, |
2153 | wk->filter_ta); | 2065 | wk->filter_ta); |
2154 | return WORK_DONE_DESTROY; | 2066 | return WORK_DONE_DESTROY; |
2155 | } else { | ||
2156 | mutex_unlock(&wk->sdata->u.mgd.mtx); | ||
2157 | #ifdef CONFIG_INET | ||
2158 | /* | ||
2159 | * configure ARP filter IP addresses to the driver, | ||
2160 | * intentionally outside the mgd mutex. | ||
2161 | */ | ||
2162 | rtnl_lock(); | ||
2163 | ieee80211_set_arp_filter(wk->sdata); | ||
2164 | rtnl_unlock(); | ||
2165 | #endif | ||
2166 | } | 2067 | } |
2068 | |||
2069 | mutex_unlock(&wk->sdata->u.mgd.mtx); | ||
2167 | } | 2070 | } |
2168 | 2071 | ||
2169 | cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len); | 2072 | cfg80211_send_rx_assoc(wk->sdata->dev, skb->data, skb->len); |
@@ -2292,14 +2195,16 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, | |||
2292 | struct ieee80211_local *local = sdata->local; | 2195 | struct ieee80211_local *local = sdata->local; |
2293 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | 2196 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; |
2294 | struct ieee80211_work *wk; | 2197 | struct ieee80211_work *wk; |
2295 | const u8 *bssid = req->bss->bssid; | 2198 | u8 bssid[ETH_ALEN]; |
2199 | bool assoc_bss = false; | ||
2296 | 2200 | ||
2297 | mutex_lock(&ifmgd->mtx); | 2201 | mutex_lock(&ifmgd->mtx); |
2298 | 2202 | ||
2203 | memcpy(bssid, req->bss->bssid, ETH_ALEN); | ||
2299 | if (ifmgd->associated == req->bss) { | 2204 | if (ifmgd->associated == req->bss) { |
2300 | bssid = req->bss->bssid; | 2205 | ieee80211_set_disassoc(sdata, false); |
2301 | ieee80211_set_disassoc(sdata, true); | ||
2302 | mutex_unlock(&ifmgd->mtx); | 2206 | mutex_unlock(&ifmgd->mtx); |
2207 | assoc_bss = true; | ||
2303 | } else { | 2208 | } else { |
2304 | bool not_auth_yet = false; | 2209 | bool not_auth_yet = false; |
2305 | 2210 | ||
@@ -2345,6 +2250,8 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata, | |||
2345 | ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH, | 2250 | ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH, |
2346 | req->reason_code, cookie, | 2251 | req->reason_code, cookie, |
2347 | !req->local_state_change); | 2252 | !req->local_state_change); |
2253 | if (assoc_bss) | ||
2254 | sta_info_destroy_addr(sdata, bssid); | ||
2348 | 2255 | ||
2349 | ieee80211_recalc_idle(sdata->local); | 2256 | ieee80211_recalc_idle(sdata->local); |
2350 | 2257 | ||
@@ -2389,44 +2296,6 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, | |||
2389 | return 0; | 2296 | return 0; |
2390 | } | 2297 | } |
2391 | 2298 | ||
2392 | int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata, | ||
2393 | struct ieee80211_channel *chan, | ||
2394 | enum nl80211_channel_type channel_type, | ||
2395 | bool channel_type_valid, | ||
2396 | const u8 *buf, size_t len, u64 *cookie) | ||
2397 | { | ||
2398 | struct ieee80211_local *local = sdata->local; | ||
2399 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | ||
2400 | struct sk_buff *skb; | ||
2401 | |||
2402 | /* Check that we are on the requested channel for transmission */ | ||
2403 | if (chan != local->tmp_channel && | ||
2404 | chan != local->oper_channel) | ||
2405 | return -EBUSY; | ||
2406 | if (channel_type_valid && | ||
2407 | (channel_type != local->tmp_channel_type && | ||
2408 | channel_type != local->_oper_channel_type)) | ||
2409 | return -EBUSY; | ||
2410 | |||
2411 | skb = dev_alloc_skb(local->hw.extra_tx_headroom + len); | ||
2412 | if (!skb) | ||
2413 | return -ENOMEM; | ||
2414 | skb_reserve(skb, local->hw.extra_tx_headroom); | ||
2415 | |||
2416 | memcpy(skb_put(skb, len), buf, len); | ||
2417 | |||
2418 | if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED)) | ||
2419 | IEEE80211_SKB_CB(skb)->flags |= | ||
2420 | IEEE80211_TX_INTFL_DONT_ENCRYPT; | ||
2421 | IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_NL80211_FRAME_TX | | ||
2422 | IEEE80211_TX_CTL_REQ_TX_STATUS; | ||
2423 | skb->dev = sdata->dev; | ||
2424 | ieee80211_tx_skb(sdata, skb); | ||
2425 | |||
2426 | *cookie = (unsigned long) skb; | ||
2427 | return 0; | ||
2428 | } | ||
2429 | |||
2430 | void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, | 2299 | void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, |
2431 | enum nl80211_cqm_rssi_threshold_event rssi_event, | 2300 | enum nl80211_cqm_rssi_threshold_event rssi_event, |
2432 | gfp_t gfp) | 2301 | gfp_t gfp) |
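The deauthentication change above copies the target BSSID into a stack buffer while ifmgd->mtx is still held, sends the deauth frame after the lock is dropped, and only then destroys the station entry if we had actually been associated. For orientation, this is the wire format such a frame ends up with: a 24-byte management header followed by a 2-byte little-endian reason code. The builder below is a stand-alone sketch in plain C, not the mac80211 helper used above:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Minimal 802.11 deauthentication frame: 24-byte management header
     * plus a 2-byte reason code, both little endian on the air. */
    static size_t build_deauth(uint8_t *buf, const uint8_t own[6],
                               const uint8_t bssid[6], uint16_t reason)
    {
        memset(buf, 0, 26);
        buf[0] = 0xc0;                  /* frame control: type mgmt, subtype deauth */
        memcpy(&buf[4], bssid, 6);      /* DA: the AP */
        memcpy(&buf[10], own, 6);       /* SA: our own address */
        memcpy(&buf[16], bssid, 6);     /* BSSID */
        /* buf[22..23]: sequence control, filled in by the MAC */
        buf[24] = reason & 0xff;        /* reason code, e.g. 3 = station leaving */
        buf[25] = reason >> 8;
        return 26;
    }

    int main(void)
    {
        uint8_t frame[26];
        const uint8_t own[6]   = { 0x02, 0, 0, 0, 0, 1 };
        const uint8_t bssid[6] = { 0x02, 0, 0, 0, 0, 2 };

        printf("deauth frame is %zu bytes\n", build_deauth(frame, own, bssid, 3));
        return 0;
    }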
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index 75202b295a4e..d287fde0431d 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c | |||
@@ -40,22 +40,14 @@ int __ieee80211_suspend(struct ieee80211_hw *hw) | |||
40 | list_for_each_entry(sdata, &local->interfaces, list) | 40 | list_for_each_entry(sdata, &local->interfaces, list) |
41 | ieee80211_disable_keys(sdata); | 41 | ieee80211_disable_keys(sdata); |
42 | 42 | ||
43 | /* Tear down aggregation sessions */ | 43 | /* tear down aggregation sessions and remove STAs */ |
44 | 44 | mutex_lock(&local->sta_mtx); | |
45 | rcu_read_lock(); | 45 | list_for_each_entry(sta, &local->sta_list, list) { |
46 | 46 | if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { | |
47 | if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { | ||
48 | list_for_each_entry_rcu(sta, &local->sta_list, list) { | ||
49 | set_sta_flags(sta, WLAN_STA_BLOCK_BA); | 47 | set_sta_flags(sta, WLAN_STA_BLOCK_BA); |
50 | ieee80211_sta_tear_down_BA_sessions(sta); | 48 | ieee80211_sta_tear_down_BA_sessions(sta); |
51 | } | 49 | } |
52 | } | ||
53 | 50 | ||
54 | rcu_read_unlock(); | ||
55 | |||
56 | /* remove STAs */ | ||
57 | mutex_lock(&local->sta_mtx); | ||
58 | list_for_each_entry(sta, &local->sta_list, list) { | ||
59 | if (sta->uploaded) { | 51 | if (sta->uploaded) { |
60 | sdata = sta->sdata; | 52 | sdata = sta->sdata; |
61 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) | 53 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) |
@@ -72,6 +64,8 @@ int __ieee80211_suspend(struct ieee80211_hw *hw) | |||
72 | 64 | ||
73 | /* remove all interfaces */ | 65 | /* remove all interfaces */ |
74 | list_for_each_entry(sdata, &local->interfaces, list) { | 66 | list_for_each_entry(sdata, &local->interfaces, list) { |
67 | cancel_work_sync(&sdata->work); | ||
68 | |||
75 | switch(sdata->vif.type) { | 69 | switch(sdata->vif.type) { |
76 | case NL80211_IFTYPE_STATION: | 70 | case NL80211_IFTYPE_STATION: |
77 | ieee80211_sta_quiesce(sdata); | 71 | ieee80211_sta_quiesce(sdata); |
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c index c23f08251da4..7a04951fcb1f 100644 --- a/net/mac80211/rc80211_minstrel_ht.c +++ b/net/mac80211/rc80211_minstrel_ht.c | |||
@@ -365,7 +365,7 @@ minstrel_aggr_check(struct minstrel_priv *mp, struct ieee80211_sta *pubsta, stru | |||
365 | return; | 365 | return; |
366 | 366 | ||
367 | tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; | 367 | tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; |
368 | if (likely(sta->ampdu_mlme.tid_state_tx[tid] != HT_AGG_STATE_IDLE)) | 368 | if (likely(sta->ampdu_mlme.tid_tx[tid])) |
369 | return; | 369 | return; |
370 | 370 | ||
371 | ieee80211_start_tx_ba_session(pubsta, tid); | 371 | ieee80211_start_tx_ba_session(pubsta, tid); |
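minstrel_ht now decides whether to kick off a TX aggregation session simply by checking whether a tid_tx structure already exists for the frame's traffic identifier. The TID itself comes from the low four bits of the QoS Control field, which is all IEEE80211_QOS_CTL_TID_MASK selects; a tiny stand-alone illustration (plain C, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* The TID occupies bits 0-3 of the 16-bit QoS Control field. */
    static unsigned int qos_ctl_to_tid(uint16_t qos_ctl)
    {
        return qos_ctl & 0x000f;   /* same mask as IEEE80211_QOS_CTL_TID_MASK */
    }

    int main(void)
    {
        printf("tid = %u\n", qos_ctl_to_tid(0x0025));   /* prints 5 */
        return 0;
    }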
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 6a15632e7eca..a8aa0f2411a2 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -719,16 +719,13 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx, | |||
719 | 719 | ||
720 | tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; | 720 | tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; |
721 | 721 | ||
722 | spin_lock(&sta->lock); | 722 | tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]); |
723 | 723 | if (!tid_agg_rx) | |
724 | if (!sta->ampdu_mlme.tid_active_rx[tid]) | 724 | goto dont_reorder; |
725 | goto dont_reorder_unlock; | ||
726 | |||
727 | tid_agg_rx = sta->ampdu_mlme.tid_rx[tid]; | ||
728 | 725 | ||
729 | /* qos null data frames are excluded */ | 726 | /* qos null data frames are excluded */ |
730 | if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) | 727 | if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC))) |
731 | goto dont_reorder_unlock; | 728 | goto dont_reorder; |
732 | 729 | ||
733 | /* new, potentially un-ordered, ampdu frame - process it */ | 730 | /* new, potentially un-ordered, ampdu frame - process it */ |
734 | 731 | ||
@@ -740,20 +737,22 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx, | |||
740 | /* if this mpdu is fragmented - terminate rx aggregation session */ | 737 | /* if this mpdu is fragmented - terminate rx aggregation session */ |
741 | sc = le16_to_cpu(hdr->seq_ctrl); | 738 | sc = le16_to_cpu(hdr->seq_ctrl); |
742 | if (sc & IEEE80211_SCTL_FRAG) { | 739 | if (sc & IEEE80211_SCTL_FRAG) { |
743 | spin_unlock(&sta->lock); | 740 | skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; |
744 | __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT, | 741 | skb_queue_tail(&rx->sdata->skb_queue, skb); |
745 | WLAN_REASON_QSTA_REQUIRE_SETUP); | 742 | ieee80211_queue_work(&local->hw, &rx->sdata->work); |
746 | dev_kfree_skb(skb); | ||
747 | return; | 743 | return; |
748 | } | 744 | } |
749 | 745 | ||
750 | if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames)) { | 746 | /* |
751 | spin_unlock(&sta->lock); | 747 | * No locking needed -- we will only ever process one |
748 | * RX packet at a time, and thus own tid_agg_rx. All | ||
749 | * other code manipulating it needs to (and does) make | ||
750 | * sure that we cannot get to it any more before doing | ||
751 | * anything with it. | ||
752 | */ | ||
753 | if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames)) | ||
752 | return; | 754 | return; |
753 | } | ||
754 | 755 | ||
755 | dont_reorder_unlock: | ||
756 | spin_unlock(&sta->lock); | ||
757 | dont_reorder: | 756 | dont_reorder: |
758 | __skb_queue_tail(frames, skb); | 757 | __skb_queue_tail(frames, skb); |
759 | } | 758 | } |
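Two details of the reordering hunk above are worth spelling out: a fragmented MPDU (non-zero fragment bits in Sequence Control) is no longer handled inline but tagged and queued to the interface's skb_queue for the worker, and the reorder buffer itself operates on 12-bit sequence numbers that wrap at 4096. A small stand-alone sketch of the Sequence Control layout and the wrap-around distance used for window checks (plain C; ieee80211_sta_manage_reorder_buf itself is not reproduced):

    #include <stdint.h>
    #include <stdio.h>

    #define SEQ_MASK 0x0fff   /* 12-bit sequence number space */

    /* Sequence Control: bits 0-3 fragment number, bits 4-15 sequence number. */
    static unsigned int sc_frag(uint16_t sc) { return sc & 0x000f; }
    static unsigned int sc_seq(uint16_t sc)  { return (sc >> 4) & SEQ_MASK; }

    /* Distance from 'start' to 'sn', wrapping at 4096; used to decide whether
     * a frame falls inside the current block-ack reorder window. */
    static unsigned int seq_sub(unsigned int sn, unsigned int start)
    {
        return (sn - start) & SEQ_MASK;
    }

    int main(void)
    {
        uint16_t sc = 0x0ff3;                     /* seq 255, fragment 3 */

        printf("seq=%u frag=%u\n", sc_seq(sc), sc_frag(sc));
        printf("distance(5, 4090) = %u\n", seq_sub(5, 4090));   /* wraps: 11 */
        return 0;
    }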
@@ -1268,11 +1267,13 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
1268 | rx->queue, &(rx->skb)); | 1267 | rx->queue, &(rx->skb)); |
1269 | if (rx->key && rx->key->conf.alg == ALG_CCMP && | 1268 | if (rx->key && rx->key->conf.alg == ALG_CCMP && |
1270 | ieee80211_has_protected(fc)) { | 1269 | ieee80211_has_protected(fc)) { |
1270 | int queue = ieee80211_is_mgmt(fc) ? | ||
1271 | NUM_RX_DATA_QUEUES : rx->queue; | ||
1271 | /* Store CCMP PN so that we can verify that the next | 1272 | /* Store CCMP PN so that we can verify that the next |
1272 | * fragment has a sequential PN value. */ | 1273 | * fragment has a sequential PN value. */ |
1273 | entry->ccmp = 1; | 1274 | entry->ccmp = 1; |
1274 | memcpy(entry->last_pn, | 1275 | memcpy(entry->last_pn, |
1275 | rx->key->u.ccmp.rx_pn[rx->queue], | 1276 | rx->key->u.ccmp.rx_pn[queue], |
1276 | CCMP_PN_LEN); | 1277 | CCMP_PN_LEN); |
1277 | } | 1278 | } |
1278 | return RX_QUEUED; | 1279 | return RX_QUEUED; |
@@ -1292,6 +1293,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
1292 | if (entry->ccmp) { | 1293 | if (entry->ccmp) { |
1293 | int i; | 1294 | int i; |
1294 | u8 pn[CCMP_PN_LEN], *rpn; | 1295 | u8 pn[CCMP_PN_LEN], *rpn; |
1296 | int queue; | ||
1295 | if (!rx->key || rx->key->conf.alg != ALG_CCMP) | 1297 | if (!rx->key || rx->key->conf.alg != ALG_CCMP) |
1296 | return RX_DROP_UNUSABLE; | 1298 | return RX_DROP_UNUSABLE; |
1297 | memcpy(pn, entry->last_pn, CCMP_PN_LEN); | 1299 | memcpy(pn, entry->last_pn, CCMP_PN_LEN); |
@@ -1300,7 +1302,9 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) | |||
1300 | if (pn[i]) | 1302 | if (pn[i]) |
1301 | break; | 1303 | break; |
1302 | } | 1304 | } |
1303 | rpn = rx->key->u.ccmp.rx_pn[rx->queue]; | 1305 | queue = ieee80211_is_mgmt(fc) ? |
1306 | NUM_RX_DATA_QUEUES : rx->queue; | ||
1307 | rpn = rx->key->u.ccmp.rx_pn[queue]; | ||
1304 | if (memcmp(pn, rpn, CCMP_PN_LEN)) | 1308 | if (memcmp(pn, rpn, CCMP_PN_LEN)) |
1305 | return RX_DROP_UNUSABLE; | 1309 | return RX_DROP_UNUSABLE; |
1306 | memcpy(entry->last_pn, pn, CCMP_PN_LEN); | 1310 | memcpy(entry->last_pn, pn, CCMP_PN_LEN); |
@@ -1830,13 +1834,11 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) | |||
1830 | &bar_data, sizeof(bar_data))) | 1834 | &bar_data, sizeof(bar_data))) |
1831 | return RX_DROP_MONITOR; | 1835 | return RX_DROP_MONITOR; |
1832 | 1836 | ||
1833 | spin_lock(&rx->sta->lock); | ||
1834 | tid = le16_to_cpu(bar_data.control) >> 12; | 1837 | tid = le16_to_cpu(bar_data.control) >> 12; |
1835 | if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) { | 1838 | |
1836 | spin_unlock(&rx->sta->lock); | 1839 | tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]); |
1840 | if (!tid_agg_rx) | ||
1837 | return RX_DROP_MONITOR; | 1841 | return RX_DROP_MONITOR; |
1838 | } | ||
1839 | tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid]; | ||
1840 | 1842 | ||
1841 | start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; | 1843 | start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4; |
1842 | 1844 | ||
@@ -1849,7 +1851,6 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames) | |||
1849 | ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num, | 1851 | ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num, |
1850 | frames); | 1852 | frames); |
1851 | kfree_skb(skb); | 1853 | kfree_skb(skb); |
1852 | spin_unlock(&rx->sta->lock); | ||
1853 | return RX_QUEUED; | 1854 | return RX_QUEUED; |
1854 | } | 1855 | } |
1855 | 1856 | ||
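The BAR handler above now looks the session up with rcu_dereference and, as before, decodes the frame with two shifts: the TID sits in the top four bits of the BAR Control field and the starting sequence number in the upper twelve bits of the Starting Sequence Control. A stand-alone parse for reference (plain C):

    #include <stdint.h>
    #include <stdio.h>

    struct bar_info {
        unsigned int tid;             /* TID_INFO, bits 12-15 of BAR Control */
        unsigned int start_seq_num;   /* bits 4-15 of Starting Sequence Control */
    };

    static struct bar_info parse_bar(uint16_t bar_control, uint16_t start_seq_ctrl)
    {
        struct bar_info b;

        b.tid = bar_control >> 12;
        b.start_seq_num = start_seq_ctrl >> 4;
        return b;
    }

    int main(void)
    {
        struct bar_info b = parse_bar(0x5004, 0x07d0);

        printf("tid=%u ssn=%u\n", b.tid, b.start_seq_num);   /* tid=5 ssn=125 */
        return 0;
    }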
@@ -1950,30 +1951,27 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
1950 | if (len < IEEE80211_MIN_ACTION_SIZE + 1) | 1951 | if (len < IEEE80211_MIN_ACTION_SIZE + 1) |
1951 | break; | 1952 | break; |
1952 | 1953 | ||
1953 | if (sdata->vif.type == NL80211_IFTYPE_STATION) | ||
1954 | return ieee80211_sta_rx_mgmt(sdata, rx->skb); | ||
1955 | |||
1956 | switch (mgmt->u.action.u.addba_req.action_code) { | 1954 | switch (mgmt->u.action.u.addba_req.action_code) { |
1957 | case WLAN_ACTION_ADDBA_REQ: | 1955 | case WLAN_ACTION_ADDBA_REQ: |
1958 | if (len < (IEEE80211_MIN_ACTION_SIZE + | 1956 | if (len < (IEEE80211_MIN_ACTION_SIZE + |
1959 | sizeof(mgmt->u.action.u.addba_req))) | 1957 | sizeof(mgmt->u.action.u.addba_req))) |
1960 | return RX_DROP_MONITOR; | 1958 | goto invalid; |
1961 | ieee80211_process_addba_request(local, rx->sta, mgmt, len); | 1959 | break; |
1962 | goto handled; | ||
1963 | case WLAN_ACTION_ADDBA_RESP: | 1960 | case WLAN_ACTION_ADDBA_RESP: |
1964 | if (len < (IEEE80211_MIN_ACTION_SIZE + | 1961 | if (len < (IEEE80211_MIN_ACTION_SIZE + |
1965 | sizeof(mgmt->u.action.u.addba_resp))) | 1962 | sizeof(mgmt->u.action.u.addba_resp))) |
1966 | break; | 1963 | goto invalid; |
1967 | ieee80211_process_addba_resp(local, rx->sta, mgmt, len); | 1964 | break; |
1968 | goto handled; | ||
1969 | case WLAN_ACTION_DELBA: | 1965 | case WLAN_ACTION_DELBA: |
1970 | if (len < (IEEE80211_MIN_ACTION_SIZE + | 1966 | if (len < (IEEE80211_MIN_ACTION_SIZE + |
1971 | sizeof(mgmt->u.action.u.delba))) | 1967 | sizeof(mgmt->u.action.u.delba))) |
1972 | break; | 1968 | goto invalid; |
1973 | ieee80211_process_delba(sdata, rx->sta, mgmt, len); | 1969 | break; |
1974 | goto handled; | 1970 | default: |
1971 | goto invalid; | ||
1975 | } | 1972 | } |
1976 | break; | 1973 | |
1974 | goto queue; | ||
1977 | case WLAN_CATEGORY_SPECTRUM_MGMT: | 1975 | case WLAN_CATEGORY_SPECTRUM_MGMT: |
1978 | if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ) | 1976 | if (local->hw.conf.channel->band != IEEE80211_BAND_5GHZ) |
1979 | break; | 1977 | break; |
@@ -2003,7 +2001,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
2003 | if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN)) | 2001 | if (memcmp(mgmt->bssid, sdata->u.mgd.bssid, ETH_ALEN)) |
2004 | break; | 2002 | break; |
2005 | 2003 | ||
2006 | return ieee80211_sta_rx_mgmt(sdata, rx->skb); | 2004 | goto queue; |
2007 | } | 2005 | } |
2008 | break; | 2006 | break; |
2009 | case WLAN_CATEGORY_SA_QUERY: | 2007 | case WLAN_CATEGORY_SA_QUERY: |
@@ -2021,11 +2019,12 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
2021 | break; | 2019 | break; |
2022 | case WLAN_CATEGORY_MESH_PLINK: | 2020 | case WLAN_CATEGORY_MESH_PLINK: |
2023 | case WLAN_CATEGORY_MESH_PATH_SEL: | 2021 | case WLAN_CATEGORY_MESH_PATH_SEL: |
2024 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 2022 | if (!ieee80211_vif_is_mesh(&sdata->vif)) |
2025 | return ieee80211_mesh_rx_mgmt(sdata, rx->skb); | 2023 | break; |
2026 | break; | 2024 | goto queue; |
2027 | } | 2025 | } |
2028 | 2026 | ||
2027 | invalid: | ||
2029 | /* | 2028 | /* |
2030 | * For AP mode, hostapd is responsible for handling any action | 2029 | * For AP mode, hostapd is responsible for handling any action |
2031 | * frames that we didn't handle, including returning unknown | 2030 | * frames that we didn't handle, including returning unknown |
@@ -2045,8 +2044,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
2045 | */ | 2044 | */ |
2046 | status = IEEE80211_SKB_RXCB(rx->skb); | 2045 | status = IEEE80211_SKB_RXCB(rx->skb); |
2047 | 2046 | ||
2048 | if (sdata->vif.type == NL80211_IFTYPE_STATION && | 2047 | if (cfg80211_rx_action(rx->sdata->dev, status->freq, |
2049 | cfg80211_rx_action(rx->sdata->dev, status->freq, | ||
2050 | rx->skb->data, rx->skb->len, | 2048 | rx->skb->data, rx->skb->len, |
2051 | GFP_ATOMIC)) | 2049 | GFP_ATOMIC)) |
2052 | goto handled; | 2050 | goto handled; |
@@ -2074,6 +2072,14 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) | |||
2074 | rx->sta->rx_packets++; | 2072 | rx->sta->rx_packets++; |
2075 | dev_kfree_skb(rx->skb); | 2073 | dev_kfree_skb(rx->skb); |
2076 | return RX_QUEUED; | 2074 | return RX_QUEUED; |
2075 | |||
2076 | queue: | ||
2077 | rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; | ||
2078 | skb_queue_tail(&sdata->skb_queue, rx->skb); | ||
2079 | ieee80211_queue_work(&local->hw, &sdata->work); | ||
2080 | if (rx->sta) | ||
2081 | rx->sta->rx_packets++; | ||
2082 | return RX_QUEUED; | ||
2077 | } | 2083 | } |
2078 | 2084 | ||
2079 | static ieee80211_rx_result debug_noinline | 2085 | static ieee80211_rx_result debug_noinline |
@@ -2081,10 +2087,15 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) | |||
2081 | { | 2087 | { |
2082 | struct ieee80211_sub_if_data *sdata = rx->sdata; | 2088 | struct ieee80211_sub_if_data *sdata = rx->sdata; |
2083 | ieee80211_rx_result rxs; | 2089 | ieee80211_rx_result rxs; |
2090 | struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; | ||
2091 | __le16 stype; | ||
2084 | 2092 | ||
2085 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) | 2093 | if (!(rx->flags & IEEE80211_RX_RA_MATCH)) |
2086 | return RX_DROP_MONITOR; | 2094 | return RX_DROP_MONITOR; |
2087 | 2095 | ||
2096 | if (rx->skb->len < 24) | ||
2097 | return RX_DROP_MONITOR; | ||
2098 | |||
2088 | if (ieee80211_drop_unencrypted_mgmt(rx)) | 2099 | if (ieee80211_drop_unencrypted_mgmt(rx)) |
2089 | return RX_DROP_UNUSABLE; | 2100 | return RX_DROP_UNUSABLE; |
2090 | 2101 | ||
@@ -2092,16 +2103,42 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) | |||
2092 | if (rxs != RX_CONTINUE) | 2103 | if (rxs != RX_CONTINUE) |
2093 | return rxs; | 2104 | return rxs; |
2094 | 2105 | ||
2095 | if (ieee80211_vif_is_mesh(&sdata->vif)) | 2106 | stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE); |
2096 | return ieee80211_mesh_rx_mgmt(sdata, rx->skb); | ||
2097 | 2107 | ||
2098 | if (sdata->vif.type == NL80211_IFTYPE_ADHOC) | 2108 | if (!ieee80211_vif_is_mesh(&sdata->vif) && |
2099 | return ieee80211_ibss_rx_mgmt(sdata, rx->skb); | 2109 | sdata->vif.type != NL80211_IFTYPE_ADHOC && |
2110 | sdata->vif.type != NL80211_IFTYPE_STATION) | ||
2111 | return RX_DROP_MONITOR; | ||
2112 | |||
2113 | switch (stype) { | ||
2114 | case cpu_to_le16(IEEE80211_STYPE_BEACON): | ||
2115 | case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP): | ||
2116 | /* process for all: mesh, mlme, ibss */ | ||
2117 | break; | ||
2118 | case cpu_to_le16(IEEE80211_STYPE_DEAUTH): | ||
2119 | case cpu_to_le16(IEEE80211_STYPE_DISASSOC): | ||
2120 | /* process only for station */ | ||
2121 | if (sdata->vif.type != NL80211_IFTYPE_STATION) | ||
2122 | return RX_DROP_MONITOR; | ||
2123 | break; | ||
2124 | case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ): | ||
2125 | case cpu_to_le16(IEEE80211_STYPE_AUTH): | ||
2126 | /* process only for ibss */ | ||
2127 | if (sdata->vif.type != NL80211_IFTYPE_ADHOC) | ||
2128 | return RX_DROP_MONITOR; | ||
2129 | break; | ||
2130 | default: | ||
2131 | return RX_DROP_MONITOR; | ||
2132 | } | ||
2100 | 2133 | ||
2101 | if (sdata->vif.type == NL80211_IFTYPE_STATION) | 2134 | /* queue up frame and kick off work to process it */ |
2102 | return ieee80211_sta_rx_mgmt(sdata, rx->skb); | 2135 | rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME; |
2136 | skb_queue_tail(&sdata->skb_queue, rx->skb); | ||
2137 | ieee80211_queue_work(&rx->local->hw, &sdata->work); | ||
2138 | if (rx->sta) | ||
2139 | rx->sta->rx_packets++; | ||
2103 | 2140 | ||
2104 | return RX_DROP_MONITOR; | 2141 | return RX_QUEUED; |
2105 | } | 2142 | } |
2106 | 2143 | ||
2107 | static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr, | 2144 | static void ieee80211_rx_michael_mic_report(struct ieee80211_hdr *hdr, |
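The common thread of the rx.c changes is that neither action frames nor other management frames are dispatched into the MLME from the RX path any more: the skb is tagged with IEEE80211_SDATA_QUEUE_TYPE_FRAME, appended to the per-interface skb_queue, and sdata->work is scheduled, so all MLME processing runs from a single worker. A rough single-threaded model of that tag-queue-kick pattern (plain userspace C; the queue, struct and function names below are illustrative, not mac80211 API):

    #include <stdio.h>
    #include <string.h>

    #define QUEUE_TYPE_FRAME 1   /* stands in for IEEE80211_SDATA_QUEUE_TYPE_FRAME */
    #define MAX_QUEUED       8

    struct frame {
        int  pkt_type;
        char data[32];
    };

    static struct frame queue[MAX_QUEUED];
    static int q_head, q_tail, work_pending;

    /* RX path: tag the frame, queue it, and ask the worker to run. */
    static void rx_handler(const char *payload)
    {
        struct frame *f = &queue[q_tail++ % MAX_QUEUED];

        f->pkt_type = QUEUE_TYPE_FRAME;
        snprintf(f->data, sizeof(f->data), "%s", payload);
        work_pending = 1;                 /* "ieee80211_queue_work" */
    }

    /* Per-interface worker: drain the queue outside the RX path. */
    static void interface_work(void)
    {
        while (q_head != q_tail) {
            struct frame *f = &queue[q_head++ % MAX_QUEUED];
            printf("processing queued frame: %s\n", f->data);
        }
        work_pending = 0;
    }

    int main(void)
    {
        rx_handler("ADDBA request");
        rx_handler("deauth");
        if (work_pending)
            interface_work();
        return 0;
    }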
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 4607da9a6ff7..67656cbf2b15 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -235,6 +235,8 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, | |||
235 | spin_lock_init(&sta->lock); | 235 | spin_lock_init(&sta->lock); |
236 | spin_lock_init(&sta->flaglock); | 236 | spin_lock_init(&sta->flaglock); |
237 | INIT_WORK(&sta->drv_unblock_wk, sta_unblock); | 237 | INIT_WORK(&sta->drv_unblock_wk, sta_unblock); |
238 | INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); | ||
239 | mutex_init(&sta->ampdu_mlme.mtx); | ||
238 | 240 | ||
239 | memcpy(sta->sta.addr, addr, ETH_ALEN); | 241 | memcpy(sta->sta.addr, addr, ETH_ALEN); |
240 | sta->local = local; | 242 | sta->local = local; |
@@ -246,14 +248,12 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata, | |||
246 | } | 248 | } |
247 | 249 | ||
248 | for (i = 0; i < STA_TID_NUM; i++) { | 250 | for (i = 0; i < STA_TID_NUM; i++) { |
249 | /* timer_to_tid must be initialized with identity mapping to | 251 | /* |
250 | * enable session_timer's data differentiation. refer to | 252 | * timer_to_tid must be initialized with identity mapping |
251 | * sta_rx_agg_session_timer_expired for useage */ | 253 | * to enable session_timer's data differentiation. See |
254 | * sta_rx_agg_session_timer_expired for usage. | ||
255 | */ | ||
252 | sta->timer_to_tid[i] = i; | 256 | sta->timer_to_tid[i] = i; |
253 | /* tx */ | ||
254 | sta->ampdu_mlme.tid_state_tx[i] = HT_AGG_STATE_IDLE; | ||
255 | sta->ampdu_mlme.tid_tx[i] = NULL; | ||
256 | sta->ampdu_mlme.addba_req_num[i] = 0; | ||
257 | } | 257 | } |
258 | skb_queue_head_init(&sta->ps_tx_buf); | 258 | skb_queue_head_init(&sta->ps_tx_buf); |
259 | skb_queue_head_init(&sta->tx_filtered); | 259 | skb_queue_head_init(&sta->tx_filtered); |
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 813da34db733..10d0fcb417ae 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
@@ -42,9 +42,6 @@ | |||
42 | * be in the queues | 42 | * be in the queues |
43 | * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping | 43 | * @WLAN_STA_PSPOLL: Station sent PS-poll while driver was keeping |
44 | * station in power-save mode, reply when the driver unblocks. | 44 | * station in power-save mode, reply when the driver unblocks. |
45 | * @WLAN_STA_DISASSOC: Disassociation in progress. | ||
46 | * This is used to reject TX BA session requests when disassociation | ||
47 | * is in progress. | ||
48 | */ | 45 | */ |
49 | enum ieee80211_sta_info_flags { | 46 | enum ieee80211_sta_info_flags { |
50 | WLAN_STA_AUTH = 1<<0, | 47 | WLAN_STA_AUTH = 1<<0, |
@@ -60,38 +57,44 @@ enum ieee80211_sta_info_flags { | |||
60 | WLAN_STA_BLOCK_BA = 1<<11, | 57 | WLAN_STA_BLOCK_BA = 1<<11, |
61 | WLAN_STA_PS_DRIVER = 1<<12, | 58 | WLAN_STA_PS_DRIVER = 1<<12, |
62 | WLAN_STA_PSPOLL = 1<<13, | 59 | WLAN_STA_PSPOLL = 1<<13, |
63 | WLAN_STA_DISASSOC = 1<<14, | ||
64 | }; | 60 | }; |
65 | 61 | ||
66 | #define STA_TID_NUM 16 | 62 | #define STA_TID_NUM 16 |
67 | #define ADDBA_RESP_INTERVAL HZ | 63 | #define ADDBA_RESP_INTERVAL HZ |
68 | #define HT_AGG_MAX_RETRIES (0x3) | 64 | #define HT_AGG_MAX_RETRIES 0x3 |
69 | 65 | ||
70 | #define HT_AGG_STATE_INITIATOR_SHIFT (4) | 66 | #define HT_AGG_STATE_DRV_READY 0 |
71 | 67 | #define HT_AGG_STATE_RESPONSE_RECEIVED 1 | |
72 | #define HT_ADDBA_REQUESTED_MSK BIT(0) | 68 | #define HT_AGG_STATE_OPERATIONAL 2 |
73 | #define HT_ADDBA_DRV_READY_MSK BIT(1) | 69 | #define HT_AGG_STATE_STOPPING 3 |
74 | #define HT_ADDBA_RECEIVED_MSK BIT(2) | 70 | #define HT_AGG_STATE_WANT_START 4 |
75 | #define HT_AGG_STATE_REQ_STOP_BA_MSK BIT(3) | 71 | #define HT_AGG_STATE_WANT_STOP 5 |
76 | #define HT_AGG_STATE_INITIATOR_MSK BIT(HT_AGG_STATE_INITIATOR_SHIFT) | ||
77 | #define HT_AGG_STATE_IDLE (0x0) | ||
78 | #define HT_AGG_STATE_OPERATIONAL (HT_ADDBA_REQUESTED_MSK | \ | ||
79 | HT_ADDBA_DRV_READY_MSK | \ | ||
80 | HT_ADDBA_RECEIVED_MSK) | ||
81 | 72 | ||
82 | /** | 73 | /** |
83 | * struct tid_ampdu_tx - TID aggregation information (Tx). | 74 | * struct tid_ampdu_tx - TID aggregation information (Tx). |
84 | * | 75 | * |
76 | * @rcu_head: rcu head for freeing structure | ||
85 | * @addba_resp_timer: timer for peer's response to addba request | 77 | * @addba_resp_timer: timer for peer's response to addba request |
86 | * @pending: pending frames queue -- use sta's spinlock to protect | 78 | * @pending: pending frames queue -- use sta's spinlock to protect |
87 | * @ssn: Starting Sequence Number expected to be aggregated. | ||
88 | * @dialog_token: dialog token for aggregation session | 79 | * @dialog_token: dialog token for aggregation session |
80 | * @state: session state (see above) | ||
81 | * @stop_initiator: initiator of a session stop | ||
82 | * | ||
83 | * This structure is protected by RCU and the per-station | ||
84 | * spinlock. Assignments to the array holding it must hold | ||
85 | * the spinlock, only the TX path can access it under RCU | ||
86 | * lock-free if, and only if, the state has the flag | ||
87 | * %HT_AGG_STATE_OPERATIONAL set. Otherwise, the TX path | ||
88 | * must also acquire the spinlock and re-check the state, | ||
89 | * see comments in the tx code touching it. | ||
89 | */ | 90 | */ |
90 | struct tid_ampdu_tx { | 91 | struct tid_ampdu_tx { |
92 | struct rcu_head rcu_head; | ||
91 | struct timer_list addba_resp_timer; | 93 | struct timer_list addba_resp_timer; |
92 | struct sk_buff_head pending; | 94 | struct sk_buff_head pending; |
93 | u16 ssn; | 95 | unsigned long state; |
94 | u8 dialog_token; | 96 | u8 dialog_token; |
97 | u8 stop_initiator; | ||
95 | }; | 98 | }; |
96 | 99 | ||
97 | /** | 100 | /** |
@@ -106,8 +109,18 @@ struct tid_ampdu_tx { | |||
106 | * @buf_size: buffer size for incoming A-MPDUs | 109 | * @buf_size: buffer size for incoming A-MPDUs |
107 | * @timeout: reset timer value (in TUs). | 110 | * @timeout: reset timer value (in TUs). |
108 | * @dialog_token: dialog token for aggregation session | 111 | * @dialog_token: dialog token for aggregation session |
112 | * @rcu_head: RCU head used for freeing this struct | ||
113 | * | ||
114 | * This structure is protected by RCU and the per-station | ||
115 | * spinlock. Assignments to the array holding it must hold | ||
116 | * the spinlock, only the RX path can access it under RCU | ||
117 | * lock-free. The RX path, since it is single-threaded, | ||
118 | * can even modify the structure without locking since the | ||
119 | * only other modifications to it are done when the struct | ||
120 | * can not yet or no longer be found by the RX path. | ||
109 | */ | 121 | */ |
110 | struct tid_ampdu_rx { | 122 | struct tid_ampdu_rx { |
123 | struct rcu_head rcu_head; | ||
111 | struct sk_buff **reorder_buf; | 124 | struct sk_buff **reorder_buf; |
112 | unsigned long *reorder_time; | 125 | unsigned long *reorder_time; |
113 | struct timer_list session_timer; | 126 | struct timer_list session_timer; |
@@ -122,19 +135,23 @@ struct tid_ampdu_rx { | |||
122 | /** | 135 | /** |
123 | * struct sta_ampdu_mlme - STA aggregation information. | 136 | * struct sta_ampdu_mlme - STA aggregation information. |
124 | * | 137 | * |
125 | * @tid_active_rx: TID's state in Rx session state machine. | 138 | * @tid_rx: aggregation info for Rx per TID -- RCU protected |
126 | * @tid_rx: aggregation info for Rx per TID | ||
127 | * @tid_state_tx: TID's state in Tx session state machine. | ||
128 | * @tid_tx: aggregation info for Tx per TID | 139 | * @tid_tx: aggregation info for Tx per TID |
129 | * @addba_req_num: number of times addBA request has been sent. | 140 | * @addba_req_num: number of times addBA request has been sent. |
130 | * @dialog_token_allocator: dialog token enumerator for each new session; | 141 | * @dialog_token_allocator: dialog token enumerator for each new session; |
142 | * @work: work struct for starting/stopping aggregation | ||
143 | * @tid_rx_timer_expired: bitmap indicating on which TIDs the | ||
144 | * RX timer expired until the work for it runs | ||
145 | * @mtx: mutex to protect all TX data (except non-NULL assignments | ||
146 | * to tid_tx[idx], which are protected by the sta spinlock) | ||
131 | */ | 147 | */ |
132 | struct sta_ampdu_mlme { | 148 | struct sta_ampdu_mlme { |
149 | struct mutex mtx; | ||
133 | /* rx */ | 150 | /* rx */ |
134 | bool tid_active_rx[STA_TID_NUM]; | ||
135 | struct tid_ampdu_rx *tid_rx[STA_TID_NUM]; | 151 | struct tid_ampdu_rx *tid_rx[STA_TID_NUM]; |
152 | unsigned long tid_rx_timer_expired[BITS_TO_LONGS(STA_TID_NUM)]; | ||
136 | /* tx */ | 153 | /* tx */ |
137 | u8 tid_state_tx[STA_TID_NUM]; | 154 | struct work_struct work; |
138 | struct tid_ampdu_tx *tid_tx[STA_TID_NUM]; | 155 | struct tid_ampdu_tx *tid_tx[STA_TID_NUM]; |
139 | u8 addba_req_num[STA_TID_NUM]; | 156 | u8 addba_req_num[STA_TID_NUM]; |
140 | u8 dialog_token_allocator; | 157 | u8 dialog_token_allocator; |
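Both per-TID aggregation arrays are now RCU-managed: a writer installs a fully initialized tid_ampdu_rx/tid_ampdu_tx under the station spinlock, readers pick the pointer up with rcu_dereference without taking any lock, and the structure is freed only after a grace period via its rcu_head. User space has no RCU, but the publication half of the idea can be modelled with C11 release/acquire atomics; the sketch below is a loose analogy only (grace periods and deferred freeing are deliberately omitted, and all names are made up):

    #include <stdatomic.h>
    #include <stdlib.h>
    #include <stdio.h>

    struct tid_rx_state {
        int buf_size;
        int head_seq_num;
    };

    /* One RCU-like slot per TID; NULL means "no session". */
    static _Atomic(struct tid_rx_state *) tid_rx[16];

    /* Writer: initialize first, then publish with a release store
     * (the moral equivalent of rcu_assign_pointer). */
    static void start_rx_session(int tid, int buf_size)
    {
        struct tid_rx_state *s = malloc(sizeof(*s));

        s->buf_size = buf_size;
        s->head_seq_num = 0;
        atomic_store_explicit(&tid_rx[tid], s, memory_order_release);
    }

    /* Reader: acquire load (the moral equivalent of rcu_dereference). */
    static void rx_path(int tid)
    {
        struct tid_rx_state *s =
            atomic_load_explicit(&tid_rx[tid], memory_order_acquire);

        if (!s)
            return;                     /* no session: nothing to reorder */
        printf("tid %d: reorder window of %d frames\n", tid, s->buf_size);
    }

    int main(void)
    {
        start_rx_session(0, 64);
        rx_path(0);
        rx_path(1);
        return 0;
    }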
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 680bcb7093db..698d4718b1a4 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -1092,6 +1092,59 @@ static bool __ieee80211_parse_tx_radiotap(struct ieee80211_tx_data *tx, | |||
1092 | return true; | 1092 | return true; |
1093 | } | 1093 | } |
1094 | 1094 | ||
1095 | static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx, | ||
1096 | struct sk_buff *skb, | ||
1097 | struct ieee80211_tx_info *info, | ||
1098 | struct tid_ampdu_tx *tid_tx, | ||
1099 | int tid) | ||
1100 | { | ||
1101 | bool queued = false; | ||
1102 | |||
1103 | if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) { | ||
1104 | info->flags |= IEEE80211_TX_CTL_AMPDU; | ||
1105 | } else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) { | ||
1106 | /* | ||
1107 | * nothing -- this aggregation session is being started | ||
1108 | * but that might still fail with the driver | ||
1109 | */ | ||
1110 | } else { | ||
1111 | spin_lock(&tx->sta->lock); | ||
1112 | /* | ||
1113 | * Need to re-check now, because we may get here | ||
1114 | * | ||
1115 | * 1) in the window during which the setup is actually | ||
1116 | * already done, but not marked yet because not all | ||
1117 | * packets are spliced over to the driver pending | ||
1118 | * queue yet -- if this happened we acquire the lock | ||
1119 | * either before or after the splice happens, but | ||
1120 | * need to recheck which of these cases happened. | ||
1121 | * | ||
1122 | * 2) during session teardown, if the OPERATIONAL bit | ||
1123 | * was cleared due to the teardown but the pointer | ||
1124 | * hasn't been assigned NULL yet (or we loaded it | ||
1125 | * before it was assigned) -- in this case it may | ||
1126 | * now be NULL which means we should just let the | ||
1127 | * packet pass through because splicing the frames | ||
1128 | * back is already done. | ||
1129 | */ | ||
1130 | tid_tx = tx->sta->ampdu_mlme.tid_tx[tid]; | ||
1131 | |||
1132 | if (!tid_tx) { | ||
1133 | /* do nothing, let packet pass through */ | ||
1134 | } else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) { | ||
1135 | info->flags |= IEEE80211_TX_CTL_AMPDU; | ||
1136 | } else { | ||
1137 | queued = true; | ||
1138 | info->control.vif = &tx->sdata->vif; | ||
1139 | info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; | ||
1140 | __skb_queue_tail(&tid_tx->pending, skb); | ||
1141 | } | ||
1142 | spin_unlock(&tx->sta->lock); | ||
1143 | } | ||
1144 | |||
1145 | return queued; | ||
1146 | } | ||
1147 | |||
1095 | /* | 1148 | /* |
1096 | * initialises @tx | 1149 | * initialises @tx |
1097 | */ | 1150 | */ |
@@ -1104,8 +1157,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, | |||
1104 | struct ieee80211_hdr *hdr; | 1157 | struct ieee80211_hdr *hdr; |
1105 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | 1158 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1106 | int hdrlen, tid; | 1159 | int hdrlen, tid; |
1107 | u8 *qc, *state; | 1160 | u8 *qc; |
1108 | bool queued = false; | ||
1109 | 1161 | ||
1110 | memset(tx, 0, sizeof(*tx)); | 1162 | memset(tx, 0, sizeof(*tx)); |
1111 | tx->skb = skb; | 1163 | tx->skb = skb; |
@@ -1157,35 +1209,16 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, | |||
1157 | qc = ieee80211_get_qos_ctl(hdr); | 1209 | qc = ieee80211_get_qos_ctl(hdr); |
1158 | tid = *qc & IEEE80211_QOS_CTL_TID_MASK; | 1210 | tid = *qc & IEEE80211_QOS_CTL_TID_MASK; |
1159 | 1211 | ||
1160 | spin_lock(&tx->sta->lock); | 1212 | tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]); |
1161 | /* | 1213 | if (tid_tx) { |
1162 | * XXX: This spinlock could be fairly expensive, but see the | 1214 | bool queued; |
1163 | * comment in agg-tx.c:ieee80211_agg_tx_operational(). | ||
1164 | * One way to solve this would be to do something RCU-like | ||
1165 | * for managing the tid_tx struct and using atomic bitops | ||
1166 | * for the actual state -- by introducing an actual | ||
1167 | * 'operational' bit that would be possible. It would | ||
1168 | * require changing ieee80211_agg_tx_operational() to | ||
1169 | * set that bit, and changing the way tid_tx is managed | ||
1170 | * everywhere, including races between that bit and | ||
1171 | * tid_tx going away (tid_tx being added can be easily | ||
1172 | * committed to memory before the 'operational' bit). | ||
1173 | */ | ||
1174 | tid_tx = tx->sta->ampdu_mlme.tid_tx[tid]; | ||
1175 | state = &tx->sta->ampdu_mlme.tid_state_tx[tid]; | ||
1176 | if (*state == HT_AGG_STATE_OPERATIONAL) { | ||
1177 | info->flags |= IEEE80211_TX_CTL_AMPDU; | ||
1178 | } else if (*state != HT_AGG_STATE_IDLE) { | ||
1179 | /* in progress */ | ||
1180 | queued = true; | ||
1181 | info->control.vif = &sdata->vif; | ||
1182 | info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; | ||
1183 | __skb_queue_tail(&tid_tx->pending, skb); | ||
1184 | } | ||
1185 | spin_unlock(&tx->sta->lock); | ||
1186 | 1215 | ||
1187 | if (unlikely(queued)) | 1216 | queued = ieee80211_tx_prep_agg(tx, skb, info, |
1188 | return TX_QUEUED; | 1217 | tid_tx, tid); |
1218 | |||
1219 | if (unlikely(queued)) | ||
1220 | return TX_QUEUED; | ||
1221 | } | ||
1189 | } | 1222 | } |
1190 | 1223 | ||
1191 | if (is_multicast_ether_addr(hdr->addr1)) { | 1224 | if (is_multicast_ether_addr(hdr->addr1)) { |
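On the TX side the session state is no longer a small enum but a set of independent bits (the HT_AGG_STATE_* values above are bit numbers) tested and set atomically on tid_tx->state. That is what lets ieee80211_tx_prep_agg check "operational" with a single test_bit under RCU and fall back to the station spinlock only in the ambiguous windows described in its comment. A rough userspace equivalent of the bit-flag pattern using C11 atomics (a sketch, not the kernel bitops API):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Bit numbers, mirroring the HT_AGG_STATE_* definitions above. */
    enum { AGG_DRV_READY, AGG_RESPONSE_RECEIVED, AGG_OPERATIONAL,
           AGG_STOPPING, AGG_WANT_START, AGG_WANT_STOP };

    static atomic_ulong agg_state;

    static void agg_set_bit(int bit)
    {
        atomic_fetch_or(&agg_state, 1UL << bit);
    }

    static int agg_test_bit(int bit)
    {
        return (atomic_load(&agg_state) >> bit) & 1UL;
    }

    int main(void)
    {
        agg_set_bit(AGG_WANT_START);
        agg_set_bit(AGG_DRV_READY);
        agg_set_bit(AGG_RESPONSE_RECEIVED);

        /* becomes operational only once both driver and peer are ready */
        if (agg_test_bit(AGG_DRV_READY) && agg_test_bit(AGG_RESPONSE_RECEIVED))
            agg_set_bit(AGG_OPERATIONAL);

        printf("operational: %d\n", agg_test_bit(AGG_OPERATIONAL));
        return 0;
    }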
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 5b79d552780a..a54cf146ed50 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -1138,18 +1138,6 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1138 | } | 1138 | } |
1139 | mutex_unlock(&local->sta_mtx); | 1139 | mutex_unlock(&local->sta_mtx); |
1140 | 1140 | ||
1141 | /* Clear Suspend state so that ADDBA requests can be processed */ | ||
1142 | |||
1143 | rcu_read_lock(); | ||
1144 | |||
1145 | if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { | ||
1146 | list_for_each_entry_rcu(sta, &local->sta_list, list) { | ||
1147 | clear_sta_flags(sta, WLAN_STA_BLOCK_BA); | ||
1148 | } | ||
1149 | } | ||
1150 | |||
1151 | rcu_read_unlock(); | ||
1152 | |||
1153 | /* setup RTS threshold */ | 1141 | /* setup RTS threshold */ |
1154 | drv_set_rts_threshold(local, hw->wiphy->rts_threshold); | 1142 | drv_set_rts_threshold(local, hw->wiphy->rts_threshold); |
1155 | 1143 | ||
@@ -1202,13 +1190,26 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1202 | } | 1190 | } |
1203 | } | 1191 | } |
1204 | 1192 | ||
1205 | rcu_read_lock(); | 1193 | /* |
1194 | * Clear the WLAN_STA_BLOCK_BA flag so new aggregation | ||
1195 | * sessions can be established after a resume. | ||
1196 | * | ||
1197 | * Also tear down aggregation sessions since reconfiguring | ||
1198 | * them in a hardware restart scenario is not easily done | ||
1199 | * right now, and the hardware will have lost information | ||
1200 | * about the sessions, but we and the AP still think they | ||
1201 | * are active. This is really a workaround though. | ||
1202 | */ | ||
1206 | if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { | 1203 | if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) { |
1207 | list_for_each_entry_rcu(sta, &local->sta_list, list) { | 1204 | mutex_lock(&local->sta_mtx); |
1205 | |||
1206 | list_for_each_entry(sta, &local->sta_list, list) { | ||
1208 | ieee80211_sta_tear_down_BA_sessions(sta); | 1207 | ieee80211_sta_tear_down_BA_sessions(sta); |
1208 | clear_sta_flags(sta, WLAN_STA_BLOCK_BA); | ||
1209 | } | 1209 | } |
1210 | |||
1211 | mutex_unlock(&local->sta_mtx); | ||
1210 | } | 1212 | } |
1211 | rcu_read_unlock(); | ||
1212 | 1213 | ||
1213 | /* add back keys */ | 1214 | /* add back keys */ |
1214 | list_for_each_entry(sdata, &local->interfaces, list) | 1215 | list_for_each_entry(sdata, &local->interfaces, list) |
diff --git a/net/mac80211/work.c b/net/mac80211/work.c index 4157717ed786..c22a71c5cb45 100644 --- a/net/mac80211/work.c +++ b/net/mac80211/work.c | |||
@@ -715,7 +715,7 @@ static void ieee80211_work_rx_queued_mgmt(struct ieee80211_local *local, | |||
715 | struct ieee80211_rx_status *rx_status; | 715 | struct ieee80211_rx_status *rx_status; |
716 | struct ieee80211_mgmt *mgmt; | 716 | struct ieee80211_mgmt *mgmt; |
717 | struct ieee80211_work *wk; | 717 | struct ieee80211_work *wk; |
718 | enum work_action rma; | 718 | enum work_action rma = WORK_ACT_NONE; |
719 | u16 fc; | 719 | u16 fc; |
720 | 720 | ||
721 | rx_status = (struct ieee80211_rx_status *) skb->cb; | 721 | rx_status = (struct ieee80211_rx_status *) skb->cb; |
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index 0adbcc941ac9..a14e67707476 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c | |||
@@ -436,6 +436,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) | |||
436 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); | 436 | struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); |
437 | u8 pn[CCMP_PN_LEN]; | 437 | u8 pn[CCMP_PN_LEN]; |
438 | int data_len; | 438 | int data_len; |
439 | int queue; | ||
439 | 440 | ||
440 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | 441 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
441 | 442 | ||
@@ -453,7 +454,10 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) | |||
453 | 454 | ||
454 | ccmp_hdr2pn(pn, skb->data + hdrlen); | 455 | ccmp_hdr2pn(pn, skb->data + hdrlen); |
455 | 456 | ||
456 | if (memcmp(pn, key->u.ccmp.rx_pn[rx->queue], CCMP_PN_LEN) <= 0) { | 457 | queue = ieee80211_is_mgmt(hdr->frame_control) ? |
458 | NUM_RX_DATA_QUEUES : rx->queue; | ||
459 | |||
460 | if (memcmp(pn, key->u.ccmp.rx_pn[queue], CCMP_PN_LEN) <= 0) { | ||
457 | key->u.ccmp.replays++; | 461 | key->u.ccmp.replays++; |
458 | return RX_DROP_UNUSABLE; | 462 | return RX_DROP_UNUSABLE; |
459 | } | 463 | } |
@@ -470,7 +474,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) | |||
470 | return RX_DROP_UNUSABLE; | 474 | return RX_DROP_UNUSABLE; |
471 | } | 475 | } |
472 | 476 | ||
473 | memcpy(key->u.ccmp.rx_pn[rx->queue], pn, CCMP_PN_LEN); | 477 | memcpy(key->u.ccmp.rx_pn[queue], pn, CCMP_PN_LEN); |
474 | 478 | ||
475 | /* Remove CCMP header and MIC */ | 479 | /* Remove CCMP header and MIC */ |
476 | skb_trim(skb, skb->len - CCMP_MIC_LEN); | 480 | skb_trim(skb, skb->len - CCMP_MIC_LEN); |
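The CCMP packet number is a 48-bit counter carried in six header bytes; mac80211 keeps the last accepted PN per RX queue, and this patch adds one extra slot (indexed by NUM_RX_DATA_QUEUES) so protected management frames track their own counter instead of sharing one with a data TID. Because the stored PN is kept most-significant byte first, a plain memcmp yields numeric ordering, which is all the replay check above needs. A small self-contained model (plain C; the queue count below is illustrative):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define PN_LEN          6
    #define NUM_DATA_QUEUES 16   /* illustrative; the extra slot is for mgmt frames */

    /* last accepted PN per queue, most-significant byte first */
    static uint8_t rx_pn[NUM_DATA_QUEUES + 1][PN_LEN];

    /* Returns 0 and records the PN if it is strictly newer, -1 on replay. */
    static int ccmp_check_pn(int queue, const uint8_t pn[PN_LEN])
    {
        if (memcmp(pn, rx_pn[queue], PN_LEN) <= 0)
            return -1;                    /* replayed or reordered frame */
        memcpy(rx_pn[queue], pn, PN_LEN);
        return 0;
    }

    int main(void)
    {
        uint8_t a[PN_LEN] = { 0, 0, 0, 0, 0, 5 };
        uint8_t b[PN_LEN] = { 0, 0, 0, 0, 0, 4 };

        printf("%d\n", ccmp_check_pn(3, a));                 /* 0: accepted   */
        printf("%d\n", ccmp_check_pn(3, b));                 /* -1: replay    */
        printf("%d\n", ccmp_check_pn(NUM_DATA_QUEUES, b));   /* 0: mgmt slot  */
        return 0;
    }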
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c index f69ae19f497f..9f95354f859f 100644 --- a/net/wireless/mlme.c +++ b/net/wireless/mlme.c | |||
@@ -846,8 +846,9 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev, | |||
846 | if (!wdev->current_bss || | 846 | if (!wdev->current_bss || |
847 | memcmp(wdev->current_bss->pub.bssid, mgmt->bssid, | 847 | memcmp(wdev->current_bss->pub.bssid, mgmt->bssid, |
848 | ETH_ALEN) != 0 || | 848 | ETH_ALEN) != 0 || |
849 | memcmp(wdev->current_bss->pub.bssid, mgmt->da, | 849 | (wdev->iftype == NL80211_IFTYPE_STATION && |
850 | ETH_ALEN) != 0) | 850 | memcmp(wdev->current_bss->pub.bssid, mgmt->da, |
851 | ETH_ALEN) != 0)) | ||
851 | return -ENOTCONN; | 852 | return -ENOTCONN; |
852 | } | 853 | } |
853 | 854 | ||
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 3a7b8a2f2d5a..6b41d15c4a05 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -3955,6 +3955,55 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info) | |||
3955 | } | 3955 | } |
3956 | } | 3956 | } |
3957 | 3957 | ||
3958 | if (info->attrs[NL80211_ATTR_BSS_BASIC_RATES]) { | ||
3959 | u8 *rates = | ||
3960 | nla_data(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); | ||
3961 | int n_rates = | ||
3962 | nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]); | ||
3963 | struct ieee80211_supported_band *sband = | ||
3964 | wiphy->bands[ibss.channel->band]; | ||
3965 | int i, j; | ||
3966 | |||
3967 | if (n_rates == 0) { | ||
3968 | err = -EINVAL; | ||
3969 | goto out; | ||
3970 | } | ||
3971 | |||
3972 | for (i = 0; i < n_rates; i++) { | ||
3973 | int rate = (rates[i] & 0x7f) * 5; | ||
3974 | bool found = false; | ||
3975 | |||
3976 | for (j = 0; j < sband->n_bitrates; j++) { | ||
3977 | if (sband->bitrates[j].bitrate == rate) { | ||
3978 | found = true; | ||
3979 | ibss.basic_rates |= BIT(j); | ||
3980 | break; | ||
3981 | } | ||
3982 | } | ||
3983 | if (!found) { | ||
3984 | err = -EINVAL; | ||
3985 | goto out; | ||
3986 | } | ||
3987 | } | ||
3988 | } else { | ||
3989 | /* | ||
3990 | * If no rates were explicitly configured, | ||
3991 | * use the mandatory rate set for 11b or | ||
3992 | * 11a for maximum compatibility. | ||
3993 | */ | ||
3994 | struct ieee80211_supported_band *sband = | ||
3995 | wiphy->bands[ibss.channel->band]; | ||
3996 | int j; | ||
3997 | u32 flag = ibss.channel->band == IEEE80211_BAND_5GHZ ? | ||
3998 | IEEE80211_RATE_MANDATORY_A : | ||
3999 | IEEE80211_RATE_MANDATORY_B; | ||
4000 | |||
4001 | for (j = 0; j < sband->n_bitrates; j++) { | ||
4002 | if (sband->bitrates[j].flags & flag) | ||
4003 | ibss.basic_rates |= BIT(j); | ||
4004 | } | ||
4005 | } | ||
4006 | |||
3958 | err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys); | 4007 | err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys); |
3959 | 4008 | ||
3960 | out: | 4009 | out: |
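The new IBSS code converts each entry of NL80211_ATTR_BSS_BASIC_RATES from the 802.11 supported-rates encoding, units of 500 kb/s with the top bit flagging a basic rate, to the 100 kb/s units used by the wiphy bitrate table; that is where the (rates[i] & 0x7f) * 5 above comes from, and matches are collected as a bitmap over the band's bitrates. A stand-alone illustration of the conversion and matching (plain C; the sample rate table is made up, and unlike the kernel code this sketch does not reject unknown rates):

    #include <stdint.h>
    #include <stdio.h>

    /* bitrates in units of 100 kb/s, like struct ieee80211_rate::bitrate */
    static const int band_bitrates[] = { 10, 20, 55, 110, 60, 90, 120, 240 };
    #define N_BITRATES (sizeof(band_bitrates) / sizeof(band_bitrates[0]))

    /* Convert supported-rates bytes into a basic-rates bitmap over the table. */
    static uint32_t parse_basic_rates(const uint8_t *rates, int n_rates)
    {
        uint32_t basic = 0;
        int i, j;

        for (i = 0; i < n_rates; i++) {
            int rate = (rates[i] & 0x7f) * 5;   /* 500 kb/s -> 100 kb/s units */

            for (j = 0; j < (int)N_BITRATES; j++)
                if (band_bitrates[j] == rate)
                    basic |= 1u << j;
        }
        return basic;
    }

    int main(void)
    {
        /* 1, 2 and 11 Mb/s, each with the basic-rate bit (0x80) set */
        uint8_t rates[] = { 0x82, 0x84, 0x96 };

        printf("basic_rates bitmap: 0x%x\n",
               parse_basic_rates(rates, sizeof(rates)));   /* 0xb: bits 0,1,3 */
        return 0;
    }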
@@ -4653,7 +4702,8 @@ static int nl80211_register_action(struct sk_buff *skb, struct genl_info *info) | |||
4653 | if (err) | 4702 | if (err) |
4654 | goto unlock_rtnl; | 4703 | goto unlock_rtnl; |
4655 | 4704 | ||
4656 | if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { | 4705 | if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && |
4706 | dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) { | ||
4657 | err = -EOPNOTSUPP; | 4707 | err = -EOPNOTSUPP; |
4658 | goto out; | 4708 | goto out; |
4659 | } | 4709 | } |
@@ -4703,7 +4753,8 @@ static int nl80211_action(struct sk_buff *skb, struct genl_info *info) | |||
4703 | goto out; | 4753 | goto out; |
4704 | } | 4754 | } |
4705 | 4755 | ||
4706 | if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION) { | 4756 | if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && |
4757 | dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC) { | ||
4707 | err = -EOPNOTSUPP; | 4758 | err = -EOPNOTSUPP; |
4708 | goto out; | 4759 | goto out; |
4709 | } | 4760 | } |