path: root/net/mac80211
Diffstat (limited to 'net/mac80211')
-rw-r--r--  net/mac80211/agg-rx.c          8
-rw-r--r--  net/mac80211/agg-tx.c        232
-rw-r--r--  net/mac80211/cfg.c           245
-rw-r--r--  net/mac80211/debugfs.c        24
-rw-r--r--  net/mac80211/ibss.c            3
-rw-r--r--  net/mac80211/ieee80211_i.h    78
-rw-r--r--  net/mac80211/iface.c          14
-rw-r--r--  net/mac80211/main.c           25
-rw-r--r--  net/mac80211/mlme.c          259
-rw-r--r--  net/mac80211/pm.c             78
-rw-r--r--  net/mac80211/rate.c            6
-rw-r--r--  net/mac80211/rate.h            4
-rw-r--r--  net/mac80211/rx.c             29
-rw-r--r--  net/mac80211/scan.c           77
-rw-r--r--  net/mac80211/sta_info.c       17
-rw-r--r--  net/mac80211/sta_info.h        7
-rw-r--r--  net/mac80211/tx.c            579
-rw-r--r--  net/mac80211/util.c          126
-rw-r--r--  net/mac80211/wep.c            21
-rw-r--r--  net/mac80211/wext.c           33
-rw-r--r--  net/mac80211/wpa.c            28
21 files changed, 1074 insertions, 819 deletions
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index a95affc9462..07656d830bc 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -197,6 +197,14 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
197 197
198 status = WLAN_STATUS_REQUEST_DECLINED; 198 status = WLAN_STATUS_REQUEST_DECLINED;
199 199
200 if (test_sta_flags(sta, WLAN_STA_SUSPEND)) {
201#ifdef CONFIG_MAC80211_HT_DEBUG
202 printk(KERN_DEBUG "Suspend in progress. "
203 "Denying ADDBA request\n");
204#endif
205 goto end_no_lock;
206 }
207
200 /* sanity check for incoming parameters: 208 /* sanity check for incoming parameters:
201 * check if configuration can support the BA policy 209 * check if configuration can support the BA policy
202 * and if buffer size does not exceeds max value */ 210 * and if buffer size does not exceeds max value */
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 1df116d4d6e..947aaaad35d 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -131,24 +131,6 @@ static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
131 131
132 state = &sta->ampdu_mlme.tid_state_tx[tid]; 132 state = &sta->ampdu_mlme.tid_state_tx[tid];
133 133
134 if (local->hw.ampdu_queues) {
135 if (initiator) {
136 /*
137 * Stop the AC queue to avoid issues where we send
138 * unaggregated frames already before the delba.
139 */
140 ieee80211_stop_queue_by_reason(&local->hw,
141 local->hw.queues + sta->tid_to_tx_q[tid],
142 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
143 }
144
145 /*
146 * Pretend the driver woke the queue, just in case
147 * it disabled it before the session was stopped.
148 */
149 ieee80211_wake_queue(
150 &local->hw, local->hw.queues + sta->tid_to_tx_q[tid]);
151 }
152 *state = HT_AGG_STATE_REQ_STOP_BA_MSK | 134 *state = HT_AGG_STATE_REQ_STOP_BA_MSK |
153 (initiator << HT_AGG_STATE_INITIATOR_SHIFT); 135 (initiator << HT_AGG_STATE_INITIATOR_SHIFT);
154 136
@@ -158,6 +140,10 @@ static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
158 /* HW shall not deny going back to legacy */ 140 /* HW shall not deny going back to legacy */
159 if (WARN_ON(ret)) { 141 if (WARN_ON(ret)) {
160 *state = HT_AGG_STATE_OPERATIONAL; 142 *state = HT_AGG_STATE_OPERATIONAL;
143 /*
144 * We may have pending packets get stuck in this case...
145 * Not bothering with a workaround for now.
146 */
161 } 147 }
162 148
163 return ret; 149 return ret;
@@ -212,7 +198,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
212 struct sta_info *sta; 198 struct sta_info *sta;
213 struct ieee80211_sub_if_data *sdata; 199 struct ieee80211_sub_if_data *sdata;
214 u8 *state; 200 u8 *state;
215 int i, qn = -1, ret = 0; 201 int ret = 0;
216 u16 start_seq_num; 202 u16 start_seq_num;
217 203
218 if (WARN_ON(!local->ops->ampdu_action)) 204 if (WARN_ON(!local->ops->ampdu_action))
@@ -226,13 +212,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
226 ra, tid); 212 ra, tid);
227#endif /* CONFIG_MAC80211_HT_DEBUG */ 213#endif /* CONFIG_MAC80211_HT_DEBUG */
228 214
229 if (hw->ampdu_queues && ieee80211_ac_from_tid(tid) == 0) {
230#ifdef CONFIG_MAC80211_HT_DEBUG
231 printk(KERN_DEBUG "rejecting on voice AC\n");
232#endif
233 return -EINVAL;
234 }
235
236 rcu_read_lock(); 215 rcu_read_lock();
237 216
238 sta = sta_info_get(local, ra); 217 sta = sta_info_get(local, ra);
@@ -257,7 +236,17 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
257 goto unlock; 236 goto unlock;
258 } 237 }
259 238
239 if (test_sta_flags(sta, WLAN_STA_SUSPEND)) {
240#ifdef CONFIG_MAC80211_HT_DEBUG
241 printk(KERN_DEBUG "Suspend in progress. "
242 "Denying BA session request\n");
243#endif
244 ret = -EINVAL;
245 goto unlock;
246 }
247
260 spin_lock_bh(&sta->lock); 248 spin_lock_bh(&sta->lock);
249 spin_lock(&local->ampdu_lock);
261 250
262 sdata = sta->sdata; 251 sdata = sta->sdata;
263 252
@@ -278,41 +267,16 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
278 goto err_unlock_sta; 267 goto err_unlock_sta;
279 } 268 }
280 269
281 if (hw->ampdu_queues) { 270 /*
282 spin_lock(&local->queue_stop_reason_lock); 271 * While we're asking the driver about the aggregation,
283 /* reserve a new queue for this session */ 272 * stop the AC queue so that we don't have to worry
284 for (i = 0; i < local->hw.ampdu_queues; i++) { 273 * about frames that came in while we were doing that,
285 if (local->ampdu_ac_queue[i] < 0) { 274 * which would require us to put them to the AC pending
286 qn = i; 275 * afterwards which just makes the code more complex.
287 local->ampdu_ac_queue[qn] = 276 */
288 ieee80211_ac_from_tid(tid); 277 ieee80211_stop_queue_by_reason(
289 break; 278 &local->hw, ieee80211_ac_from_tid(tid),
290 } 279 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
291 }
292 spin_unlock(&local->queue_stop_reason_lock);
293
294 if (qn < 0) {
295#ifdef CONFIG_MAC80211_HT_DEBUG
296 printk(KERN_DEBUG "BA request denied - "
297 "queue unavailable for tid %d\n", tid);
298#endif /* CONFIG_MAC80211_HT_DEBUG */
299 ret = -ENOSPC;
300 goto err_unlock_sta;
301 }
302
303 /*
304 * If we successfully allocate the session, we can't have
305 * anything going on on the queue this TID maps into, so
306 * stop it for now. This is a "virtual" stop using the same
307 * mechanism that drivers will use.
308 *
309 * XXX: queue up frames for this session in the sta_info
310 * struct instead to avoid hitting all other STAs.
311 */
312 ieee80211_stop_queue_by_reason(
313 &local->hw, hw->queues + qn,
314 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
315 }
316 280
317 /* prepare A-MPDU MLME for Tx aggregation */ 281 /* prepare A-MPDU MLME for Tx aggregation */
318 sta->ampdu_mlme.tid_tx[tid] = 282 sta->ampdu_mlme.tid_tx[tid] =
@@ -324,9 +288,11 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
324 tid); 288 tid);
325#endif 289#endif
326 ret = -ENOMEM; 290 ret = -ENOMEM;
327 goto err_return_queue; 291 goto err_wake_queue;
328 } 292 }
329 293
294 skb_queue_head_init(&sta->ampdu_mlme.tid_tx[tid]->pending);
295
330 /* Tx timer */ 296 /* Tx timer */
331 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function = 297 sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
332 sta_addba_resp_timer_expired; 298 sta_addba_resp_timer_expired;
@@ -351,8 +317,13 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
351 *state = HT_AGG_STATE_IDLE; 317 *state = HT_AGG_STATE_IDLE;
352 goto err_free; 318 goto err_free;
353 } 319 }
354 sta->tid_to_tx_q[tid] = qn;
355 320
321 /* Driver vetoed or OKed, but we can take packets again now */
322 ieee80211_wake_queue_by_reason(
323 &local->hw, ieee80211_ac_from_tid(tid),
324 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
325
326 spin_unlock(&local->ampdu_lock);
356 spin_unlock_bh(&sta->lock); 327 spin_unlock_bh(&sta->lock);
357 328
358 /* send an addBA request */ 329 /* send an addBA request */
@@ -377,17 +348,12 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
377 err_free: 348 err_free:
378 kfree(sta->ampdu_mlme.tid_tx[tid]); 349 kfree(sta->ampdu_mlme.tid_tx[tid]);
379 sta->ampdu_mlme.tid_tx[tid] = NULL; 350 sta->ampdu_mlme.tid_tx[tid] = NULL;
380 err_return_queue: 351 err_wake_queue:
381 if (qn >= 0) { 352 ieee80211_wake_queue_by_reason(
382 /* We failed, so start queue again right away. */ 353 &local->hw, ieee80211_ac_from_tid(tid),
383 ieee80211_wake_queue_by_reason(hw, hw->queues + qn, 354 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
384 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
385 /* give queue back to pool */
386 spin_lock(&local->queue_stop_reason_lock);
387 local->ampdu_ac_queue[qn] = -1;
388 spin_unlock(&local->queue_stop_reason_lock);
389 }
390 err_unlock_sta: 355 err_unlock_sta:
356 spin_unlock(&local->ampdu_lock);
391 spin_unlock_bh(&sta->lock); 357 spin_unlock_bh(&sta->lock);
392 unlock: 358 unlock:
393 rcu_read_unlock(); 359 rcu_read_unlock();
@@ -395,6 +361,67 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
395} 361}
396EXPORT_SYMBOL(ieee80211_start_tx_ba_session); 362EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
397 363
364/*
365 * splice packets from the STA's pending to the local pending,
366 * requires a call to ieee80211_agg_splice_finish and holding
367 * local->ampdu_lock across both calls.
368 */
369static void ieee80211_agg_splice_packets(struct ieee80211_local *local,
370 struct sta_info *sta, u16 tid)
371{
372 unsigned long flags;
373 u16 queue = ieee80211_ac_from_tid(tid);
374
375 ieee80211_stop_queue_by_reason(
376 &local->hw, queue,
377 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
378
379 if (!skb_queue_empty(&sta->ampdu_mlme.tid_tx[tid]->pending)) {
380 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
381 /* mark queue as pending, it is stopped already */
382 __set_bit(IEEE80211_QUEUE_STOP_REASON_PENDING,
383 &local->queue_stop_reasons[queue]);
384 /* copy over remaining packets */
385 skb_queue_splice_tail_init(
386 &sta->ampdu_mlme.tid_tx[tid]->pending,
387 &local->pending[queue]);
388 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
389 }
390}
391
392static void ieee80211_agg_splice_finish(struct ieee80211_local *local,
393 struct sta_info *sta, u16 tid)
394{
395 u16 queue = ieee80211_ac_from_tid(tid);
396
397 ieee80211_wake_queue_by_reason(
398 &local->hw, queue,
399 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
400}
401
402/* caller must hold sta->lock */
403static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
404 struct sta_info *sta, u16 tid)
405{
406#ifdef CONFIG_MAC80211_HT_DEBUG
407 printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
408#endif
409
410 spin_lock(&local->ampdu_lock);
411 ieee80211_agg_splice_packets(local, sta, tid);
412 /*
413 * NB: we rely on sta->lock being taken in the TX
414 * processing here when adding to the pending queue,
415 * otherwise we could only change the state of the
416 * session to OPERATIONAL _here_.
417 */
418 ieee80211_agg_splice_finish(local, sta, tid);
419 spin_unlock(&local->ampdu_lock);
420
421 local->ops->ampdu_action(&local->hw, IEEE80211_AMPDU_TX_OPERATIONAL,
422 &sta->sta, tid, NULL);
423}
424
398void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid) 425void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
399{ 426{
400 struct ieee80211_local *local = hw_to_local(hw); 427 struct ieee80211_local *local = hw_to_local(hw);
@@ -437,20 +464,8 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
437 464
438 *state |= HT_ADDBA_DRV_READY_MSK; 465 *state |= HT_ADDBA_DRV_READY_MSK;
439 466
440 if (*state == HT_AGG_STATE_OPERATIONAL) { 467 if (*state == HT_AGG_STATE_OPERATIONAL)
441#ifdef CONFIG_MAC80211_HT_DEBUG 468 ieee80211_agg_tx_operational(local, sta, tid);
442 printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
443#endif
444 if (hw->ampdu_queues) {
445 /*
446 * Wake up this queue, we stopped it earlier,
447 * this will in turn wake the entire AC.
448 */
449 ieee80211_wake_queue_by_reason(hw,
450 hw->queues + sta->tid_to_tx_q[tid],
451 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
452 }
453 }
454 469
455 out: 470 out:
456 spin_unlock_bh(&sta->lock); 471 spin_unlock_bh(&sta->lock);
@@ -584,22 +599,19 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
584 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); 599 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
585 600
586 spin_lock_bh(&sta->lock); 601 spin_lock_bh(&sta->lock);
602 spin_lock(&local->ampdu_lock);
587 603
588 if (*state & HT_AGG_STATE_INITIATOR_MSK && 604 ieee80211_agg_splice_packets(local, sta, tid);
589 hw->ampdu_queues) {
590 /*
591 * Wake up this queue, we stopped it earlier,
592 * this will in turn wake the entire AC.
593 */
594 ieee80211_wake_queue_by_reason(hw,
595 hw->queues + sta->tid_to_tx_q[tid],
596 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
597 }
598 605
599 *state = HT_AGG_STATE_IDLE; 606 *state = HT_AGG_STATE_IDLE;
607 /* from now on packets are no longer put onto sta->pending */
600 sta->ampdu_mlme.addba_req_num[tid] = 0; 608 sta->ampdu_mlme.addba_req_num[tid] = 0;
601 kfree(sta->ampdu_mlme.tid_tx[tid]); 609 kfree(sta->ampdu_mlme.tid_tx[tid]);
602 sta->ampdu_mlme.tid_tx[tid] = NULL; 610 sta->ampdu_mlme.tid_tx[tid] = NULL;
611
612 ieee80211_agg_splice_finish(local, sta, tid);
613
614 spin_unlock(&local->ampdu_lock);
603 spin_unlock_bh(&sta->lock); 615 spin_unlock_bh(&sta->lock);
604 616
605 rcu_read_unlock(); 617 rcu_read_unlock();
@@ -637,9 +649,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
637 struct ieee80211_mgmt *mgmt, 649 struct ieee80211_mgmt *mgmt,
638 size_t len) 650 size_t len)
639{ 651{
640 struct ieee80211_hw *hw = &local->hw; 652 u16 capab, tid;
641 u16 capab;
642 u16 tid, start_seq_num;
643 u8 *state; 653 u8 *state;
644 654
645 capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab); 655 capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
@@ -673,26 +683,10 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
673 683
674 *state |= HT_ADDBA_RECEIVED_MSK; 684 *state |= HT_ADDBA_RECEIVED_MSK;
675 685
676 if (hw->ampdu_queues && *state != curstate && 686 if (*state != curstate && *state == HT_AGG_STATE_OPERATIONAL)
677 *state == HT_AGG_STATE_OPERATIONAL) { 687 ieee80211_agg_tx_operational(local, sta, tid);
678 /*
679 * Wake up this queue, we stopped it earlier,
680 * this will in turn wake the entire AC.
681 */
682 ieee80211_wake_queue_by_reason(hw,
683 hw->queues + sta->tid_to_tx_q[tid],
684 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
685 }
686 sta->ampdu_mlme.addba_req_num[tid] = 0;
687 688
688 if (local->ops->ampdu_action) { 689 sta->ampdu_mlme.addba_req_num[tid] = 0;
689 (void)local->ops->ampdu_action(hw,
690 IEEE80211_AMPDU_TX_RESUME,
691 &sta->sta, tid, &start_seq_num);
692 }
693#ifdef CONFIG_MAC80211_HT_DEBUG
694 printk(KERN_DEBUG "Resuming TX aggregation for tid %d\n", tid);
695#endif /* CONFIG_MAC80211_HT_DEBUG */
696 } else { 690 } else {
697 sta->ampdu_mlme.addba_req_num[tid]++; 691 sta->ampdu_mlme.addba_req_num[tid]++;
698 ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR); 692 ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR);
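
For illustration only (this sketch is not part of the diff): with the per-TID pending queue and local->ampdu_lock in place, a driver learns that the ADDBA handshake has completed through the IEEE80211_AMPDU_TX_OPERATIONAL call at the end of ieee80211_agg_tx_operational() above. A minimal driver-side ampdu_action() handler compatible with that flow could look roughly as below; the signature matches the call sites in this diff, the irqsafe ADDBA callbacks are the helpers mac80211 exports to drivers of this era, and a real driver would additionally program its hardware and handle the RX cases.

#include <net/mac80211.h>

/* Sketch of a driver ampdu_action() handler cooperating with the flow above. */
static int sketch_ampdu_action(struct ieee80211_hw *hw,
			       enum ieee80211_ampdu_mlme_action action,
			       struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	switch (action) {
	case IEEE80211_AMPDU_TX_START:
		/* (hypothetical) set up hardware TX aggregation state here,
		 * then tell mac80211 it may send the ADDBA request */
		ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid);
		return 0;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/* ADDBA exchange finished; frames for this TID flow again
		 * (spliced back from the per-TID pending queue) and may now
		 * be sent as part of an A-MPDU */
		return 0;
	case IEEE80211_AMPDU_TX_STOP:
		/* (hypothetical) tear down hardware aggregation state here */
		ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

Note that ieee80211_agg_splice_packets()/ieee80211_agg_splice_finish() stop and re-wake the AC with the AGGREGATION reason around the state change, so by the time TX_OPERATIONAL reaches the driver the frames queued on the per-TID pending list have already been moved back onto the normal TX path.
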
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 58693e52d45..e677b751d46 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -540,9 +540,6 @@ static int ieee80211_add_beacon(struct wiphy *wiphy, struct net_device *dev,
540 540
541 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 541 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
542 542
543 if (sdata->vif.type != NL80211_IFTYPE_AP)
544 return -EINVAL;
545
546 old = sdata->u.ap.beacon; 543 old = sdata->u.ap.beacon;
547 544
548 if (old) 545 if (old)
@@ -559,9 +556,6 @@ static int ieee80211_set_beacon(struct wiphy *wiphy, struct net_device *dev,
559 556
560 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 557 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
561 558
562 if (sdata->vif.type != NL80211_IFTYPE_AP)
563 return -EINVAL;
564
565 old = sdata->u.ap.beacon; 559 old = sdata->u.ap.beacon;
566 560
567 if (!old) 561 if (!old)
@@ -577,9 +571,6 @@ static int ieee80211_del_beacon(struct wiphy *wiphy, struct net_device *dev)
577 571
578 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 572 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
579 573
580 if (sdata->vif.type != NL80211_IFTYPE_AP)
581 return -EINVAL;
582
583 old = sdata->u.ap.beacon; 574 old = sdata->u.ap.beacon;
584 575
585 if (!old) 576 if (!old)
@@ -728,10 +719,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
728 int err; 719 int err;
729 int layer2_update; 720 int layer2_update;
730 721
731 /* Prevent a race with changing the rate control algorithm */
732 if (!netif_running(dev))
733 return -ENETDOWN;
734
735 if (params->vlan) { 722 if (params->vlan) {
736 sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); 723 sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
737 724
@@ -860,14 +847,8 @@ static int ieee80211_add_mpath(struct wiphy *wiphy, struct net_device *dev,
860 struct sta_info *sta; 847 struct sta_info *sta;
861 int err; 848 int err;
862 849
863 if (!netif_running(dev))
864 return -ENETDOWN;
865
866 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 850 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
867 851
868 if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
869 return -ENOTSUPP;
870
871 rcu_read_lock(); 852 rcu_read_lock();
872 sta = sta_info_get(local, next_hop); 853 sta = sta_info_get(local, next_hop);
873 if (!sta) { 854 if (!sta) {
@@ -913,14 +894,8 @@ static int ieee80211_change_mpath(struct wiphy *wiphy,
913 struct mesh_path *mpath; 894 struct mesh_path *mpath;
914 struct sta_info *sta; 895 struct sta_info *sta;
915 896
916 if (!netif_running(dev))
917 return -ENETDOWN;
918
919 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 897 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
920 898
921 if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
922 return -ENOTSUPP;
923
924 rcu_read_lock(); 899 rcu_read_lock();
925 900
926 sta = sta_info_get(local, next_hop); 901 sta = sta_info_get(local, next_hop);
@@ -989,9 +964,6 @@ static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev,
989 964
990 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 965 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
991 966
992 if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
993 return -ENOTSUPP;
994
995 rcu_read_lock(); 967 rcu_read_lock();
996 mpath = mesh_path_lookup(dst, sdata); 968 mpath = mesh_path_lookup(dst, sdata);
997 if (!mpath) { 969 if (!mpath) {
@@ -1013,9 +985,6 @@ static int ieee80211_dump_mpath(struct wiphy *wiphy, struct net_device *dev,
1013 985
1014 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 986 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1015 987
1016 if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
1017 return -ENOTSUPP;
1018
1019 rcu_read_lock(); 988 rcu_read_lock();
1020 mpath = mesh_path_lookup_by_idx(idx, sdata); 989 mpath = mesh_path_lookup_by_idx(idx, sdata);
1021 if (!mpath) { 990 if (!mpath) {
@@ -1035,8 +1004,6 @@ static int ieee80211_get_mesh_params(struct wiphy *wiphy,
1035 struct ieee80211_sub_if_data *sdata; 1004 struct ieee80211_sub_if_data *sdata;
1036 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1005 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1037 1006
1038 if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
1039 return -ENOTSUPP;
1040 memcpy(conf, &(sdata->u.mesh.mshcfg), sizeof(struct mesh_config)); 1007 memcpy(conf, &(sdata->u.mesh.mshcfg), sizeof(struct mesh_config));
1041 return 0; 1008 return 0;
1042} 1009}
@@ -1054,9 +1021,6 @@ static int ieee80211_set_mesh_params(struct wiphy *wiphy,
1054 struct ieee80211_sub_if_data *sdata; 1021 struct ieee80211_sub_if_data *sdata;
1055 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1022 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1056 1023
1057 if (sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
1058 return -ENOTSUPP;
1059
1060 /* Set the config options which we are interested in setting */ 1024 /* Set the config options which we are interested in setting */
1061 conf = &(sdata->u.mesh.mshcfg); 1025 conf = &(sdata->u.mesh.mshcfg);
1062 if (_chg_mesh_attr(NL80211_MESHCONF_RETRY_TIMEOUT, mask)) 1026 if (_chg_mesh_attr(NL80211_MESHCONF_RETRY_TIMEOUT, mask))
@@ -1104,9 +1068,6 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
1104 1068
1105 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1069 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1106 1070
1107 if (sdata->vif.type != NL80211_IFTYPE_AP)
1108 return -EINVAL;
1109
1110 if (params->use_cts_prot >= 0) { 1071 if (params->use_cts_prot >= 0) {
1111 sdata->vif.bss_conf.use_cts_prot = params->use_cts_prot; 1072 sdata->vif.bss_conf.use_cts_prot = params->use_cts_prot;
1112 changed |= BSS_CHANGED_ERP_CTS_PROT; 1073 changed |= BSS_CHANGED_ERP_CTS_PROT;
@@ -1181,91 +1142,6 @@ static int ieee80211_set_channel(struct wiphy *wiphy,
1181 return ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 1142 return ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
1182} 1143}
1183 1144
1184static int set_mgmt_extra_ie_sta(struct ieee80211_sub_if_data *sdata,
1185 u8 subtype, u8 *ies, size_t ies_len)
1186{
1187 struct ieee80211_local *local = sdata->local;
1188 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1189
1190 switch (subtype) {
1191 case IEEE80211_STYPE_PROBE_REQ >> 4:
1192 if (local->ops->hw_scan)
1193 break;
1194 kfree(ifmgd->ie_probereq);
1195 ifmgd->ie_probereq = ies;
1196 ifmgd->ie_probereq_len = ies_len;
1197 return 0;
1198 case IEEE80211_STYPE_PROBE_RESP >> 4:
1199 kfree(ifmgd->ie_proberesp);
1200 ifmgd->ie_proberesp = ies;
1201 ifmgd->ie_proberesp_len = ies_len;
1202 return 0;
1203 case IEEE80211_STYPE_AUTH >> 4:
1204 kfree(ifmgd->ie_auth);
1205 ifmgd->ie_auth = ies;
1206 ifmgd->ie_auth_len = ies_len;
1207 return 0;
1208 case IEEE80211_STYPE_ASSOC_REQ >> 4:
1209 kfree(ifmgd->ie_assocreq);
1210 ifmgd->ie_assocreq = ies;
1211 ifmgd->ie_assocreq_len = ies_len;
1212 return 0;
1213 case IEEE80211_STYPE_REASSOC_REQ >> 4:
1214 kfree(ifmgd->ie_reassocreq);
1215 ifmgd->ie_reassocreq = ies;
1216 ifmgd->ie_reassocreq_len = ies_len;
1217 return 0;
1218 case IEEE80211_STYPE_DEAUTH >> 4:
1219 kfree(ifmgd->ie_deauth);
1220 ifmgd->ie_deauth = ies;
1221 ifmgd->ie_deauth_len = ies_len;
1222 return 0;
1223 case IEEE80211_STYPE_DISASSOC >> 4:
1224 kfree(ifmgd->ie_disassoc);
1225 ifmgd->ie_disassoc = ies;
1226 ifmgd->ie_disassoc_len = ies_len;
1227 return 0;
1228 }
1229
1230 return -EOPNOTSUPP;
1231}
1232
1233static int ieee80211_set_mgmt_extra_ie(struct wiphy *wiphy,
1234 struct net_device *dev,
1235 struct mgmt_extra_ie_params *params)
1236{
1237 struct ieee80211_sub_if_data *sdata;
1238 u8 *ies;
1239 size_t ies_len;
1240 int ret = -EOPNOTSUPP;
1241
1242 if (params->ies) {
1243 ies = kmemdup(params->ies, params->ies_len, GFP_KERNEL);
1244 if (ies == NULL)
1245 return -ENOMEM;
1246 ies_len = params->ies_len;
1247 } else {
1248 ies = NULL;
1249 ies_len = 0;
1250 }
1251
1252 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1253
1254 switch (sdata->vif.type) {
1255 case NL80211_IFTYPE_STATION:
1256 ret = set_mgmt_extra_ie_sta(sdata, params->subtype,
1257 ies, ies_len);
1258 break;
1259 default:
1260 ret = -EOPNOTSUPP;
1261 break;
1262 }
1263
1264 if (ret)
1265 kfree(ies);
1266 return ret;
1267}
1268
1269#ifdef CONFIG_PM 1145#ifdef CONFIG_PM
1270static int ieee80211_suspend(struct wiphy *wiphy) 1146static int ieee80211_suspend(struct wiphy *wiphy)
1271{ 1147{
@@ -1287,9 +1163,6 @@ static int ieee80211_scan(struct wiphy *wiphy,
1287{ 1163{
1288 struct ieee80211_sub_if_data *sdata; 1164 struct ieee80211_sub_if_data *sdata;
1289 1165
1290 if (!netif_running(dev))
1291 return -ENETDOWN;
1292
1293 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1166 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1294 1167
1295 if (sdata->vif.type != NL80211_IFTYPE_STATION && 1168 if (sdata->vif.type != NL80211_IFTYPE_STATION &&
@@ -1300,6 +1173,119 @@ static int ieee80211_scan(struct wiphy *wiphy,
1300 return ieee80211_request_scan(sdata, req); 1173 return ieee80211_request_scan(sdata, req);
1301} 1174}
1302 1175
1176static int ieee80211_auth(struct wiphy *wiphy, struct net_device *dev,
1177 struct cfg80211_auth_request *req)
1178{
1179 struct ieee80211_sub_if_data *sdata;
1180
1181 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1182
1183 switch (req->auth_type) {
1184 case NL80211_AUTHTYPE_OPEN_SYSTEM:
1185 sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_OPEN;
1186 break;
1187 case NL80211_AUTHTYPE_SHARED_KEY:
1188 sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_SHARED_KEY;
1189 break;
1190 case NL80211_AUTHTYPE_FT:
1191 sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_FT;
1192 break;
1193 case NL80211_AUTHTYPE_NETWORK_EAP:
1194 sdata->u.mgd.auth_algs = IEEE80211_AUTH_ALG_LEAP;
1195 break;
1196 default:
1197 return -EOPNOTSUPP;
1198 }
1199
1200 memcpy(sdata->u.mgd.bssid, req->peer_addr, ETH_ALEN);
1201 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
1202 sdata->u.mgd.flags |= IEEE80211_STA_BSSID_SET;
1203
1204 /* TODO: req->chan */
1205 sdata->u.mgd.flags |= IEEE80211_STA_AUTO_CHANNEL_SEL;
1206
1207 if (req->ssid) {
1208 sdata->u.mgd.flags |= IEEE80211_STA_SSID_SET;
1209 memcpy(sdata->u.mgd.ssid, req->ssid, req->ssid_len);
1210 sdata->u.mgd.ssid_len = req->ssid_len;
1211 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL;
1212 }
1213
1214 kfree(sdata->u.mgd.sme_auth_ie);
1215 sdata->u.mgd.sme_auth_ie = NULL;
1216 sdata->u.mgd.sme_auth_ie_len = 0;
1217 if (req->ie) {
1218 sdata->u.mgd.sme_auth_ie = kmalloc(req->ie_len, GFP_KERNEL);
1219 if (sdata->u.mgd.sme_auth_ie == NULL)
1220 return -ENOMEM;
1221 memcpy(sdata->u.mgd.sme_auth_ie, req->ie, req->ie_len);
1222 sdata->u.mgd.sme_auth_ie_len = req->ie_len;
1223 }
1224
1225 sdata->u.mgd.flags |= IEEE80211_STA_EXT_SME;
1226 sdata->u.mgd.state = IEEE80211_STA_MLME_DIRECT_PROBE;
1227 ieee80211_sta_req_auth(sdata);
1228 return 0;
1229}
1230
1231static int ieee80211_assoc(struct wiphy *wiphy, struct net_device *dev,
1232 struct cfg80211_assoc_request *req)
1233{
1234 struct ieee80211_sub_if_data *sdata;
1235 int ret;
1236
1237 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1238
1239 if (memcmp(sdata->u.mgd.bssid, req->peer_addr, ETH_ALEN) != 0 ||
1240 !(sdata->u.mgd.flags & IEEE80211_STA_AUTHENTICATED))
1241 return -ENOLINK; /* not authenticated */
1242
1243 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
1244 sdata->u.mgd.flags |= IEEE80211_STA_BSSID_SET;
1245
1246 /* TODO: req->chan */
1247 sdata->u.mgd.flags |= IEEE80211_STA_AUTO_CHANNEL_SEL;
1248
1249 if (req->ssid) {
1250 sdata->u.mgd.flags |= IEEE80211_STA_SSID_SET;
1251 memcpy(sdata->u.mgd.ssid, req->ssid, req->ssid_len);
1252 sdata->u.mgd.ssid_len = req->ssid_len;
1253 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL;
1254 } else
1255 sdata->u.mgd.flags |= IEEE80211_STA_AUTO_SSID_SEL;
1256
1257 ret = ieee80211_sta_set_extra_ie(sdata, req->ie, req->ie_len);
1258 if (ret)
1259 return ret;
1260
1261 sdata->u.mgd.flags |= IEEE80211_STA_EXT_SME;
1262 sdata->u.mgd.state = IEEE80211_STA_MLME_ASSOCIATE;
1263 ieee80211_sta_req_auth(sdata);
1264 return 0;
1265}
1266
1267static int ieee80211_deauth(struct wiphy *wiphy, struct net_device *dev,
1268 struct cfg80211_deauth_request *req)
1269{
1270 struct ieee80211_sub_if_data *sdata;
1271
1272 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1273
1274 /* TODO: req->ie */
1275 return ieee80211_sta_deauthenticate(sdata, req->reason_code);
1276}
1277
1278static int ieee80211_disassoc(struct wiphy *wiphy, struct net_device *dev,
1279 struct cfg80211_disassoc_request *req)
1280{
1281 struct ieee80211_sub_if_data *sdata;
1282
1283 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1284
1285 /* TODO: req->ie */
1286 return ieee80211_sta_disassociate(sdata, req->reason_code);
1287}
1288
1303struct cfg80211_ops mac80211_config_ops = { 1289struct cfg80211_ops mac80211_config_ops = {
1304 .add_virtual_intf = ieee80211_add_iface, 1290 .add_virtual_intf = ieee80211_add_iface,
1305 .del_virtual_intf = ieee80211_del_iface, 1291 .del_virtual_intf = ieee80211_del_iface,
@@ -1329,8 +1315,11 @@ struct cfg80211_ops mac80211_config_ops = {
1329 .change_bss = ieee80211_change_bss, 1315 .change_bss = ieee80211_change_bss,
1330 .set_txq_params = ieee80211_set_txq_params, 1316 .set_txq_params = ieee80211_set_txq_params,
1331 .set_channel = ieee80211_set_channel, 1317 .set_channel = ieee80211_set_channel,
1332 .set_mgmt_extra_ie = ieee80211_set_mgmt_extra_ie,
1333 .suspend = ieee80211_suspend, 1318 .suspend = ieee80211_suspend,
1334 .resume = ieee80211_resume, 1319 .resume = ieee80211_resume,
1335 .scan = ieee80211_scan, 1320 .scan = ieee80211_scan,
1321 .auth = ieee80211_auth,
1322 .assoc = ieee80211_assoc,
1323 .deauth = ieee80211_deauth,
1324 .disassoc = ieee80211_disassoc,
1336}; 1325};
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index e37f557de3f..210b9b6fecd 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -40,6 +40,10 @@ static const struct file_operations name## _ops = { \
40 local->debugfs.name = debugfs_create_file(#name, 0400, phyd, \ 40 local->debugfs.name = debugfs_create_file(#name, 0400, phyd, \
41 local, &name## _ops); 41 local, &name## _ops);
42 42
43#define DEBUGFS_ADD_MODE(name, mode) \
44 local->debugfs.name = debugfs_create_file(#name, mode, phyd, \
45 local, &name## _ops);
46
43#define DEBUGFS_DEL(name) \ 47#define DEBUGFS_DEL(name) \
44 debugfs_remove(local->debugfs.name); \ 48 debugfs_remove(local->debugfs.name); \
45 local->debugfs.name = NULL; 49 local->debugfs.name = NULL;
@@ -113,6 +117,24 @@ static const struct file_operations tsf_ops = {
113 .open = mac80211_open_file_generic 117 .open = mac80211_open_file_generic
114}; 118};
115 119
120static ssize_t reset_write(struct file *file, const char __user *user_buf,
121 size_t count, loff_t *ppos)
122{
123 struct ieee80211_local *local = file->private_data;
124
125 rtnl_lock();
126 __ieee80211_suspend(&local->hw);
127 __ieee80211_resume(&local->hw);
128 rtnl_unlock();
129
130 return count;
131}
132
133static const struct file_operations reset_ops = {
134 .write = reset_write,
135 .open = mac80211_open_file_generic,
136};
137
116/* statistics stuff */ 138/* statistics stuff */
117 139
118#define DEBUGFS_STATS_FILE(name, buflen, fmt, value...) \ 140#define DEBUGFS_STATS_FILE(name, buflen, fmt, value...) \
@@ -254,6 +276,7 @@ void debugfs_hw_add(struct ieee80211_local *local)
254 DEBUGFS_ADD(total_ps_buffered); 276 DEBUGFS_ADD(total_ps_buffered);
255 DEBUGFS_ADD(wep_iv); 277 DEBUGFS_ADD(wep_iv);
256 DEBUGFS_ADD(tsf); 278 DEBUGFS_ADD(tsf);
279 DEBUGFS_ADD_MODE(reset, 0200);
257 280
258 statsd = debugfs_create_dir("statistics", phyd); 281 statsd = debugfs_create_dir("statistics", phyd);
259 local->debugfs.statistics = statsd; 282 local->debugfs.statistics = statsd;
@@ -308,6 +331,7 @@ void debugfs_hw_del(struct ieee80211_local *local)
308 DEBUGFS_DEL(total_ps_buffered); 331 DEBUGFS_DEL(total_ps_buffered);
309 DEBUGFS_DEL(wep_iv); 332 DEBUGFS_DEL(wep_iv);
310 DEBUGFS_DEL(tsf); 333 DEBUGFS_DEL(tsf);
334 DEBUGFS_DEL(reset);
311 335
312 DEBUGFS_STATS_DEL(transmitted_fragment_count); 336 DEBUGFS_STATS_DEL(transmitted_fragment_count);
313 DEBUGFS_STATS_DEL(multicast_transmitted_frame_count); 337 DEBUGFS_STATS_DEL(multicast_transmitted_frame_count);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index f4becc12904..3201e1f9636 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -812,8 +812,9 @@ int ieee80211_ibss_commit(struct ieee80211_sub_if_data *sdata)
812 812
813 ifibss->ibss_join_req = jiffies; 813 ifibss->ibss_join_req = jiffies;
814 ifibss->state = IEEE80211_IBSS_MLME_SEARCH; 814 ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
815 set_bit(IEEE80211_IBSS_REQ_RUN, &ifibss->request);
815 816
816 return ieee80211_sta_find_ibss(sdata); 817 return 0;
817} 818}
818 819
819int ieee80211_ibss_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len) 820int ieee80211_ibss_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size_t len)
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index fbb91f1aebb..e6ed78cb16b 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -149,11 +149,6 @@ struct ieee80211_tx_data {
149 149
150 struct ieee80211_channel *channel; 150 struct ieee80211_channel *channel;
151 151
152 /* Extra fragments (in addition to the first fragment
153 * in skb) */
154 struct sk_buff **extra_frag;
155 int num_extra_frag;
156
157 u16 ethertype; 152 u16 ethertype;
158 unsigned int flags; 153 unsigned int flags;
159}; 154};
@@ -189,12 +184,6 @@ struct ieee80211_rx_data {
189 u16 tkip_iv16; 184 u16 tkip_iv16;
190}; 185};
191 186
192struct ieee80211_tx_stored_packet {
193 struct sk_buff *skb;
194 struct sk_buff **extra_frag;
195 int num_extra_frag;
196};
197
198struct beacon_data { 187struct beacon_data {
199 u8 *head, *tail; 188 u8 *head, *tail;
200 int head_len, tail_len; 189 int head_len, tail_len;
@@ -247,8 +236,9 @@ struct mesh_preq_queue {
247#define IEEE80211_STA_ASSOCIATED BIT(4) 236#define IEEE80211_STA_ASSOCIATED BIT(4)
248#define IEEE80211_STA_PROBEREQ_POLL BIT(5) 237#define IEEE80211_STA_PROBEREQ_POLL BIT(5)
249#define IEEE80211_STA_CREATE_IBSS BIT(6) 238#define IEEE80211_STA_CREATE_IBSS BIT(6)
250#define IEEE80211_STA_MIXED_CELL BIT(7) 239/* hole at 7, please re-use */
251#define IEEE80211_STA_WMM_ENABLED BIT(8) 240#define IEEE80211_STA_WMM_ENABLED BIT(8)
241/* hole at 9, please re-use */
252#define IEEE80211_STA_AUTO_SSID_SEL BIT(10) 242#define IEEE80211_STA_AUTO_SSID_SEL BIT(10)
253#define IEEE80211_STA_AUTO_BSSID_SEL BIT(11) 243#define IEEE80211_STA_AUTO_BSSID_SEL BIT(11)
254#define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12) 244#define IEEE80211_STA_AUTO_CHANNEL_SEL BIT(12)
@@ -256,6 +246,7 @@ struct mesh_preq_queue {
256#define IEEE80211_STA_TKIP_WEP_USED BIT(14) 246#define IEEE80211_STA_TKIP_WEP_USED BIT(14)
257#define IEEE80211_STA_CSA_RECEIVED BIT(15) 247#define IEEE80211_STA_CSA_RECEIVED BIT(15)
258#define IEEE80211_STA_MFP_ENABLED BIT(16) 248#define IEEE80211_STA_MFP_ENABLED BIT(16)
249#define IEEE80211_STA_EXT_SME BIT(17)
259/* flags for MLME request */ 250/* flags for MLME request */
260#define IEEE80211_STA_REQ_SCAN 0 251#define IEEE80211_STA_REQ_SCAN 0
261#define IEEE80211_STA_REQ_DIRECT_PROBE 1 252#define IEEE80211_STA_REQ_DIRECT_PROBE 1
@@ -266,12 +257,14 @@ struct mesh_preq_queue {
266#define IEEE80211_AUTH_ALG_OPEN BIT(0) 257#define IEEE80211_AUTH_ALG_OPEN BIT(0)
267#define IEEE80211_AUTH_ALG_SHARED_KEY BIT(1) 258#define IEEE80211_AUTH_ALG_SHARED_KEY BIT(1)
268#define IEEE80211_AUTH_ALG_LEAP BIT(2) 259#define IEEE80211_AUTH_ALG_LEAP BIT(2)
260#define IEEE80211_AUTH_ALG_FT BIT(3)
269 261
270struct ieee80211_if_managed { 262struct ieee80211_if_managed {
271 struct timer_list timer; 263 struct timer_list timer;
272 struct timer_list chswitch_timer; 264 struct timer_list chswitch_timer;
273 struct work_struct work; 265 struct work_struct work;
274 struct work_struct chswitch_work; 266 struct work_struct chswitch_work;
267 struct work_struct beacon_loss_work;
275 268
276 u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN]; 269 u8 bssid[ETH_ALEN], prev_bssid[ETH_ALEN];
277 270
@@ -305,6 +298,7 @@ struct ieee80211_if_managed {
305 unsigned long request; 298 unsigned long request;
306 299
307 unsigned long last_probe; 300 unsigned long last_probe;
301 unsigned long last_beacon;
308 302
309 unsigned int flags; 303 unsigned int flags;
310 304
@@ -321,20 +315,8 @@ struct ieee80211_if_managed {
321 int wmm_last_param_set; 315 int wmm_last_param_set;
322 316
323 /* Extra IE data for management frames */ 317 /* Extra IE data for management frames */
324 u8 *ie_probereq; 318 u8 *sme_auth_ie;
325 size_t ie_probereq_len; 319 size_t sme_auth_ie_len;
326 u8 *ie_proberesp;
327 size_t ie_proberesp_len;
328 u8 *ie_auth;
329 size_t ie_auth_len;
330 u8 *ie_assocreq;
331 size_t ie_assocreq_len;
332 u8 *ie_reassocreq;
333 size_t ie_reassocreq_len;
334 u8 *ie_deauth;
335 size_t ie_deauth_len;
336 u8 *ie_disassoc;
337 size_t ie_disassoc_len;
338}; 320};
339 321
340enum ieee80211_ibss_flags { 322enum ieee80211_ibss_flags {
@@ -421,7 +403,6 @@ struct ieee80211_if_mesh {
421 * 403 *
422 * @IEEE80211_SDATA_ALLMULTI: interface wants all multicast packets 404 * @IEEE80211_SDATA_ALLMULTI: interface wants all multicast packets
423 * @IEEE80211_SDATA_PROMISC: interface is promisc 405 * @IEEE80211_SDATA_PROMISC: interface is promisc
424 * @IEEE80211_SDATA_USERSPACE_MLME: userspace MLME is active
425 * @IEEE80211_SDATA_OPERATING_GMODE: operating in G-only mode 406 * @IEEE80211_SDATA_OPERATING_GMODE: operating in G-only mode
426 * @IEEE80211_SDATA_DONT_BRIDGE_PACKETS: bridge packets between 407 * @IEEE80211_SDATA_DONT_BRIDGE_PACKETS: bridge packets between
427 * associated stations and deliver multicast frames both 408 * associated stations and deliver multicast frames both
@@ -430,9 +411,8 @@ struct ieee80211_if_mesh {
430enum ieee80211_sub_if_data_flags { 411enum ieee80211_sub_if_data_flags {
431 IEEE80211_SDATA_ALLMULTI = BIT(0), 412 IEEE80211_SDATA_ALLMULTI = BIT(0),
432 IEEE80211_SDATA_PROMISC = BIT(1), 413 IEEE80211_SDATA_PROMISC = BIT(1),
433 IEEE80211_SDATA_USERSPACE_MLME = BIT(2), 414 IEEE80211_SDATA_OPERATING_GMODE = BIT(2),
434 IEEE80211_SDATA_OPERATING_GMODE = BIT(3), 415 IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(3),
435 IEEE80211_SDATA_DONT_BRIDGE_PACKETS = BIT(4),
436}; 416};
437 417
438struct ieee80211_sub_if_data { 418struct ieee80211_sub_if_data {
@@ -598,6 +578,8 @@ enum queue_stop_reason {
598 IEEE80211_QUEUE_STOP_REASON_PS, 578 IEEE80211_QUEUE_STOP_REASON_PS,
599 IEEE80211_QUEUE_STOP_REASON_CSA, 579 IEEE80211_QUEUE_STOP_REASON_CSA,
600 IEEE80211_QUEUE_STOP_REASON_AGGREGATION, 580 IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
581 IEEE80211_QUEUE_STOP_REASON_SUSPEND,
582 IEEE80211_QUEUE_STOP_REASON_PENDING,
601}; 583};
602 584
603struct ieee80211_master_priv { 585struct ieee80211_master_priv {
@@ -612,12 +594,7 @@ struct ieee80211_local {
612 594
613 const struct ieee80211_ops *ops; 595 const struct ieee80211_ops *ops;
614 596
615 /* AC queue corresponding to each AMPDU queue */ 597 unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES];
616 s8 ampdu_ac_queue[IEEE80211_MAX_AMPDU_QUEUES];
617 unsigned int amdpu_ac_stop_refcnt[IEEE80211_MAX_AMPDU_QUEUES];
618
619 unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES +
620 IEEE80211_MAX_AMPDU_QUEUES];
621 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */ 598 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
622 spinlock_t queue_stop_reason_lock; 599 spinlock_t queue_stop_reason_lock;
623 600
@@ -654,11 +631,17 @@ struct ieee80211_local {
654 struct sta_info *sta_hash[STA_HASH_SIZE]; 631 struct sta_info *sta_hash[STA_HASH_SIZE];
655 struct timer_list sta_cleanup; 632 struct timer_list sta_cleanup;
656 633
657 unsigned long queues_pending[BITS_TO_LONGS(IEEE80211_MAX_QUEUES)]; 634 struct sk_buff_head pending[IEEE80211_MAX_QUEUES];
658 unsigned long queues_pending_run[BITS_TO_LONGS(IEEE80211_MAX_QUEUES)];
659 struct ieee80211_tx_stored_packet pending_packet[IEEE80211_MAX_QUEUES];
660 struct tasklet_struct tx_pending_tasklet; 635 struct tasklet_struct tx_pending_tasklet;
661 636
637 /*
638 * This lock is used to prevent concurrent A-MPDU
639 * session start/stop processing, this thus also
640 * synchronises the ->ampdu_action() callback to
641 * drivers and limits it to one at a time.
642 */
643 spinlock_t ampdu_lock;
644
662 /* number of interfaces with corresponding IFF_ flags */ 645 /* number of interfaces with corresponding IFF_ flags */
663 atomic_t iff_allmultis, iff_promiscs; 646 atomic_t iff_allmultis, iff_promiscs;
664 647
@@ -774,6 +757,7 @@ struct ieee80211_local {
774 struct dentry *total_ps_buffered; 757 struct dentry *total_ps_buffered;
775 struct dentry *wep_iv; 758 struct dentry *wep_iv;
776 struct dentry *tsf; 759 struct dentry *tsf;
760 struct dentry *reset;
777 struct dentry *statistics; 761 struct dentry *statistics;
778 struct local_debugfsdentries_statsdentries { 762 struct local_debugfsdentries_statsdentries {
779 struct dentry *transmitted_fragment_count; 763 struct dentry *transmitted_fragment_count;
@@ -969,7 +953,7 @@ ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata,
969 struct sk_buff *skb, 953 struct sk_buff *skb,
970 struct ieee80211_rx_status *rx_status); 954 struct ieee80211_rx_status *rx_status);
971int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata, 955int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata,
972 char *ie, size_t len); 956 const char *ie, size_t len);
973 957
974void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local); 958void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local);
975void ieee80211_scan_failed(struct ieee80211_local *local); 959void ieee80211_scan_failed(struct ieee80211_local *local);
@@ -1053,8 +1037,19 @@ void ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
1053 u8 pwr_constr_elem_len); 1037 u8 pwr_constr_elem_len);
1054 1038
1055/* Suspend/resume */ 1039/* Suspend/resume */
1040#ifdef CONFIG_PM
1056int __ieee80211_suspend(struct ieee80211_hw *hw); 1041int __ieee80211_suspend(struct ieee80211_hw *hw);
1057int __ieee80211_resume(struct ieee80211_hw *hw); 1042int __ieee80211_resume(struct ieee80211_hw *hw);
1043#else
1044static inline int __ieee80211_suspend(struct ieee80211_hw *hw)
1045{
1046 return 0;
1047}
1048static inline int __ieee80211_resume(struct ieee80211_hw *hw)
1049{
1050 return 0;
1051}
1052#endif
1058 1053
1059/* utility functions/constants */ 1054/* utility functions/constants */
1060extern void *mac80211_wiphy_privid; /* for wiphy privid */ 1055extern void *mac80211_wiphy_privid; /* for wiphy privid */
@@ -1081,6 +1076,9 @@ void ieee80211_dynamic_ps_timer(unsigned long data);
1081void ieee80211_send_nullfunc(struct ieee80211_local *local, 1076void ieee80211_send_nullfunc(struct ieee80211_local *local,
1082 struct ieee80211_sub_if_data *sdata, 1077 struct ieee80211_sub_if_data *sdata,
1083 int powersave); 1078 int powersave);
1079void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
1080 struct ieee80211_hdr *hdr);
1081void ieee80211_beacon_loss_work(struct work_struct *work);
1084 1082
1085void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, 1083void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
1086 enum queue_stop_reason reason); 1084 enum queue_stop_reason reason);
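
The queue handling used throughout this series treats queue_stop_reasons[] as a per-queue bitmask: stopping a queue for a reason sets that reason's bit, and waking it for a reason clears the bit but only restarts the queue once no bits remain, at which point frames parked on local->pending[queue] can be flushed. The stand-alone sketch below (illustrative only, with made-up names; the real helpers live elsewhere in mac80211 and also drive the netdev subqueues) shows that semantic using the enum added above.

#include <linux/bitops.h>

#define SKETCH_QUEUES 4

/* one reason bitmask per hardware queue, mirroring queue_stop_reasons[] */
static unsigned long sketch_stop_reasons[SKETCH_QUEUES];

static void sketch_stop_queue(int queue, enum queue_stop_reason reason)
{
	/* remember why this queue is stopped; several reasons may stack */
	__set_bit(reason, &sketch_stop_reasons[queue]);
}

static void sketch_wake_queue(int queue, enum queue_stop_reason reason)
{
	__clear_bit(reason, &sketch_stop_reasons[queue]);

	if (!sketch_stop_reasons[queue]) {
		/* no reason left: only now may the queue actually run again
		 * (and any parked pending frames be transmitted) */
	}
}

In the agg-tx.c hunks above, this is what allows ieee80211_agg_splice_packets() to mark the already-stopped queue with the PENDING reason before splicing frames into local->pending[queue].
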
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index f9f27b9cadb..91e8e1bacaa 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -261,8 +261,7 @@ static int ieee80211_open(struct net_device *dev)
261 ieee80211_bss_info_change_notify(sdata, changed); 261 ieee80211_bss_info_change_notify(sdata, changed);
262 ieee80211_enable_keys(sdata); 262 ieee80211_enable_keys(sdata);
263 263
264 if (sdata->vif.type == NL80211_IFTYPE_STATION && 264 if (sdata->vif.type == NL80211_IFTYPE_STATION)
265 !(sdata->flags & IEEE80211_SDATA_USERSPACE_MLME))
266 netif_carrier_off(dev); 265 netif_carrier_off(dev);
267 else 266 else
268 netif_carrier_on(dev); 267 netif_carrier_on(dev);
@@ -478,6 +477,9 @@ static int ieee80211_stop(struct net_device *dev)
478 */ 477 */
479 cancel_work_sync(&sdata->u.mgd.work); 478 cancel_work_sync(&sdata->u.mgd.work);
480 cancel_work_sync(&sdata->u.mgd.chswitch_work); 479 cancel_work_sync(&sdata->u.mgd.chswitch_work);
480
481 cancel_work_sync(&sdata->u.mgd.beacon_loss_work);
482
481 /* 483 /*
482 * When we get here, the interface is marked down. 484 * When we get here, the interface is marked down.
483 * Call synchronize_rcu() to wait for the RX path 485 * Call synchronize_rcu() to wait for the RX path
@@ -653,13 +655,7 @@ static void ieee80211_teardown_sdata(struct net_device *dev)
653 kfree(sdata->u.mgd.extra_ie); 655 kfree(sdata->u.mgd.extra_ie);
654 kfree(sdata->u.mgd.assocreq_ies); 656 kfree(sdata->u.mgd.assocreq_ies);
655 kfree(sdata->u.mgd.assocresp_ies); 657 kfree(sdata->u.mgd.assocresp_ies);
656 kfree(sdata->u.mgd.ie_probereq); 658 kfree(sdata->u.mgd.sme_auth_ie);
657 kfree(sdata->u.mgd.ie_proberesp);
658 kfree(sdata->u.mgd.ie_auth);
659 kfree(sdata->u.mgd.ie_assocreq);
660 kfree(sdata->u.mgd.ie_reassocreq);
661 kfree(sdata->u.mgd.ie_deauth);
662 kfree(sdata->u.mgd.ie_disassoc);
663 break; 659 break;
664 case NL80211_IFTYPE_WDS: 660 case NL80211_IFTYPE_WDS:
665 case NL80211_IFTYPE_AP_VLAN: 661 case NL80211_IFTYPE_AP_VLAN:
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index f38db4d37e5..a6f1d8a869b 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -161,12 +161,6 @@ int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed)
161 if (WARN_ON(!netif_running(sdata->dev))) 161 if (WARN_ON(!netif_running(sdata->dev)))
162 return 0; 162 return 0;
163 163
164 if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
165 return -EINVAL;
166
167 if (!local->ops->config_interface)
168 return 0;
169
170 memset(&conf, 0, sizeof(conf)); 164 memset(&conf, 0, sizeof(conf));
171 165
172 if (sdata->vif.type == NL80211_IFTYPE_STATION) 166 if (sdata->vif.type == NL80211_IFTYPE_STATION)
@@ -183,6 +177,9 @@ int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed)
183 return -EINVAL; 177 return -EINVAL;
184 } 178 }
185 179
180 if (!local->ops->config_interface)
181 return 0;
182
186 switch (sdata->vif.type) { 183 switch (sdata->vif.type) {
187 case NL80211_IFTYPE_AP: 184 case NL80211_IFTYPE_AP:
188 case NL80211_IFTYPE_ADHOC: 185 case NL80211_IFTYPE_ADHOC:
@@ -224,9 +221,6 @@ int ieee80211_if_config(struct ieee80211_sub_if_data *sdata, u32 changed)
224 } 221 }
225 } 222 }
226 223
227 if (WARN_ON(!conf.bssid && (changed & IEEE80211_IFCC_BSSID)))
228 return -EINVAL;
229
230 conf.changed = changed; 224 conf.changed = changed;
231 225
232 return local->ops->config_interface(local_to_hw(local), 226 return local->ops->config_interface(local_to_hw(local),
@@ -780,13 +774,10 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
780 setup_timer(&local->dynamic_ps_timer, 774 setup_timer(&local->dynamic_ps_timer,
781 ieee80211_dynamic_ps_timer, (unsigned long) local); 775 ieee80211_dynamic_ps_timer, (unsigned long) local);
782 776
783 for (i = 0; i < IEEE80211_MAX_AMPDU_QUEUES; i++)
784 local->ampdu_ac_queue[i] = -1;
785 /* using an s8 won't work with more than that */
786 BUILD_BUG_ON(IEEE80211_MAX_AMPDU_QUEUES > 127);
787
788 sta_info_init(local); 777 sta_info_init(local);
789 778
779 for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
780 skb_queue_head_init(&local->pending[i]);
790 tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending, 781 tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
791 (unsigned long)local); 782 (unsigned long)local);
792 tasklet_disable(&local->tx_pending_tasklet); 783 tasklet_disable(&local->tx_pending_tasklet);
@@ -799,6 +790,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
799 skb_queue_head_init(&local->skb_queue); 790 skb_queue_head_init(&local->skb_queue);
800 skb_queue_head_init(&local->skb_queue_unreliable); 791 skb_queue_head_init(&local->skb_queue_unreliable);
801 792
793 spin_lock_init(&local->ampdu_lock);
794
802 return local_to_hw(local); 795 return local_to_hw(local);
803} 796}
804EXPORT_SYMBOL(ieee80211_alloc_hw); 797EXPORT_SYMBOL(ieee80211_alloc_hw);
@@ -876,10 +869,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
876 */ 869 */
877 if (hw->queues > IEEE80211_MAX_QUEUES) 870 if (hw->queues > IEEE80211_MAX_QUEUES)
878 hw->queues = IEEE80211_MAX_QUEUES; 871 hw->queues = IEEE80211_MAX_QUEUES;
879 if (hw->ampdu_queues > IEEE80211_MAX_AMPDU_QUEUES)
880 hw->ampdu_queues = IEEE80211_MAX_AMPDU_QUEUES;
881 if (hw->queues < 4)
882 hw->ampdu_queues = 0;
883 872
884 mdev = alloc_netdev_mq(sizeof(struct ieee80211_master_priv), 873 mdev = alloc_netdev_mq(sizeof(struct ieee80211_master_priv),
885 "wmaster%d", ieee80211_master_setup, 874 "wmaster%d", ieee80211_master_setup,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 841b8450b3d..7ecda9d59d8 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -30,7 +30,7 @@
30#define IEEE80211_ASSOC_TIMEOUT (HZ / 5) 30#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
31#define IEEE80211_ASSOC_MAX_TRIES 3 31#define IEEE80211_ASSOC_MAX_TRIES 3
32#define IEEE80211_MONITORING_INTERVAL (2 * HZ) 32#define IEEE80211_MONITORING_INTERVAL (2 * HZ)
33#define IEEE80211_PROBE_INTERVAL (60 * HZ) 33#define IEEE80211_PROBE_IDLE_TIME (60 * HZ)
34#define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ) 34#define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ)
35 35
36/* utils */ 36/* utils */
@@ -82,38 +82,23 @@ static int ieee80211_compatible_rates(struct ieee80211_bss *bss,
82 82
83/* frame sending functions */ 83/* frame sending functions */
84 84
85static void add_extra_ies(struct sk_buff *skb, u8 *ies, size_t ies_len)
86{
87 if (ies)
88 memcpy(skb_put(skb, ies_len), ies, ies_len);
89}
90
91static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata) 85static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
92{ 86{
93 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 87 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
94 struct ieee80211_local *local = sdata->local; 88 struct ieee80211_local *local = sdata->local;
95 struct sk_buff *skb; 89 struct sk_buff *skb;
96 struct ieee80211_mgmt *mgmt; 90 struct ieee80211_mgmt *mgmt;
97 u8 *pos, *ies, *ht_ie, *e_ies; 91 u8 *pos, *ies, *ht_ie;
98 int i, len, count, rates_len, supp_rates_len; 92 int i, len, count, rates_len, supp_rates_len;
99 u16 capab; 93 u16 capab;
100 struct ieee80211_bss *bss; 94 struct ieee80211_bss *bss;
101 int wmm = 0; 95 int wmm = 0;
102 struct ieee80211_supported_band *sband; 96 struct ieee80211_supported_band *sband;
103 u32 rates = 0; 97 u32 rates = 0;
104 size_t e_ies_len;
105
106 if (ifmgd->flags & IEEE80211_IBSS_PREV_BSSID_SET) {
107 e_ies = sdata->u.mgd.ie_reassocreq;
108 e_ies_len = sdata->u.mgd.ie_reassocreq_len;
109 } else {
110 e_ies = sdata->u.mgd.ie_assocreq;
111 e_ies_len = sdata->u.mgd.ie_assocreq_len;
112 }
113 98
114 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 99 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
115 sizeof(*mgmt) + 200 + ifmgd->extra_ie_len + 100 sizeof(*mgmt) + 200 + ifmgd->extra_ie_len +
116 ifmgd->ssid_len + e_ies_len); 101 ifmgd->ssid_len);
117 if (!skb) { 102 if (!skb) {
118 printk(KERN_DEBUG "%s: failed to allocate buffer for assoc " 103 printk(KERN_DEBUG "%s: failed to allocate buffer for assoc "
119 "frame\n", sdata->dev->name); 104 "frame\n", sdata->dev->name);
@@ -304,8 +289,6 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
304 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs)); 289 memcpy(pos, &sband->ht_cap.mcs, sizeof(sband->ht_cap.mcs));
305 } 290 }
306 291
307 add_extra_ies(skb, e_ies, e_ies_len);
308
309 kfree(ifmgd->assocreq_ies); 292 kfree(ifmgd->assocreq_ies);
310 ifmgd->assocreq_ies_len = (skb->data + skb->len) - ies; 293 ifmgd->assocreq_ies_len = (skb->data + skb->len) - ies;
311 ifmgd->assocreq_ies = kmalloc(ifmgd->assocreq_ies_len, GFP_KERNEL); 294 ifmgd->assocreq_ies = kmalloc(ifmgd->assocreq_ies_len, GFP_KERNEL);
@@ -323,19 +306,8 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
323 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 306 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
324 struct sk_buff *skb; 307 struct sk_buff *skb;
325 struct ieee80211_mgmt *mgmt; 308 struct ieee80211_mgmt *mgmt;
326 u8 *ies;
327 size_t ies_len;
328
329 if (stype == IEEE80211_STYPE_DEAUTH) {
330 ies = sdata->u.mgd.ie_deauth;
331 ies_len = sdata->u.mgd.ie_deauth_len;
332 } else {
333 ies = sdata->u.mgd.ie_disassoc;
334 ies_len = sdata->u.mgd.ie_disassoc_len;
335 }
336 309
337 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 310 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt));
338 ies_len);
339 if (!skb) { 311 if (!skb) {
340 printk(KERN_DEBUG "%s: failed to allocate buffer for " 312 printk(KERN_DEBUG "%s: failed to allocate buffer for "
341 "deauth/disassoc frame\n", sdata->dev->name); 313 "deauth/disassoc frame\n", sdata->dev->name);
@@ -353,8 +325,6 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
353 /* u.deauth.reason_code == u.disassoc.reason_code */ 325 /* u.deauth.reason_code == u.disassoc.reason_code */
354 mgmt->u.deauth.reason_code = cpu_to_le16(reason); 326 mgmt->u.deauth.reason_code = cpu_to_le16(reason);
355 327
356 add_extra_ies(skb, ies, ies_len);
357
358 ieee80211_tx_skb(sdata, skb, ifmgd->flags & IEEE80211_STA_MFP_ENABLED); 328 ieee80211_tx_skb(sdata, skb, ifmgd->flags & IEEE80211_STA_MFP_ENABLED);
359} 329}
360 330
@@ -640,6 +610,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
640 bss_info_changed |= ieee80211_handle_bss_capability(sdata, 610 bss_info_changed |= ieee80211_handle_bss_capability(sdata,
641 bss->cbss.capability, bss->has_erp_value, bss->erp_value); 611 bss->cbss.capability, bss->has_erp_value, bss->erp_value);
642 612
613 cfg80211_hold_bss(&bss->cbss);
614
643 ieee80211_rx_bss_put(local, bss); 615 ieee80211_rx_bss_put(local, bss);
644 } 616 }
645 617
@@ -682,6 +654,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
682static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata) 654static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata)
683{ 655{
684 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 656 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
657 struct ieee80211_local *local = sdata->local;
685 658
686 ifmgd->direct_probe_tries++; 659 ifmgd->direct_probe_tries++;
687 if (ifmgd->direct_probe_tries > IEEE80211_AUTH_MAX_TRIES) { 660 if (ifmgd->direct_probe_tries > IEEE80211_AUTH_MAX_TRIES) {
@@ -697,6 +670,13 @@ static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata)
697 ieee80211_rx_bss_remove(sdata, ifmgd->bssid, 670 ieee80211_rx_bss_remove(sdata, ifmgd->bssid,
698 sdata->local->hw.conf.channel->center_freq, 671 sdata->local->hw.conf.channel->center_freq,
699 ifmgd->ssid, ifmgd->ssid_len); 672 ifmgd->ssid, ifmgd->ssid_len);
673
674 /*
675 * We might have a pending scan which had no chance to run yet
676 * due to state == IEEE80211_STA_MLME_DIRECT_PROBE.
677 * Hence, queue the STAs work again
678 */
679 queue_work(local->hw.workqueue, &ifmgd->work);
700 return; 680 return;
701 } 681 }
702 682
@@ -721,6 +701,9 @@ static void ieee80211_direct_probe(struct ieee80211_sub_if_data *sdata)
721static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata) 701static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata)
722{ 702{
723 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 703 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
704 struct ieee80211_local *local = sdata->local;
705 u8 *ies;
706 size_t ies_len;
724 707
725 ifmgd->auth_tries++; 708 ifmgd->auth_tries++;
726 if (ifmgd->auth_tries > IEEE80211_AUTH_MAX_TRIES) { 709 if (ifmgd->auth_tries > IEEE80211_AUTH_MAX_TRIES) {
@@ -732,6 +715,13 @@ static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata)
732 ieee80211_rx_bss_remove(sdata, ifmgd->bssid, 715 ieee80211_rx_bss_remove(sdata, ifmgd->bssid,
733 sdata->local->hw.conf.channel->center_freq, 716 sdata->local->hw.conf.channel->center_freq,
734 ifmgd->ssid, ifmgd->ssid_len); 717 ifmgd->ssid, ifmgd->ssid_len);
718
719 /*
720 * We might have a pending scan which had no chance to run yet
721 * due to state == IEEE80211_STA_MLME_AUTHENTICATE.
722 * Hence, queue the STAs work again
723 */
724 queue_work(local->hw.workqueue, &ifmgd->work);
735 return; 725 return;
736 } 726 }
737 727
@@ -739,7 +729,14 @@ static void ieee80211_authenticate(struct ieee80211_sub_if_data *sdata)
739 printk(KERN_DEBUG "%s: authenticate with AP %pM\n", 729 printk(KERN_DEBUG "%s: authenticate with AP %pM\n",
740 sdata->dev->name, ifmgd->bssid); 730 sdata->dev->name, ifmgd->bssid);
741 731
742 ieee80211_send_auth(sdata, 1, ifmgd->auth_alg, NULL, 0, 732 if (ifmgd->flags & IEEE80211_STA_EXT_SME) {
733 ies = ifmgd->sme_auth_ie;
734 ies_len = ifmgd->sme_auth_ie_len;
735 } else {
736 ies = NULL;
737 ies_len = 0;
738 }
739 ieee80211_send_auth(sdata, 1, ifmgd->auth_alg, ies, ies_len,
743 ifmgd->bssid, 0); 740 ifmgd->bssid, 0);
744 ifmgd->auth_transaction = 2; 741 ifmgd->auth_transaction = 2;
745 742
@@ -756,6 +753,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
756{ 753{
757 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 754 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
758 struct ieee80211_local *local = sdata->local; 755 struct ieee80211_local *local = sdata->local;
756 struct ieee80211_conf *conf = &local_to_hw(local)->conf;
757 struct ieee80211_bss *bss;
759 struct sta_info *sta; 758 struct sta_info *sta;
760 u32 changed = 0, config_changed = 0; 759 u32 changed = 0, config_changed = 0;
761 760
@@ -779,6 +778,15 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
779 778
780 ieee80211_sta_tear_down_BA_sessions(sta); 779 ieee80211_sta_tear_down_BA_sessions(sta);
781 780
781 bss = ieee80211_rx_bss_get(local, ifmgd->bssid,
782 conf->channel->center_freq,
783 ifmgd->ssid, ifmgd->ssid_len);
784
785 if (bss) {
786 cfg80211_unhold_bss(&bss->cbss);
787 ieee80211_rx_bss_put(local, bss);
788 }
789
782 if (self_disconnected) { 790 if (self_disconnected) {
783 if (deauth) 791 if (deauth)
784 ieee80211_send_deauth_disassoc(sdata, 792 ieee80211_send_deauth_disassoc(sdata,
@@ -854,7 +862,7 @@ static int ieee80211_privacy_mismatch(struct ieee80211_sub_if_data *sdata)
854 int wep_privacy; 862 int wep_privacy;
855 int privacy_invoked; 863 int privacy_invoked;
856 864
857 if (!ifmgd || (ifmgd->flags & IEEE80211_STA_MIXED_CELL)) 865 if (!ifmgd || (ifmgd->flags & IEEE80211_STA_EXT_SME))
858 return 0; 866 return 0;
859 867
860 bss = ieee80211_rx_bss_get(local, ifmgd->bssid, 868 bss = ieee80211_rx_bss_get(local, ifmgd->bssid,
@@ -878,6 +886,7 @@ static int ieee80211_privacy_mismatch(struct ieee80211_sub_if_data *sdata)
878static void ieee80211_associate(struct ieee80211_sub_if_data *sdata) 886static void ieee80211_associate(struct ieee80211_sub_if_data *sdata)
879{ 887{
880 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 888 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
889 struct ieee80211_local *local = sdata->local;
881 890
882 ifmgd->assoc_tries++; 891 ifmgd->assoc_tries++;
883 if (ifmgd->assoc_tries > IEEE80211_ASSOC_MAX_TRIES) { 892 if (ifmgd->assoc_tries > IEEE80211_ASSOC_MAX_TRIES) {
@@ -889,6 +898,12 @@ static void ieee80211_associate(struct ieee80211_sub_if_data *sdata)
889 ieee80211_rx_bss_remove(sdata, ifmgd->bssid, 898 ieee80211_rx_bss_remove(sdata, ifmgd->bssid,
890 sdata->local->hw.conf.channel->center_freq, 899 sdata->local->hw.conf.channel->center_freq,
891 ifmgd->ssid, ifmgd->ssid_len); 900 ifmgd->ssid, ifmgd->ssid_len);
901 /*
902 * We might have a pending scan which had no chance to run yet
903 * due to state == IEEE80211_STA_MLME_ASSOCIATE.
904 * Hence, queue the STA's work again
905 */
906 queue_work(local->hw.workqueue, &ifmgd->work);
892 return; 907 return;
893 } 908 }
894 909
@@ -907,13 +922,55 @@ static void ieee80211_associate(struct ieee80211_sub_if_data *sdata)
907 mod_timer(&ifmgd->timer, jiffies + IEEE80211_ASSOC_TIMEOUT); 922 mod_timer(&ifmgd->timer, jiffies + IEEE80211_ASSOC_TIMEOUT);
908} 923}
909 924
925void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
926 struct ieee80211_hdr *hdr)
927{
928 /*
929 * We can postpone the mgd.timer whenever receiving unicast frames
930 * from AP because we know that the connection is working both ways
931 * at that time. But multicast frames (and hence also beacons) must
932 * be ignored here, because we need to trigger the timer during
933 * data idle periods for sending the periodical probe request to
934 * the AP.
935 */
936 if (!is_multicast_ether_addr(hdr->addr1))
937 mod_timer(&sdata->u.mgd.timer,
938 jiffies + IEEE80211_MONITORING_INTERVAL);
939}
940
941void ieee80211_beacon_loss_work(struct work_struct *work)
942{
943 struct ieee80211_sub_if_data *sdata =
944 container_of(work, struct ieee80211_sub_if_data,
945 u.mgd.beacon_loss_work);
946 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
947
948 printk(KERN_DEBUG "%s: driver reports beacon loss from AP %pM "
949 "- sending probe request\n", sdata->dev->name,
950 sdata->u.mgd.bssid);
951
952 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
953 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
954 ifmgd->ssid_len, NULL, 0);
955
956 mod_timer(&ifmgd->timer, jiffies + IEEE80211_MONITORING_INTERVAL);
957}
958
959void ieee80211_beacon_loss(struct ieee80211_vif *vif)
960{
961 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
962
963 queue_work(sdata->local->hw.workqueue,
964 &sdata->u.mgd.beacon_loss_work);
965}
966EXPORT_SYMBOL(ieee80211_beacon_loss);
910 967
911static void ieee80211_associated(struct ieee80211_sub_if_data *sdata) 968static void ieee80211_associated(struct ieee80211_sub_if_data *sdata)
912{ 969{
913 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 970 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
914 struct ieee80211_local *local = sdata->local; 971 struct ieee80211_local *local = sdata->local;
915 struct sta_info *sta; 972 struct sta_info *sta;
916 int disassoc; 973 bool disassoc = false;
917 974
918 /* TODO: start monitoring current AP signal quality and number of 975 /* TODO: start monitoring current AP signal quality and number of
919 * missed beacons. Scan other channels every now and then and search 976 * missed beacons. Scan other channels every now and then and search
@@ -928,36 +985,45 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata)
928 if (!sta) { 985 if (!sta) {
929 printk(KERN_DEBUG "%s: No STA entry for own AP %pM\n", 986 printk(KERN_DEBUG "%s: No STA entry for own AP %pM\n",
930 sdata->dev->name, ifmgd->bssid); 987 sdata->dev->name, ifmgd->bssid);
931 disassoc = 1; 988 disassoc = true;
932 } else { 989 goto unlock;
933 disassoc = 0;
934 if (time_after(jiffies,
935 sta->last_rx + IEEE80211_MONITORING_INTERVAL)) {
936 if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) {
937 printk(KERN_DEBUG "%s: No ProbeResp from "
938 "current AP %pM - assume out of "
939 "range\n",
940 sdata->dev->name, ifmgd->bssid);
941 disassoc = 1;
942 } else
943 ieee80211_send_probe_req(sdata, ifmgd->bssid,
944 ifmgd->ssid,
945 ifmgd->ssid_len,
946 NULL, 0);
947 ifmgd->flags ^= IEEE80211_STA_PROBEREQ_POLL;
948 } else {
949 ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
950 if (time_after(jiffies, ifmgd->last_probe +
951 IEEE80211_PROBE_INTERVAL)) {
952 ifmgd->last_probe = jiffies;
953 ieee80211_send_probe_req(sdata, ifmgd->bssid,
954 ifmgd->ssid,
955 ifmgd->ssid_len,
956 NULL, 0);
957 }
958 }
959 } 990 }
960 991
992 if ((ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL) &&
993 time_after(jiffies, sta->last_rx + IEEE80211_MONITORING_INTERVAL)) {
994 printk(KERN_DEBUG "%s: no probe response from AP %pM "
995 "- disassociating\n",
996 sdata->dev->name, ifmgd->bssid);
997 disassoc = true;
998 ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
999 goto unlock;
1000 }
1001
1002 /*
1003 * Beacon filtering is only enabled with power save and then the
1004 * stack should not check for beacon loss.
1005 */
1006 if (!((local->hw.flags & IEEE80211_HW_BEACON_FILTER) &&
1007 (local->hw.conf.flags & IEEE80211_CONF_PS)) &&
1008 time_after(jiffies,
1009 ifmgd->last_beacon + IEEE80211_MONITORING_INTERVAL)) {
1010 printk(KERN_DEBUG "%s: beacon loss from AP %pM "
1011 "- sending probe request\n",
1012 sdata->dev->name, ifmgd->bssid);
1013 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
1014 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
1015 ifmgd->ssid_len, NULL, 0);
1016 goto unlock;
1017
1018 }
1019
1020 if (time_after(jiffies, sta->last_rx + IEEE80211_PROBE_IDLE_TIME)) {
1021 ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
1022 ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
1023 ifmgd->ssid_len, NULL, 0);
1024 }
1025
1026 unlock:
961 rcu_read_unlock(); 1027 rcu_read_unlock();
962 1028
963 if (disassoc) 1029 if (disassoc)
@@ -975,7 +1041,11 @@ static void ieee80211_auth_completed(struct ieee80211_sub_if_data *sdata)
975 1041
976 printk(KERN_DEBUG "%s: authenticated\n", sdata->dev->name); 1042 printk(KERN_DEBUG "%s: authenticated\n", sdata->dev->name);
977 ifmgd->flags |= IEEE80211_STA_AUTHENTICATED; 1043 ifmgd->flags |= IEEE80211_STA_AUTHENTICATED;
978 ieee80211_associate(sdata); 1044 if (ifmgd->flags & IEEE80211_STA_EXT_SME) {
1045 /* Wait for SME to request association */
1046 ifmgd->state = IEEE80211_STA_MLME_DISABLED;
1047 } else
1048 ieee80211_associate(sdata);
979} 1049}
980 1050
981 1051
@@ -1061,12 +1131,15 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
1061 switch (ifmgd->auth_alg) { 1131 switch (ifmgd->auth_alg) {
1062 case WLAN_AUTH_OPEN: 1132 case WLAN_AUTH_OPEN:
1063 case WLAN_AUTH_LEAP: 1133 case WLAN_AUTH_LEAP:
1134 case WLAN_AUTH_FT:
1064 ieee80211_auth_completed(sdata); 1135 ieee80211_auth_completed(sdata);
1136 cfg80211_send_rx_auth(sdata->dev, (u8 *) mgmt, len);
1065 break; 1137 break;
1066 case WLAN_AUTH_SHARED_KEY: 1138 case WLAN_AUTH_SHARED_KEY:
1067 if (ifmgd->auth_transaction == 4) 1139 if (ifmgd->auth_transaction == 4) {
1068 ieee80211_auth_completed(sdata); 1140 ieee80211_auth_completed(sdata);
1069 else 1141 cfg80211_send_rx_auth(sdata->dev, (u8 *) mgmt, len);
1142 } else
1070 ieee80211_auth_challenge(sdata, mgmt, len); 1143 ieee80211_auth_challenge(sdata, mgmt, len);
1071 break; 1144 break;
1072 } 1145 }
@@ -1092,9 +1165,10 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1092 printk(KERN_DEBUG "%s: deauthenticated (Reason: %u)\n", 1165 printk(KERN_DEBUG "%s: deauthenticated (Reason: %u)\n",
1093 sdata->dev->name, reason_code); 1166 sdata->dev->name, reason_code);
1094 1167
1095 if (ifmgd->state == IEEE80211_STA_MLME_AUTHENTICATE || 1168 if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) &&
1096 ifmgd->state == IEEE80211_STA_MLME_ASSOCIATE || 1169 (ifmgd->state == IEEE80211_STA_MLME_AUTHENTICATE ||
1097 ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED) { 1170 ifmgd->state == IEEE80211_STA_MLME_ASSOCIATE ||
1171 ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED)) {
1098 ifmgd->state = IEEE80211_STA_MLME_DIRECT_PROBE; 1172 ifmgd->state = IEEE80211_STA_MLME_DIRECT_PROBE;
1099 mod_timer(&ifmgd->timer, jiffies + 1173 mod_timer(&ifmgd->timer, jiffies +
1100 IEEE80211_RETRY_AUTH_INTERVAL); 1174 IEEE80211_RETRY_AUTH_INTERVAL);
@@ -1102,6 +1176,7 @@ static void ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
1102 1176
1103 ieee80211_set_disassoc(sdata, true, false, 0); 1177 ieee80211_set_disassoc(sdata, true, false, 0);
1104 ifmgd->flags &= ~IEEE80211_STA_AUTHENTICATED; 1178 ifmgd->flags &= ~IEEE80211_STA_AUTHENTICATED;
1179 cfg80211_send_rx_deauth(sdata->dev, (u8 *) mgmt, len);
1105} 1180}
1106 1181
1107 1182
@@ -1124,13 +1199,15 @@ static void ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
1124 printk(KERN_DEBUG "%s: disassociated (Reason: %u)\n", 1199 printk(KERN_DEBUG "%s: disassociated (Reason: %u)\n",
1125 sdata->dev->name, reason_code); 1200 sdata->dev->name, reason_code);
1126 1201
1127 if (ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED) { 1202 if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) &&
1203 ifmgd->state == IEEE80211_STA_MLME_ASSOCIATED) {
1128 ifmgd->state = IEEE80211_STA_MLME_ASSOCIATE; 1204 ifmgd->state = IEEE80211_STA_MLME_ASSOCIATE;
1129 mod_timer(&ifmgd->timer, jiffies + 1205 mod_timer(&ifmgd->timer, jiffies +
1130 IEEE80211_RETRY_AUTH_INTERVAL); 1206 IEEE80211_RETRY_AUTH_INTERVAL);
1131 } 1207 }
1132 1208
1133 ieee80211_set_disassoc(sdata, false, false, reason_code); 1209 ieee80211_set_disassoc(sdata, false, false, reason_code);
1210 cfg80211_send_rx_disassoc(sdata->dev, (u8 *) mgmt, len);
1134} 1211}
1135 1212
1136 1213
@@ -1346,7 +1423,14 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
1346 bss_conf->assoc_capability = capab_info; 1423 bss_conf->assoc_capability = capab_info;
1347 ieee80211_set_associated(sdata, changed); 1424 ieee80211_set_associated(sdata, changed);
1348 1425
1426 /*
1427 * initialise the time of last beacon to be the association time,
1428 * otherwise beacon loss check will trigger immediately
1429 */
1430 ifmgd->last_beacon = jiffies;
1431
1349 ieee80211_associated(sdata); 1432 ieee80211_associated(sdata);
1433 cfg80211_send_rx_assoc(sdata->dev, (u8 *) mgmt, len);
1350} 1434}
1351 1435
1352 1436
@@ -1393,9 +1477,12 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1393 size_t len, 1477 size_t len,
1394 struct ieee80211_rx_status *rx_status) 1478 struct ieee80211_rx_status *rx_status)
1395{ 1479{
1480 struct ieee80211_if_managed *ifmgd;
1396 size_t baselen; 1481 size_t baselen;
1397 struct ieee802_11_elems elems; 1482 struct ieee802_11_elems elems;
1398 1483
1484 ifmgd = &sdata->u.mgd;
1485
1399 if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN)) 1486 if (memcmp(mgmt->da, sdata->dev->dev_addr, ETH_ALEN))
1400 return; /* ignore ProbeResp to foreign address */ 1487 return; /* ignore ProbeResp to foreign address */
1401 1488
@@ -1410,11 +1497,14 @@ static void ieee80211_rx_mgmt_probe_resp(struct ieee80211_sub_if_data *sdata,
1410 1497
1411 /* direct probe may be part of the association flow */ 1498 /* direct probe may be part of the association flow */
1412 if (test_and_clear_bit(IEEE80211_STA_REQ_DIRECT_PROBE, 1499 if (test_and_clear_bit(IEEE80211_STA_REQ_DIRECT_PROBE,
1413 &sdata->u.mgd.request)) { 1500 &ifmgd->request)) {
1414 printk(KERN_DEBUG "%s direct probe responded\n", 1501 printk(KERN_DEBUG "%s direct probe responded\n",
1415 sdata->dev->name); 1502 sdata->dev->name);
1416 ieee80211_authenticate(sdata); 1503 ieee80211_authenticate(sdata);
1417 } 1504 }
1505
1506 if (ifmgd->flags & IEEE80211_STA_PROBEREQ_POLL)
1507 ifmgd->flags &= ~IEEE80211_STA_PROBEREQ_POLL;
1418} 1508}
1419 1509
1420static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, 1510static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
@@ -1636,6 +1726,8 @@ static void ieee80211_sta_reset_auth(struct ieee80211_sub_if_data *sdata)
1636 ifmgd->auth_alg = WLAN_AUTH_SHARED_KEY; 1726 ifmgd->auth_alg = WLAN_AUTH_SHARED_KEY;
1637 else if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_LEAP) 1727 else if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_LEAP)
1638 ifmgd->auth_alg = WLAN_AUTH_LEAP; 1728 ifmgd->auth_alg = WLAN_AUTH_LEAP;
1729 else if (ifmgd->auth_algs & IEEE80211_AUTH_ALG_FT)
1730 ifmgd->auth_alg = WLAN_AUTH_FT;
1639 else 1731 else
1640 ifmgd->auth_alg = WLAN_AUTH_OPEN; 1732 ifmgd->auth_alg = WLAN_AUTH_OPEN;
1641 ifmgd->auth_transaction = -1; 1733 ifmgd->auth_transaction = -1;
@@ -1659,7 +1751,8 @@ static int ieee80211_sta_config_auth(struct ieee80211_sub_if_data *sdata)
1659 u16 capa_val = WLAN_CAPABILITY_ESS; 1751 u16 capa_val = WLAN_CAPABILITY_ESS;
1660 struct ieee80211_channel *chan = local->oper_channel; 1752 struct ieee80211_channel *chan = local->oper_channel;
1661 1753
1662 if (ifmgd->flags & (IEEE80211_STA_AUTO_SSID_SEL | 1754 if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) &&
1755 ifmgd->flags & (IEEE80211_STA_AUTO_SSID_SEL |
1663 IEEE80211_STA_AUTO_BSSID_SEL | 1756 IEEE80211_STA_AUTO_BSSID_SEL |
1664 IEEE80211_STA_AUTO_CHANNEL_SEL)) { 1757 IEEE80211_STA_AUTO_CHANNEL_SEL)) {
1665 capa_mask |= WLAN_CAPABILITY_PRIVACY; 1758 capa_mask |= WLAN_CAPABILITY_PRIVACY;
@@ -1822,6 +1915,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
1822 ifmgd = &sdata->u.mgd; 1915 ifmgd = &sdata->u.mgd;
1823 INIT_WORK(&ifmgd->work, ieee80211_sta_work); 1916 INIT_WORK(&ifmgd->work, ieee80211_sta_work);
1824 INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work); 1917 INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work);
1918 INIT_WORK(&ifmgd->beacon_loss_work, ieee80211_beacon_loss_work);
1825 setup_timer(&ifmgd->timer, ieee80211_sta_timer, 1919 setup_timer(&ifmgd->timer, ieee80211_sta_timer,
1826 (unsigned long) sdata); 1920 (unsigned long) sdata);
1827 setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer, 1921 setup_timer(&ifmgd->chswitch_timer, ieee80211_chswitch_timer,
@@ -1834,7 +1928,7 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
1834 ifmgd->flags |= IEEE80211_STA_CREATE_IBSS | 1928 ifmgd->flags |= IEEE80211_STA_CREATE_IBSS |
1835 IEEE80211_STA_AUTO_BSSID_SEL | 1929 IEEE80211_STA_AUTO_BSSID_SEL |
1836 IEEE80211_STA_AUTO_CHANNEL_SEL; 1930 IEEE80211_STA_AUTO_CHANNEL_SEL;
1837 if (ieee80211_num_regular_queues(&sdata->local->hw) >= 4) 1931 if (sdata->local->hw.queues >= 4)
1838 ifmgd->flags |= IEEE80211_STA_WMM_ENABLED; 1932 ifmgd->flags |= IEEE80211_STA_WMM_ENABLED;
1839} 1933}
1840 1934
@@ -1856,7 +1950,11 @@ void ieee80211_sta_req_auth(struct ieee80211_sub_if_data *sdata)
1856 ieee80211_set_disassoc(sdata, true, true, 1950 ieee80211_set_disassoc(sdata, true, true,
1857 WLAN_REASON_DEAUTH_LEAVING); 1951 WLAN_REASON_DEAUTH_LEAVING);
1858 1952
1859 set_bit(IEEE80211_STA_REQ_AUTH, &ifmgd->request); 1953 if (!(ifmgd->flags & IEEE80211_STA_EXT_SME) ||
1954 ifmgd->state != IEEE80211_STA_MLME_ASSOCIATE)
1955 set_bit(IEEE80211_STA_REQ_AUTH, &ifmgd->request);
1956 else if (ifmgd->flags & IEEE80211_STA_EXT_SME)
1957 set_bit(IEEE80211_STA_REQ_RUN, &ifmgd->request);
1860 queue_work(local->hw.workqueue, &ifmgd->work); 1958 queue_work(local->hw.workqueue, &ifmgd->work);
1861 } 1959 }
1862} 1960}
@@ -1865,8 +1963,6 @@ int ieee80211_sta_commit(struct ieee80211_sub_if_data *sdata)
1865{ 1963{
1866 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1964 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1867 1965
1868 ifmgd->flags &= ~IEEE80211_STA_PREV_BSSID_SET;
1869
1870 if (ifmgd->ssid_len) 1966 if (ifmgd->ssid_len)
1871 ifmgd->flags |= IEEE80211_STA_SSID_SET; 1967 ifmgd->flags |= IEEE80211_STA_SSID_SET;
1872 else 1968 else
@@ -1885,6 +1981,10 @@ int ieee80211_sta_set_ssid(struct ieee80211_sub_if_data *sdata, char *ssid, size
1885 ifmgd = &sdata->u.mgd; 1981 ifmgd = &sdata->u.mgd;
1886 1982
1887 if (ifmgd->ssid_len != len || memcmp(ifmgd->ssid, ssid, len) != 0) { 1983 if (ifmgd->ssid_len != len || memcmp(ifmgd->ssid, ssid, len) != 0) {
1984 /*
1985 * Do not use reassociation if SSID is changed (different ESS).
1986 */
1987 ifmgd->flags &= ~IEEE80211_STA_PREV_BSSID_SET;
1888 memset(ifmgd->ssid, 0, sizeof(ifmgd->ssid)); 1988 memset(ifmgd->ssid, 0, sizeof(ifmgd->ssid));
1889 memcpy(ifmgd->ssid, ssid, len); 1989 memcpy(ifmgd->ssid, ssid, len);
1890 ifmgd->ssid_len = len; 1990 ifmgd->ssid_len = len;
@@ -1923,7 +2023,8 @@ int ieee80211_sta_set_bssid(struct ieee80211_sub_if_data *sdata, u8 *bssid)
1923 return ieee80211_sta_commit(sdata); 2023 return ieee80211_sta_commit(sdata);
1924} 2024}
1925 2025
1926int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata, char *ie, size_t len) 2026int ieee80211_sta_set_extra_ie(struct ieee80211_sub_if_data *sdata,
2027 const char *ie, size_t len)
1927{ 2028{
1928 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2029 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1929 2030
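The mlme.c changes above replace the old probe/disassociate ladder with one monitoring pass over three pieces of state: the time of the last unicast frame from the AP, the time of the last beacon, and whether a directed probe request (PROBEREQ_POLL) is still outstanding. The standalone sketch below models that decision order; it is not mac80211 code, and the struct, names and interval values are illustrative stand-ins only.

/*
 * Simplified userspace model of the associated-state monitoring logic
 * rewritten above.  Names and intervals are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define MONITORING_INTERVAL 2000   /* ms, stand-in for IEEE80211_MONITORING_INTERVAL */
#define PROBE_IDLE_TIME     1000   /* ms, stand-in for IEEE80211_PROBE_IDLE_TIME */

struct sta_state {
	unsigned long last_rx;      /* last unicast frame from the AP */
	unsigned long last_beacon;  /* last beacon from the AP */
	bool probereq_poll;         /* a directed probe request is outstanding */
	bool beacon_filtering;      /* driver filters beacons (PS enabled) */
};

enum action { DO_NOTHING, SEND_PROBE, DISASSOCIATE };

static enum action monitor(struct sta_state *s, unsigned long now)
{
	/* outstanding poll and still no traffic: assume the AP is gone */
	if (s->probereq_poll && now - s->last_rx > MONITORING_INTERVAL)
		return DISASSOCIATE;

	/* beacon loss check, skipped when the hardware filters beacons */
	if (!s->beacon_filtering && now - s->last_beacon > MONITORING_INTERVAL) {
		s->probereq_poll = true;
		return SEND_PROBE;
	}

	/* data idle: poll the AP so last_rx keeps moving while associated */
	if (now - s->last_rx > PROBE_IDLE_TIME) {
		s->probereq_poll = true;
		return SEND_PROBE;
	}

	return DO_NOTHING;
}

int main(void)
{
	struct sta_state s = { .last_rx = 0, .last_beacon = 0 };

	printf("t=1500: action %d\n", monitor(&s, 1500)); /* idle -> probe */
	printf("t=4000: action %d\n", monitor(&s, 4000)); /* no answer -> disassoc */
	return 0;
}

As in the hunks above, the beacon-loss branch is skipped when beacons are filtered in power save, which is what the IEEE80211_HW_BEACON_FILTER plus IEEE80211_CONF_PS test expresses.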
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 44525f51707..02730232649 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -10,6 +10,10 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
10 struct ieee80211_sub_if_data *sdata; 10 struct ieee80211_sub_if_data *sdata;
11 struct ieee80211_if_init_conf conf; 11 struct ieee80211_if_init_conf conf;
12 struct sta_info *sta; 12 struct sta_info *sta;
13 unsigned long flags;
14
15 ieee80211_stop_queues_by_reason(hw,
16 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
13 17
14 flush_workqueue(local->hw.workqueue); 18 flush_workqueue(local->hw.workqueue);
15 19
@@ -17,10 +21,23 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
17 list_for_each_entry(sdata, &local->interfaces, list) 21 list_for_each_entry(sdata, &local->interfaces, list)
18 ieee80211_disable_keys(sdata); 22 ieee80211_disable_keys(sdata);
19 23
20 /* remove STAs */ 24 /* Tear down aggregation sessions */
21 list_for_each_entry(sta, &local->sta_list, list) { 25
26 rcu_read_lock();
27
28 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
29 list_for_each_entry_rcu(sta, &local->sta_list, list) {
30 set_sta_flags(sta, WLAN_STA_SUSPEND);
31 ieee80211_sta_tear_down_BA_sessions(sta);
32 }
33 }
22 34
23 if (local->ops->sta_notify) { 35 rcu_read_unlock();
36
37 /* remove STAs */
38 if (local->ops->sta_notify) {
39 spin_lock_irqsave(&local->sta_lock, flags);
40 list_for_each_entry(sta, &local->sta_list, list) {
24 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 41 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
25 sdata = container_of(sdata->bss, 42 sdata = container_of(sdata->bss,
26 struct ieee80211_sub_if_data, 43 struct ieee80211_sub_if_data,
@@ -29,11 +46,11 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
29 local->ops->sta_notify(hw, &sdata->vif, 46 local->ops->sta_notify(hw, &sdata->vif,
30 STA_NOTIFY_REMOVE, &sta->sta); 47 STA_NOTIFY_REMOVE, &sta->sta);
31 } 48 }
49 spin_unlock_irqrestore(&local->sta_lock, flags);
32 } 50 }
33 51
34 /* remove all interfaces */ 52 /* remove all interfaces */
35 list_for_each_entry(sdata, &local->interfaces, list) { 53 list_for_each_entry(sdata, &local->interfaces, list) {
36
37 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 54 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
38 sdata->vif.type != NL80211_IFTYPE_MONITOR && 55 sdata->vif.type != NL80211_IFTYPE_MONITOR &&
39 netif_running(sdata->dev)) { 56 netif_running(sdata->dev)) {
@@ -61,6 +78,7 @@ int __ieee80211_resume(struct ieee80211_hw *hw)
61 struct ieee80211_sub_if_data *sdata; 78 struct ieee80211_sub_if_data *sdata;
62 struct ieee80211_if_init_conf conf; 79 struct ieee80211_if_init_conf conf;
63 struct sta_info *sta; 80 struct sta_info *sta;
81 unsigned long flags;
64 int res; 82 int res;
65 83
66 /* restart hardware */ 84 /* restart hardware */
@@ -72,7 +90,6 @@ int __ieee80211_resume(struct ieee80211_hw *hw)
72 90
73 /* add interfaces */ 91 /* add interfaces */
74 list_for_each_entry(sdata, &local->interfaces, list) { 92 list_for_each_entry(sdata, &local->interfaces, list) {
75
76 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 93 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
77 sdata->vif.type != NL80211_IFTYPE_MONITOR && 94 sdata->vif.type != NL80211_IFTYPE_MONITOR &&
78 netif_running(sdata->dev)) { 95 netif_running(sdata->dev)) {
@@ -84,9 +101,9 @@ int __ieee80211_resume(struct ieee80211_hw *hw)
84 } 101 }
85 102
86 /* add STAs back */ 103 /* add STAs back */
87 list_for_each_entry(sta, &local->sta_list, list) { 104 if (local->ops->sta_notify) {
88 105 spin_lock_irqsave(&local->sta_lock, flags);
89 if (local->ops->sta_notify) { 106 list_for_each_entry(sta, &local->sta_list, list) {
90 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 107 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
91 sdata = container_of(sdata->bss, 108 sdata = container_of(sdata->bss,
92 struct ieee80211_sub_if_data, 109 struct ieee80211_sub_if_data,
@@ -95,8 +112,21 @@ int __ieee80211_resume(struct ieee80211_hw *hw)
95 local->ops->sta_notify(hw, &sdata->vif, 112 local->ops->sta_notify(hw, &sdata->vif,
96 STA_NOTIFY_ADD, &sta->sta); 113 STA_NOTIFY_ADD, &sta->sta);
97 } 114 }
115 spin_unlock_irqrestore(&local->sta_lock, flags);
98 } 116 }
99 117
118 /* Clear Suspend state so that ADDBA requests can be processed */
119
120 rcu_read_lock();
121
122 if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
123 list_for_each_entry_rcu(sta, &local->sta_list, list) {
124 clear_sta_flags(sta, WLAN_STA_SUSPEND);
125 }
126 }
127
128 rcu_read_unlock();
129
100 /* add back keys */ 130 /* add back keys */
101 list_for_each_entry(sdata, &local->interfaces, list) 131 list_for_each_entry(sdata, &local->interfaces, list)
102 if (netif_running(sdata->dev)) 132 if (netif_running(sdata->dev))
@@ -113,5 +143,37 @@ int __ieee80211_resume(struct ieee80211_hw *hw)
113 ieee80211_configure_filter(local); 143 ieee80211_configure_filter(local);
114 netif_addr_unlock_bh(local->mdev); 144 netif_addr_unlock_bh(local->mdev);
115 145
146 /* Finally also reconfigure all the BSS information */
147 list_for_each_entry(sdata, &local->interfaces, list) {
148 u32 changed = ~0;
149 if (!netif_running(sdata->dev))
150 continue;
151 switch (sdata->vif.type) {
152 case NL80211_IFTYPE_STATION:
153 /* disable beacon change bits */
154 changed &= ~IEEE80211_IFCC_BEACON;
155 /* fall through */
156 case NL80211_IFTYPE_ADHOC:
157 case NL80211_IFTYPE_AP:
158 case NL80211_IFTYPE_MESH_POINT:
159 WARN_ON(ieee80211_if_config(sdata, changed));
160 ieee80211_bss_info_change_notify(sdata, ~0);
161 break;
162 case NL80211_IFTYPE_WDS:
163 break;
164 case NL80211_IFTYPE_AP_VLAN:
165 case NL80211_IFTYPE_MONITOR:
166 /* ignore virtual */
167 break;
168 case NL80211_IFTYPE_UNSPECIFIED:
169 case __NL80211_IFTYPE_AFTER_LAST:
170 WARN_ON(1);
171 break;
172 }
173 }
174
175 ieee80211_wake_queues_by_reason(hw,
176 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
177
116 return 0; 178 return 0;
117} 179}
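The pm.c hunks above mostly reorder work: queues are stopped first and woken last, aggregation is torn down (with stations marked suspended so new ADDBA requests are refused) before the driver is told to forget them, and on resume the per-interface BSS configuration is replayed before traffic restarts. A minimal ordering sketch follows; the helpers just log each step and are placeholders, not the real mac80211 functions.

/* Ordering sketch of the suspend/resume paths above. */
#include <stdbool.h>
#include <stdio.h>

struct fake_sta { bool suspended; };

static void step(const char *msg) { printf("  %s\n", msg); }

static void do_suspend(struct fake_sta *sta)
{
	printf("suspend:\n");
	step("stop TX queues (reason: SUSPEND)");
	step("flush workqueue, disable keys");
	sta->suspended = true;             /* WLAN_STA_SUSPEND in the patch */
	step("tear down BA sessions, deny new ADDBA while suspended");
	step("sta_notify(REMOVE) under sta_lock");
	step("remove interfaces, stop hardware");
}

static void do_resume(struct fake_sta *sta)
{
	printf("resume:\n");
	step("restart hardware, re-add interfaces");
	step("sta_notify(ADD) under sta_lock");
	sta->suspended = false;            /* ADDBA allowed again */
	step("re-add keys, restore filters");
	step("reconfigure BSS info for every running interface");
	step("wake TX queues (reason: SUSPEND)");
}

int main(void)
{
	struct fake_sta sta = { false };

	do_suspend(&sta);
	do_resume(&sta);
	return 0;
}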
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 3fa7ab28506..4641f00a1e5 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -219,10 +219,12 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
219 info->control.rates[i].count = 1; 219 info->control.rates[i].count = 1;
220 } 220 }
221 221
222 if (sta && sdata->force_unicast_rateidx > -1) 222 if (sta && sdata->force_unicast_rateidx > -1) {
223 info->control.rates[0].idx = sdata->force_unicast_rateidx; 223 info->control.rates[0].idx = sdata->force_unicast_rateidx;
224 else 224 } else {
225 ref->ops->get_rate(ref->priv, ista, priv_sta, txrc); 225 ref->ops->get_rate(ref->priv, ista, priv_sta, txrc);
226 info->flags |= IEEE80211_TX_INTFL_RCALGO;
227 }
226 228
227 /* 229 /*
228 * try to enforce the maximum rate the user wanted 230 * try to enforce the maximum rate the user wanted
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index b9164c9a956..2ab5ad9e71c 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -44,8 +44,10 @@ static inline void rate_control_tx_status(struct ieee80211_local *local,
44 struct rate_control_ref *ref = local->rate_ctrl; 44 struct rate_control_ref *ref = local->rate_ctrl;
45 struct ieee80211_sta *ista = &sta->sta; 45 struct ieee80211_sta *ista = &sta->sta;
46 void *priv_sta = sta->rate_ctrl_priv; 46 void *priv_sta = sta->rate_ctrl_priv;
47 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
47 48
48 ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb); 49 if (likely(info->flags & IEEE80211_TX_INTFL_RCALGO))
50 ref->ops->tx_status(ref->priv, sband, ista, priv_sta, skb);
49} 51}
50 52
51 53
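Taken together, the rate.c and rate.h hunks gate rate-control feedback on a new flag: rate_control_get_rate() sets IEEE80211_TX_INTFL_RCALGO only when the algorithm actually picked the rate, and rate_control_tx_status() reports TX status back only for frames carrying it, so a forced unicast rate no longer skews the algorithm's statistics. A small model of that gating follows; the flag bit and function names below are illustrative, not the real mac80211 definitions.

/* Minimal model of the RCALGO gating added above. */
#include <stdbool.h>
#include <stdio.h>

#define TX_FLAG_RCALGO (1u << 0)   /* "rate chosen by the RC algorithm" */

struct tx_info { unsigned int flags; int rate_idx; };

static void rc_get_rate(struct tx_info *info, int forced_idx)
{
	if (forced_idx >= 0) {
		info->rate_idx = forced_idx;        /* fixed rate, no RC involved */
	} else {
		info->rate_idx = 11;                /* pretend the algorithm picked one */
		info->flags |= TX_FLAG_RCALGO;
	}
}

static void rc_tx_status(const struct tx_info *info, bool acked)
{
	if (!(info->flags & TX_FLAG_RCALGO))
		return;                             /* don't feed forced rates back */
	printf("RC feedback: rate %d %s\n", info->rate_idx,
	       acked ? "acked" : "failed");
}

int main(void)
{
	struct tx_info a = { 0 }, b = { 0 };

	rc_get_rate(&a, -1); rc_tx_status(&a, true);   /* reported */
	rc_get_rate(&b, 2);  rc_tx_status(&b, true);   /* silently skipped */
	return 0;
}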
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 66f7ecf51b9..64ebe664eff 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -142,6 +142,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
142 /* IEEE80211_RADIOTAP_FLAGS */ 142 /* IEEE80211_RADIOTAP_FLAGS */
143 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) 143 if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
144 *pos |= IEEE80211_RADIOTAP_F_FCS; 144 *pos |= IEEE80211_RADIOTAP_F_FCS;
145 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
146 *pos |= IEEE80211_RADIOTAP_F_BADFCS;
145 if (status->flag & RX_FLAG_SHORTPRE) 147 if (status->flag & RX_FLAG_SHORTPRE)
146 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE; 148 *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
147 pos++; 149 pos++;
@@ -204,9 +206,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
204 /* ensure 2 byte alignment for the 2 byte field as required */ 206 /* ensure 2 byte alignment for the 2 byte field as required */
205 if ((pos - (unsigned char *)rthdr) & 1) 207 if ((pos - (unsigned char *)rthdr) & 1)
206 pos++; 208 pos++;
207 /* FIXME: when radiotap gets a 'bad PLCP' flag use it here */ 209 if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
208 if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) 210 *(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADPLCP);
209 *(__le16 *)pos |= cpu_to_le16(IEEE80211_RADIOTAP_F_RX_BADFCS);
210 pos += 2; 211 pos += 2;
211} 212}
212 213
@@ -849,12 +850,19 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
849 * Mesh beacons will update last_rx when if they are found to 850 * Mesh beacons will update last_rx when if they are found to
850 * match the current local configuration when processed. 851 * match the current local configuration when processed.
851 */ 852 */
852 sta->last_rx = jiffies; 853 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
854 ieee80211_is_beacon(hdr->frame_control)) {
855 rx->sdata->u.mgd.last_beacon = jiffies;
856 } else
857 sta->last_rx = jiffies;
853 } 858 }
854 859
855 if (!(rx->flags & IEEE80211_RX_RA_MATCH)) 860 if (!(rx->flags & IEEE80211_RX_RA_MATCH))
856 return RX_CONTINUE; 861 return RX_CONTINUE;
857 862
863 if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
864 ieee80211_sta_rx_notify(rx->sdata, hdr);
865
858 sta->rx_fragments++; 866 sta->rx_fragments++;
859 sta->rx_bytes += rx->skb->len; 867 sta->rx_bytes += rx->skb->len;
860 sta->last_signal = rx->status->signal; 868 sta->last_signal = rx->status->signal;
@@ -1876,18 +1884,13 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
1876 if (ieee80211_vif_is_mesh(&sdata->vif)) 1884 if (ieee80211_vif_is_mesh(&sdata->vif))
1877 return ieee80211_mesh_rx_mgmt(sdata, rx->skb, rx->status); 1885 return ieee80211_mesh_rx_mgmt(sdata, rx->skb, rx->status);
1878 1886
1879 if (sdata->vif.type != NL80211_IFTYPE_STATION && 1887 if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
1880 sdata->vif.type != NL80211_IFTYPE_ADHOC) 1888 return ieee80211_ibss_rx_mgmt(sdata, rx->skb, rx->status);
1881 return RX_DROP_MONITOR;
1882
1883 1889
1884 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 1890 if (sdata->vif.type == NL80211_IFTYPE_STATION)
1885 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)
1886 return RX_DROP_MONITOR;
1887 return ieee80211_sta_rx_mgmt(sdata, rx->skb, rx->status); 1891 return ieee80211_sta_rx_mgmt(sdata, rx->skb, rx->status);
1888 }
1889 1892
1890 return ieee80211_ibss_rx_mgmt(sdata, rx->skb, rx->status); 1893 return RX_DROP_MONITOR;
1891} 1894}
1892 1895
1893static void ieee80211_rx_michael_mic_report(struct net_device *dev, 1896static void ieee80211_rx_michael_mic_report(struct net_device *dev,
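The radiotap part of the rx.c change splits the two CRC failures: either failure still marks the frame bad via the FCS bit in the one-byte flags field, but a PLCP failure now also sets the dedicated bad-PLCP bit in the 16-bit RX flags field instead of being misreported as bad FCS. A compact sketch of that mapping; the two RX status bits below are simplified stand-ins for mac80211's RX_FLAG_* values, while the radiotap constants follow the published radiotap definitions.

/* Sketch of the bad-FCS / bad-PLCP split in the radiotap header. */
#include <stdint.h>
#include <stdio.h>

#define RX_FAILED_FCS_CRC   (1u << 0)
#define RX_FAILED_PLCP_CRC  (1u << 1)

#define RADIOTAP_F_BADFCS       0x40      /* in the one-byte flags field */
#define RADIOTAP_F_RX_BADPLCP   0x0002    /* in the 16-bit RX flags field */

static void fill_radiotap(unsigned int status, uint8_t *flags, uint16_t *rx_flags)
{
	/* either CRC failure marks the frame as bad in the flags byte */
	if (status & (RX_FAILED_FCS_CRC | RX_FAILED_PLCP_CRC))
		*flags |= RADIOTAP_F_BADFCS;

	/* but only a PLCP failure sets the dedicated RX flag */
	if (status & RX_FAILED_PLCP_CRC)
		*rx_flags |= RADIOTAP_F_RX_BADPLCP;
}

int main(void)
{
	uint8_t flags = 0;
	uint16_t rx_flags = 0;

	fill_radiotap(RX_FAILED_PLCP_CRC, &flags, &rx_flags);
	printf("flags=0x%02x rx_flags=0x%04x\n", (unsigned)flags, (unsigned)rx_flags);
	return 0;
}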
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 5030a3c8750..3bf9839f591 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -214,6 +214,66 @@ void ieee80211_scan_failed(struct ieee80211_local *local)
214 local->scan_req = NULL; 214 local->scan_req = NULL;
215} 215}
216 216
217/*
218 * inform AP that we will go to sleep so that it will buffer the frames
219 * while we scan
220 */
221static void ieee80211_scan_ps_enable(struct ieee80211_sub_if_data *sdata)
222{
223 struct ieee80211_local *local = sdata->local;
224 bool ps = false;
225
226 /* FIXME: what to do when local->pspolling is true? */
227
228 del_timer_sync(&local->dynamic_ps_timer);
229 cancel_work_sync(&local->dynamic_ps_enable_work);
230
231 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
232 ps = true;
233 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
234 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
235 }
236
237 if (!ps || !(local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK))
238 /*
239 * If power save was enabled, no need to send a nullfunc
240 * frame because AP knows that we are sleeping. But if the
241 * hardware is creating the nullfunc frame for power save
242 * status (ie. IEEE80211_HW_PS_NULLFUNC_STACK is not
243 * enabled) and power save was enabled, the firmware just
244 * sent a null frame with power save disabled. So we need
245 * to send a new nullfunc frame to inform the AP that we
246 * are again sleeping.
247 */
248 ieee80211_send_nullfunc(local, sdata, 1);
249}
250
251/* inform AP that we are awake again, unless power save is enabled */
252static void ieee80211_scan_ps_disable(struct ieee80211_sub_if_data *sdata)
253{
254 struct ieee80211_local *local = sdata->local;
255
256 if (!local->powersave)
257 ieee80211_send_nullfunc(local, sdata, 0);
258 else {
259 /*
260 * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
261 * will send a nullfunc frame with the powersave bit set
262 * even though the AP already knows that we are sleeping.
263 * This could be avoided by sending a null frame with power
264 * save bit disabled before enabling the power save, but
265 * this doesn't gain anything.
266 *
267 * When IEEE80211_HW_PS_NULLFUNC_STACK is enabled, no need
268 * to send a nullfunc frame because AP already knows that
269 * we are sleeping, let's just enable power save mode in
270 * hardware.
271 */
272 local->hw.conf.flags |= IEEE80211_CONF_PS;
273 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
274 }
275}
276
217void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted) 277void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
218{ 278{
219 struct ieee80211_local *local = hw_to_local(hw); 279 struct ieee80211_local *local = hw_to_local(hw);
@@ -268,7 +328,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
268 /* Tell AP we're back */ 328 /* Tell AP we're back */
269 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 329 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
270 if (sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED) { 330 if (sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED) {
271 ieee80211_send_nullfunc(local, sdata, 0); 331 ieee80211_scan_ps_disable(sdata);
272 netif_tx_wake_all_queues(sdata->dev); 332 netif_tx_wake_all_queues(sdata->dev);
273 } 333 }
274 } else 334 } else
@@ -409,6 +469,19 @@ int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata,
409 return 0; 469 return 0;
410 } 470 }
411 471
472 /*
473 * Hardware/driver doesn't support hw_scan, so use software
474 * scanning instead. First send a nullfunc frame with power save
475 * bit on so that AP will buffer the frames for us while we are not
476 * listening, then send probe requests to each channel and wait for
477 * the responses. After all channels are scanned, tune back to the
478 * original channel and send a nullfunc frame with power save bit
479 * off to trigger the AP to send us all the buffered frames.
480 *
481 * Note that while local->sw_scanning is true everything else but
482 * nullfunc frames and probe requests will be dropped in
483 * ieee80211_tx_h_check_assoc().
484 */
412 local->sw_scanning = true; 485 local->sw_scanning = true;
413 if (local->ops->sw_scan_start) 486 if (local->ops->sw_scan_start)
414 local->ops->sw_scan_start(local_to_hw(local)); 487 local->ops->sw_scan_start(local_to_hw(local));
@@ -428,7 +501,7 @@ int ieee80211_start_scan(struct ieee80211_sub_if_data *scan_sdata,
428 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 501 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
429 if (sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED) { 502 if (sdata->u.mgd.flags & IEEE80211_STA_ASSOCIATED) {
430 netif_tx_stop_all_queues(sdata->dev); 503 netif_tx_stop_all_queues(sdata->dev);
431 ieee80211_send_nullfunc(local, sdata, 1); 504 ieee80211_scan_ps_enable(sdata);
432 } 505 }
433 } else 506 } else
434 netif_tx_stop_all_queues(sdata->dev); 507 netif_tx_stop_all_queues(sdata->dev);
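The two helpers added to scan.c decide whether a nullfunc frame is needed around a software scan: on entry, a "going to sleep" nullfunc is sent unless power save was already on and the stack itself had built the PS nullfunc (IEEE80211_HW_PS_NULLFUNC_STACK); on exit, an "awake" nullfunc is sent only when power save is not configured, otherwise the PS flag is simply restored. The model below captures just those two decisions, with booleans standing in for the configuration and hardware flags.

/* Decision model for the scan power-save helpers above. */
#include <stdbool.h>
#include <stdio.h>

struct scan_ps {
	bool conf_ps;            /* power save currently enabled in hw config */
	bool hw_nullfunc_stack;  /* stack (not firmware) builds PS nullfuncs */
	bool powersave;          /* user asked for power save */
};

/* returns true if a "sleeping" nullfunc must be sent before scanning */
static bool scan_ps_enable(struct scan_ps *s)
{
	bool was_ps = s->conf_ps;

	s->conf_ps = false;      /* PS is turned off for the scan itself */

	/* if PS was on and the stack sent the nullfunc, the AP already
	 * thinks we are asleep; otherwise tell it now */
	return !(was_ps && s->hw_nullfunc_stack);
}

/* returns true if an "awake" nullfunc must be sent after scanning */
static bool scan_ps_disable(struct scan_ps *s)
{
	if (s->powersave) {
		s->conf_ps = true;   /* just re-enable PS, AP thinks we sleep */
		return false;
	}
	return true;
}

int main(void)
{
	struct scan_ps s = { .conf_ps = true, .hw_nullfunc_stack = true,
			     .powersave = true };

	printf("enable: send nullfunc? %d\n", scan_ps_enable(&s));
	printf("disable: send nullfunc? %d\n", scan_ps_disable(&s));
	return 0;
}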
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 4ba3c540fcf..c5f14e6bbde 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -203,17 +203,6 @@ void sta_info_destroy(struct sta_info *sta)
203 if (tid_rx) 203 if (tid_rx)
204 tid_rx->shutdown = true; 204 tid_rx->shutdown = true;
205 205
206 /*
207 * The stop callback cannot find this station any more, but
208 * it didn't complete its work -- start the queue if necessary
209 */
210 if (sta->ampdu_mlme.tid_state_tx[i] & HT_AGG_STATE_INITIATOR_MSK &&
211 sta->ampdu_mlme.tid_state_tx[i] & HT_AGG_STATE_REQ_STOP_BA_MSK &&
212 local->hw.ampdu_queues)
213 ieee80211_wake_queue_by_reason(&local->hw,
214 local->hw.queues + sta->tid_to_tx_q[i],
215 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
216
217 spin_unlock_bh(&sta->lock); 206 spin_unlock_bh(&sta->lock);
218 207
219 /* 208 /*
@@ -239,6 +228,11 @@ void sta_info_destroy(struct sta_info *sta)
239 tid_tx = sta->ampdu_mlme.tid_tx[i]; 228 tid_tx = sta->ampdu_mlme.tid_tx[i];
240 if (tid_tx) { 229 if (tid_tx) {
241 del_timer_sync(&tid_tx->addba_resp_timer); 230 del_timer_sync(&tid_tx->addba_resp_timer);
231 /*
232 * STA removed while aggregation session being
233 * started? Bit odd, but purge frames anyway.
234 */
235 skb_queue_purge(&tid_tx->pending);
242 kfree(tid_tx); 236 kfree(tid_tx);
243 } 237 }
244 } 238 }
@@ -287,7 +281,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
287 * enable session_timer's data differentiation. refer to 281 * enable session_timer's data differentiation. refer to
288 * sta_rx_agg_session_timer_expired for useage */ 282 * sta_rx_agg_session_timer_expired for useage */
289 sta->timer_to_tid[i] = i; 283 sta->timer_to_tid[i] = i;
290 sta->tid_to_tx_q[i] = -1;
291 /* rx */ 284 /* rx */
292 sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE; 285 sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE;
293 sta->ampdu_mlme.tid_rx[i] = NULL; 286 sta->ampdu_mlme.tid_rx[i] = NULL;
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 1f45573c580..5534d489f50 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -35,6 +35,8 @@
35 * IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next 35 * IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next
36 * frame to this station is transmitted. 36 * frame to this station is transmitted.
37 * @WLAN_STA_MFP: Management frame protection is used with this STA. 37 * @WLAN_STA_MFP: Management frame protection is used with this STA.
38 * @WLAN_STA_SUSPEND: Set/cleared during a suspend/resume cycle.
39 * Used to deny ADDBA requests (both TX and RX).
38 */ 40 */
39enum ieee80211_sta_info_flags { 41enum ieee80211_sta_info_flags {
40 WLAN_STA_AUTH = 1<<0, 42 WLAN_STA_AUTH = 1<<0,
@@ -48,6 +50,7 @@ enum ieee80211_sta_info_flags {
48 WLAN_STA_PSPOLL = 1<<8, 50 WLAN_STA_PSPOLL = 1<<8,
49 WLAN_STA_CLEAR_PS_FILT = 1<<9, 51 WLAN_STA_CLEAR_PS_FILT = 1<<9,
50 WLAN_STA_MFP = 1<<10, 52 WLAN_STA_MFP = 1<<10,
53 WLAN_STA_SUSPEND = 1<<11
51}; 54};
52 55
53#define STA_TID_NUM 16 56#define STA_TID_NUM 16
@@ -70,11 +73,13 @@ enum ieee80211_sta_info_flags {
70 * struct tid_ampdu_tx - TID aggregation information (Tx). 73 * struct tid_ampdu_tx - TID aggregation information (Tx).
71 * 74 *
72 * @addba_resp_timer: timer for peer's response to addba request 75 * @addba_resp_timer: timer for peer's response to addba request
76 * @pending: pending frames queue -- use sta's spinlock to protect
73 * @ssn: Starting Sequence Number expected to be aggregated. 77 * @ssn: Starting Sequence Number expected to be aggregated.
74 * @dialog_token: dialog token for aggregation session 78 * @dialog_token: dialog token for aggregation session
75 */ 79 */
76struct tid_ampdu_tx { 80struct tid_ampdu_tx {
77 struct timer_list addba_resp_timer; 81 struct timer_list addba_resp_timer;
82 struct sk_buff_head pending;
78 u16 ssn; 83 u16 ssn;
79 u8 dialog_token; 84 u8 dialog_token;
80}; 85};
@@ -201,7 +206,6 @@ struct sta_ampdu_mlme {
201 * @tid_seq: per-TID sequence numbers for sending to this STA 206 * @tid_seq: per-TID sequence numbers for sending to this STA
202 * @ampdu_mlme: A-MPDU state machine state 207 * @ampdu_mlme: A-MPDU state machine state
203 * @timer_to_tid: identity mapping to ID timers 208 * @timer_to_tid: identity mapping to ID timers
204 * @tid_to_tx_q: map tid to tx queue (invalid == negative values)
205 * @llid: Local link ID 209 * @llid: Local link ID
206 * @plid: Peer link ID 210 * @plid: Peer link ID
207 * @reason: Cancel reason on PLINK_HOLDING state 211 * @reason: Cancel reason on PLINK_HOLDING state
@@ -276,7 +280,6 @@ struct sta_info {
276 */ 280 */
277 struct sta_ampdu_mlme ampdu_mlme; 281 struct sta_ampdu_mlme ampdu_mlme;
278 u8 timer_to_tid[STA_TID_NUM]; 282 u8 timer_to_tid[STA_TID_NUM];
279 s8 tid_to_tx_q[STA_TID_NUM];
280 283
281#ifdef CONFIG_MAC80211_MESH 284#ifdef CONFIG_MAC80211_MESH
282 /* 285 /*
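The sta_info changes introduce two small pieces of state used elsewhere in this series: the WLAN_STA_SUSPEND flag that agg-rx.c checks to refuse ADDBA requests during suspend, and the per-TID pending queue that tx.c fills while an aggregation session is still being negotiated. A tiny illustration of the flag gate follows; the enum keeps only the bits relevant here and the helper name is made up for the example.

/* Illustrative sketch of gating ADDBA handling on a per-station
 * "suspended" flag, mirroring the WLAN_STA_SUSPEND check added above. */
#include <stdbool.h>
#include <stdio.h>

enum sta_flags {
	STA_AUTH    = 1 << 0,
	STA_ASSOC   = 1 << 1,
	STA_SUSPEND = 1 << 11,   /* set on suspend, cleared on resume */
};

struct sta { unsigned int flags; };

static bool addba_allowed(const struct sta *sta)
{
	/* deny new aggregation sessions while a suspend is in progress */
	return !(sta->flags & STA_SUSPEND);
}

int main(void)
{
	struct sta sta = { .flags = STA_AUTH | STA_ASSOC };

	printf("addba allowed: %d\n", addba_allowed(&sta));
	sta.flags |= STA_SUSPEND;
	printf("addba allowed after suspend: %d\n", addba_allowed(&sta));
	return 0;
}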
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 457238a2f3f..3fb04a86444 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -34,8 +34,7 @@
34 34
35#define IEEE80211_TX_OK 0 35#define IEEE80211_TX_OK 0
36#define IEEE80211_TX_AGAIN 1 36#define IEEE80211_TX_AGAIN 1
37#define IEEE80211_TX_FRAG_AGAIN 2 37#define IEEE80211_TX_PENDING 2
38#define IEEE80211_TX_PENDING 3
39 38
40/* misc utils */ 39/* misc utils */
41 40
@@ -193,7 +192,19 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
193 return TX_CONTINUE; 192 return TX_CONTINUE;
194 193
195 if (unlikely(tx->local->sw_scanning) && 194 if (unlikely(tx->local->sw_scanning) &&
196 !ieee80211_is_probe_req(hdr->frame_control)) 195 !ieee80211_is_probe_req(hdr->frame_control) &&
196 !ieee80211_is_nullfunc(hdr->frame_control))
197 /*
198 * When software scanning only nullfunc frames (to notify
199 * the sleep state to the AP) and probe requests (for the
200 * active scan) are allowed, all other frames should not be
201 * sent and we should not get here, but if we do
202 * nonetheless, drop them to avoid sending them
203 * off-channel. See the link below and
204 * ieee80211_start_scan() for more.
205 *
206 * http://article.gmane.org/gmane.linux.kernel.wireless.general/30089
207 */
197 return TX_DROP; 208 return TX_DROP;
198 209
199 if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT) 210 if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
@@ -690,17 +701,62 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
690 return TX_CONTINUE; 701 return TX_CONTINUE;
691} 702}
692 703
704static int ieee80211_fragment(struct ieee80211_local *local,
705 struct sk_buff *skb, int hdrlen,
706 int frag_threshold)
707{
708 struct sk_buff *tail = skb, *tmp;
709 int per_fragm = frag_threshold - hdrlen - FCS_LEN;
710 int pos = hdrlen + per_fragm;
711 int rem = skb->len - hdrlen - per_fragm;
712
713 if (WARN_ON(rem < 0))
714 return -EINVAL;
715
716 while (rem) {
717 int fraglen = per_fragm;
718
719 if (fraglen > rem)
720 fraglen = rem;
721 rem -= fraglen;
722 tmp = dev_alloc_skb(local->tx_headroom +
723 frag_threshold +
724 IEEE80211_ENCRYPT_HEADROOM +
725 IEEE80211_ENCRYPT_TAILROOM);
726 if (!tmp)
727 return -ENOMEM;
728 tail->next = tmp;
729 tail = tmp;
730 skb_reserve(tmp, local->tx_headroom +
731 IEEE80211_ENCRYPT_HEADROOM);
732 /* copy control information */
733 memcpy(tmp->cb, skb->cb, sizeof(tmp->cb));
734 skb_copy_queue_mapping(tmp, skb);
735 tmp->priority = skb->priority;
736 tmp->do_not_encrypt = skb->do_not_encrypt;
737 tmp->dev = skb->dev;
738 tmp->iif = skb->iif;
739
740 /* copy header and data */
741 memcpy(skb_put(tmp, hdrlen), skb->data, hdrlen);
742 memcpy(skb_put(tmp, fraglen), skb->data + pos, fraglen);
743
744 pos += fraglen;
745 }
746
747 skb->len = hdrlen + per_fragm;
748 return 0;
749}
750
693static ieee80211_tx_result debug_noinline 751static ieee80211_tx_result debug_noinline
694ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) 752ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
695{ 753{
696 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); 754 struct sk_buff *skb = tx->skb;
697 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 755 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
698 size_t hdrlen, per_fragm, num_fragm, payload_len, left; 756 struct ieee80211_hdr *hdr = (void *)skb->data;
699 struct sk_buff **frags, *first, *frag;
700 int i;
701 u16 seq;
702 u8 *pos;
703 int frag_threshold = tx->local->fragmentation_threshold; 757 int frag_threshold = tx->local->fragmentation_threshold;
758 int hdrlen;
759 int fragnum;
704 760
705 if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) 761 if (!(tx->flags & IEEE80211_TX_FRAGMENTED))
706 return TX_CONTINUE; 762 return TX_CONTINUE;
@@ -713,58 +769,35 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
713 if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU)) 769 if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
714 return TX_DROP; 770 return TX_DROP;
715 771
716 first = tx->skb;
717
718 hdrlen = ieee80211_hdrlen(hdr->frame_control); 772 hdrlen = ieee80211_hdrlen(hdr->frame_control);
719 payload_len = first->len - hdrlen;
720 per_fragm = frag_threshold - hdrlen - FCS_LEN;
721 num_fragm = DIV_ROUND_UP(payload_len, per_fragm);
722
723 frags = kzalloc(num_fragm * sizeof(struct sk_buff *), GFP_ATOMIC);
724 if (!frags)
725 goto fail;
726
727 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
728 seq = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ;
729 pos = first->data + hdrlen + per_fragm;
730 left = payload_len - per_fragm;
731 for (i = 0; i < num_fragm - 1; i++) {
732 struct ieee80211_hdr *fhdr;
733 size_t copylen;
734
735 if (left <= 0)
736 goto fail;
737 773
738 /* reserve enough extra head and tail room for possible 774 /* internal error, why is TX_FRAGMENTED set? */
739 * encryption */ 775 if (WARN_ON(skb->len <= frag_threshold))
740 frag = frags[i] = 776 return TX_DROP;
741 dev_alloc_skb(tx->local->tx_headroom +
742 frag_threshold +
743 IEEE80211_ENCRYPT_HEADROOM +
744 IEEE80211_ENCRYPT_TAILROOM);
745 if (!frag)
746 goto fail;
747
748 /* Make sure that all fragments use the same priority so
749 * that they end up using the same TX queue */
750 frag->priority = first->priority;
751 777
752 skb_reserve(frag, tx->local->tx_headroom + 778 /*
753 IEEE80211_ENCRYPT_HEADROOM); 779 * Now fragment the frame. This will allocate all the fragments and
780 * chain them (using skb as the first fragment) to skb->next.
781 * During transmission, we will remove the successfully transmitted
782 * fragments from this list. When the low-level driver rejects one
783 * of the fragments then we will simply pretend to accept the skb
784 * but store it away as pending.
785 */
786 if (ieee80211_fragment(tx->local, skb, hdrlen, frag_threshold))
787 return TX_DROP;
754 788
755 /* copy TX information */ 789 /* update duration/seq/flags of fragments */
756 info = IEEE80211_SKB_CB(frag); 790 fragnum = 0;
757 memcpy(info, first->cb, sizeof(frag->cb)); 791 do {
792 int next_len;
793 const __le16 morefrags = cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
758 794
759 /* copy/fill in 802.11 header */ 795 hdr = (void *)skb->data;
760 fhdr = (struct ieee80211_hdr *) skb_put(frag, hdrlen); 796 info = IEEE80211_SKB_CB(skb);
761 memcpy(fhdr, first->data, hdrlen);
762 fhdr->seq_ctrl = cpu_to_le16(seq | ((i + 1) & IEEE80211_SCTL_FRAG));
763 797
764 if (i == num_fragm - 2) { 798 if (skb->next) {
765 /* clear MOREFRAGS bit for the last fragment */ 799 hdr->frame_control |= morefrags;
766 fhdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_MOREFRAGS); 800 next_len = skb->next->len;
767 } else {
768 /* 801 /*
769 * No multi-rate retries for fragmented frames, that 802 * No multi-rate retries for fragmented frames, that
770 * would completely throw off the NAV at other STAs. 803 * would completely throw off the NAV at other STAs.
@@ -775,37 +808,16 @@ ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx)
775 info->control.rates[4].idx = -1; 808 info->control.rates[4].idx = -1;
776 BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5); 809 BUILD_BUG_ON(IEEE80211_TX_MAX_RATES != 5);
777 info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE; 810 info->flags &= ~IEEE80211_TX_CTL_RATE_CTRL_PROBE;
811 } else {
812 hdr->frame_control &= ~morefrags;
813 next_len = 0;
778 } 814 }
779 815 hdr->duration_id = ieee80211_duration(tx, 0, next_len);
780 /* copy data */ 816 hdr->seq_ctrl |= cpu_to_le16(fragnum & IEEE80211_SCTL_FRAG);
781 copylen = left > per_fragm ? per_fragm : left; 817 fragnum++;
782 memcpy(skb_put(frag, copylen), pos, copylen); 818 } while ((skb = skb->next));
783
784 skb_copy_queue_mapping(frag, first);
785
786 frag->do_not_encrypt = first->do_not_encrypt;
787 frag->dev = first->dev;
788 frag->iif = first->iif;
789
790 pos += copylen;
791 left -= copylen;
792 }
793 skb_trim(first, hdrlen + per_fragm);
794
795 tx->num_extra_frag = num_fragm - 1;
796 tx->extra_frag = frags;
797 819
798 return TX_CONTINUE; 820 return TX_CONTINUE;
799
800 fail:
801 if (frags) {
802 for (i = 0; i < num_fragm - 1; i++)
803 if (frags[i])
804 dev_kfree_skb(frags[i]);
805 kfree(frags);
806 }
807 I802_DEBUG_INC(tx->local->tx_handlers_drop_fragment);
808 return TX_DROP;
809} 821}
810 822
811static ieee80211_tx_result debug_noinline 823static ieee80211_tx_result debug_noinline
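The rewritten fragmentation path above builds a chain on skb->next instead of a separate fragment array: each fragment repeats the 802.11 header and carries at most frag_threshold - hdrlen - FCS_LEN bytes of payload, the first skb is trimmed in place to become fragment 0, and a later pass fixes up the more-fragments bit, fragment number and duration. The userspace model below reproduces just the splitting arithmetic, with a plain linked list standing in for the sk_buff chain.

/* Userspace model of the fragmentation rewrite above: split a payload into
 * a chain of fragments, each limited to threshold - hdrlen - FCS_LEN bytes
 * of payload.  struct frag stands in for sk_buff. */
#include <stdio.h>
#include <stdlib.h>

#define FCS_LEN 4

struct frag {
	size_t len;          /* header + payload carried by this fragment */
	struct frag *next;
};

static struct frag *fragment(size_t total_len, size_t hdrlen, size_t threshold)
{
	size_t per_fragm, rem;
	struct frag *head = NULL, **tail = &head;

	if (threshold <= hdrlen + FCS_LEN || total_len <= hdrlen)
		return NULL;

	per_fragm = threshold - hdrlen - FCS_LEN;
	rem = total_len - hdrlen;

	while (rem) {
		size_t payload = rem < per_fragm ? rem : per_fragm;
		struct frag *f = calloc(1, sizeof(*f));

		if (!f)
			return NULL;        /* leak-free cleanup omitted for brevity */
		f->len = hdrlen + payload;  /* every fragment repeats the header */
		*tail = f;
		tail = &f->next;
		rem -= payload;
	}
	return head;
}

int main(void)
{
	struct frag *f = fragment(3000, 24, 512);
	int n = 0;

	for (; f; f = f->next)
		printf("fragment %d: %zu bytes\n", n++, f->len);
	return 0;
}

For a 3000-byte frame with a 24-byte header and a 512-byte threshold this yields seven fragments, the last one shorter, matching the fragment count ieee80211_fragment() produces for the same input.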
@@ -833,27 +845,19 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
833static ieee80211_tx_result debug_noinline 845static ieee80211_tx_result debug_noinline
834ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx) 846ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
835{ 847{
836 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; 848 struct sk_buff *skb = tx->skb;
837 int next_len, i; 849 struct ieee80211_hdr *hdr;
838 int group_addr = is_multicast_ether_addr(hdr->addr1); 850 int next_len;
839 851 bool group_addr;
840 if (!(tx->flags & IEEE80211_TX_FRAGMENTED)) {
841 hdr->duration_id = ieee80211_duration(tx, group_addr, 0);
842 return TX_CONTINUE;
843 }
844 852
845 hdr->duration_id = ieee80211_duration(tx, group_addr, 853 do {
846 tx->extra_frag[0]->len); 854 hdr = (void *) skb->data;
855 next_len = skb->next ? skb->next->len : 0;
856 group_addr = is_multicast_ether_addr(hdr->addr1);
847 857
848 for (i = 0; i < tx->num_extra_frag; i++) { 858 hdr->duration_id =
849 if (i + 1 < tx->num_extra_frag) 859 ieee80211_duration(tx, group_addr, next_len);
850 next_len = tx->extra_frag[i + 1]->len; 860 } while ((skb = skb->next));
851 else
852 next_len = 0;
853
854 hdr = (struct ieee80211_hdr *)tx->extra_frag[i]->data;
855 hdr->duration_id = ieee80211_duration(tx, 0, next_len);
856 }
857 861
858 return TX_CONTINUE; 862 return TX_CONTINUE;
859} 863}
@@ -861,19 +865,16 @@ ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx)
861static ieee80211_tx_result debug_noinline 865static ieee80211_tx_result debug_noinline
862ieee80211_tx_h_stats(struct ieee80211_tx_data *tx) 866ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
863{ 867{
864 int i; 868 struct sk_buff *skb = tx->skb;
865 869
866 if (!tx->sta) 870 if (!tx->sta)
867 return TX_CONTINUE; 871 return TX_CONTINUE;
868 872
869 tx->sta->tx_packets++; 873 tx->sta->tx_packets++;
870 tx->sta->tx_fragments++; 874 do {
871 tx->sta->tx_bytes += tx->skb->len; 875 tx->sta->tx_fragments++;
872 if (tx->extra_frag) { 876 tx->sta->tx_bytes += skb->len;
873 tx->sta->tx_fragments += tx->num_extra_frag; 877 } while ((skb = skb->next));
874 for (i = 0; i < tx->num_extra_frag; i++)
875 tx->sta->tx_bytes += tx->extra_frag[i]->len;
876 }
877 878
878 return TX_CONTINUE; 879 return TX_CONTINUE;
879} 880}
@@ -983,9 +984,9 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
983 struct ieee80211_hdr *hdr; 984 struct ieee80211_hdr *hdr;
984 struct ieee80211_sub_if_data *sdata; 985 struct ieee80211_sub_if_data *sdata;
985 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 986 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
986
987 int hdrlen, tid; 987 int hdrlen, tid;
988 u8 *qc, *state; 988 u8 *qc, *state;
989 bool queued = false;
989 990
990 memset(tx, 0, sizeof(*tx)); 991 memset(tx, 0, sizeof(*tx));
991 tx->skb = skb; 992 tx->skb = skb;
@@ -1012,25 +1013,53 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
1012 */ 1013 */
1013 } 1014 }
1014 1015
1016 /*
1017 * If this flag is set to true anywhere, and we get here,
1018 * we are doing the needed processing, so remove the flag
1019 * now.
1020 */
1021 info->flags &= ~IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1022
1015 hdr = (struct ieee80211_hdr *) skb->data; 1023 hdr = (struct ieee80211_hdr *) skb->data;
1016 1024
1017 tx->sta = sta_info_get(local, hdr->addr1); 1025 tx->sta = sta_info_get(local, hdr->addr1);
1018 1026
1019 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control)) { 1027 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
1028 (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
1020 unsigned long flags; 1029 unsigned long flags;
1030 struct tid_ampdu_tx *tid_tx;
1031
1021 qc = ieee80211_get_qos_ctl(hdr); 1032 qc = ieee80211_get_qos_ctl(hdr);
1022 tid = *qc & IEEE80211_QOS_CTL_TID_MASK; 1033 tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
1023 1034
1024 spin_lock_irqsave(&tx->sta->lock, flags); 1035 spin_lock_irqsave(&tx->sta->lock, flags);
1036 /*
1037 * XXX: This spinlock could be fairly expensive, but see the
1038 * comment in agg-tx.c:ieee80211_agg_tx_operational().
1039 * One way to solve this would be to do something RCU-like
1040 * for managing the tid_tx struct and using atomic bitops
1041 * for the actual state -- by introducing an actual
1042 * 'operational' bit that would be possible. It would
1043 * require changing ieee80211_agg_tx_operational() to
1044 * set that bit, and changing the way tid_tx is managed
1045 * everywhere, including races between that bit and
1046 * tid_tx going away (tid_tx being added can be easily
1047 * committed to memory before the 'operational' bit).
1048 */
1049 tid_tx = tx->sta->ampdu_mlme.tid_tx[tid];
1025 state = &tx->sta->ampdu_mlme.tid_state_tx[tid]; 1050 state = &tx->sta->ampdu_mlme.tid_state_tx[tid];
1026 if (*state == HT_AGG_STATE_OPERATIONAL) { 1051 if (*state == HT_AGG_STATE_OPERATIONAL) {
1027 info->flags |= IEEE80211_TX_CTL_AMPDU; 1052 info->flags |= IEEE80211_TX_CTL_AMPDU;
1028 if (local->hw.ampdu_queues) 1053 } else if (*state != HT_AGG_STATE_IDLE) {
1029 skb_set_queue_mapping( 1054 /* in progress */
1030 skb, tx->local->hw.queues + 1055 queued = true;
1031 tx->sta->tid_to_tx_q[tid]); 1056 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1057 __skb_queue_tail(&tid_tx->pending, skb);
1032 } 1058 }
1033 spin_unlock_irqrestore(&tx->sta->lock, flags); 1059 spin_unlock_irqrestore(&tx->sta->lock, flags);
1060
1061 if (unlikely(queued))
1062 return TX_QUEUED;
1034 } 1063 }
1035 1064
1036 if (is_multicast_ether_addr(hdr->addr1)) { 1065 if (is_multicast_ether_addr(hdr->addr1)) {
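The prepare-path hunk above turns the per-TID aggregation state into a three-way dispatch: an operational session tags the frame with IEEE80211_TX_CTL_AMPDU, a session still being set up parks the frame on tid_tx->pending with IEEE80211_TX_INTFL_NEED_TXPROCESSING set so it can be re-processed later, and an idle TID transmits normally. A minimal dispatch sketch; the enum and verdict names are illustrative only.

/* Simplified model of the per-TID dispatch added to the TX prepare path:
 * operational sessions tag the frame, sessions still being set up queue it,
 * idle TIDs send normally. */
#include <stdio.h>

enum agg_state { AGG_IDLE, AGG_IN_PROGRESS, AGG_OPERATIONAL };
enum verdict   { TX_SEND_NORMAL, TX_SEND_AMPDU, TX_QUEUE_PENDING };

static enum verdict classify(enum agg_state state)
{
	switch (state) {
	case AGG_OPERATIONAL:
		return TX_SEND_AMPDU;      /* IEEE80211_TX_CTL_AMPDU */
	case AGG_IN_PROGRESS:
		return TX_QUEUE_PENDING;   /* park on tid_tx->pending */
	case AGG_IDLE:
	default:
		return TX_SEND_NORMAL;
	}
}

int main(void)
{
	printf("idle -> %d\n", classify(AGG_IDLE));
	printf("in progress -> %d\n", classify(AGG_IN_PROGRESS));
	printf("operational -> %d\n", classify(AGG_OPERATIONAL));
	return 0;
}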
@@ -1081,51 +1110,55 @@ static int ieee80211_tx_prepare(struct ieee80211_local *local,
1081 } 1110 }
1082 if (unlikely(!dev)) 1111 if (unlikely(!dev))
1083 return -ENODEV; 1112 return -ENODEV;
1084 /* initialises tx with control */ 1113 /*
1114 * initialises tx with control
1115 *
1116 * return value is safe to ignore here because this function
1117 * can only be invoked for multicast frames
1118 *
1119 * XXX: clean up
1120 */
1085 __ieee80211_tx_prepare(tx, skb, dev); 1121 __ieee80211_tx_prepare(tx, skb, dev);
1086 dev_put(dev); 1122 dev_put(dev);
1087 return 0; 1123 return 0;
1088} 1124}
1089 1125
1090static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb, 1126static int __ieee80211_tx(struct ieee80211_local *local,
1091 struct ieee80211_tx_data *tx) 1127 struct sk_buff **skbp,
1128 struct sta_info *sta)
1092{ 1129{
1130 struct sk_buff *skb = *skbp, *next;
1093 struct ieee80211_tx_info *info; 1131 struct ieee80211_tx_info *info;
1094 int ret, i; 1132 int ret, len;
1133 bool fragm = false;
1095 1134
1096 if (skb) { 1135 local->mdev->trans_start = jiffies;
1136
1137 while (skb) {
1097 if (ieee80211_queue_stopped(&local->hw, 1138 if (ieee80211_queue_stopped(&local->hw,
1098 skb_get_queue_mapping(skb))) 1139 skb_get_queue_mapping(skb)))
1099 return IEEE80211_TX_PENDING; 1140 return IEEE80211_TX_PENDING;
1100 1141
1101 ret = local->ops->tx(local_to_hw(local), skb); 1142 info = IEEE80211_SKB_CB(skb);
1102 if (ret) 1143
1103 return IEEE80211_TX_AGAIN; 1144 if (fragm)
1104 local->mdev->trans_start = jiffies;
1105 ieee80211_led_tx(local, 1);
1106 }
1107 if (tx->extra_frag) {
1108 for (i = 0; i < tx->num_extra_frag; i++) {
1109 if (!tx->extra_frag[i])
1110 continue;
1111 info = IEEE80211_SKB_CB(tx->extra_frag[i]);
1112 info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT | 1145 info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
1113 IEEE80211_TX_CTL_FIRST_FRAGMENT); 1146 IEEE80211_TX_CTL_FIRST_FRAGMENT);
1114 if (ieee80211_queue_stopped(&local->hw, 1147
1115 skb_get_queue_mapping(tx->extra_frag[i]))) 1148 next = skb->next;
1116 return IEEE80211_TX_FRAG_AGAIN; 1149 len = skb->len;
1117 1150 ret = local->ops->tx(local_to_hw(local), skb);
1118 ret = local->ops->tx(local_to_hw(local), 1151 if (WARN_ON(ret != NETDEV_TX_OK && skb->len != len)) {
1119 tx->extra_frag[i]); 1152 dev_kfree_skb(skb);
1120 if (ret) 1153 ret = NETDEV_TX_OK;
1121 return IEEE80211_TX_FRAG_AGAIN;
1122 local->mdev->trans_start = jiffies;
1123 ieee80211_led_tx(local, 1);
1124 tx->extra_frag[i] = NULL;
1125 } 1154 }
1126 kfree(tx->extra_frag); 1155 if (ret != NETDEV_TX_OK)
1127 tx->extra_frag = NULL; 1156 return IEEE80211_TX_AGAIN;
1157 *skbp = skb = next;
1158 ieee80211_led_tx(local, 1);
1159 fragm = true;
1128 } 1160 }
1161
1129 return IEEE80211_TX_OK; 1162 return IEEE80211_TX_OK;
1130} 1163}
1131 1164
@@ -1137,7 +1170,6 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1137{ 1170{
1138 struct sk_buff *skb = tx->skb; 1171 struct sk_buff *skb = tx->skb;
1139 ieee80211_tx_result res = TX_DROP; 1172 ieee80211_tx_result res = TX_DROP;
1140 int i;
1141 1173
1142#define CALL_TXH(txh) \ 1174#define CALL_TXH(txh) \
1143 res = txh(tx); \ 1175 res = txh(tx); \
@@ -1161,11 +1193,13 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1161 txh_done: 1193 txh_done:
1162 if (unlikely(res == TX_DROP)) { 1194 if (unlikely(res == TX_DROP)) {
1163 I802_DEBUG_INC(tx->local->tx_handlers_drop); 1195 I802_DEBUG_INC(tx->local->tx_handlers_drop);
1164 dev_kfree_skb(skb); 1196 while (skb) {
1165 for (i = 0; i < tx->num_extra_frag; i++) 1197 struct sk_buff *next;
1166 if (tx->extra_frag[i]) 1198
1167 dev_kfree_skb(tx->extra_frag[i]); 1199 next = skb->next;
1168 kfree(tx->extra_frag); 1200 dev_kfree_skb(skb);
1201 skb = next;
1202 }
1169 return -1; 1203 return -1;
1170 } else if (unlikely(res == TX_QUEUED)) { 1204 } else if (unlikely(res == TX_QUEUED)) {
1171 I802_DEBUG_INC(tx->local->tx_handlers_queued); 1205 I802_DEBUG_INC(tx->local->tx_handlers_queued);
@@ -1175,23 +1209,26 @@ static int invoke_tx_handlers(struct ieee80211_tx_data *tx)
1175 return 0; 1209 return 0;
1176} 1210}
1177 1211
1178static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb) 1212static void ieee80211_tx(struct net_device *dev, struct sk_buff *skb,
1213 bool txpending)
1179{ 1214{
1180 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 1215 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1181 struct sta_info *sta; 1216 struct sta_info *sta;
1182 struct ieee80211_tx_data tx; 1217 struct ieee80211_tx_data tx;
1183 ieee80211_tx_result res_prepare; 1218 ieee80211_tx_result res_prepare;
1184 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1219 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1185 int ret, i; 1220 struct sk_buff *next;
1221 unsigned long flags;
1222 int ret, retries;
1186 u16 queue; 1223 u16 queue;
1187 1224
1188 queue = skb_get_queue_mapping(skb); 1225 queue = skb_get_queue_mapping(skb);
1189 1226
1190 WARN_ON(test_bit(queue, local->queues_pending)); 1227 WARN_ON(!txpending && !skb_queue_empty(&local->pending[queue]));
1191 1228
1192 if (unlikely(skb->len < 10)) { 1229 if (unlikely(skb->len < 10)) {
1193 dev_kfree_skb(skb); 1230 dev_kfree_skb(skb);
1194 return 0; 1231 return;
1195 } 1232 }
1196 1233
1197 rcu_read_lock(); 1234 rcu_read_lock();
@@ -1199,10 +1236,13 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
1199 /* initialises tx */ 1236 /* initialises tx */
1200 res_prepare = __ieee80211_tx_prepare(&tx, skb, dev); 1237 res_prepare = __ieee80211_tx_prepare(&tx, skb, dev);
1201 1238
1202 if (res_prepare == TX_DROP) { 1239 if (unlikely(res_prepare == TX_DROP)) {
1203 dev_kfree_skb(skb); 1240 dev_kfree_skb(skb);
1204 rcu_read_unlock(); 1241 rcu_read_unlock();
1205 return 0; 1242 return;
1243 } else if (unlikely(res_prepare == TX_QUEUED)) {
1244 rcu_read_unlock();
1245 return;
1206 } 1246 }
1207 1247
1208 sta = tx.sta; 1248 sta = tx.sta;
@@ -1212,59 +1252,71 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb)
1212 if (invoke_tx_handlers(&tx)) 1252 if (invoke_tx_handlers(&tx))
1213 goto out; 1253 goto out;
1214 1254
1215retry: 1255 retries = 0;
1216 ret = __ieee80211_tx(local, skb, &tx); 1256 retry:
1217 if (ret) { 1257 ret = __ieee80211_tx(local, &tx.skb, tx.sta);
1218 struct ieee80211_tx_stored_packet *store; 1258 switch (ret) {
1219 1259 case IEEE80211_TX_OK:
1260 break;
1261 case IEEE80211_TX_AGAIN:
1220 /* 1262 /*
1221 * Since there are no fragmented frames on A-MPDU 1263 * Since there are no fragmented frames on A-MPDU
1222 * queues, there's no reason for a driver to reject 1264 * queues, there's no reason for a driver to reject
1223 * a frame there, warn and drop it. 1265 * a frame there, warn and drop it.
1224 */ 1266 */
1225 if (ret != IEEE80211_TX_PENDING) 1267 if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU))
1226 if (WARN_ON(info->flags & IEEE80211_TX_CTL_AMPDU)) 1268 goto drop;
1227 goto drop; 1269 /* fall through */
1270 case IEEE80211_TX_PENDING:
1271 skb = tx.skb;
1272
1273 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
1274
1275 if (__netif_subqueue_stopped(local->mdev, queue)) {
1276 do {
1277 next = skb->next;
1278 skb->next = NULL;
1279 if (unlikely(txpending))
1280 skb_queue_head(&local->pending[queue],
1281 skb);
1282 else
1283 skb_queue_tail(&local->pending[queue],
1284 skb);
1285 } while ((skb = next));
1228 1286
1229 store = &local->pending_packet[queue]; 1287 /*
1288 * Make sure nobody will enable the queue on us
1289 * (without going through the tasklet) nor disable the
1290 * netdev queue underneath the pending handling code.
1291 */
1292 __set_bit(IEEE80211_QUEUE_STOP_REASON_PENDING,
1293 &local->queue_stop_reasons[queue]);
1230 1294
1231 if (ret == IEEE80211_TX_FRAG_AGAIN) 1295 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
1232 skb = NULL; 1296 flags);
1297 } else {
1298 spin_unlock_irqrestore(&local->queue_stop_reason_lock,
1299 flags);
1233 1300
1234 set_bit(queue, local->queues_pending); 1301 retries++;
1235 smp_mb(); 1302 if (WARN(retries > 10, "tx refused but queue active"))
1236 /* 1303 goto drop;
1237 * When the driver gets out of buffers during sending of
1238 * fragments and calls ieee80211_stop_queue, the netif
1239 * subqueue is stopped. There is, however, a small window
1240 * in which the PENDING bit is not yet set. If a buffer
1241 * gets available in that window (i.e. driver calls
1242 * ieee80211_wake_queue), we would end up with ieee80211_tx
1243 * called with the PENDING bit still set. Prevent this by
1244 * continuing transmitting here when that situation is
1245 * possible to have happened.
1246 */
1247 if (!__netif_subqueue_stopped(local->mdev, queue)) {
1248 clear_bit(queue, local->queues_pending);
1249 goto retry; 1304 goto retry;
1250 } 1305 }
1251 store->skb = skb;
1252 store->extra_frag = tx.extra_frag;
1253 store->num_extra_frag = tx.num_extra_frag;
1254 } 1306 }
1255 out: 1307 out:
1256 rcu_read_unlock(); 1308 rcu_read_unlock();
1257 return 0; 1309 return;
1258 1310
1259 drop: 1311 drop:
1260 if (skb)
1261 dev_kfree_skb(skb);
1262 for (i = 0; i < tx.num_extra_frag; i++)
1263 if (tx.extra_frag[i])
1264 dev_kfree_skb(tx.extra_frag[i]);
1265 kfree(tx.extra_frag);
1266 rcu_read_unlock(); 1312 rcu_read_unlock();
1267 return 0; 1313
1314 skb = tx.skb;
1315 while (skb) {
1316 next = skb->next;
1317 dev_kfree_skb(skb);
1318 skb = next;
1319 }
1268} 1320}
1269 1321
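Note: when __ieee80211_tx() returns TX_AGAIN or TX_PENDING and the hardware queue really is stopped, the code above pushes every unsent fragment onto local->pending[queue] and sets the PENDING stop-reason bit so only the pending tasklet can re-enable the queue; if the queue is not stopped, it simply retries. A rough standalone model of that requeue decision follows; the list and queue types and all names are invented for illustration.

    #include <stdbool.h>
    #include <stdio.h>

    #define REASON_PENDING 0	/* bit index, mirrors the PENDING stop reason */

    struct pkt {
    	struct pkt *next;
    	int id;
    };

    struct queue {
    	struct pkt *pending_head, *pending_tail;
    	unsigned long stop_reasons;	/* bitmask of stop reasons */
    	bool hw_stopped;		/* netif subqueue state */
    };

    static void pending_tail_add(struct queue *q, struct pkt *p)
    {
    	p->next = NULL;
    	if (q->pending_tail)
    		q->pending_tail->next = p;
    	else
    		q->pending_head = p;
    	q->pending_tail = p;
    }

    /*
     * Model of the refusal path in ieee80211_tx(): if the hw queue is stopped,
     * park the whole unsent chain and make sure only the pending machinery
     * can wake the queue again; otherwise the caller retries transmission.
     */
    static bool park_if_stopped(struct queue *q, struct pkt *chain)
    {
    	struct pkt *next;

    	if (!q->hw_stopped)
    		return false;	/* caller goes back to the retry label */

    	while (chain) {
    		next = chain->next;
    		pending_tail_add(q, chain);
    		chain = next;
    	}
    	q->stop_reasons |= 1UL << REASON_PENDING;
    	return true;
    }

    int main(void)
    {
    	struct queue q = { .hw_stopped = true };
    	struct pkt a = { NULL, 1 }, b = { NULL, 2 };

    	a.next = &b;
    	if (park_if_stopped(&q, &a))
    		printf("parked, stop_reasons=%#lx\n", q.stop_reasons);
    	return 0;
    }
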
1270/* device xmit handlers */ 1322/* device xmit handlers */
@@ -1323,7 +1375,6 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
1323 FOUND_SDATA, 1375 FOUND_SDATA,
1324 UNKNOWN_ADDRESS, 1376 UNKNOWN_ADDRESS,
1325 } monitor_iface = NOT_MONITOR; 1377 } monitor_iface = NOT_MONITOR;
1326 int ret;
1327 1378
1328 if (skb->iif) 1379 if (skb->iif)
1329 odev = dev_get_by_index(&init_net, skb->iif); 1380 odev = dev_get_by_index(&init_net, skb->iif);
@@ -1337,7 +1388,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
1337 "originating device\n", dev->name); 1388 "originating device\n", dev->name);
1338#endif 1389#endif
1339 dev_kfree_skb(skb); 1390 dev_kfree_skb(skb);
1340 return 0; 1391 return NETDEV_TX_OK;
1341 } 1392 }
1342 1393
1343 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) && 1394 if ((local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) &&
@@ -1366,7 +1417,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
1366 else 1417 else
1367 if (mesh_nexthop_lookup(skb, osdata)) { 1418 if (mesh_nexthop_lookup(skb, osdata)) {
1368 dev_put(odev); 1419 dev_put(odev);
1369 return 0; 1420 return NETDEV_TX_OK;
1370 } 1421 }
1371 if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0) 1422 if (memcmp(odev->dev_addr, hdr->addr4, ETH_ALEN) != 0)
1372 IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh, 1423 IEEE80211_IFSTA_MESH_CTR_INC(&osdata->u.mesh,
@@ -1428,7 +1479,7 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
1428 if (ieee80211_skb_resize(osdata->local, skb, headroom, may_encrypt)) { 1479 if (ieee80211_skb_resize(osdata->local, skb, headroom, may_encrypt)) {
1429 dev_kfree_skb(skb); 1480 dev_kfree_skb(skb);
1430 dev_put(odev); 1481 dev_put(odev);
1431 return 0; 1482 return NETDEV_TX_OK;
1432 } 1483 }
1433 1484
1434 if (osdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1485 if (osdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -1437,10 +1488,11 @@ int ieee80211_master_start_xmit(struct sk_buff *skb, struct net_device *dev)
1437 u.ap); 1488 u.ap);
1438 if (likely(monitor_iface != UNKNOWN_ADDRESS)) 1489 if (likely(monitor_iface != UNKNOWN_ADDRESS))
1439 info->control.vif = &osdata->vif; 1490 info->control.vif = &osdata->vif;
1440 ret = ieee80211_tx(odev, skb); 1491
1492 ieee80211_tx(odev, skb, false);
1441 dev_put(odev); 1493 dev_put(odev);
1442 1494
1443 return ret; 1495 return NETDEV_TX_OK;
1444} 1496}
1445 1497
1446int ieee80211_monitor_start_xmit(struct sk_buff *skb, 1498int ieee80211_monitor_start_xmit(struct sk_buff *skb,
@@ -1666,8 +1718,7 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1666 } 1718 }
1667 1719
1668 /* receiver and we are QoS enabled, use a QoS type frame */ 1720 /* receiver and we are QoS enabled, use a QoS type frame */
1669 if (sta_flags & WLAN_STA_WME && 1721 if ((sta_flags & WLAN_STA_WME) && local->hw.queues >= 4) {
1670 ieee80211_num_regular_queues(&local->hw) >= 4) {
1671 fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 1722 fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1672 hdrlen += 2; 1723 hdrlen += 2;
1673 } 1724 }
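Note: with the separate aggregation queues removed, the hunk above ties the QoS decision directly to local->hw.queues: a QoS data frame is only built when the peer is WME-capable and the hardware exposes at least the four AC queues. A tiny sketch of that check; the constant values here are placeholders for illustration, only the condition mirrors the diff.

    #include <stdint.h>
    #include <stdio.h>

    #define WLAN_STA_WME		 0x0002	/* placeholder flag value */
    #define IEEE80211_STYPE_QOS_DATA 0x0080	/* placeholder subtype bits */

    /* Returns the extra header length (0 or 2) and updates the frame control. */
    static int maybe_make_qos(uint32_t sta_flags, int hw_queues, uint16_t *fc)
    {
    	if ((sta_flags & WLAN_STA_WME) && hw_queues >= 4) {
    		*fc |= IEEE80211_STYPE_QOS_DATA;
    		return 2;	/* room for the QoS control field */
    	}
    	return 0;
    }

    int main(void)
    {
    	uint16_t fc = 0x0008;	/* data frame */
    	int extra = maybe_make_qos(WLAN_STA_WME, 4, &fc);

    	printf("fc=%#06x, extra hdrlen=%d\n", fc, extra);
    	return 0;
    }
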
@@ -1799,19 +1850,58 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb,
1799 */ 1850 */
1800void ieee80211_clear_tx_pending(struct ieee80211_local *local) 1851void ieee80211_clear_tx_pending(struct ieee80211_local *local)
1801{ 1852{
1802 int i, j; 1853 int i;
1803 struct ieee80211_tx_stored_packet *store;
1804 1854
1805 for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) { 1855 for (i = 0; i < local->hw.queues; i++)
1806 if (!test_bit(i, local->queues_pending)) 1856 skb_queue_purge(&local->pending[i]);
1807 continue; 1857}
1808 store = &local->pending_packet[i]; 1858
1809 kfree_skb(store->skb); 1859static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
1810 for (j = 0; j < store->num_extra_frag; j++) 1860 struct sk_buff *skb)
1811 kfree_skb(store->extra_frag[j]); 1861{
1812 kfree(store->extra_frag); 1862 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1813 clear_bit(i, local->queues_pending); 1863 struct ieee80211_sub_if_data *sdata;
1864 struct sta_info *sta;
1865 struct ieee80211_hdr *hdr;
1866 struct net_device *dev;
1867 int ret;
1868 bool result = true;
1869
1870 /* does interface still exist? */
1871 dev = dev_get_by_index(&init_net, skb->iif);
1872 if (!dev) {
1873 dev_kfree_skb(skb);
1874 return true;
1814 } 1875 }
1876
1877 /* validate info->control.vif against skb->iif */
1878 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1879 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
1880 sdata = container_of(sdata->bss,
1881 struct ieee80211_sub_if_data,
1882 u.ap);
1883
1884 if (unlikely(info->control.vif && info->control.vif != &sdata->vif)) {
1885 dev_kfree_skb(skb);
1886 result = true;
1887 goto out;
1888 }
1889
1890 if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
1891 ieee80211_tx(dev, skb, true);
1892 } else {
1893 hdr = (struct ieee80211_hdr *)skb->data;
1894 sta = sta_info_get(local, hdr->addr1);
1895
1896 ret = __ieee80211_tx(local, &skb, sta);
1897 if (ret != IEEE80211_TX_OK)
1898 result = false;
1899 }
1900
1901 out:
1902 dev_put(dev);
1903
1904 return result;
1815} 1905}
1816 1906
1817/* 1907/*
@@ -1822,40 +1912,53 @@ void ieee80211_tx_pending(unsigned long data)
1822{ 1912{
1823 struct ieee80211_local *local = (struct ieee80211_local *)data; 1913 struct ieee80211_local *local = (struct ieee80211_local *)data;
1824 struct net_device *dev = local->mdev; 1914 struct net_device *dev = local->mdev;
1825 struct ieee80211_tx_stored_packet *store; 1915 unsigned long flags;
1826 struct ieee80211_tx_data tx; 1916 int i;
1827 int i, ret; 1917 bool next;
1828 1918
1919 rcu_read_lock();
1829 netif_tx_lock_bh(dev); 1920 netif_tx_lock_bh(dev);
1830 for (i = 0; i < ieee80211_num_regular_queues(&local->hw); i++) {
1831 /* Check that this queue is ok */
1832 if (__netif_subqueue_stopped(local->mdev, i) &&
1833 !test_bit(i, local->queues_pending_run))
1834 continue;
1835 1921
1836 if (!test_bit(i, local->queues_pending)) { 1922 for (i = 0; i < local->hw.queues; i++) {
1837 clear_bit(i, local->queues_pending_run); 1923 /*
1838 ieee80211_wake_queue(&local->hw, i); 1924 * If queue is stopped by something other than due to pending
1925 * frames, or we have no pending frames, proceed to next queue.
1926 */
1927 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
1928 next = false;
1929 if (local->queue_stop_reasons[i] !=
1930 BIT(IEEE80211_QUEUE_STOP_REASON_PENDING) ||
1931 skb_queue_empty(&local->pending[i]))
1932 next = true;
1933 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
1934
1935 if (next)
1839 continue; 1936 continue;
1840 }
1841 1937
1842 clear_bit(i, local->queues_pending_run); 1938 /*
1939 * start the queue now to allow processing our packets,
1940 * we're under the tx lock here anyway so nothing will
1941 * happen as a result of this
1942 */
1843 netif_start_subqueue(local->mdev, i); 1943 netif_start_subqueue(local->mdev, i);
1844 1944
1845 store = &local->pending_packet[i]; 1945 while (!skb_queue_empty(&local->pending[i])) {
1846 tx.extra_frag = store->extra_frag; 1946 struct sk_buff *skb = skb_dequeue(&local->pending[i]);
1847 tx.num_extra_frag = store->num_extra_frag; 1947
1848 tx.flags = 0; 1948 if (!ieee80211_tx_pending_skb(local, skb)) {
1849 ret = __ieee80211_tx(local, store->skb, &tx); 1949 skb_queue_head(&local->pending[i], skb);
1850 if (ret) { 1950 break;
1851 if (ret == IEEE80211_TX_FRAG_AGAIN) 1951 }
1852 store->skb = NULL;
1853 } else {
1854 clear_bit(i, local->queues_pending);
1855 ieee80211_wake_queue(&local->hw, i);
1856 } 1952 }
1953
1954 /* Start regular packet processing again. */
1955 if (skb_queue_empty(&local->pending[i]))
1956 ieee80211_wake_queue_by_reason(&local->hw, i,
1957 IEEE80211_QUEUE_STOP_REASON_PENDING);
1857 } 1958 }
1959
1858 netif_tx_unlock_bh(dev); 1960 netif_tx_unlock_bh(dev);
1961 rcu_read_unlock();
1859} 1962}
1860 1963
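Note: ieee80211_tx_pending() above walks local->pending[i] for each hardware queue, but only when the sole remaining stop reason is the pending one; it restarts the netdev subqueue under the tx lock, requeues the head frame if the driver refuses again (via ieee80211_tx_pending_skb(), which also revalidates the originating interface), and only wakes the queue for real once the list is drained. A compact standalone model of that drain loop; the types and names are invented, only the control flow follows the code above.

    #include <stdbool.h>
    #include <stdio.h>

    #define REASON_PENDING 0

    struct pkt { struct pkt *next; int id; };

    struct queue {
    	struct pkt *pending;		/* head of the deferred-frame list */
    	unsigned long stop_reasons;
    	bool hw_stopped;
    };

    /* Stand-in for handing one deferred frame back to the driver. */
    static bool resend_one(struct pkt *p, int *budget)
    {
    	if (*budget <= 0)
    		return false;		/* driver refused, keep the frame */
    	(*budget)--;
    	printf("flushed deferred frame %d\n", p->id);
    	return true;
    }

    /* Model of one iteration of the ieee80211_tx_pending() tasklet body. */
    static void flush_pending(struct queue *q, int driver_budget)
    {
    	/* Skip if something besides PENDING stopped the queue, or nothing waits. */
    	if (q->stop_reasons != (1UL << REASON_PENDING) || !q->pending)
    		return;

    	q->hw_stopped = false;		/* netif_start_subqueue() equivalent */

    	while (q->pending) {
    		struct pkt *p = q->pending;

    		if (!resend_one(p, &driver_budget))
    			break;		/* leave it at the head, try again later */
    		q->pending = p->next;
    	}

    	if (!q->pending) {
    		/* wake by reason PENDING: clear the bit, queue usable again */
    		q->stop_reasons &= ~(1UL << REASON_PENDING);
    		printf("queue drained and woken\n");
    	}
    }

    int main(void)
    {
    	struct pkt b = { NULL, 2 }, a = { &b, 1 };
    	struct queue q = { &a, 1UL << REASON_PENDING, true };

    	flush_pending(&q, 8);
    	return 0;
    }
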
1861/* functions for drivers to get certain frames */ 1964/* functions for drivers to get certain frames */
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index e0431a1d218..fdf432f1455 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -166,18 +166,13 @@ int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
166 166
167void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx) 167void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
168{ 168{
169 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx->skb->data; 169 struct sk_buff *skb = tx->skb;
170 170 struct ieee80211_hdr *hdr;
171 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); 171
172 if (tx->extra_frag) { 172 do {
173 struct ieee80211_hdr *fhdr; 173 hdr = (struct ieee80211_hdr *) skb->data;
174 int i; 174 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
175 for (i = 0; i < tx->num_extra_frag; i++) { 175 } while ((skb = skb->next));
176 fhdr = (struct ieee80211_hdr *)
177 tx->extra_frag[i]->data;
178 fhdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
179 }
180 }
181} 176}
182 177
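Note: ieee80211_tx_set_protected() now uses the same do/while walk over the skb->next chain that the WEP, TKIP and CCMP encrypt handlers further down switch to. The standalone sketch below shows only that traversal pattern, flipping one frame-control bit on every fragment; the struct members and the bit value are illustrative, not the kernel definitions.

    #include <stdint.h>
    #include <stdio.h>

    #define FCTL_PROTECTED 0x4000	/* placeholder for the Protected Frame bit */

    struct fragment {
    	struct fragment *next;
    	uint16_t frame_control;
    };

    /* Mirror of the new do/while walk: the first fragment is never NULL here. */
    static void set_protected(struct fragment *frag)
    {
    	do {
    		frag->frame_control |= FCTL_PROTECTED;
    	} while ((frag = frag->next));
    }

    int main(void)
    {
    	struct fragment f2 = { NULL, 0x0008 };
    	struct fragment f1 = { &f2, 0x0008 };

    	set_protected(&f1);
    	printf("%#06x %#06x\n", f1.frame_control, f2.frame_control);
    	return 0;
    }
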
183int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, 178int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
@@ -344,42 +339,21 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
344{ 339{
345 struct ieee80211_local *local = hw_to_local(hw); 340 struct ieee80211_local *local = hw_to_local(hw);
346 341
347 if (queue >= hw->queues) { 342 if (WARN_ON(queue >= hw->queues))
348 if (local->ampdu_ac_queue[queue - hw->queues] < 0) 343 return;
349 return;
350
351 /*
352 * for virtual aggregation queues, we need to refcount the
353 * internal mac80211 disable (multiple times!), keep track of
354 * driver disable _and_ make sure the regular queue is
355 * actually enabled.
356 */
357 if (reason == IEEE80211_QUEUE_STOP_REASON_AGGREGATION)
358 local->amdpu_ac_stop_refcnt[queue - hw->queues]--;
359 else
360 __clear_bit(reason, &local->queue_stop_reasons[queue]);
361
362 if (local->queue_stop_reasons[queue] ||
363 local->amdpu_ac_stop_refcnt[queue - hw->queues])
364 return;
365
366 /* now go on to treat the corresponding regular queue */
367 queue = local->ampdu_ac_queue[queue - hw->queues];
368 reason = IEEE80211_QUEUE_STOP_REASON_AGGREGATION;
369 }
370 344
371 __clear_bit(reason, &local->queue_stop_reasons[queue]); 345 __clear_bit(reason, &local->queue_stop_reasons[queue]);
372 346
347 if (!skb_queue_empty(&local->pending[queue]) &&
348 local->queue_stop_reasons[queue] ==
349 BIT(IEEE80211_QUEUE_STOP_REASON_PENDING))
350 tasklet_schedule(&local->tx_pending_tasklet);
351
373 if (local->queue_stop_reasons[queue] != 0) 352 if (local->queue_stop_reasons[queue] != 0)
374 /* someone still has this queue stopped */ 353 /* someone still has this queue stopped */
375 return; 354 return;
376 355
377 if (test_bit(queue, local->queues_pending)) { 356 netif_wake_subqueue(local->mdev, queue);
378 set_bit(queue, local->queues_pending_run);
379 tasklet_schedule(&local->tx_pending_tasklet);
380 } else {
381 netif_wake_subqueue(local->mdev, queue);
382 }
383} 357}
384 358
385void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue, 359void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -405,29 +379,18 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
405{ 379{
406 struct ieee80211_local *local = hw_to_local(hw); 380 struct ieee80211_local *local = hw_to_local(hw);
407 381
408 if (queue >= hw->queues) { 382 if (WARN_ON(queue >= hw->queues))
409 if (local->ampdu_ac_queue[queue - hw->queues] < 0) 383 return;
410 return;
411
412 /*
413 * for virtual aggregation queues, we need to refcount the
414 * internal mac80211 disable (multiple times!), keep track of
415 * driver disable _and_ make sure the regular queue is
416 * actually enabled.
417 */
418 if (reason == IEEE80211_QUEUE_STOP_REASON_AGGREGATION)
419 local->amdpu_ac_stop_refcnt[queue - hw->queues]++;
420 else
421 __set_bit(reason, &local->queue_stop_reasons[queue]);
422 384
423 /* now go on to treat the corresponding regular queue */ 385 /*
424 queue = local->ampdu_ac_queue[queue - hw->queues]; 386 * Only stop if it was previously running, this is necessary
425 reason = IEEE80211_QUEUE_STOP_REASON_AGGREGATION; 387 * for correct pending packets handling because there we may
426 } 388 * start (but not wake) the queue and rely on that.
389 */
390 if (!local->queue_stop_reasons[queue])
391 netif_stop_subqueue(local->mdev, queue);
427 392
428 __set_bit(reason, &local->queue_stop_reasons[queue]); 393 __set_bit(reason, &local->queue_stop_reasons[queue]);
429
430 netif_stop_subqueue(local->mdev, queue);
431} 394}
432 395
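Note: with the virtual aggregation queues gone, __ieee80211_stop_queue() and __ieee80211_wake_queue() reduce to a per-queue bitmask of stop reasons: the netdev subqueue is stopped only when the first reason appears, and woken only when the last reason is cleared (with a detour through the pending tasklet if deferred frames are waiting). A small model of just that bitmask refcounting; the enum values are made up, only the stop/wake rule follows the code above.

    #include <stdbool.h>
    #include <stdio.h>

    enum stop_reason { REASON_DRIVER, REASON_PS, REASON_PENDING };

    struct queue {
    	unsigned long stop_reasons;
    	bool hw_running;
    };

    static void stop_queue(struct queue *q, enum stop_reason r)
    {
    	/* Only the "no reasons" -> "some reason" transition stops the hw queue. */
    	if (!q->stop_reasons)
    		q->hw_running = false;
    	q->stop_reasons |= 1UL << r;
    }

    static void wake_queue(struct queue *q, enum stop_reason r)
    {
    	q->stop_reasons &= ~(1UL << r);
    	if (q->stop_reasons)
    		return;		/* someone else still holds the queue stopped */
    	q->hw_running = true;
    }

    int main(void)
    {
    	struct queue q = { 0, true };

    	stop_queue(&q, REASON_DRIVER);
    	stop_queue(&q, REASON_PS);
    	wake_queue(&q, REASON_DRIVER);
    	printf("running=%d (PS still set)\n", q.hw_running);
    	wake_queue(&q, REASON_PS);
    	printf("running=%d\n", q.hw_running);
    	return 0;
    }
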
433void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, 396void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
@@ -473,15 +436,9 @@ EXPORT_SYMBOL(ieee80211_stop_queues);
473int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue) 436int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue)
474{ 437{
475 struct ieee80211_local *local = hw_to_local(hw); 438 struct ieee80211_local *local = hw_to_local(hw);
476 unsigned long flags;
477 439
478 if (queue >= hw->queues) { 440 if (WARN_ON(queue >= hw->queues))
479 spin_lock_irqsave(&local->queue_stop_reason_lock, flags); 441 return true;
480 queue = local->ampdu_ac_queue[queue - hw->queues];
481 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
482 if (queue < 0)
483 return true;
484 }
485 442
486 return __netif_subqueue_stopped(local->mdev, queue); 443 return __netif_subqueue_stopped(local->mdev, queue);
487} 444}
@@ -496,7 +453,7 @@ void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
496 453
497 spin_lock_irqsave(&local->queue_stop_reason_lock, flags); 454 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
498 455
499 for (i = 0; i < hw->queues + hw->ampdu_queues; i++) 456 for (i = 0; i < hw->queues; i++)
500 __ieee80211_wake_queue(hw, i, reason); 457 __ieee80211_wake_queue(hw, i, reason);
501 458
502 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 459 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
@@ -846,16 +803,9 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
846 struct ieee80211_local *local = sdata->local; 803 struct ieee80211_local *local = sdata->local;
847 struct sk_buff *skb; 804 struct sk_buff *skb;
848 struct ieee80211_mgmt *mgmt; 805 struct ieee80211_mgmt *mgmt;
849 const u8 *ie_auth = NULL;
850 int ie_auth_len = 0;
851
852 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
853 ie_auth_len = sdata->u.mgd.ie_auth_len;
854 ie_auth = sdata->u.mgd.ie_auth;
855 }
856 806
857 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 807 skb = dev_alloc_skb(local->hw.extra_tx_headroom +
858 sizeof(*mgmt) + 6 + extra_len + ie_auth_len); 808 sizeof(*mgmt) + 6 + extra_len);
859 if (!skb) { 809 if (!skb) {
860 printk(KERN_DEBUG "%s: failed to allocate buffer for auth " 810 printk(KERN_DEBUG "%s: failed to allocate buffer for auth "
861 "frame\n", sdata->dev->name); 811 "frame\n", sdata->dev->name);
@@ -877,8 +827,6 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
877 mgmt->u.auth.status_code = cpu_to_le16(0); 827 mgmt->u.auth.status_code = cpu_to_le16(0);
878 if (extra) 828 if (extra)
879 memcpy(skb_put(skb, extra_len), extra, extra_len); 829 memcpy(skb_put(skb, extra_len), extra, extra_len);
880 if (ie_auth)
881 memcpy(skb_put(skb, ie_auth_len), ie_auth, ie_auth_len);
882 830
883 ieee80211_tx_skb(sdata, skb, encrypt); 831 ieee80211_tx_skb(sdata, skb, encrypt);
884} 832}
@@ -891,20 +839,11 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
891 struct ieee80211_supported_band *sband; 839 struct ieee80211_supported_band *sband;
892 struct sk_buff *skb; 840 struct sk_buff *skb;
893 struct ieee80211_mgmt *mgmt; 841 struct ieee80211_mgmt *mgmt;
894 u8 *pos, *supp_rates, *esupp_rates = NULL, *extra_preq_ie = NULL; 842 u8 *pos, *supp_rates, *esupp_rates = NULL;
895 int i, extra_preq_ie_len = 0; 843 int i;
896
897 switch (sdata->vif.type) {
898 case NL80211_IFTYPE_STATION:
899 extra_preq_ie_len = sdata->u.mgd.ie_probereq_len;
900 extra_preq_ie = sdata->u.mgd.ie_probereq;
901 break;
902 default:
903 break;
904 }
905 844
906 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200 + 845 skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*mgmt) + 200 +
907 ie_len + extra_preq_ie_len); 846 ie_len);
908 if (!skb) { 847 if (!skb) {
909 printk(KERN_DEBUG "%s: failed to allocate buffer for probe " 848 printk(KERN_DEBUG "%s: failed to allocate buffer for probe "
910 "request\n", sdata->dev->name); 849 "request\n", sdata->dev->name);
@@ -953,9 +892,6 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
953 892
954 if (ie) 893 if (ie)
955 memcpy(skb_put(skb, ie_len), ie, ie_len); 894 memcpy(skb_put(skb, ie_len), ie, ie_len);
956 if (extra_preq_ie)
957 memcpy(skb_put(skb, extra_preq_ie_len), extra_preq_ie,
958 extra_preq_ie_len);
959 895
960 ieee80211_tx_skb(sdata, skb, 0); 896 ieee80211_tx_skb(sdata, skb, 0);
961} 897}
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index 7043ddc7549..ef73105b306 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -329,24 +329,17 @@ static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
329ieee80211_tx_result 329ieee80211_tx_result
330ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx) 330ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx)
331{ 331{
332 int i; 332 struct sk_buff *skb;
333 333
334 ieee80211_tx_set_protected(tx); 334 ieee80211_tx_set_protected(tx);
335 335
336 if (wep_encrypt_skb(tx, tx->skb) < 0) { 336 skb = tx->skb;
337 I802_DEBUG_INC(tx->local->tx_handlers_drop_wep); 337 do {
338 return TX_DROP; 338 if (wep_encrypt_skb(tx, skb) < 0) {
339 } 339 I802_DEBUG_INC(tx->local->tx_handlers_drop_wep);
340 340 return TX_DROP;
341 if (tx->extra_frag) {
342 for (i = 0; i < tx->num_extra_frag; i++) {
343 if (wep_encrypt_skb(tx, tx->extra_frag[i])) {
344 I802_DEBUG_INC(tx->local->
345 tx_handlers_drop_wep);
346 return TX_DROP;
347 }
348 } 341 }
349 } 342 } while ((skb = skb->next));
350 343
351 return TX_CONTINUE; 344 return TX_CONTINUE;
352} 345}
diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c
index 935c63ed3df..deb4ecec122 100644
--- a/net/mac80211/wext.c
+++ b/net/mac80211/wext.c
@@ -129,14 +129,12 @@ static int ieee80211_ioctl_siwgenie(struct net_device *dev,
129 129
130 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 130 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
131 131
132 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME)
133 return -EOPNOTSUPP;
134
135 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 132 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
136 int ret = ieee80211_sta_set_extra_ie(sdata, extra, data->length); 133 int ret = ieee80211_sta_set_extra_ie(sdata, extra, data->length);
137 if (ret) 134 if (ret)
138 return ret; 135 return ret;
139 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL; 136 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_BSSID_SEL;
137 sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME;
140 ieee80211_sta_req_auth(sdata); 138 ieee80211_sta_req_auth(sdata);
141 return 0; 139 return 0;
142 } 140 }
@@ -207,14 +205,6 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev,
207 205
208 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 206 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
209 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 207 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
210 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) {
211 if (len > IEEE80211_MAX_SSID_LEN)
212 return -EINVAL;
213 memcpy(sdata->u.mgd.ssid, ssid, len);
214 sdata->u.mgd.ssid_len = len;
215 return 0;
216 }
217
218 if (data->flags) 208 if (data->flags)
219 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL; 209 sdata->u.mgd.flags &= ~IEEE80211_STA_AUTO_SSID_SEL;
220 else 210 else
@@ -224,6 +214,7 @@ static int ieee80211_ioctl_siwessid(struct net_device *dev,
224 if (ret) 214 if (ret)
225 return ret; 215 return ret;
226 216
217 sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME;
227 ieee80211_sta_req_auth(sdata); 218 ieee80211_sta_req_auth(sdata);
228 return 0; 219 return 0;
229 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) 220 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
@@ -272,11 +263,7 @@ static int ieee80211_ioctl_siwap(struct net_device *dev,
272 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 263 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
273 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 264 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
274 int ret; 265 int ret;
275 if (sdata->flags & IEEE80211_SDATA_USERSPACE_MLME) { 266
276 memcpy(sdata->u.mgd.bssid, (u8 *) &ap_addr->sa_data,
277 ETH_ALEN);
278 return 0;
279 }
280 if (is_zero_ether_addr((u8 *) &ap_addr->sa_data)) 267 if (is_zero_ether_addr((u8 *) &ap_addr->sa_data))
281 sdata->u.mgd.flags |= IEEE80211_STA_AUTO_BSSID_SEL | 268 sdata->u.mgd.flags |= IEEE80211_STA_AUTO_BSSID_SEL |
282 IEEE80211_STA_AUTO_CHANNEL_SEL; 269 IEEE80211_STA_AUTO_CHANNEL_SEL;
@@ -287,6 +274,7 @@ static int ieee80211_ioctl_siwap(struct net_device *dev,
287 ret = ieee80211_sta_set_bssid(sdata, (u8 *) &ap_addr->sa_data); 274 ret = ieee80211_sta_set_bssid(sdata, (u8 *) &ap_addr->sa_data);
288 if (ret) 275 if (ret)
289 return ret; 276 return ret;
277 sdata->u.mgd.flags &= ~IEEE80211_STA_EXT_SME;
290 ieee80211_sta_req_auth(sdata); 278 ieee80211_sta_req_auth(sdata);
291 return 0; 279 return 0;
292 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { 280 } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
@@ -630,7 +618,7 @@ static int ieee80211_ioctl_siwencode(struct net_device *dev,
630 struct ieee80211_sub_if_data *sdata; 618 struct ieee80211_sub_if_data *sdata;
631 int idx, i, alg = ALG_WEP; 619 int idx, i, alg = ALG_WEP;
632 u8 bcaddr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 620 u8 bcaddr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
633 int remove = 0; 621 int remove = 0, ret;
634 622
635 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 623 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
636 624
@@ -656,11 +644,20 @@ static int ieee80211_ioctl_siwencode(struct net_device *dev,
656 return 0; 644 return 0;
657 } 645 }
658 646
659 return ieee80211_set_encryption( 647 ret = ieee80211_set_encryption(
660 sdata, bcaddr, 648 sdata, bcaddr,
661 idx, alg, remove, 649 idx, alg, remove,
662 !sdata->default_key, 650 !sdata->default_key,
663 keybuf, erq->length); 651 keybuf, erq->length);
652
653 if (!ret) {
654 if (remove)
655 sdata->u.mgd.flags &= ~IEEE80211_STA_TKIP_WEP_USED;
656 else
657 sdata->u.mgd.flags |= IEEE80211_STA_TKIP_WEP_USED;
658 }
659
660 return ret;
664} 661}
665 662
666 663
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 9101b48ec2a..4f8bfea278f 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -196,19 +196,13 @@ ieee80211_tx_result
196ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx) 196ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx)
197{ 197{
198 struct sk_buff *skb = tx->skb; 198 struct sk_buff *skb = tx->skb;
199 int i;
200 199
201 ieee80211_tx_set_protected(tx); 200 ieee80211_tx_set_protected(tx);
202 201
203 if (tkip_encrypt_skb(tx, skb) < 0) 202 do {
204 return TX_DROP; 203 if (tkip_encrypt_skb(tx, skb) < 0)
205 204 return TX_DROP;
206 if (tx->extra_frag) { 205 } while ((skb = skb->next));
207 for (i = 0; i < tx->num_extra_frag; i++) {
208 if (tkip_encrypt_skb(tx, tx->extra_frag[i]))
209 return TX_DROP;
210 }
211 }
212 206
213 return TX_CONTINUE; 207 return TX_CONTINUE;
214} 208}
@@ -428,19 +422,13 @@ ieee80211_tx_result
428ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx) 422ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx)
429{ 423{
430 struct sk_buff *skb = tx->skb; 424 struct sk_buff *skb = tx->skb;
431 int i;
432 425
433 ieee80211_tx_set_protected(tx); 426 ieee80211_tx_set_protected(tx);
434 427
435 if (ccmp_encrypt_skb(tx, skb) < 0) 428 do {
436 return TX_DROP; 429 if (ccmp_encrypt_skb(tx, skb) < 0)
437 430 return TX_DROP;
438 if (tx->extra_frag) { 431 } while ((skb = skb->next));
439 for (i = 0; i < tx->num_extra_frag; i++) {
440 if (ccmp_encrypt_skb(tx, tx->extra_frag[i]))
441 return TX_DROP;
442 }
443 }
444 432
445 return TX_CONTINUE; 433 return TX_CONTINUE;
446} 434}