-rw-r--r--  include/net/mac80211.h     |    5
-rw-r--r--  net/mac80211/agg-tx.c      |  186
-rw-r--r--  net/mac80211/ieee80211_i.h |   20
-rw-r--r--  net/mac80211/main.c        |    9
-rw-r--r--  net/mac80211/sta_info.c    |   15
-rw-r--r--  net/mac80211/sta_info.h    |    4
-rw-r--r--  net/mac80211/tx.c          |   18
-rw-r--r--  net/mac80211/util.c        |   75
-rw-r--r--  net/mac80211/wme.c         |  161
-rw-r--r--  net/mac80211/wme.h         |    6
10 files changed, 241 insertions(+), 258 deletions(-)
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 88fa3e03e3e9..31fd8bab2173 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1022,11 +1022,6 @@ static inline int ieee80211_num_regular_queues(struct ieee80211_hw *hw)
 	return hw->queues;
 }
 
-static inline int ieee80211_num_queues(struct ieee80211_hw *hw)
-{
-	return hw->queues + hw->ampdu_queues;
-}
-
 static inline struct ieee80211_rate *
 ieee80211_get_tx_rate(const struct ieee80211_hw *hw,
 		      const struct ieee80211_tx_info *c)
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 1232d9f01ca9..0217b68c47ca 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -132,9 +132,24 @@ static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 
 	state = &sta->ampdu_mlme.tid_state_tx[tid];
 
-	if (local->hw.ampdu_queues)
-		ieee80211_stop_queue(&local->hw, sta->tid_to_tx_q[tid]);
+	if (local->hw.ampdu_queues) {
+		if (initiator) {
+			/*
+			 * Stop the AC queue to avoid issues where we send
+			 * unaggregated frames already before the delba.
+			 */
+			ieee80211_stop_queue_by_reason(&local->hw,
+				local->hw.queues + sta->tid_to_tx_q[tid],
+				IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+		}
 
+		/*
+		 * Pretend the driver woke the queue, just in case
+		 * it disabled it before the session was stopped.
+		 */
+		ieee80211_wake_queue(
+			&local->hw, local->hw.queues + sta->tid_to_tx_q[tid]);
+	}
 	*state = HT_AGG_STATE_REQ_STOP_BA_MSK |
 		(initiator << HT_AGG_STATE_INITIATOR_SHIFT);
 
@@ -144,8 +159,6 @@ static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 	/* HW shall not deny going back to legacy */
 	if (WARN_ON(ret)) {
 		*state = HT_AGG_STATE_OPERATIONAL;
-		if (local->hw.ampdu_queues)
-			ieee80211_wake_queue(&local->hw, sta->tid_to_tx_q[tid]);
 	}
 
 	return ret;
@@ -189,14 +202,19 @@ static void sta_addba_resp_timer_expired(unsigned long data)
 	spin_unlock_bh(&sta->lock);
 }
 
+static inline int ieee80211_ac_from_tid(int tid)
+{
+	return ieee802_1d_to_ac[tid & 7];
+}
+
 int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct sta_info *sta;
 	struct ieee80211_sub_if_data *sdata;
-	u16 start_seq_num;
 	u8 *state;
-	int ret = 0;
+	int i, qn = -1, ret = 0;
+	u16 start_seq_num;
 
 	if (WARN_ON(!local->ops->ampdu_action))
 		return -EINVAL;
@@ -209,6 +227,13 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 	       ra, tid);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 
+	if (hw->ampdu_queues && ieee80211_ac_from_tid(tid) == 0) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+		printk(KERN_DEBUG "rejecting on voice AC\n");
+#endif
+		return -EINVAL;
+	}
+
 	rcu_read_lock();
 
 	sta = sta_info_get(local, ra);
@@ -217,7 +242,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 		printk(KERN_DEBUG "Could not find the station\n");
 #endif
 		ret = -ENOENT;
-		goto exit;
+		goto unlock;
 	}
 
 	/*
@@ -230,11 +255,13 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 	    sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
 	    sta->sdata->vif.type != NL80211_IFTYPE_AP) {
 		ret = -EINVAL;
-		goto exit;
+		goto unlock;
 	}
 
 	spin_lock_bh(&sta->lock);
 
+	sdata = sta->sdata;
+
 	/* we have tried too many times, receiver does not want A-MPDU */
 	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
 		ret = -EBUSY;
@@ -252,6 +279,42 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 		goto err_unlock_sta;
 	}
 
+	if (hw->ampdu_queues) {
+		spin_lock(&local->queue_stop_reason_lock);
+		/* reserve a new queue for this session */
+		for (i = 0; i < local->hw.ampdu_queues; i++) {
+			if (local->ampdu_ac_queue[i] < 0) {
+				qn = i;
+				local->ampdu_ac_queue[qn] =
+					ieee80211_ac_from_tid(tid);
+				break;
+			}
+		}
+		spin_unlock(&local->queue_stop_reason_lock);
+
+		if (qn < 0) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+			printk(KERN_DEBUG "BA request denied - "
+			       "queue unavailable for tid %d\n", tid);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+			ret = -ENOSPC;
+			goto err_unlock_sta;
+		}
+
+		/*
+		 * If we successfully allocate the session, we can't have
+		 * anything going on on the queue this TID maps into, so
+		 * stop it for now. This is a "virtual" stop using the same
+		 * mechanism that drivers will use.
+		 *
+		 * XXX: queue up frames for this session in the sta_info
+		 *	struct instead to avoid hitting all other STAs.
+		 */
+		ieee80211_stop_queue_by_reason(
+			&local->hw, hw->queues + qn,
+			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+	}
+
 	/* prepare A-MPDU MLME for Tx aggregation */
 	sta->ampdu_mlme.tid_tx[tid] =
 			kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
@@ -262,8 +325,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 		       tid);
 #endif
 		ret = -ENOMEM;
-		goto err_unlock_sta;
+		goto err_return_queue;
 	}
+
 	/* Tx timer */
 	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
 		sta_addba_resp_timer_expired;
@@ -271,49 +335,25 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 			(unsigned long)&sta->timer_to_tid[tid];
 	init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
 
-	if (hw->ampdu_queues) {
-		/* create a new queue for this aggregation */
-		ret = ieee80211_ht_agg_queue_add(local, sta, tid);
-
-		/* case no queue is available to aggregation
-		 * don't switch to aggregation */
-		if (ret) {
-#ifdef CONFIG_MAC80211_HT_DEBUG
-			printk(KERN_DEBUG "BA request denied - "
-			       "queue unavailable for tid %d\n", tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-			goto err_unlock_queue;
-		}
-	}
-	sdata = sta->sdata;
-
 	/* Ok, the Addba frame hasn't been sent yet, but if the driver calls the
 	 * call back right away, it must see that the flow has begun */
 	*state |= HT_ADDBA_REQUESTED_MSK;
 
-	/* This is slightly racy because the queue isn't stopped */
 	start_seq_num = sta->tid_seq[tid];
 
 	ret = local->ops->ampdu_action(hw, IEEE80211_AMPDU_TX_START,
 				       &sta->sta, tid, &start_seq_num);
 
 	if (ret) {
-		/* No need to requeue the packets in the agg queue, since we
-		 * held the tx lock: no packet could be enqueued to the newly
-		 * allocated queue */
-		if (hw->ampdu_queues)
-			ieee80211_ht_agg_queue_remove(local, sta, tid, 0);
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		printk(KERN_DEBUG "BA request denied - HW unavailable for"
 					" tid %d\n", tid);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 		*state = HT_AGG_STATE_IDLE;
-		goto err_unlock_queue;
+		goto err_free;
 	}
+	sta->tid_to_tx_q[tid] = qn;
 
-	/* Will put all the packets in the new SW queue */
-	if (hw->ampdu_queues)
-		ieee80211_requeue(local, ieee802_1d_to_ac[tid]);
 	spin_unlock_bh(&sta->lock);
 
 	/* send an addBA request */
@@ -322,7 +362,6 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 			sta->ampdu_mlme.dialog_token_allocator;
 	sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
 
-
 	ieee80211_send_addba_request(sta->sdata, ra, tid,
 			 sta->ampdu_mlme.tid_tx[tid]->dialog_token,
 			 sta->ampdu_mlme.tid_tx[tid]->ssn,
@@ -334,15 +373,24 @@ int ieee80211_start_tx_ba_session(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 #ifdef CONFIG_MAC80211_HT_DEBUG
 	printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
 #endif
-	goto exit;
+	goto unlock;
 
-err_unlock_queue:
+ err_free:
 	kfree(sta->ampdu_mlme.tid_tx[tid]);
 	sta->ampdu_mlme.tid_tx[tid] = NULL;
-	ret = -EBUSY;
-err_unlock_sta:
+ err_return_queue:
+	if (qn >= 0) {
+		/* We failed, so start queue again right away. */
+		ieee80211_wake_queue_by_reason(hw, hw->queues + qn,
+			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+		/* give queue back to pool */
+		spin_lock(&local->queue_stop_reason_lock);
+		local->ampdu_ac_queue[qn] = -1;
+		spin_unlock(&local->queue_stop_reason_lock);
+	}
+ err_unlock_sta:
 	spin_unlock_bh(&sta->lock);
-exit:
+ unlock:
 	rcu_read_unlock();
 	return ret;
 }
@@ -375,7 +423,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 	state = &sta->ampdu_mlme.tid_state_tx[tid];
 	spin_lock_bh(&sta->lock);
 
-	if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
+	if (WARN_ON(!(*state & HT_ADDBA_REQUESTED_MSK))) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
 			*state);
@@ -385,7 +433,8 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 		return;
 	}
 
-	WARN_ON_ONCE(*state & HT_ADDBA_DRV_READY_MSK);
+	if (WARN_ON(*state & HT_ADDBA_DRV_READY_MSK))
+		goto out;
 
 	*state |= HT_ADDBA_DRV_READY_MSK;
 
@@ -393,9 +442,18 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u16 tid)
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid);
 #endif
-		if (hw->ampdu_queues)
-			ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
+		if (hw->ampdu_queues) {
+			/*
+			 * Wake up this queue, we stopped it earlier,
+			 * this will in turn wake the entire AC.
+			 */
+			ieee80211_wake_queue_by_reason(hw,
+				hw->queues + sta->tid_to_tx_q[tid],
+				IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+		}
 	}
+
+ out:
 	spin_unlock_bh(&sta->lock);
 	rcu_read_unlock();
 }
@@ -485,7 +543,6 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct sta_info *sta;
 	u8 *state;
-	int agg_queue;
 
 	if (tid >= STA_TID_NUM) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
@@ -527,19 +584,19 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_hw *hw, u8 *ra, u8 tid)
 	ieee80211_send_delba(sta->sdata, ra, tid,
 		WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
 
-	if (hw->ampdu_queues) {
-		agg_queue = sta->tid_to_tx_q[tid];
-		ieee80211_ht_agg_queue_remove(local, sta, tid, 1);
+	spin_lock_bh(&sta->lock);
 
-		/* We just requeued the all the frames that were in the
-		 * removed queue, and since we might miss a softirq we do
-		 * netif_schedule_queue. ieee80211_wake_queue is not used
-		 * here as this queue is not necessarily stopped
+	if (*state & HT_AGG_STATE_INITIATOR_MSK &&
+	    hw->ampdu_queues) {
+		/*
+		 * Wake up this queue, we stopped it earlier,
+		 * this will in turn wake the entire AC.
 		 */
-		netif_schedule_queue(netdev_get_tx_queue(local->mdev,
-							 agg_queue));
+		ieee80211_wake_queue_by_reason(hw,
+			hw->queues + sta->tid_to_tx_q[tid],
+			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
 	}
-	spin_lock_bh(&sta->lock);
+
 	*state = HT_AGG_STATE_IDLE;
 	sta->ampdu_mlme.addba_req_num[tid] = 0;
 	kfree(sta->ampdu_mlme.tid_tx[tid]);
@@ -613,12 +670,21 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
 			== WLAN_STATUS_SUCCESS) {
+		u8 curstate = *state;
+
 		*state |= HT_ADDBA_RECEIVED_MSK;
-		sta->ampdu_mlme.addba_req_num[tid] = 0;
 
-		if (*state == HT_AGG_STATE_OPERATIONAL &&
-		    local->hw.ampdu_queues)
-			ieee80211_wake_queue(hw, sta->tid_to_tx_q[tid]);
+		if (hw->ampdu_queues && *state != curstate &&
+		    *state == HT_AGG_STATE_OPERATIONAL) {
+			/*
+			 * Wake up this queue, we stopped it earlier,
+			 * this will in turn wake the entire AC.
+			 */
+			ieee80211_wake_queue_by_reason(hw,
+				hw->queues + sta->tid_to_tx_q[tid],
+				IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+		}
+		sta->ampdu_mlme.addba_req_num[tid] = 0;
 
 		if (local->ops->ampdu_action) {
 			(void)local->ops->ampdu_action(hw,
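The agg-tx.c changes above replace the old per-TID software queue machinery with a small reservation pool: ieee80211_start_tx_ba_session() grabs a free slot in local->ampdu_ac_queue[], records which AC it serves, and keeps the matching virtual queue stopped until both the driver and the peer have accepted the session. The snippet below is a minimal userspace sketch of that reservation pool, not kernel code; the array size and helper names are illustrative assumptions.

/* Standalone model of the A-MPDU queue reservation done above. */
#include <stdio.h>

#define MAX_AMPDU_QUEUES 8

static signed char ampdu_ac_queue[MAX_AMPDU_QUEUES];

static void init_pool(void)
{
	for (int i = 0; i < MAX_AMPDU_QUEUES; i++)
		ampdu_ac_queue[i] = -1;	/* -1 == slot free */
}

/* Reserve a virtual aggregation queue for the given AC; -1 if none free. */
static int reserve_ampdu_queue(int ac)
{
	for (int i = 0; i < MAX_AMPDU_QUEUES; i++) {
		if (ampdu_ac_queue[i] < 0) {
			ampdu_ac_queue[i] = (signed char)ac;
			return i;
		}
	}
	return -1;	/* corresponds to the -ENOSPC path in the patch */
}

/* Give the slot back to the pool, as the err_return_queue path does. */
static void release_ampdu_queue(int qn)
{
	ampdu_ac_queue[qn] = -1;
}

int main(void)
{
	init_pool();
	int qn = reserve_ampdu_queue(2);	/* e.g. the best-effort AC */
	printf("reserved virtual queue %d\n", qn);
	release_ampdu_queue(qn);
	return 0;
}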
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 2cb743ed9f9c..e2bbd3f11797 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -564,12 +564,10 @@ enum {
 enum queue_stop_reason {
 	IEEE80211_QUEUE_STOP_REASON_DRIVER,
 	IEEE80211_QUEUE_STOP_REASON_PS,
-	IEEE80211_QUEUE_STOP_REASON_CSA
+	IEEE80211_QUEUE_STOP_REASON_CSA,
+	IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
 };
 
-/* maximum number of hardware queues we support. */
-#define QD_MAX_QUEUES (IEEE80211_MAX_AMPDU_QUEUES + IEEE80211_MAX_QUEUES)
-
 struct ieee80211_master_priv {
 	struct ieee80211_local *local;
 };
@@ -582,9 +580,15 @@ struct ieee80211_local {
 
 	const struct ieee80211_ops *ops;
 
-	unsigned long queue_pool[BITS_TO_LONGS(QD_MAX_QUEUES)];
-	unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES];
+	/* AC queue corresponding to each AMPDU queue */
+	s8 ampdu_ac_queue[IEEE80211_MAX_AMPDU_QUEUES];
+	unsigned int amdpu_ac_stop_refcnt[IEEE80211_MAX_AMPDU_QUEUES];
+
+	unsigned long queue_stop_reasons[IEEE80211_MAX_QUEUES +
+					 IEEE80211_MAX_AMPDU_QUEUES];
+	/* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
 	spinlock_t queue_stop_reason_lock;
+
 	struct net_device *mdev; /* wmaster# - "master" 802.11 device */
 	int open_count;
 	int monitors, cooked_mntrs;
@@ -1042,6 +1046,10 @@ void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
 				     enum queue_stop_reason reason);
 void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
 				     enum queue_stop_reason reason);
+void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
+				    enum queue_stop_reason reason);
+void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
+				    enum queue_stop_reason reason);
 
 #ifdef CONFIG_MAC80211_NOINLINE
 #define debug_noinline noinline
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 795f8c4a9fa0..e9181981adcd 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -705,7 +705,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 					const struct ieee80211_ops *ops)
 {
 	struct ieee80211_local *local;
-	int priv_size;
+	int priv_size, i;
 	struct wiphy *wiphy;
 
 	/* Ensure 32-byte alignment of our private data and hw private data.
@@ -779,6 +779,11 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 	setup_timer(&local->dynamic_ps_timer,
 		    ieee80211_dynamic_ps_timer, (unsigned long) local);
 
+	for (i = 0; i < IEEE80211_MAX_AMPDU_QUEUES; i++)
+		local->ampdu_ac_queue[i] = -1;
+	/* using an s8 won't work with more than that */
+	BUILD_BUG_ON(IEEE80211_MAX_AMPDU_QUEUES > 127);
+
 	sta_info_init(local);
 
 	tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
@@ -872,7 +877,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
 
 	mdev = alloc_netdev_mq(sizeof(struct ieee80211_master_priv),
 			       "wmaster%d", ieee80211_master_setup,
-			       ieee80211_num_queues(hw));
+			       hw->queues);
 	if (!mdev)
 		goto fail_mdev_alloc;
 
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 634f65c0130e..4ba3c540fcf3 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -202,6 +202,18 @@ void sta_info_destroy(struct sta_info *sta)
 		/* Make sure timer won't free the tid_rx struct, see below */
 		if (tid_rx)
 			tid_rx->shutdown = true;
+
+		/*
+		 * The stop callback cannot find this station any more, but
+		 * it didn't complete its work -- start the queue if necessary
+		 */
+		if (sta->ampdu_mlme.tid_state_tx[i] & HT_AGG_STATE_INITIATOR_MSK &&
+		    sta->ampdu_mlme.tid_state_tx[i] & HT_AGG_STATE_REQ_STOP_BA_MSK &&
+		    local->hw.ampdu_queues)
+			ieee80211_wake_queue_by_reason(&local->hw,
+				local->hw.queues + sta->tid_to_tx_q[i],
+				IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+
 		spin_unlock_bh(&sta->lock);
 
 		/*
@@ -275,8 +287,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 		 * enable session_timer's data differentiation. refer to
 		 * sta_rx_agg_session_timer_expired for useage */
 		sta->timer_to_tid[i] = i;
-		/* tid to tx queue: initialize according to HW (0 is valid) */
-		sta->tid_to_tx_q[i] = ieee80211_num_queues(&local->hw);
+		sta->tid_to_tx_q[i] = -1;
 		/* rx */
 		sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE;
 		sta->ampdu_mlme.tid_rx[i] = NULL;
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index d9653231992f..a2921f15787b 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -200,7 +200,7 @@ struct sta_ampdu_mlme {
  * @tid_seq: per-TID sequence numbers for sending to this STA
  * @ampdu_mlme: A-MPDU state machine state
  * @timer_to_tid: identity mapping to ID timers
- * @tid_to_tx_q: map tid to tx queue
+ * @tid_to_tx_q: map tid to tx queue (invalid == negative values)
  * @llid: Local link ID
  * @plid: Peer link ID
  * @reason: Cancel reason on PLINK_HOLDING state
@@ -275,7 +275,7 @@ struct sta_info {
 	 */
 	struct sta_ampdu_mlme ampdu_mlme;
 	u8 timer_to_tid[STA_TID_NUM];
-	u8 tid_to_tx_q[STA_TID_NUM];
+	s8 tid_to_tx_q[STA_TID_NUM];
 
 #ifdef CONFIG_MAC80211_MESH
 	/*
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 33926831c648..6aca49897d55 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -876,7 +876,6 @@ ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
 	return TX_CONTINUE;
 }
 
-
 /* actual transmit path */
 
 /*
@@ -1016,12 +1015,20 @@ __ieee80211_tx_prepare(struct ieee80211_tx_data *tx,
 		tx->sta = sta_info_get(local, hdr->addr1);
 
 	if (tx->sta && ieee80211_is_data_qos(hdr->frame_control)) {
+		unsigned long flags;
 		qc = ieee80211_get_qos_ctl(hdr);
 		tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
 
+		spin_lock_irqsave(&tx->sta->lock, flags);
 		state = &tx->sta->ampdu_mlme.tid_state_tx[tid];
-		if (*state == HT_AGG_STATE_OPERATIONAL)
+		if (*state == HT_AGG_STATE_OPERATIONAL) {
 			info->flags |= IEEE80211_TX_CTL_AMPDU;
+			if (local->hw.ampdu_queues)
+				skb_set_queue_mapping(
+					skb, tx->local->hw.queues +
+					     tx->sta->tid_to_tx_q[tid]);
+		}
+		spin_unlock_irqrestore(&tx->sta->lock, flags);
 	}
 
 	if (is_multicast_ether_addr(hdr->addr1)) {
@@ -1085,7 +1092,8 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
 	int ret, i;
 
 	if (skb) {
-		if (netif_subqueue_stopped(local->mdev, skb))
+		if (ieee80211_queue_stopped(&local->hw,
+				skb_get_queue_mapping(skb)))
 			return IEEE80211_TX_PENDING;
 
 		ret = local->ops->tx(local_to_hw(local), skb);
@@ -1101,8 +1109,8 @@ static int __ieee80211_tx(struct ieee80211_local *local, struct sk_buff *skb,
 			info = IEEE80211_SKB_CB(tx->extra_frag[i]);
 			info->flags &= ~(IEEE80211_TX_CTL_CLEAR_PS_FILT |
 					 IEEE80211_TX_CTL_FIRST_FRAGMENT);
-			if (netif_subqueue_stopped(local->mdev,
-						   tx->extra_frag[i]))
+			if (ieee80211_queue_stopped(&local->hw,
+					skb_get_queue_mapping(tx->extra_frag[i])))
 				return IEEE80211_TX_FRAG_AGAIN;
 
 			ret = local->ops->tx(local_to_hw(local),
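The tx.c hunks above move the aggregation check under the STA lock and remap frames of an operational TID onto the virtual queue index hw->queues + tid_to_tx_q[tid]. Below is a minimal standalone model of that remapping; the sizes, names and the AC table placement are illustrative assumptions, not the kernel API.

/* Standalone model of the queue remapping for aggregated TIDs. */
#include <stdio.h>
#include <stdbool.h>

#define NUM_TIDS 16

/* 802.1d priority -> AC mapping as used by mac80211 (AC 0 == voice). */
static const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

static int hw_queues = 4;			/* one real queue per AC */
static signed char tid_to_tx_q[NUM_TIDS];	/* -1 == no session */

static int select_tx_queue(int tid, bool session_operational)
{
	if (session_operational && tid_to_tx_q[tid] >= 0)
		return hw_queues + tid_to_tx_q[tid];	/* virtual agg queue */
	return ieee802_1d_to_ac[tid & 7];		/* plain AC queue */
}

int main(void)
{
	for (int i = 0; i < NUM_TIDS; i++)
		tid_to_tx_q[i] = -1;
	tid_to_tx_q[5] = 1;	/* pretend TID 5 got virtual queue slot 1 */
	printf("TID 5, no session:  queue %d\n", select_tx_queue(5, false));
	printf("TID 5, operational: queue %d\n", select_tx_queue(5, true));
	return 0;
}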
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 73c7d7345abd..92ea1770461b 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -344,15 +344,36 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 
-	/* we don't need to track ampdu queues */
-	if (queue < ieee80211_num_regular_queues(hw)) {
-		__clear_bit(reason, &local->queue_stop_reasons[queue]);
+	if (queue >= hw->queues) {
+		if (local->ampdu_ac_queue[queue - hw->queues] < 0)
+			return;
+
+		/*
+		 * for virtual aggregation queues, we need to refcount the
+		 * internal mac80211 disable (multiple times!), keep track of
+		 * driver disable _and_ make sure the regular queue is
+		 * actually enabled.
+		 */
+		if (reason == IEEE80211_QUEUE_STOP_REASON_AGGREGATION)
+			local->amdpu_ac_stop_refcnt[queue - hw->queues]--;
+		else
+			__clear_bit(reason, &local->queue_stop_reasons[queue]);
 
-		if (local->queue_stop_reasons[queue] != 0)
-			/* someone still has this queue stopped */
-			return;
+		if (local->queue_stop_reasons[queue] ||
+		    local->amdpu_ac_stop_refcnt[queue - hw->queues])
+			return;
+
+		/* now go on to treat the corresponding regular queue */
+		queue = local->ampdu_ac_queue[queue - hw->queues];
+		reason = IEEE80211_QUEUE_STOP_REASON_AGGREGATION;
 	}
 
+	__clear_bit(reason, &local->queue_stop_reasons[queue]);
+
+	if (local->queue_stop_reasons[queue] != 0)
+		/* someone still has this queue stopped */
+		return;
+
 	if (test_bit(queue, local->queues_pending)) {
 		set_bit(queue, local->queues_pending_run);
 		tasklet_schedule(&local->tx_pending_tasklet);
@@ -361,8 +382,8 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
 	}
 }
 
-static void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
-					   enum queue_stop_reason reason)
+void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
+				    enum queue_stop_reason reason)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 	unsigned long flags;
@@ -384,15 +405,33 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 
-	/* we don't need to track ampdu queues */
-	if (queue < ieee80211_num_regular_queues(hw))
-		__set_bit(reason, &local->queue_stop_reasons[queue]);
+	if (queue >= hw->queues) {
+		if (local->ampdu_ac_queue[queue - hw->queues] < 0)
+			return;
+
+		/*
+		 * for virtual aggregation queues, we need to refcount the
+		 * internal mac80211 disable (multiple times!), keep track of
+		 * driver disable _and_ make sure the regular queue is
+		 * actually enabled.
+		 */
+		if (reason == IEEE80211_QUEUE_STOP_REASON_AGGREGATION)
+			local->amdpu_ac_stop_refcnt[queue - hw->queues]++;
+		else
+			__set_bit(reason, &local->queue_stop_reasons[queue]);
+
+		/* now go on to treat the corresponding regular queue */
+		queue = local->ampdu_ac_queue[queue - hw->queues];
+		reason = IEEE80211_QUEUE_STOP_REASON_AGGREGATION;
+	}
+
+	__set_bit(reason, &local->queue_stop_reasons[queue]);
 
 	netif_stop_subqueue(local->mdev, queue);
 }
 
-static void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
-					   enum queue_stop_reason reason)
+void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
+				    enum queue_stop_reason reason)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 	unsigned long flags;
@@ -418,7 +457,7 @@ void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
 
 	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
 
-	for (i = 0; i < ieee80211_num_queues(hw); i++)
+	for (i = 0; i < hw->queues; i++)
 		__ieee80211_stop_queue(hw, i, reason);
 
 	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
@@ -434,6 +473,16 @@ EXPORT_SYMBOL(ieee80211_stop_queues);
 int ieee80211_queue_stopped(struct ieee80211_hw *hw, int queue)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
+	unsigned long flags;
+
+	if (queue >= hw->queues) {
+		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+		queue = local->ampdu_ac_queue[queue - hw->queues];
+		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+		if (queue < 0)
+			return true;
+	}
+
 	return __netif_subqueue_stopped(local->mdev, queue);
 }
 EXPORT_SYMBOL(ieee80211_queue_stopped);
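The util.c hunks above teach the stop/wake helpers about the virtual aggregation queues: stops for the AGGREGATION reason are refcounted per A-MPDU slot and forwarded to the underlying AC queue, and the AC is only woken again once no stop reason remains. The following is a simplified userspace sketch of that bookkeeping, not the mac80211 implementation; names, sizes and the single-reason simplification are assumptions.

/* Simplified model of refcounted stop/wake for virtual aggregation queues. */
#include <stdio.h>

enum stop_reason { REASON_DRIVER, REASON_PS, REASON_CSA, REASON_AGGREGATION };

#define NUM_ACS		4
#define NUM_AMPDU	8

static unsigned long ac_stop_reasons[NUM_ACS];	/* bitmask per real AC queue */
static unsigned int ampdu_stop_refcnt[NUM_AMPDU];
static signed char ampdu_ac_queue[NUM_AMPDU];	/* AC this slot maps to, -1 free */

static void stop_virtual_queue(int slot)
{
	if (ampdu_ac_queue[slot] < 0)
		return;
	ampdu_stop_refcnt[slot]++;
	/* forward the stop to the underlying AC queue */
	ac_stop_reasons[ampdu_ac_queue[slot]] |= 1UL << REASON_AGGREGATION;
}

static void wake_virtual_queue(int slot)
{
	int ac;

	if (ampdu_ac_queue[slot] < 0)
		return;
	if (--ampdu_stop_refcnt[slot])
		return;			/* still stopped internally */
	ac = ampdu_ac_queue[slot];
	ac_stop_reasons[ac] &= ~(1UL << REASON_AGGREGATION);
	if (!ac_stop_reasons[ac])
		printf("AC %d woken\n", ac);
}

int main(void)
{
	for (int i = 0; i < NUM_AMPDU; i++)
		ampdu_ac_queue[i] = -1;
	ampdu_ac_queue[0] = 2;		/* slot 0 reserved for the BE AC */
	stop_virtual_queue(0);
	stop_virtual_queue(0);		/* refcounted: two stops ... */
	wake_virtual_queue(0);		/* ... need two wakes */
	wake_virtual_queue(0);
	return 0;
}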
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index ac71b38f7cb5..093a4ab7f28b 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -114,9 +114,7 @@ u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
 	struct ieee80211_master_priv *mpriv = netdev_priv(dev);
 	struct ieee80211_local *local = mpriv->local;
-	struct ieee80211_hw *hw = &local->hw;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-	struct sta_info *sta;
 	u16 queue;
 	u8 tid;
 
@@ -124,29 +122,11 @@ u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
 	if (unlikely(queue >= local->hw.queues))
 		queue = local->hw.queues - 1;
 
-	if (skb->requeue) {
-		if (!hw->ampdu_queues)
-			return queue;
-
-		rcu_read_lock();
-		sta = sta_info_get(local, hdr->addr1);
-		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
-		if (sta) {
-			int ampdu_queue = sta->tid_to_tx_q[tid];
-
-			if ((ampdu_queue < ieee80211_num_queues(hw)) &&
-			    test_bit(ampdu_queue, local->queue_pool))
-				queue = ampdu_queue;
-		}
-		rcu_read_unlock();
-
-		return queue;
-	}
-
-	/* Now we know the 1d priority, fill in the QoS header if
-	 * there is one.
+	/*
+	 * Now we know the 1d priority, fill in the QoS header if
+	 * there is one (and we haven't done this before).
 	 */
-	if (ieee80211_is_data_qos(hdr->frame_control)) {
+	if (!skb->requeue && ieee80211_is_data_qos(hdr->frame_control)) {
 		u8 *p = ieee80211_get_qos_ctl(hdr);
 		u8 ack_policy = 0;
 		tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
@@ -156,140 +136,7 @@ u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb)
 		/* qos header is 2 bytes, second reserved */
 		*p++ = ack_policy | tid;
 		*p = 0;
-
-		if (!hw->ampdu_queues)
-			return queue;
-
-		rcu_read_lock();
-
-		sta = sta_info_get(local, hdr->addr1);
-		if (sta) {
-			int ampdu_queue = sta->tid_to_tx_q[tid];
-
-			if ((ampdu_queue < ieee80211_num_queues(hw)) &&
-			    test_bit(ampdu_queue, local->queue_pool))
-				queue = ampdu_queue;
-		}
-
-		rcu_read_unlock();
 	}
 
 	return queue;
 }
-
-int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
-			       struct sta_info *sta, u16 tid)
-{
-	int i;
-
-	/* XXX: currently broken due to cb/requeue use */
-	return -EPERM;
-
-	/* prepare the filter and save it for the SW queue
-	 * matching the received HW queue */
-
-	if (!local->hw.ampdu_queues)
-		return -EPERM;
-
-	/* try to get a Qdisc from the pool */
-	for (i = local->hw.queues; i < ieee80211_num_queues(&local->hw); i++)
-		if (!test_and_set_bit(i, local->queue_pool)) {
-			ieee80211_stop_queue(local_to_hw(local), i);
-			sta->tid_to_tx_q[tid] = i;
-
-			/* IF there are already pending packets
-			 * on this tid first we need to drain them
-			 * on the previous queue
-			 * since HT is strict in order */
-#ifdef CONFIG_MAC80211_HT_DEBUG
-			if (net_ratelimit())
-				printk(KERN_DEBUG "allocated aggregation queue"
-					" %d tid %d addr %pM pool=0x%lX\n",
-					i, tid, sta->sta.addr,
-					local->queue_pool[0]);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-			return 0;
-		}
-
-	return -EAGAIN;
-}
-
-/**
- * the caller needs to hold netdev_get_tx_queue(local->mdev, X)->lock
- */
-void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
-				   struct sta_info *sta, u16 tid,
-				   u8 requeue)
-{
-	int agg_queue = sta->tid_to_tx_q[tid];
-	struct ieee80211_hw *hw = &local->hw;
-
-	/* return the qdisc to the pool */
-	clear_bit(agg_queue, local->queue_pool);
-	sta->tid_to_tx_q[tid] = ieee80211_num_queues(hw);
-
-	if (requeue) {
-		ieee80211_requeue(local, agg_queue);
-	} else {
-		struct netdev_queue *txq;
-		spinlock_t *root_lock;
-		struct Qdisc *q;
-
-		txq = netdev_get_tx_queue(local->mdev, agg_queue);
-		q = rcu_dereference(txq->qdisc);
-		root_lock = qdisc_lock(q);
-
-		spin_lock_bh(root_lock);
-		qdisc_reset(q);
-		spin_unlock_bh(root_lock);
-	}
-}
-
-void ieee80211_requeue(struct ieee80211_local *local, int queue)
-{
-	struct netdev_queue *txq = netdev_get_tx_queue(local->mdev, queue);
-	struct sk_buff_head list;
-	spinlock_t *root_lock;
-	struct Qdisc *qdisc;
-	u32 len;
-
-	rcu_read_lock_bh();
-
-	qdisc = rcu_dereference(txq->qdisc);
-	if (!qdisc || !qdisc->dequeue)
-		goto out_unlock;
-
-	skb_queue_head_init(&list);
-
-	root_lock = qdisc_root_lock(qdisc);
-	spin_lock(root_lock);
-	for (len = qdisc->q.qlen; len > 0; len--) {
-		struct sk_buff *skb = qdisc->dequeue(qdisc);
-
-		if (skb)
-			__skb_queue_tail(&list, skb);
-	}
-	spin_unlock(root_lock);
-
-	for (len = list.qlen; len > 0; len--) {
-		struct sk_buff *skb = __skb_dequeue(&list);
-		u16 new_queue;
-
-		BUG_ON(!skb);
-		new_queue = ieee80211_select_queue(local->mdev, skb);
-		skb_set_queue_mapping(skb, new_queue);
-
-		txq = netdev_get_tx_queue(local->mdev, new_queue);
-
-
-		qdisc = rcu_dereference(txq->qdisc);
-		root_lock = qdisc_root_lock(qdisc);
-
-		spin_lock(root_lock);
-		qdisc_enqueue_root(skb, qdisc);
-		spin_unlock(root_lock);
-	}
-
-out_unlock:
-	rcu_read_unlock_bh();
-}
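With the requeue-based aggregation path removed above, ieee80211_select_queue() shrinks to classifying the frame, clamping the result to the number of hardware queues and filling the QoS control field. A rough standalone illustration of that remaining logic follows; the types and the two-byte "QoS header" buffer are mocked up and are not the kernel function.

/* Standalone illustration of the slimmed-down queue selection. */
#include <stdio.h>
#include <stdint.h>

#define QOS_CTL_TAG1D_MASK 0x07

static const int ieee802_1d_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

static int select_queue(int priority, int hw_queues, uint8_t qos_ctl[2])
{
	int queue = ieee802_1d_to_ac[priority & QOS_CTL_TAG1D_MASK];

	if (queue >= hw_queues)		/* defensive clamp, as in the patch */
		queue = hw_queues - 1;

	/* qos header is 2 bytes, second reserved */
	qos_ctl[0] = (uint8_t)(priority & QOS_CTL_TAG1D_MASK);
	qos_ctl[1] = 0;

	return queue;
}

int main(void)
{
	uint8_t qos[2];
	int q = select_queue(6, 4, qos);	/* priority 6 -> voice AC */

	printf("queue %d, tid %u\n", q, qos[0]);
	return 0;
}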
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index bc62f28a4d3d..7520d2e014dc 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -21,11 +21,5 @@
 extern const int ieee802_1d_to_ac[8];
 
 u16 ieee80211_select_queue(struct net_device *dev, struct sk_buff *skb);
-int ieee80211_ht_agg_queue_add(struct ieee80211_local *local,
-			       struct sta_info *sta, u16 tid);
-void ieee80211_ht_agg_queue_remove(struct ieee80211_local *local,
-				   struct sta_info *sta, u16 tid,
-				   u8 requeue);
-void ieee80211_requeue(struct ieee80211_local *local, int queue);
 
 #endif /* _WME_H */