path: root/net/mac80211/agg-tx.c
author	Johannes Berg <johannes.berg@intel.com>	2010-06-10 04:21:39 -0400
committer	John W. Linville <linville@tuxdriver.com>	2010-06-14 15:39:27 -0400
commit	a622ab72b4dcfdf53e24b16e9530cb876979a00c (patch)
tree	170d2ccf3a594f3675b5fa58378319031054e806 /net/mac80211/agg-tx.c
parent	a87f736d942c86255e3088c606f0e3eab6bbf784 (diff)
mac80211: use RCU for TX aggregation
Currently we allocate some memory for each TX aggregation session and
additionally keep a state bitmap indicating the state it is in. By using
RCU to protect the pointer, moving the state into the structure and some
locking trickery we can avoid locking when the TX agg session is fully
operational.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
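[Editor's note] The payoff is on the transmit fast path (in net/mac80211/tx.c, which this diff does not touch): once a session is fully operational, the TX code needs only rcu_read_lock() and a test_bit() on tid_tx->state instead of taking sta->lock for every frame. A minimal sketch of that read side, using the field and flag names introduced below — the surrounding logic is illustrative, not the literal mac80211 TX handler, and the real code rechecks the state after taking the lock:

	/* Sketch only: illustrates the RCU read side this patch enables. */
	struct tid_ampdu_tx *tid_tx;

	rcu_read_lock();
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (!tid_tx) {
		/* no aggregation session on this TID: plain TX path */
	} else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
		/* fully operational: hand the frame to the driver
		 * without taking sta->lock at all */
	} else {
		/* setup/teardown in progress: fall back to the lock and
		 * park the frame on tid_tx->pending for the splice
		 * helpers to move later (the real code re-reads the
		 * pointer and state under the lock first) */
		spin_lock(&sta->lock);
		__skb_queue_tail(&tid_tx->pending, skb);
		spin_unlock(&sta->lock);
	}
	rcu_read_unlock();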
Diffstat (limited to 'net/mac80211/agg-tx.c')
-rw-r--r--	net/mac80211/agg-tx.c	229
1 file changed, 117 insertions(+), 112 deletions(-)
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index c7b7ac40316a..7d8656d51c6b 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -125,25 +125,42 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
 	ieee80211_tx_skb(sdata, skb);
 }
 
-int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
-				    enum ieee80211_back_parties initiator)
+static void kfree_tid_tx(struct rcu_head *rcu_head)
+{
+	struct tid_ampdu_tx *tid_tx =
+		container_of(rcu_head, struct tid_ampdu_tx, rcu_head);
+
+	kfree(tid_tx);
+}
+
+static int ___ieee80211_stop_tx_ba_session(
+				struct sta_info *sta, u16 tid,
+				enum ieee80211_back_parties initiator)
 {
 	struct ieee80211_local *local = sta->local;
+	struct tid_ampdu_tx *tid_tx = sta->ampdu_mlme.tid_tx[tid];
 	int ret;
-	u8 *state;
+
+	lockdep_assert_held(&sta->lock);
+
+	if (WARN_ON(!tid_tx))
+		return -ENOENT;
 
 #ifdef CONFIG_MAC80211_HT_DEBUG
 	printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
 	       sta->sta.addr, tid);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
 
-	state = &sta->ampdu_mlme.tid_state_tx[tid];
+	set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
 
-	if (*state == HT_AGG_STATE_OPERATIONAL)
-		sta->ampdu_mlme.addba_req_num[tid] = 0;
+	/*
+	 * After this packets are no longer handed right through
+	 * to the driver but are put onto tid_tx->pending instead,
+	 * with locking to ensure proper access.
+	 */
+	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
 
-	*state = HT_AGG_STATE_REQ_STOP_BA_MSK |
-		(initiator << HT_AGG_STATE_INITIATOR_SHIFT);
+	tid_tx->stop_initiator = initiator;
 
 	ret = drv_ampdu_action(local, sta->sdata,
 			       IEEE80211_AMPDU_TX_STOP,
@@ -174,15 +191,13 @@ static void sta_addba_resp_timer_expired(unsigned long data)
 	u16 tid = *(u8 *)data;
 	struct sta_info *sta = container_of((void *)data,
 					    struct sta_info, timer_to_tid[tid]);
-	u8 *state;
-
-	state = &sta->ampdu_mlme.tid_state_tx[tid];
+	struct tid_ampdu_tx *tid_tx;
 
 	/* check if the TID waits for addBA response */
 	spin_lock_bh(&sta->lock);
-	if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK |
-		       HT_AGG_STATE_REQ_STOP_BA_MSK)) !=
-						HT_ADDBA_REQUESTED_MSK) {
+	tid_tx = sta->ampdu_mlme.tid_tx[tid];
+	if (!tid_tx ||
+	    test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
 		spin_unlock_bh(&sta->lock);
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		printk(KERN_DEBUG "timer expired on tid %d but we are not "
@@ -210,7 +225,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
 	struct ieee80211_sub_if_data *sdata = sta->sdata;
 	struct ieee80211_local *local = sdata->local;
-	u8 *state;
+	struct tid_ampdu_tx *tid_tx;
 	int ret = 0;
 	u16 start_seq_num;
 
@@ -256,9 +271,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 		goto err_unlock_sta;
 	}
 
-	state = &sta->ampdu_mlme.tid_state_tx[tid];
+	tid_tx = sta->ampdu_mlme.tid_tx[tid];
 	/* check if the TID is not in aggregation flow already */
-	if (*state != HT_AGG_STATE_IDLE) {
+	if (tid_tx) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		printk(KERN_DEBUG "BA request denied - session is not "
 		       "idle on tid %u\n", tid);
@@ -279,9 +294,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 		IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
 
 	/* prepare A-MPDU MLME for Tx aggregation */
-	sta->ampdu_mlme.tid_tx[tid] =
-			kmalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
-	if (!sta->ampdu_mlme.tid_tx[tid]) {
+	tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
+	if (!tid_tx) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		if (net_ratelimit())
 			printk(KERN_ERR "allocate tx mlme to tid %d failed\n",
@@ -291,33 +305,27 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 		goto err_wake_queue;
 	}
 
-	skb_queue_head_init(&sta->ampdu_mlme.tid_tx[tid]->pending);
+	skb_queue_head_init(&tid_tx->pending);
 
 	/* Tx timer */
-	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.function =
-			sta_addba_resp_timer_expired;
-	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.data =
-			(unsigned long)&sta->timer_to_tid[tid];
-	init_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
-
-	/* Ok, the Addba frame hasn't been sent yet, but if the driver calls the
-	 * call back right away, it must see that the flow has begun */
-	*state |= HT_ADDBA_REQUESTED_MSK;
+	tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
+	tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
+	init_timer(&tid_tx->addba_resp_timer);
 
 	start_seq_num = sta->tid_seq[tid] >> 4;
 
 	ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
 			       pubsta, tid, &start_seq_num);
-
 	if (ret) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		printk(KERN_DEBUG "BA request denied - HW unavailable for"
 		       " tid %d\n", tid);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
-		*state = HT_AGG_STATE_IDLE;
 		goto err_free;
 	}
 
+	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
+
 	/* Driver vetoed or OKed, but we can take packets again now */
 	ieee80211_wake_queue_by_reason(
 		&local->hw, ieee80211_ac_from_tid(tid),
@@ -325,32 +333,30 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 
 	spin_unlock(&local->ampdu_lock);
 
+	/* activate the timer for the recipient's addBA response */
+	tid_tx->addba_resp_timer.expires = jiffies + ADDBA_RESP_INTERVAL;
+	add_timer(&tid_tx->addba_resp_timer);
+#ifdef CONFIG_MAC80211_HT_DEBUG
+	printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
+#endif
+
 	/* prepare tid data */
 	sta->ampdu_mlme.dialog_token_allocator++;
-	sta->ampdu_mlme.tid_tx[tid]->dialog_token =
-			sta->ampdu_mlme.dialog_token_allocator;
-	sta->ampdu_mlme.tid_tx[tid]->ssn = start_seq_num;
+	tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;
+	tid_tx->ssn = start_seq_num;
+
+	sta->ampdu_mlme.addba_req_num[tid]++;
 
 	spin_unlock_bh(&sta->lock);
 
 	/* send AddBA request */
 	ieee80211_send_addba_request(sdata, pubsta->addr, tid,
-				     sta->ampdu_mlme.tid_tx[tid]->dialog_token,
-				     sta->ampdu_mlme.tid_tx[tid]->ssn,
+				     tid_tx->dialog_token, tid_tx->ssn,
 				     0x40, 5000);
-	sta->ampdu_mlme.addba_req_num[tid]++;
-	/* activate the timer for the recipient's addBA response */
-	sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer.expires =
-		jiffies + ADDBA_RESP_INTERVAL;
-	add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
-#ifdef CONFIG_MAC80211_HT_DEBUG
-	printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
-#endif
 	return 0;
 
  err_free:
-	kfree(sta->ampdu_mlme.tid_tx[tid]);
-	sta->ampdu_mlme.tid_tx[tid] = NULL;
+	kfree(tid_tx);
  err_wake_queue:
 	ieee80211_wake_queue_by_reason(
 		&local->hw, ieee80211_ac_from_tid(tid),
@@ -368,7 +374,8 @@ EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
  * local->ampdu_lock across both calls.
  */
 static void ieee80211_agg_splice_packets(struct ieee80211_local *local,
-					 struct sta_info *sta, u16 tid)
+					 struct tid_ampdu_tx *tid_tx,
+					 u16 tid)
 {
 	unsigned long flags;
 	u16 queue = ieee80211_ac_from_tid(tid);
@@ -377,31 +384,23 @@ static void ieee80211_agg_splice_packets(struct ieee80211_local *local,
 		&local->hw, queue,
 		IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
 
-	if (!(sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK))
-		return;
-
-	if (WARN(!sta->ampdu_mlme.tid_tx[tid],
-		 "TID %d gone but expected when splicing aggregates from"
-		 "the pending queue\n", tid))
+	if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
+		 " from the pending queue\n", tid))
 		return;
 
-	if (!skb_queue_empty(&sta->ampdu_mlme.tid_tx[tid]->pending)) {
+	if (!skb_queue_empty(&tid_tx->pending)) {
 		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
 		/* copy over remaining packets */
-		skb_queue_splice_tail_init(
-			&sta->ampdu_mlme.tid_tx[tid]->pending,
-			&local->pending[queue]);
+		skb_queue_splice_tail_init(&tid_tx->pending,
+					   &local->pending[queue]);
 		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
 	}
 }
 
-static void ieee80211_agg_splice_finish(struct ieee80211_local *local,
-					struct sta_info *sta, u16 tid)
+static void ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
 {
-	u16 queue = ieee80211_ac_from_tid(tid);
-
 	ieee80211_wake_queue_by_reason(
-		&local->hw, queue,
+		&local->hw, ieee80211_ac_from_tid(tid),
 		IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
 }
 
@@ -409,19 +408,21 @@ static void ieee80211_agg_splice_finish(struct ieee80211_local *local,
 static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
 					 struct sta_info *sta, u16 tid)
 {
+	lockdep_assert_held(&sta->lock);
+
 #ifdef CONFIG_MAC80211_HT_DEBUG
 	printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid);
 #endif
 
 	spin_lock(&local->ampdu_lock);
-	ieee80211_agg_splice_packets(local, sta, tid);
+	ieee80211_agg_splice_packets(local, sta->ampdu_mlme.tid_tx[tid], tid);
 	/*
-	 * NB: we rely on sta->lock being taken in the TX
-	 * processing here when adding to the pending queue,
-	 * otherwise we could only change the state of the
-	 * session to OPERATIONAL _here_.
+	 * Now mark as operational. This will be visible
+	 * in the TX path, and lets it go lock-free in
+	 * the common case.
 	 */
-	ieee80211_agg_splice_finish(local, sta, tid);
+	set_bit(HT_AGG_STATE_OPERATIONAL, &sta->ampdu_mlme.tid_tx[tid]->state);
+	ieee80211_agg_splice_finish(local, tid);
 	spin_unlock(&local->ampdu_lock);
 
 	drv_ampdu_action(local, sta->sdata,
@@ -434,7 +435,7 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
-	u8 *state;
+	struct tid_ampdu_tx *tid_tx;
 
 	trace_api_start_tx_ba_cb(sdata, ra, tid);
 
@@ -456,25 +457,22 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
 		return;
 	}
 
-	state = &sta->ampdu_mlme.tid_state_tx[tid];
 	spin_lock_bh(&sta->lock);
+	tid_tx = sta->ampdu_mlme.tid_tx[tid];
 
-	if (WARN_ON(!(*state & HT_ADDBA_REQUESTED_MSK))) {
+	if (WARN_ON(!tid_tx)) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
-		printk(KERN_DEBUG "addBA was not requested yet, state is %d\n",
-		       *state);
+		printk(KERN_DEBUG "addBA was not requested!\n");
 #endif
 		spin_unlock_bh(&sta->lock);
 		rcu_read_unlock();
 		return;
 	}
 
-	if (WARN_ON(*state & HT_ADDBA_DRV_READY_MSK))
+	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
 		goto out;
 
-	*state |= HT_ADDBA_DRV_READY_MSK;
-
-	if (*state == HT_AGG_STATE_OPERATIONAL)
+	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
 		ieee80211_agg_tx_operational(local, sta, tid);
 
  out:
@@ -512,14 +510,14 @@ EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
 int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 				   enum ieee80211_back_parties initiator)
 {
-	u8 *state;
+	struct tid_ampdu_tx *tid_tx;
 	int ret;
 
-	/* check if the TID is in aggregation */
-	state = &sta->ampdu_mlme.tid_state_tx[tid];
 	spin_lock_bh(&sta->lock);
+	tid_tx = sta->ampdu_mlme.tid_tx[tid];
 
-	if (*state != HT_AGG_STATE_OPERATIONAL) {
+	/* check if the TID is in aggregation */
+	if (!tid_tx || !test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
 		ret = -ENOENT;
 		goto unlock;
 	}
@@ -554,7 +552,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta;
-	u8 *state;
+	struct tid_ampdu_tx *tid_tx;
 
 	trace_api_stop_tx_ba_cb(sdata, ra, tid);
 
@@ -580,39 +578,45 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
 		rcu_read_unlock();
 		return;
 	}
-	state = &sta->ampdu_mlme.tid_state_tx[tid];
 
-	/* NOTE: no need to use sta->lock in this state check, as
-	 * ieee80211_stop_tx_ba_session will let only one stop call to
-	 * pass through per sta/tid
-	 */
-	if ((*state & HT_AGG_STATE_REQ_STOP_BA_MSK) == 0) {
+	spin_lock_bh(&sta->lock);
+	tid_tx = sta->ampdu_mlme.tid_tx[tid];
+
+	if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		printk(KERN_DEBUG "unexpected callback to A-MPDU stop\n");
 #endif
+		spin_unlock_bh(&sta->lock);
 		rcu_read_unlock();
 		return;
 	}
 
-	if (*state & HT_AGG_STATE_INITIATOR_MSK)
+	if (tid_tx->stop_initiator == WLAN_BACK_INITIATOR)
 		ieee80211_send_delba(sta->sdata, ra, tid,
 			WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
 
-	spin_lock_bh(&sta->lock);
-	spin_lock(&local->ampdu_lock);
+	/*
+	 * When we get here, the TX path will not be lockless any more wrt.
+	 * aggregation, since the OPERATIONAL bit has long been cleared.
+	 * Thus it will block on getting the lock, if it occurs. So if we
+	 * stop the queue now, we will not get any more packets, and any
+	 * that might be being processed will wait for us here, thereby
+	 * guaranteeing that no packets go to the tid_tx pending queue any
+	 * more.
+	 */
 
-	ieee80211_agg_splice_packets(local, sta, tid);
+	spin_lock(&local->ampdu_lock);
+	ieee80211_agg_splice_packets(local, tid_tx, tid);
 
-	*state = HT_AGG_STATE_IDLE;
-	/* from now on packets are no longer put onto sta->pending */
-	kfree(sta->ampdu_mlme.tid_tx[tid]);
-	sta->ampdu_mlme.tid_tx[tid] = NULL;
+	/* future packets must not find the tid_tx struct any more */
+	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);
 
-	ieee80211_agg_splice_finish(local, sta, tid);
+	ieee80211_agg_splice_finish(local, tid);
 
+	call_rcu(&tid_tx->rcu_head, kfree_tid_tx);
 	spin_unlock(&local->ampdu_lock);
-	spin_unlock_bh(&sta->lock);
 
+	spin_unlock_bh(&sta->lock);
 	rcu_read_unlock();
 }
 EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb);
@@ -649,40 +653,41 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 				  struct ieee80211_mgmt *mgmt,
 				  size_t len)
 {
+	struct tid_ampdu_tx *tid_tx;
 	u16 capab, tid;
-	u8 *state;
 
 	capab = le16_to_cpu(mgmt->u.action.u.addba_resp.capab);
 	tid = (capab & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
 
-	state = &sta->ampdu_mlme.tid_state_tx[tid];
-
 	spin_lock_bh(&sta->lock);
 
-	if (!(*state & HT_ADDBA_REQUESTED_MSK))
+	tid_tx = sta->ampdu_mlme.tid_tx[tid];
+
+	if (!tid_tx)
 		goto out;
 
-	if (mgmt->u.action.u.addba_resp.dialog_token !=
-			sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
+	if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
 		printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+#endif
 		goto out;
 	}
 
-	del_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
+	del_timer(&tid_tx->addba_resp_timer);
 
 #ifdef CONFIG_MAC80211_HT_DEBUG
 	printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
+#endif
 
 	if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
 			== WLAN_STATUS_SUCCESS) {
-		u8 curstate = *state;
-
-		*state |= HT_ADDBA_RECEIVED_MSK;
+		if (test_and_set_bit(HT_AGG_STATE_RESPONSE_RECEIVED,
+				     &tid_tx->state)) {
+			/* ignore duplicate response */
+			goto out;
+		}
 
-		if (*state != curstate && *state == HT_AGG_STATE_OPERATIONAL)
+		if (test_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))
 			ieee80211_agg_tx_operational(local, sta, tid);
 
 		sta->ampdu_mlme.addba_req_num[tid] = 0;
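
[Editor's note] The unpublish/free ordering in ieee80211_stop_tx_ba_cb() above is the standard RCU teardown sequence. A condensed restatement — the calls are taken verbatim from this diff, the comments are editorial:

	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], NULL);	/* unpublish: new readers see no session */
	ieee80211_agg_splice_finish(local, tid);		/* re-enable the stopped queue */
	call_rcu(&tid_tx->rcu_head, kfree_tid_tx);		/* free only after a grace period, since a
								 * TX-path reader inside rcu_read_lock() may
								 * still hold the old tid_tx pointer; a plain
								 * kfree() here could leave it dangling */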