Diffstat (limited to 'net/mac80211/agg-tx.c')
 net/mac80211/agg-tx.c | 300 +++++++++++++++++++++++++++++++-------------------------
 1 file changed, 177 insertions(+), 123 deletions(-)
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index eb9df22418f0..13b7683de5a4 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -149,16 +149,133 @@ void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
 	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
 }
 
+static inline int ieee80211_ac_from_tid(int tid)
+{
+	return ieee802_1d_to_ac[tid & 7];
+}
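
This helper maps a TID onto a hardware queue via mac80211's 802.1d priority table. For reference, a sketch of that table as it appears in net/mac80211/wme.c (reconstructed from memory, not part of this diff):

    /* TIDs 0-7 map onto the four EDCA access categories; e.g. TID 6
     * and TID 7 both land on AC_VO.  The "& 7" above makes the lookup
     * safe for any TID value.
     */
    const int ieee802_1d_to_ac[8] = {
            IEEE80211_AC_BE,        /* TID 0 */
            IEEE80211_AC_BK,        /* TID 1 */
            IEEE80211_AC_BK,        /* TID 2 */
            IEEE80211_AC_BE,        /* TID 3 */
            IEEE80211_AC_VI,        /* TID 4 */
            IEEE80211_AC_VI,        /* TID 5 */
            IEEE80211_AC_VO,        /* TID 6 */
            IEEE80211_AC_VO,        /* TID 7 */
    };
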
+
+/*
+ * When multiple aggregation sessions on multiple stations
+ * are being created/destroyed simultaneously, we need to
+ * refcount the global queue stop caused by that in order
+ * to not get into a situation where one of the aggregation
+ * setup or teardown re-enables queues before the other is
+ * ready to handle that.
+ *
+ * These two functions take care of this issue by keeping
+ * a global "agg_queue_stop" refcount.
+ */
+static void __acquires(agg_queue)
+ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
+{
+	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
+
+	if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
+		ieee80211_stop_queue_by_reason(
+			&sdata->local->hw, queue,
+			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+	__acquire(agg_queue);
+}
+
+static void __releases(agg_queue)
+ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
+{
+	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
+
+	if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
+		ieee80211_wake_queue_by_reason(
+			&sdata->local->hw, queue,
+			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+	__release(agg_queue);
+}
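
Why a per-queue refcount rather than a plain stop/wake pair: two sessions whose TIDs share an access category land on the same hardware queue, and the first teardown to finish must not wake the queue while the other is still splicing packets out. A hypothetical interleaving, using the functions above (TIDs 4 and 5 both map to AC_VI per the 802.1d table):

    ieee80211_stop_queue_agg(sdata, 4);  /* count 0 -> 1: queue stops   */
    ieee80211_stop_queue_agg(sdata, 5);  /* count 1 -> 2: already stopped */
    ieee80211_wake_queue_agg(sdata, 4);  /* count 2 -> 1: stays stopped */
    ieee80211_wake_queue_agg(sdata, 5);  /* count 1 -> 0: queue wakes   */
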
+
+/*
+ * splice packets from the STA's pending to the local pending,
+ * requires a call to ieee80211_agg_splice_finish later
+ */
+static void __acquires(agg_queue)
+ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
+			     struct tid_ampdu_tx *tid_tx, u16 tid)
+{
+	struct ieee80211_local *local = sdata->local;
+	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
+	unsigned long flags;
+
+	ieee80211_stop_queue_agg(sdata, tid);
+
+	if (WARN(!tid_tx,
+		 "TID %d gone but expected when splicing aggregates from the pending queue\n",
+		 tid))
+		return;
+
+	if (!skb_queue_empty(&tid_tx->pending)) {
+		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+		/* copy over remaining packets */
+		skb_queue_splice_tail_init(&tid_tx->pending,
+					   &local->pending[queue]);
+		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+	}
+}
+
+static void __releases(agg_queue)
+ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid)
+{
+	ieee80211_wake_queue_agg(sdata, tid);
+}
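
Note that the __acquires()/__releases() markers do not denote a real lock: "agg_queue" is only a token for sparse's context tracking, so an unbalanced splice_packets/splice_finish call path shows up at static-analysis time. A minimal sketch of the pattern, with hypothetical names:

    /* __acquire()/__release() adjust sparse's context counter so it
     * can warn when enter()/leave() are not called in matched pairs.
     */
    static void __acquires(token) enter(void) { __acquire(token); }
    static void __releases(token) leave(void) { __release(token); }
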
+
+static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid)
+{
+	struct tid_ampdu_tx *tid_tx;
+
+	lockdep_assert_held(&sta->ampdu_mlme.mtx);
+	lockdep_assert_held(&sta->lock);
+
+	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
+
+	/*
+	 * When we get here, the TX path will not be lockless any more wrt.
+	 * aggregation, since the OPERATIONAL bit has long been cleared.
+	 * Thus it will block on getting the lock, if it occurs. So if we
+	 * stop the queue now, we will not get any more packets, and any
+	 * that might be being processed will wait for us here, thereby
+	 * guaranteeing that no packets go to the tid_tx pending queue any
+	 * more.
+	 */
+
+	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
+
+	/* future packets must not find the tid_tx struct any more */
+	ieee80211_assign_tid_tx(sta, tid, NULL);
+
+	ieee80211_agg_splice_finish(sta->sdata, tid);
+
+	kfree_rcu(tid_tx, rcu_head);
+}
+
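The comment above compresses a subtle interlock. A paraphrased sketch of the TX-path side, from memory of net/mac80211/tx.c and not part of this patch: once HT_AGG_STATE_OPERATIONAL is clear, the fast path falls back to taking sta->lock before queueing onto tid_tx->pending, so it serializes against ieee80211_remove_tid_tx() above.

    /* Hedged sketch of the TX-path counterpart (illustration only): */
    spin_lock(&sta->lock);
    tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
    if (tid_tx)
            __skb_queue_tail(&tid_tx->pending, skb); /* can't race teardown */
    spin_unlock(&sta->lock);
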
 int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
-				    enum ieee80211_back_parties initiator,
-				    bool tx)
+				    enum ieee80211_agg_stop_reason reason)
 {
 	struct ieee80211_local *local = sta->local;
 	struct tid_ampdu_tx *tid_tx;
+	enum ieee80211_ampdu_mlme_action action;
 	int ret;
 
 	lockdep_assert_held(&sta->ampdu_mlme.mtx);
 
+	switch (reason) {
+	case AGG_STOP_DECLINED:
+	case AGG_STOP_LOCAL_REQUEST:
+	case AGG_STOP_PEER_REQUEST:
+		action = IEEE80211_AMPDU_TX_STOP_CONT;
+		break;
+	case AGG_STOP_DESTROY_STA:
+		action = IEEE80211_AMPDU_TX_STOP_FLUSH;
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		return -EINVAL;
+	}
+
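For context, a sketch of the reason type this switch consumes, introduced elsewhere in this patch (reconstructed from memory; the authoritative definition is in net/mac80211/ieee80211_i.h):

    enum ieee80211_agg_stop_reason {
            AGG_STOP_DECLINED,      /* peer declined our addBA request */
            AGG_STOP_LOCAL_REQUEST, /* we decided to tear the session down */
            AGG_STOP_PEER_REQUEST,  /* peer sent a delBA */
            AGG_STOP_DESTROY_STA,   /* station entry is being destroyed */
    };

The corresponding IEEE80211_AMPDU_TX_STOP_CONT/_FLUSH/_FLUSH_CONT actions (declared in include/net/mac80211.h) differ in whether the driver flushes pending frames and whether it is still expected to call ieee80211_stop_tx_ba_cb_irqsafe().
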
 	spin_lock_bh(&sta->lock);
 
 	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
@@ -167,10 +284,19 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 		return -ENOENT;
 	}
 
-	/* if we're already stopping ignore any new requests to stop */
+	/*
+	 * if we're already stopping ignore any new requests to stop
+	 * unless we're destroying it in which case notify the driver
+	 */
 	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
 		spin_unlock_bh(&sta->lock);
-		return -EALREADY;
+		if (reason != AGG_STOP_DESTROY_STA)
+			return -EALREADY;
+		ret = drv_ampdu_action(local, sta->sdata,
+				       IEEE80211_AMPDU_TX_STOP_FLUSH_CONT,
+				       &sta->sta, tid, NULL, 0);
+		WARN_ON_ONCE(ret);
+		return 0;
 	}
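
This is the one case where the driver hears about a stop twice: the session is already STOPPING (a TX_STOP_CONT was issued earlier) and now the station is going away, so the driver gets FLUSH_CONT to drop whatever is left. A hypothetical driver handler, sketching the contract as understood here (signature trimmed for brevity, illustration only):

    static int example_ampdu_tx_stop(struct ieee80211_vif *vif,
                                     enum ieee80211_ampdu_mlme_action action,
                                     struct ieee80211_sta *sta, u16 tid)
    {
            switch (action) {
            case IEEE80211_AMPDU_TX_STOP_CONT:
                    /* drain queued frames unaggregated, then ack the stop */
                    ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
                    return 0;
            case IEEE80211_AMPDU_TX_STOP_FLUSH:
            case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
                    /* station is going away: drop frames, no callback */
                    return 0;
            default:
                    return -EOPNOTSUPP;
            }
    }
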
 
 	if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
@@ -212,11 +338,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 	 */
 	synchronize_net();
 
-	tid_tx->stop_initiator = initiator;
-	tid_tx->tx_stop = tx;
+	tid_tx->stop_initiator = reason == AGG_STOP_PEER_REQUEST ?
+					WLAN_BACK_RECIPIENT :
+					WLAN_BACK_INITIATOR;
+	tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST;
 
-	ret = drv_ampdu_action(local, sta->sdata,
-			       IEEE80211_AMPDU_TX_STOP,
+	ret = drv_ampdu_action(local, sta->sdata, action,
 			       &sta->sta, tid, NULL, 0);
 
 	/* HW shall not deny going back to legacy */
@@ -227,7 +354,17 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 	 */
 	}
 
-	return ret;
+	/*
+	 * In the case of AGG_STOP_DESTROY_STA, the driver won't
+	 * necessarily call ieee80211_stop_tx_ba_cb(), so this may
+	 * seem like we can leave the tid_tx data pending forever.
+	 * This is true, in a way, but "forever" is only until the
+	 * station struct is actually destroyed. In the meantime,
+	 * leaving it around ensures that we don't transmit packets
+	 * to the driver on this TID which might confuse it.
+	 */
+
+	return 0;
 }
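
Summarizing what the rewritten function now derives from the single reason argument (all four rows read directly off the hunks above):

    reason                  ampdu action                   stop_initiator       tx_stop
    AGG_STOP_DECLINED       IEEE80211_AMPDU_TX_STOP_CONT   WLAN_BACK_INITIATOR  false
    AGG_STOP_LOCAL_REQUEST  IEEE80211_AMPDU_TX_STOP_CONT   WLAN_BACK_INITIATOR  true
    AGG_STOP_PEER_REQUEST   IEEE80211_AMPDU_TX_STOP_CONT   WLAN_BACK_RECIPIENT  false
    AGG_STOP_DESTROY_STA    IEEE80211_AMPDU_TX_STOP_FLUSH  WLAN_BACK_INITIATOR  false

(If the session was already stopping, AGG_STOP_DESTROY_STA short-circuits to IEEE80211_AMPDU_TX_STOP_FLUSH_CONT instead, as shown earlier.)
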
 
 /*
@@ -253,91 +390,18 @@ static void sta_addba_resp_timer_expired(unsigned long data)
 	    test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
 		rcu_read_unlock();
 		ht_dbg(sta->sdata,
-		       "timer expired on tid %d but we are not (or no longer) expecting addBA response there\n",
-		       tid);
+		       "timer expired on %pM tid %d but we are not (or no longer) expecting addBA response there\n",
+		       sta->sta.addr, tid);
 		return;
 	}
 
-	ht_dbg(sta->sdata, "addBA response timer expired on tid %d\n", tid);
+	ht_dbg(sta->sdata, "addBA response timer expired on %pM tid %d\n",
+	       sta->sta.addr, tid);
 
 	ieee80211_stop_tx_ba_session(&sta->sta, tid);
 	rcu_read_unlock();
 }
 
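A side note on the pattern that recurs through the rest of this patch: %pM is the kernel's vsnprintf extension for printing a MAC address, so every ht_dbg message now identifies which peer station the session belongs to. For illustration, with hypothetical values:

    u8 addr[ETH_ALEN] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
    printk(KERN_DEBUG "station %pM\n", addr); /* "station 02:00:00:aa:bb:cc" */
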
-static inline int ieee80211_ac_from_tid(int tid)
-{
-	return ieee802_1d_to_ac[tid & 7];
-}
-
-/*
- * When multiple aggregation sessions on multiple stations
- * are being created/destroyed simultaneously, we need to
- * refcount the global queue stop caused by that in order
- * to not get into a situation where one of the aggregation
- * setup or teardown re-enables queues before the other is
- * ready to handle that.
- *
- * These two functions take care of this issue by keeping
- * a global "agg_queue_stop" refcount.
- */
-static void __acquires(agg_queue)
-ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
-{
-	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
-
-	if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
-		ieee80211_stop_queue_by_reason(
-			&sdata->local->hw, queue,
-			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
-	__acquire(agg_queue);
-}
-
-static void __releases(agg_queue)
-ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
-{
-	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
-
-	if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
-		ieee80211_wake_queue_by_reason(
-			&sdata->local->hw, queue,
-			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
-	__release(agg_queue);
-}
-
-/*
- * splice packets from the STA's pending to the local pending,
- * requires a call to ieee80211_agg_splice_finish later
- */
-static void __acquires(agg_queue)
-ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
-			     struct tid_ampdu_tx *tid_tx, u16 tid)
-{
-	struct ieee80211_local *local = sdata->local;
-	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
-	unsigned long flags;
-
-	ieee80211_stop_queue_agg(sdata, tid);
-
-	if (WARN(!tid_tx,
-		 "TID %d gone but expected when splicing aggregates from the pending queue\n",
-		 tid))
-		return;
-
-	if (!skb_queue_empty(&tid_tx->pending)) {
-		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
-		/* copy over remaining packets */
-		skb_queue_splice_tail_init(&tid_tx->pending,
-					   &local->pending[queue]);
-		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
-	}
-}
-
-static void __releases(agg_queue)
-ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid)
-{
-	ieee80211_wake_queue_agg(sdata, tid);
-}
-
 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 {
 	struct tid_ampdu_tx *tid_tx;
@@ -369,7 +433,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 			       &sta->sta, tid, &start_seq_num, 0);
 	if (ret) {
 		ht_dbg(sdata,
-		       "BA request denied - HW unavailable for tid %d\n", tid);
+		       "BA request denied - HW unavailable for %pM tid %d\n",
+		       sta->sta.addr, tid);
 		spin_lock_bh(&sta->lock);
 		ieee80211_agg_splice_packets(sdata, tid_tx, tid);
 		ieee80211_assign_tid_tx(sta, tid, NULL);
@@ -382,7 +447,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 
 	/* activate the timer for the recipient's addBA response */
 	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
-	ht_dbg(sdata, "activated addBA response timer on tid %d\n", tid);
+	ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n",
+	       sta->sta.addr, tid);
 
 	spin_lock_bh(&sta->lock);
 	sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
@@ -429,7 +495,8 @@ static void sta_tx_agg_session_timer_expired(unsigned long data)
 
 	rcu_read_unlock();
 
-	ht_dbg(sta->sdata, "tx session timer expired on tid %d\n", (u16)*ptid);
+	ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n",
+	       sta->sta.addr, (u16)*ptid);
 
 	ieee80211_stop_tx_ba_session(&sta->sta, *ptid);
 }
@@ -465,7 +532,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 
 	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
 		ht_dbg(sdata,
-		       "BA sessions blocked - Denying BA session request\n");
+		       "BA sessions blocked - Denying BA session request %pM tid %d\n",
+		       sta->sta.addr, tid);
 		return -EINVAL;
 	}
 
@@ -506,8 +574,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 	    time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
 			HT_AGG_RETRIES_PERIOD)) {
 		ht_dbg(sdata,
-		       "BA request denied - waiting a grace period after %d failed requests on tid %u\n",
-		       sta->ampdu_mlme.addba_req_num[tid], tid);
+		       "BA request denied - waiting a grace period after %d failed requests on %pM tid %u\n",
+		       sta->ampdu_mlme.addba_req_num[tid], sta->sta.addr, tid);
 		ret = -EBUSY;
 		goto err_unlock_sta;
 	}
@@ -516,8 +584,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
 	/* check if the TID is not in aggregation flow already */
 	if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
 		ht_dbg(sdata,
-		       "BA request denied - session is not idle on tid %u\n",
-		       tid);
+		       "BA request denied - session is not idle on %pM tid %u\n",
+		       sta->sta.addr, tid);
 		ret = -EAGAIN;
 		goto err_unlock_sta;
 	}
@@ -572,7 +640,8 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
 
 	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
-	ht_dbg(sta->sdata, "Aggregation is on for tid %d\n", tid);
+	ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n",
+	       sta->sta.addr, tid);
 
 	drv_ampdu_action(local, sta->sdata,
 			 IEEE80211_AMPDU_TX_OPERATIONAL,
@@ -660,14 +729,13 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
 EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
 
 int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
-				   enum ieee80211_back_parties initiator,
-				   bool tx)
+				   enum ieee80211_agg_stop_reason reason)
 {
 	int ret;
 
 	mutex_lock(&sta->ampdu_mlme.mtx);
 
-	ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator, tx);
+	ret = ___ieee80211_stop_tx_ba_session(sta, tid, reason);
 
 	mutex_unlock(&sta->ampdu_mlme.mtx);
 
@@ -743,7 +811,9 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
 	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
 
 	if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
-		ht_dbg(sdata, "unexpected callback to A-MPDU stop\n");
+		ht_dbg(sdata,
+		       "unexpected callback to A-MPDU stop for %pM tid %d\n",
+		       sta->sta.addr, tid);
 		goto unlock_sta;
 	}
 
@@ -751,24 +821,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
 	ieee80211_send_delba(sta->sdata, ra, tid,
 		WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
 
-	/*
-	 * When we get here, the TX path will not be lockless any more wrt.
-	 * aggregation, since the OPERATIONAL bit has long been cleared.
-	 * Thus it will block on getting the lock, if it occurs. So if we
-	 * stop the queue now, we will not get any more packets, and any
-	 * that might be being processed will wait for us here, thereby
-	 * guaranteeing that no packets go to the tid_tx pending queue any
-	 * more.
-	 */
-
-	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
-
-	/* future packets must not find the tid_tx struct any more */
-	ieee80211_assign_tid_tx(sta, tid, NULL);
-
-	ieee80211_agg_splice_finish(sta->sdata, tid);
-
-	kfree_rcu(tid_tx, rcu_head);
+	ieee80211_remove_tid_tx(sta, tid);
 
  unlock_sta:
 	spin_unlock_bh(&sta->lock);
@@ -819,13 +872,15 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 		goto out;
 
 	if (mgmt->u.action.u.addba_resp.dialog_token != tid_tx->dialog_token) {
-		ht_dbg(sta->sdata, "wrong addBA response token, tid %d\n", tid);
+		ht_dbg(sta->sdata, "wrong addBA response token, %pM tid %d\n",
+		       sta->sta.addr, tid);
 		goto out;
 	}
 
 	del_timer_sync(&tid_tx->addba_resp_timer);
 
-	ht_dbg(sta->sdata, "switched off addBA timer for tid %d\n", tid);
+	ht_dbg(sta->sdata, "switched off addBA timer for %pM tid %d\n",
+	       sta->sta.addr, tid);
 
 	/*
 	 * addba_resp_timer may have fired before we got here, and
@@ -835,8 +890,8 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 	if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
 	    test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
 		ht_dbg(sta->sdata,
-		       "got addBA resp for tid %d but we already gave up\n",
-		       tid);
+		       "got addBA resp for %pM tid %d but we already gave up\n",
+		       sta->sta.addr, tid);
 		goto out;
 	}
 
@@ -868,8 +923,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 	}
 
 	} else {
-		___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR,
-						false);
+		___ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_DECLINED);
 	}
 
  out: