path: root/net/mac80211/agg-tx.c
Diffstat (limited to 'net/mac80211/agg-tx.c')
-rw-r--r--   net/mac80211/agg-tx.c   |   253
 1 file changed, 147 insertions, 106 deletions
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index eb9df22418f0..2f0ccbc5f13e 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -149,16 +149,133 @@ void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
 	rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
 }
 
+static inline int ieee80211_ac_from_tid(int tid)
+{
+	return ieee802_1d_to_ac[tid & 7];
+}
+
+/*
+ * When multiple aggregation sessions on multiple stations
+ * are being created/destroyed simultaneously, we need to
+ * refcount the global queue stop caused by that in order
+ * to not get into a situation where one of the aggregation
+ * setup or teardown re-enables queues before the other is
+ * ready to handle that.
+ *
+ * These two functions take care of this issue by keeping
+ * a global "agg_queue_stop" refcount.
+ */
+static void __acquires(agg_queue)
+ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
+{
+	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
+
+	if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
+		ieee80211_stop_queue_by_reason(
+			&sdata->local->hw, queue,
+			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+	__acquire(agg_queue);
+}
+
+static void __releases(agg_queue)
+ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
+{
+	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
+
+	if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
+		ieee80211_wake_queue_by_reason(
+			&sdata->local->hw, queue,
+			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+	__release(agg_queue);
+}
+
+/*
+ * splice packets from the STA's pending to the local pending,
+ * requires a call to ieee80211_agg_splice_finish later
+ */
+static void __acquires(agg_queue)
+ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
+			     struct tid_ampdu_tx *tid_tx, u16 tid)
+{
+	struct ieee80211_local *local = sdata->local;
+	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
+	unsigned long flags;
+
+	ieee80211_stop_queue_agg(sdata, tid);
+
+	if (WARN(!tid_tx,
+		 "TID %d gone but expected when splicing aggregates from the pending queue\n",
+		 tid))
+		return;
+
+	if (!skb_queue_empty(&tid_tx->pending)) {
+		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+		/* copy over remaining packets */
+		skb_queue_splice_tail_init(&tid_tx->pending,
+					   &local->pending[queue]);
+		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+	}
+}
+
+static void __releases(agg_queue)
+ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid)
+{
+	ieee80211_wake_queue_agg(sdata, tid);
+}
+
+static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid)
+{
+	struct tid_ampdu_tx *tid_tx;
+
+	lockdep_assert_held(&sta->ampdu_mlme.mtx);
+	lockdep_assert_held(&sta->lock);
+
+	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
+
+	/*
+	 * When we get here, the TX path will not be lockless any more wrt.
+	 * aggregation, since the OPERATIONAL bit has long been cleared.
+	 * Thus it will block on getting the lock, if it occurs. So if we
+	 * stop the queue now, we will not get any more packets, and any
+	 * that might be being processed will wait for us here, thereby
+	 * guaranteeing that no packets go to the tid_tx pending queue any
+	 * more.
+	 */
+
+	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
+
+	/* future packets must not find the tid_tx struct any more */
+	ieee80211_assign_tid_tx(sta, tid, NULL);
+
+	ieee80211_agg_splice_finish(sta->sdata, tid);
+
+	kfree_rcu(tid_tx, rcu_head);
+}
+
 int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
-				    enum ieee80211_back_parties initiator,
-				    bool tx)
+				    enum ieee80211_agg_stop_reason reason)
 {
 	struct ieee80211_local *local = sta->local;
 	struct tid_ampdu_tx *tid_tx;
+	enum ieee80211_ampdu_mlme_action action;
 	int ret;
 
 	lockdep_assert_held(&sta->ampdu_mlme.mtx);
 
+	switch (reason) {
+	case AGG_STOP_DECLINED:
+	case AGG_STOP_LOCAL_REQUEST:
+	case AGG_STOP_PEER_REQUEST:
+		action = IEEE80211_AMPDU_TX_STOP_CONT;
+		break;
+	case AGG_STOP_DESTROY_STA:
+		action = IEEE80211_AMPDU_TX_STOP_FLUSH;
+		break;
+	default:
+		WARN_ON_ONCE(1);
+		return -EINVAL;
+	}
+
 	spin_lock_bh(&sta->lock);
 
 	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
@@ -167,10 +284,19 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 		return -ENOENT;
 	}
 
-	/* if we're already stopping ignore any new requests to stop */
+	/*
+	 * if we're already stopping ignore any new requests to stop
+	 * unless we're destroying it in which case notify the driver
+	 */
 	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
 		spin_unlock_bh(&sta->lock);
-		return -EALREADY;
+		if (reason != AGG_STOP_DESTROY_STA)
+			return -EALREADY;
+		ret = drv_ampdu_action(local, sta->sdata,
+				       IEEE80211_AMPDU_TX_STOP_FLUSH_CONT,
+				       &sta->sta, tid, NULL, 0);
+		WARN_ON_ONCE(ret);
+		goto remove_tid_tx;
 	}
 
 	if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
@@ -212,11 +338,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 	 */
 	synchronize_net();
 
-	tid_tx->stop_initiator = initiator;
-	tid_tx->tx_stop = tx;
+	tid_tx->stop_initiator = reason == AGG_STOP_PEER_REQUEST ?
+					WLAN_BACK_RECIPIENT :
+					WLAN_BACK_INITIATOR;
+	tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST;
 
-	ret = drv_ampdu_action(local, sta->sdata,
-			       IEEE80211_AMPDU_TX_STOP,
+	ret = drv_ampdu_action(local, sta->sdata, action,
 			       &sta->sta, tid, NULL, 0);
 
 	/* HW shall not deny going back to legacy */
@@ -227,7 +354,14 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 	 */
 	}
 
-	return ret;
+	if (reason == AGG_STOP_DESTROY_STA) {
+ remove_tid_tx:
+		spin_lock_bh(&sta->lock);
+		ieee80211_remove_tid_tx(sta, tid);
+		spin_unlock_bh(&sta->lock);
+	}
+
+	return 0;
 }
 
 /*
@@ -264,80 +398,6 @@ static void sta_addba_resp_timer_expired(unsigned long data)
 	rcu_read_unlock();
 }
 
-static inline int ieee80211_ac_from_tid(int tid)
-{
-	return ieee802_1d_to_ac[tid & 7];
-}
-
-/*
- * When multiple aggregation sessions on multiple stations
- * are being created/destroyed simultaneously, we need to
- * refcount the global queue stop caused by that in order
- * to not get into a situation where one of the aggregation
- * setup or teardown re-enables queues before the other is
- * ready to handle that.
- *
- * These two functions take care of this issue by keeping
- * a global "agg_queue_stop" refcount.
- */
-static void __acquires(agg_queue)
-ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
-{
-	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
-
-	if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
-		ieee80211_stop_queue_by_reason(
-			&sdata->local->hw, queue,
-			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
-	__acquire(agg_queue);
-}
-
-static void __releases(agg_queue)
-ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
-{
-	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
-
-	if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
-		ieee80211_wake_queue_by_reason(
-			&sdata->local->hw, queue,
-			IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
-	__release(agg_queue);
-}
-
-/*
- * splice packets from the STA's pending to the local pending,
- * requires a call to ieee80211_agg_splice_finish later
- */
-static void __acquires(agg_queue)
-ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
-			     struct tid_ampdu_tx *tid_tx, u16 tid)
-{
-	struct ieee80211_local *local = sdata->local;
-	int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
-	unsigned long flags;
-
-	ieee80211_stop_queue_agg(sdata, tid);
-
-	if (WARN(!tid_tx,
-		 "TID %d gone but expected when splicing aggregates from the pending queue\n",
-		 tid))
-		return;
-
-	if (!skb_queue_empty(&tid_tx->pending)) {
-		spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
-		/* copy over remaining packets */
-		skb_queue_splice_tail_init(&tid_tx->pending,
-					   &local->pending[queue]);
-		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
-	}
-}
-
-static void __releases(agg_queue)
-ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid)
-{
-	ieee80211_wake_queue_agg(sdata, tid);
-}
-
 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 {
 	struct tid_ampdu_tx *tid_tx;
@@ -660,14 +720,13 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
 EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
 
 int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
-				   enum ieee80211_back_parties initiator,
-				   bool tx)
+				   enum ieee80211_agg_stop_reason reason)
 {
 	int ret;
 
 	mutex_lock(&sta->ampdu_mlme.mtx);
 
-	ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator, tx);
+	ret = ___ieee80211_stop_tx_ba_session(sta, tid, reason);
 
 	mutex_unlock(&sta->ampdu_mlme.mtx);
 
@@ -751,24 +810,7 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
 	ieee80211_send_delba(sta->sdata, ra, tid,
 			     WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
 
-	/*
-	 * When we get here, the TX path will not be lockless any more wrt.
-	 * aggregation, since the OPERATIONAL bit has long been cleared.
-	 * Thus it will block on getting the lock, if it occurs. So if we
-	 * stop the queue now, we will not get any more packets, and any
-	 * that might be being processed will wait for us here, thereby
-	 * guaranteeing that no packets go to the tid_tx pending queue any
-	 * more.
-	 */
-
-	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
-
-	/* future packets must not find the tid_tx struct any more */
-	ieee80211_assign_tid_tx(sta, tid, NULL);
-
-	ieee80211_agg_splice_finish(sta->sdata, tid);
-
-	kfree_rcu(tid_tx, rcu_head);
+	ieee80211_remove_tid_tx(sta, tid);
 
  unlock_sta:
 	spin_unlock_bh(&sta->lock);
@@ -868,8 +910,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 	}
 
 	} else {
-		___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR,
-						false);
+		___ieee80211_stop_tx_ba_session(sta, tid, AGG_STOP_DECLINED);
 	}
 
  out:
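
Note on the refcounted queue stop that this patch relocates (ieee80211_stop_queue_agg()/ieee80211_wake_queue_agg()): the pattern is that only the 0 -> 1 transition of a per-queue counter actually stops the hardware queue, and only the 1 -> 0 transition wakes it, so overlapping aggregation setup/teardown on the same queue cannot re-enable it early. Below is a minimal standalone C sketch of that idea; the names queue_ref, stop_hw_queue and wake_hw_queue are illustrative only and are not part of the mac80211 API.

/*
 * Illustrative sketch (not mac80211 code): a per-queue refcount where
 * only the first stopper really stops the queue and only the last
 * waker really wakes it, mirroring the agg_queue_stop idea above.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int queue_ref;		/* hypothetical per-queue counter */

static void stop_hw_queue(void) { printf("queue stopped\n"); }
static void wake_hw_queue(void) { printf("queue woken\n"); }

static void stop_queue_refcounted(void)
{
	/* atomic_fetch_add returns the old value; 0 means we are first */
	if (atomic_fetch_add(&queue_ref, 1) == 0)
		stop_hw_queue();
}

static void wake_queue_refcounted(void)
{
	/* old value 1 means we were the last holder, so really wake it */
	if (atomic_fetch_sub(&queue_ref, 1) == 1)
		wake_hw_queue();
}

int main(void)
{
	stop_queue_refcounted();	/* session A: stops the queue */
	stop_queue_refcounted();	/* session B: queue already stopped */
	wake_queue_refcounted();	/* session A done: queue stays stopped */
	wake_queue_refcounted();	/* session B done: queue woken again */
	return 0;
}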