author     Johannes Berg <johannes.berg@intel.com>    2012-12-28 03:43:03 -0500
committer  Johannes Berg <johannes.berg@intel.com>    2013-01-03 07:01:42 -0500
commit     30bf5f1f433c7612857ed13c50525945c483dfe0 (patch)
tree       a9dbe1536cf42c42bd03e232c632ab18140dae86 /net/mac80211/agg-tx.c
parent     faec12ee2dd92edc09f75aab3d8c5085102052c5 (diff)
mac80211: move ieee80211_remove_tid_tx function
To be able to call it from ___ieee80211_stop_tx_ba_session(),
move the function and its dependencies up in the file.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Diffstat (limited to 'net/mac80211/agg-tx.c')
-rw-r--r--  net/mac80211/agg-tx.c  206
1 file changed, 103 insertions(+), 103 deletions(-)
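
For orientation before the diff: the helpers being moved implement a refcounted queue stop, so that concurrent aggregation setup/teardown sessions sharing a hardware queue cannot wake it while another session still needs it stopped; only the first stop and the last wake actually toggle the queue. Below is a minimal user-space sketch of that pattern using C11 atomics. The names and the printf-based "driver" state are hypothetical stand-ins, not the mac80211 API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NUM_QUEUES 4

static atomic_int agg_queue_stop[NUM_QUEUES]; /* per-queue stop refcount */
static bool queue_stopped[NUM_QUEUES];        /* stands in for driver queue state */

static void stop_queue_agg(int queue)
{
        /* only the 0 -> 1 transition actually stops the queue */
        if (atomic_fetch_add(&agg_queue_stop[queue], 1) + 1 == 1) {
                queue_stopped[queue] = true;
                printf("queue %d stopped\n", queue);
        }
}

static void wake_queue_agg(int queue)
{
        /* only the final 1 -> 0 transition wakes it again */
        if (atomic_fetch_sub(&agg_queue_stop[queue], 1) - 1 == 0) {
                queue_stopped[queue] = false;
                printf("queue %d woken\n", queue);
        }
}

int main(void)
{
        stop_queue_agg(2); /* first session: queue actually stops  */
        stop_queue_agg(2); /* second session: refcount bumped only */
        wake_queue_agg(2); /* one session done: queue stays off    */
        wake_queue_agg(2); /* last user gone: queue wakes          */
        return 0;
}

atomic_fetch_add()/atomic_fetch_sub() return the previous value, so the +1/-1 adjustments mirror the atomic_inc_return()/atomic_dec_return() tests in the kernel code below.
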
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 101bbfba9c8d..dda8d7df4b54 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -149,6 +149,109 @@ void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
         rcu_assign_pointer(sta->ampdu_mlme.tid_tx[tid], tid_tx);
 }
 
+static inline int ieee80211_ac_from_tid(int tid)
+{
+        return ieee802_1d_to_ac[tid & 7];
+}
+
+/*
+ * When multiple aggregation sessions on multiple stations
+ * are being created/destroyed simultaneously, we need to
+ * refcount the global queue stop caused by that in order
+ * to not get into a situation where one of the aggregation
+ * setup or teardown re-enables queues before the other is
+ * ready to handle that.
+ *
+ * These two functions take care of this issue by keeping
+ * a global "agg_queue_stop" refcount.
+ */
+static void __acquires(agg_queue)
+ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
+{
+        int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
+
+        if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
+                ieee80211_stop_queue_by_reason(
+                        &sdata->local->hw, queue,
+                        IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+        __acquire(agg_queue);
+}
+
+static void __releases(agg_queue)
+ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
+{
+        int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
+
+        if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
+                ieee80211_wake_queue_by_reason(
+                        &sdata->local->hw, queue,
+                        IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
+        __release(agg_queue);
+}
+
+/*
+ * splice packets from the STA's pending to the local pending,
+ * requires a call to ieee80211_agg_splice_finish later
+ */
+static void __acquires(agg_queue)
+ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
+                             struct tid_ampdu_tx *tid_tx, u16 tid)
+{
+        struct ieee80211_local *local = sdata->local;
+        int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
+        unsigned long flags;
+
+        ieee80211_stop_queue_agg(sdata, tid);
+
+        if (WARN(!tid_tx,
+                 "TID %d gone but expected when splicing aggregates from the pending queue\n",
+                 tid))
+                return;
+
+        if (!skb_queue_empty(&tid_tx->pending)) {
+                spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+                /* copy over remaining packets */
+                skb_queue_splice_tail_init(&tid_tx->pending,
+                                           &local->pending[queue]);
+                spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+        }
+}
+
+static void __releases(agg_queue)
+ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid)
+{
+        ieee80211_wake_queue_agg(sdata, tid);
+}
+
+static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid)
+{
+        struct tid_ampdu_tx *tid_tx;
+
+        lockdep_assert_held(&sta->ampdu_mlme.mtx);
+        lockdep_assert_held(&sta->lock);
+
+        tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
+
+        /*
+         * When we get here, the TX path will not be lockless any more wrt.
+         * aggregation, since the OPERATIONAL bit has long been cleared.
+         * Thus it will block on getting the lock, if it occurs. So if we
+         * stop the queue now, we will not get any more packets, and any
+         * that might be being processed will wait for us here, thereby
+         * guaranteeing that no packets go to the tid_tx pending queue any
+         * more.
+         */
+
+        ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
+
+        /* future packets must not find the tid_tx struct any more */
+        ieee80211_assign_tid_tx(sta, tid, NULL);
+
+        ieee80211_agg_splice_finish(sta->sdata, tid);
+
+        kfree_rcu(tid_tx, rcu_head);
+}
+
 int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                                     enum ieee80211_agg_stop_reason reason)
 {
@@ -265,80 +368,6 @@ static void sta_addba_resp_timer_expired(unsigned long data)
         rcu_read_unlock();
 }
 
-static inline int ieee80211_ac_from_tid(int tid)
-{
-        return ieee802_1d_to_ac[tid & 7];
-}
-
-/*
- * When multiple aggregation sessions on multiple stations
- * are being created/destroyed simultaneously, we need to
- * refcount the global queue stop caused by that in order
- * to not get into a situation where one of the aggregation
- * setup or teardown re-enables queues before the other is
- * ready to handle that.
- *
- * These two functions take care of this issue by keeping
- * a global "agg_queue_stop" refcount.
- */
-static void __acquires(agg_queue)
-ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
-{
-        int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
-
-        if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
-                ieee80211_stop_queue_by_reason(
-                        &sdata->local->hw, queue,
-                        IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
-        __acquire(agg_queue);
-}
-
-static void __releases(agg_queue)
-ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
-{
-        int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
-
-        if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
-                ieee80211_wake_queue_by_reason(
-                        &sdata->local->hw, queue,
-                        IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
-        __release(agg_queue);
-}
-
-/*
- * splice packets from the STA's pending to the local pending,
- * requires a call to ieee80211_agg_splice_finish later
- */
-static void __acquires(agg_queue)
-ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
-                             struct tid_ampdu_tx *tid_tx, u16 tid)
-{
-        struct ieee80211_local *local = sdata->local;
-        int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
-        unsigned long flags;
-
-        ieee80211_stop_queue_agg(sdata, tid);
-
-        if (WARN(!tid_tx,
-                 "TID %d gone but expected when splicing aggregates from the pending queue\n",
-                 tid))
-                return;
-
-        if (!skb_queue_empty(&tid_tx->pending)) {
-                spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
-                /* copy over remaining packets */
-                skb_queue_splice_tail_init(&tid_tx->pending,
-                                           &local->pending[queue]);
-                spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
-        }
-}
-
-static void __releases(agg_queue)
-ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid)
-{
-        ieee80211_wake_queue_agg(sdata, tid);
-}
-
 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 {
         struct tid_ampdu_tx *tid_tx;
@@ -713,35 +742,6 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
 }
 EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
 
-static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid)
-{
-        struct tid_ampdu_tx *tid_tx;
-
-        lockdep_assert_held(&sta->ampdu_mlme.mtx);
-        lockdep_assert_held(&sta->lock);
-
-        tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
-
-        /*
-         * When we get here, the TX path will not be lockless any more wrt.
-         * aggregation, since the OPERATIONAL bit has long been cleared.
-         * Thus it will block on getting the lock, if it occurs. So if we
-         * stop the queue now, we will not get any more packets, and any
-         * that might be being processed will wait for us here, thereby
-         * guaranteeing that no packets go to the tid_tx pending queue any
-         * more.
-         */
-
-        ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
-
-        /* future packets must not find the tid_tx struct any more */
-        ieee80211_assign_tid_tx(sta, tid, NULL);
-
-        ieee80211_agg_splice_finish(sta->sdata, tid);
-
-        kfree_rcu(tid_tx, rcu_head);
-}
-
 void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
 {
         struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
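
As context for the comment inside ieee80211_remove_tid_tx(): once the OPERATIONAL bit is cleared, the TX path takes the lock rather than running lockless, so stopping the queue first guarantees no further frames can reach the session's pending queue. Only then are the leftovers spliced to the local pending queue, the tid_tx pointer unpublished so future packets cannot find it, the queue woken, and the struct freed after an RCU grace period. Here is a simplified, self-contained sketch of just the splice step, with plain C lists standing in for sk_buff queues; all names are hypothetical, not the kernel API.

#include <stdio.h>
#include <stdlib.h>

struct pkt { int seq; struct pkt *next; };
struct queue { struct pkt *head; struct pkt *tail; };

static void enqueue(struct queue *q, int seq)
{
        struct pkt *p = malloc(sizeof(*p));
        p->seq = seq;
        p->next = NULL;
        if (q->tail)
                q->tail->next = p;
        else
                q->head = p;
        q->tail = p;
}

/* analogue of skb_queue_splice_tail_init(): move everything from 'from'
 * to the tail of 'to', leaving 'from' empty */
static void splice_tail_init(struct queue *from, struct queue *to)
{
        if (!from->head)
                return;
        if (to->tail)
                to->tail->next = from->head;
        else
                to->head = from->head;
        to->tail = from->tail;
        from->head = from->tail = NULL;
}

int main(void)
{
        struct queue tid_pending = { 0 }, local_pending = { 0 };

        enqueue(&tid_pending, 1); /* frames left on the session queue */
        enqueue(&tid_pending, 2);

        /* 1. stop the queue (refcounted, as in the earlier sketch)     */
        /* 2. splice leftovers so no frame is stranded on the session   */
        splice_tail_init(&tid_pending, &local_pending);
        /* 3. unpublish tid_tx (rcu_assign_pointer(..., NULL) in-kernel) */
        /* 4. wake the queue; 5. kfree_rcu() once readers are done       */

        for (struct pkt *p = local_pending.head; p;) {
                struct pkt *next = p->next;
                printf("re-queued frame %d\n", p->seq);
                free(p);
                p = next;
        }
        return 0;
}
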