Diffstat (limited to 'drivers/net/wireless/ath/carl9170/tx.c')
 drivers/net/wireless/ath/carl9170/tx.c | 1440 +++++++++++++++++++++++++++++++
 1 file changed, 1440 insertions(+), 0 deletions(-)
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
new file mode 100644
index 000000000000..e94084fcf6f5
--- /dev/null
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -0,0 +1,1440 @@
/*
 * Atheros CARL9170 driver
 *
 * 802.11 xmit & status routines
 *
 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, see
 * http://www.gnu.org/licenses/.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *    Copyright (c) 2007-2008 Atheros Communications, Inc.
 *
 *    Permission to use, copy, modify, and/or distribute this software for any
 *    purpose with or without fee is hereby granted, provided that the above
 *    copyright notice and this permission notice appear in all copies.
 *
 *    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 *    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 *    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 *    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 *    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 *    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 *    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "carl9170.h"
#include "hw.h"
#include "cmd.h"

static inline unsigned int __carl9170_get_queue(struct ar9170 *ar,
                                                unsigned int queue)
{
        if (unlikely(modparam_noht)) {
                return queue;
        } else {
                /*
                 * This is just another workaround, until
                 * someone figures out how to get QoS and
                 * AMPDU to play nicely together.
                 */

                return 2;               /* AC_BE */
        }
}

static inline unsigned int carl9170_get_queue(struct ar9170 *ar,
                                              struct sk_buff *skb)
{
        return __carl9170_get_queue(ar, skb_get_queue_mapping(skb));
}

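/*
 * Illustrative note: the device manages its memory in fixed-size blocks
 * (ar->fw.mem_block_size, reported by the firmware). The pool counts as
 * "full" as soon as a worst-case frame would no longer fit. For example,
 * with a hypothetical 256-byte block size, a maximum-sized frame
 * (IEEE80211_MAX_FRAME_LEN = 2352 bytes) would need
 * DIV_ROUND_UP(2352, 256) = 10 free blocks.
 */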
static bool is_mem_full(struct ar9170 *ar)
{
        return (DIV_ROUND_UP(IEEE80211_MAX_FRAME_LEN, ar->fw.mem_block_size) >
                atomic_read(&ar->mem_free_blocks));
}

static void carl9170_tx_accounting(struct ar9170 *ar, struct sk_buff *skb)
{
        int queue, i;
        bool mem_full;

        atomic_inc(&ar->tx_total_queued);

        queue = skb_get_queue_mapping(skb);
        spin_lock_bh(&ar->tx_stats_lock);

        /*
         * The driver has to accept the frame, regardless of whether the
         * queue is already full to the brim. We have to do the queuing
         * internally, since mac80211 assumes that a driver which can
         * operate with aggregated frames does not reject frames for this
         * reason.
         */
        ar->tx_stats[queue].len++;
        ar->tx_stats[queue].count++;

        mem_full = is_mem_full(ar);
        for (i = 0; i < ar->hw->queues; i++) {
                if (mem_full || ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
                        ieee80211_stop_queue(ar->hw, i);
                        ar->queue_stop_timeout[i] = jiffies;
                }
        }

        spin_unlock_bh(&ar->tx_stats_lock);
}

/* needs rcu_read_lock */
static struct ieee80211_sta *__carl9170_get_tx_sta(struct ar9170 *ar,
                                                   struct sk_buff *skb)
{
        struct _carl9170_tx_superframe *super = (void *) skb->data;
        struct ieee80211_hdr *hdr = (void *) super->frame_data;
        struct ieee80211_vif *vif;
        unsigned int vif_id;

        vif_id = (super->s.misc & CARL9170_TX_SUPER_MISC_VIF_ID) >>
                 CARL9170_TX_SUPER_MISC_VIF_ID_S;

        if (WARN_ON_ONCE(vif_id >= AR9170_MAX_VIRTUAL_MAC))
                return NULL;

        vif = rcu_dereference(ar->vif_priv[vif_id].vif);
        if (unlikely(!vif))
                return NULL;

        /*
         * Normally we should use wrappers like ieee80211_get_DA to get
         * the correct peer ieee80211_sta.
         *
         * But there is a problem with indirect traffic (broadcasts, or
         * data which is designated for other stations) in station mode.
         * The frame will be directed to the AP for distribution and not
         * to the actual destination.
         */

        return ieee80211_find_sta(vif, hdr->addr1);
}

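/*
 * Note: this is the counterpart to the atomic_inc() of
 * sta_info->pending_frames in carl9170_op_tx. Once the last pending
 * frame for the station has been processed, the driver lifts the
 * ieee80211_sta_block_awake() block that is set elsewhere in the
 * driver while frames are still outstanding.
 */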
static void carl9170_tx_ps_unblock(struct ar9170 *ar, struct sk_buff *skb)
{
        struct ieee80211_sta *sta;
        struct carl9170_sta_info *sta_info;

        rcu_read_lock();
        sta = __carl9170_get_tx_sta(ar, skb);
        if (unlikely(!sta))
                goto out_rcu;

        sta_info = (struct carl9170_sta_info *) sta->drv_priv;
        if (atomic_dec_return(&sta_info->pending_frames) == 0)
                ieee80211_sta_block_awake(ar->hw, sta, false);

out_rcu:
        rcu_read_unlock();
}

static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb)
{
        int queue;

        queue = skb_get_queue_mapping(skb);

        spin_lock_bh(&ar->tx_stats_lock);

        ar->tx_stats[queue].len--;

        if (!is_mem_full(ar)) {
                unsigned int i;
                for (i = 0; i < ar->hw->queues; i++) {
                        if (ar->tx_stats[i].len >= CARL9170_NUM_TX_LIMIT_SOFT)
                                continue;

                        if (ieee80211_queue_stopped(ar->hw, i)) {
                                unsigned long tmp;

                                tmp = jiffies - ar->queue_stop_timeout[i];
                                if (tmp > ar->max_queue_stop_timeout[i])
                                        ar->max_queue_stop_timeout[i] = tmp;
                        }

                        ieee80211_wake_queue(ar->hw, i);
                }
        }

        spin_unlock_bh(&ar->tx_stats_lock);

        if (atomic_dec_and_test(&ar->tx_total_queued))
                complete(&ar->tx_flush);
}

static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb)
{
        struct _carl9170_tx_superframe *super = (void *) skb->data;
        unsigned int chunks;
        int cookie = -1;

        atomic_inc(&ar->mem_allocs);

        chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size);
        if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) {
                atomic_add(chunks, &ar->mem_free_blocks);
                return -ENOSPC;
        }

        spin_lock_bh(&ar->mem_lock);
        cookie = bitmap_find_free_region(ar->mem_bitmap, ar->fw.mem_blocks, 0);
        spin_unlock_bh(&ar->mem_lock);

        if (unlikely(cookie < 0)) {
                atomic_add(chunks, &ar->mem_free_blocks);
                return -ENOSPC;
        }

        super = (void *) skb->data;

        /*
         * Cookie #0 serves two special purposes:
         *  1. The firmware might use it to generate BlockACK frames
         *     in response to incoming BlockAckReqs.
         *
         *  2. Prevent double-free bugs.
         */
        super->s.cookie = (u8) cookie + 1;
        return 0;
}

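/*
 * Illustrative note on the cookie <-> bitmap mapping: the allocator
 * above hands out bitmap region n as cookie n + 1, so a frame that
 * grabbed bitmap bit 0 travels with cookie 1. The release path below
 * undoes the offset (cookie - 1) before returning the region, which is
 * what keeps cookie 0 free for the firmware's special uses.
 */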
static void carl9170_release_dev_space(struct ar9170 *ar, struct sk_buff *skb)
{
        struct _carl9170_tx_superframe *super = (void *) skb->data;
        int cookie;

        /* make a local copy of the cookie */
        cookie = super->s.cookie;
        /* invalidate cookie */
        super->s.cookie = 0;

        /*
         * Do an out-of-bounds check on the cookie:
         *
         *  * cookie "0" is reserved and won't be assigned to any
         *    out-going frame. Internally however, it is used to
         *    mark no-longer/un-accounted frames and serves as a
         *    cheap way of preventing frames from being freed
         *    twice by _accident_. NB: There is a tiny race...
         *
         *  * obviously, cookie numbers are limited by the amount
         *    of available memory blocks, so the number can
         *    never exceed the mem_blocks count.
         */
        if (unlikely(WARN_ON_ONCE(cookie == 0) ||
                     WARN_ON_ONCE(cookie > ar->fw.mem_blocks)))
                return;

        atomic_add(DIV_ROUND_UP(skb->len, ar->fw.mem_block_size),
                   &ar->mem_free_blocks);

        spin_lock_bh(&ar->mem_lock);
        bitmap_release_region(ar->mem_bitmap, cookie - 1, 0);
        spin_unlock_bh(&ar->mem_lock);
}

/* Called from any context */
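/*
 * Illustrative note: the carl9170_tx_info lives inside the
 * rate_driver_data scratch space of the ieee80211_tx_info, which in
 * turn lives inside skb->cb. The container_of() chain below merely
 * walks this nesting outwards, recovering the skb from nothing but
 * the kref pointer:
 *
 *      ref -> carl9170_tx_info -> rate_driver_data
 *          -> ieee80211_tx_info -> skb->cb -> skb
 */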
static void carl9170_tx_release(struct kref *ref)
{
        struct ar9170 *ar;
        struct carl9170_tx_info *arinfo;
        struct ieee80211_tx_info *txinfo;
        struct sk_buff *skb;

        arinfo = container_of(ref, struct carl9170_tx_info, ref);
        txinfo = container_of((void *) arinfo, struct ieee80211_tx_info,
                              rate_driver_data);
        skb = container_of((void *) txinfo, struct sk_buff, cb);

        ar = arinfo->ar;
        if (WARN_ON_ONCE(!ar))
                return;

        BUILD_BUG_ON(
            offsetof(struct ieee80211_tx_info, status.ampdu_ack_len) != 23);

        memset(&txinfo->status.ampdu_ack_len, 0,
               sizeof(struct ieee80211_tx_info) -
               offsetof(struct ieee80211_tx_info, status.ampdu_ack_len));

        if (atomic_read(&ar->tx_total_queued))
                ar->tx_schedule = true;

        if (txinfo->flags & IEEE80211_TX_CTL_AMPDU) {
                if (!atomic_read(&ar->tx_ampdu_upload))
                        ar->tx_ampdu_schedule = true;

                if (txinfo->flags & IEEE80211_TX_STAT_AMPDU) {
                        struct _carl9170_tx_superframe *super;

                        super = (void *)skb->data;
                        txinfo->status.ampdu_len = super->s.rix;
                        txinfo->status.ampdu_ack_len = super->s.cnt;
                } else if (txinfo->flags & IEEE80211_TX_STAT_ACK) {
                        /*
                         * drop redundant tx_status reports:
                         *
                         * 1. ampdu_ack_len of the final tx_status does
                         *    include the feedback of this particular frame.
                         *
                         * 2. tx_status_irqsafe only queues up to 128
                         *    tx feedback reports and discards the rest.
                         *
                         * 3. minstrel_ht is picky, it only accepts
                         *    reports of frames with the TX_STATUS_AMPDU flag.
                         */

                        dev_kfree_skb_any(skb);
                        return;
                } else {
                        /*
                         * Frame has failed, but we want to keep it in
                         * case it was lost due to a power-state
                         * transition.
                         */
                }
        }

        skb_pull(skb, sizeof(struct _carl9170_tx_superframe));
        ieee80211_tx_status_irqsafe(ar->hw, skb);
}

void carl9170_tx_get_skb(struct sk_buff *skb)
{
        struct carl9170_tx_info *arinfo = (void *)
                (IEEE80211_SKB_CB(skb))->rate_driver_data;
        kref_get(&arinfo->ref);
}

int carl9170_tx_put_skb(struct sk_buff *skb)
{
        struct carl9170_tx_info *arinfo = (void *)
                (IEEE80211_SKB_CB(skb))->rate_driver_data;

        return kref_put(&arinfo->ref, carl9170_tx_release);
}
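/*
 * Usage note: carl9170_tx() takes a second reference on every frame it
 * uploads (see the comment there), so the URB completion path
 * (carl9170_tx_callback) and the status/janitor path each drop one;
 * only the final carl9170_tx_put_skb() ends up in carl9170_tx_release.
 */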

/* Caller must hold the tid_info->lock & rcu_read_lock */
static void carl9170_tx_shift_bm(struct ar9170 *ar,
        struct carl9170_sta_tid *tid_info, u16 seq)
{
        u16 off;

        off = SEQ_DIFF(seq, tid_info->bsn);

        if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))
                return;

        /*
         * Sanity check. For each MPDU we set the bit in the bitmap and
         * clear it once we receive the tx_status.
         * But if the bit is already cleared then we've been bitten
         * by a bug.
         */
        WARN_ON_ONCE(!test_and_clear_bit(off, tid_info->bitmap));

        off = SEQ_DIFF(tid_info->snx, tid_info->bsn);
        if (WARN_ON_ONCE(off >= CARL9170_BAW_BITS))
                return;

        if (!bitmap_empty(tid_info->bitmap, off))
                off = find_first_bit(tid_info->bitmap, off);

        tid_info->bsn += off;
        tid_info->bsn &= 0x0fff;

        bitmap_shift_right(tid_info->bitmap, tid_info->bitmap,
                           off, CARL9170_BAW_BITS);
}
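/*
 * Worked example (illustrative): assume bsn = 100 and frames 100, 101
 * and 102 are pending, i.e. bits 0, 1 and 2 are set. When the
 * tx_status for seq 100 arrives, bit 0 is cleared; bit 1 is still set,
 * so off becomes 1, bsn advances to 101 and the bitmap shifts right by
 * one. The BA window therefore only creeps forward up to the first
 * frame that is still waiting for its tx_status.
 */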

static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
        struct sk_buff *skb, struct ieee80211_tx_info *txinfo)
{
        struct _carl9170_tx_superframe *super = (void *) skb->data;
        struct ieee80211_hdr *hdr = (void *) super->frame_data;
        struct ieee80211_sta *sta;
        struct carl9170_sta_info *sta_info;
        struct carl9170_sta_tid *tid_info;
        u8 tid;

        if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) ||
            txinfo->flags & IEEE80211_TX_CTL_INJECTED ||
            (!(super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_AGGR))))
                return;

        rcu_read_lock();
        sta = __carl9170_get_tx_sta(ar, skb);
        if (unlikely(!sta))
                goto out_rcu;

        tid = get_tid_h(hdr);

        sta_info = (void *) sta->drv_priv;
        tid_info = rcu_dereference(sta_info->agg[tid]);
        if (!tid_info)
                goto out_rcu;

        spin_lock_bh(&tid_info->lock);
        if (likely(tid_info->state >= CARL9170_TID_STATE_IDLE))
                carl9170_tx_shift_bm(ar, tid_info, get_seq_h(hdr));

        if (sta_info->stats[tid].clear) {
                sta_info->stats[tid].clear = false;
                sta_info->stats[tid].req = false;
                sta_info->stats[tid].ampdu_len = 0;
                sta_info->stats[tid].ampdu_ack_len = 0;
        }

        sta_info->stats[tid].ampdu_len++;
        if (txinfo->status.rates[0].count == 1)
                sta_info->stats[tid].ampdu_ack_len++;

        if (!(txinfo->flags & IEEE80211_TX_STAT_ACK))
                sta_info->stats[tid].req = true;

        if (super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_IMM_BA)) {
                super->s.rix = sta_info->stats[tid].ampdu_len;
                super->s.cnt = sta_info->stats[tid].ampdu_ack_len;
                txinfo->flags |= IEEE80211_TX_STAT_AMPDU;
                if (sta_info->stats[tid].req)
                        txinfo->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

                sta_info->stats[tid].clear = true;
        }
        spin_unlock_bh(&tid_info->lock);

out_rcu:
        rcu_read_unlock();
}

void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
                        const bool success)
{
        struct ieee80211_tx_info *txinfo;

        carl9170_tx_accounting_free(ar, skb);

        txinfo = IEEE80211_SKB_CB(skb);

        if (success)
                txinfo->flags |= IEEE80211_TX_STAT_ACK;
        else
                ar->tx_ack_failures++;

        if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
                carl9170_tx_status_process_ampdu(ar, skb, txinfo);

        carl9170_tx_ps_unblock(ar, skb);
        carl9170_tx_put_skb(skb);
}

/* This function may be called from any context */
void carl9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
{
        struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);

        atomic_dec(&ar->tx_total_pending);

        if (txinfo->flags & IEEE80211_TX_CTL_AMPDU)
                atomic_dec(&ar->tx_ampdu_upload);

        if (carl9170_tx_put_skb(skb))
                tasklet_hi_schedule(&ar->usb_tasklet);
}

static struct sk_buff *carl9170_get_queued_skb(struct ar9170 *ar, u8 cookie,
                                               struct sk_buff_head *queue)
{
        struct sk_buff *skb;

        spin_lock_bh(&queue->lock);
        skb_queue_walk(queue, skb) {
                struct _carl9170_tx_superframe *txc = (void *) skb->data;

                if (txc->s.cookie != cookie)
                        continue;

                __skb_unlink(skb, queue);
                spin_unlock_bh(&queue->lock);

                carl9170_release_dev_space(ar, skb);
                return skb;
        }
        spin_unlock_bh(&queue->lock);

        return NULL;
}

static void carl9170_tx_fill_rateinfo(struct ar9170 *ar, unsigned int rix,
        unsigned int tries, struct ieee80211_tx_info *txinfo)
{
        unsigned int i;

        for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
                if (txinfo->status.rates[i].idx < 0)
                        break;

                if (i == rix) {
                        txinfo->status.rates[i].count = tries;
                        i++;
                        break;
                }
        }

        for (; i < IEEE80211_TX_MAX_RATES; i++) {
                txinfo->status.rates[i].idx = -1;
                txinfo->status.rates[i].count = 0;
        }
}
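/*
 * Illustrative note: the firmware only reports the final rate index
 * (rix) and its try count. The loop above keeps the requested try
 * counts of all earlier entries, overwrites the count of the reported
 * entry and invalidates everything after it (idx = -1, count = 0) for
 * mac80211's rate control.
 */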

static void carl9170_check_queue_stop_timeout(struct ar9170 *ar)
{
        int i;
        struct sk_buff *skb;
        struct ieee80211_tx_info *txinfo;
        struct carl9170_tx_info *arinfo;
        bool restart = false;

        for (i = 0; i < ar->hw->queues; i++) {
                spin_lock_bh(&ar->tx_status[i].lock);

                skb = skb_peek(&ar->tx_status[i]);

                if (!skb)
                        goto next;

                txinfo = IEEE80211_SKB_CB(skb);
                arinfo = (void *) txinfo->rate_driver_data;

                if (time_is_before_jiffies(arinfo->timeout +
                    msecs_to_jiffies(CARL9170_QUEUE_STUCK_TIMEOUT)))
                        restart = true;

next:
                spin_unlock_bh(&ar->tx_status[i].lock);
        }

        if (restart) {
                /*
                 * At least one queue has been stuck for long enough.
                 * Give the device a kick and hope it gets back to
                 * work.
                 *
                 * possible reasons may include:
                 * - frames got lost/corrupted (bad connection to the device)
                 * - stalled rx processing/usb controller hiccups
                 * - firmware errors/bugs
                 * - every bug you can think of.
                 * - all bugs you can't...
                 * - ...
                 */
                carl9170_restart(ar, CARL9170_RR_STUCK_TX);
        }
}

static void carl9170_tx_ampdu_timeout(struct ar9170 *ar)
{
        struct carl9170_sta_tid *iter;
        struct sk_buff *skb;
        struct ieee80211_tx_info *txinfo;
        struct carl9170_tx_info *arinfo;
        struct ieee80211_sta *sta;

        rcu_read_lock();
        list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {
                if (iter->state < CARL9170_TID_STATE_IDLE)
                        continue;

                spin_lock_bh(&iter->lock);
                skb = skb_peek(&iter->queue);
                if (!skb)
                        goto unlock;

                txinfo = IEEE80211_SKB_CB(skb);
                arinfo = (void *)txinfo->rate_driver_data;
                if (time_is_after_jiffies(arinfo->timeout +
                    msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT)))
                        goto unlock;

                sta = __carl9170_get_tx_sta(ar, skb);
                if (WARN_ON(!sta))
                        goto unlock;

                ieee80211_stop_tx_ba_session(sta, iter->tid);
unlock:
                spin_unlock_bh(&iter->lock);
        }
        rcu_read_unlock();
}

void carl9170_tx_janitor(struct work_struct *work)
{
        struct ar9170 *ar = container_of(work, struct ar9170,
                                         tx_janitor.work);
        if (!IS_STARTED(ar))
                return;

        ar->tx_janitor_last_run = jiffies;

        carl9170_check_queue_stop_timeout(ar);
        carl9170_tx_ampdu_timeout(ar);

        if (!atomic_read(&ar->tx_total_queued))
                return;

        ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
                                     msecs_to_jiffies(CARL9170_TX_TIMEOUT));
}

static void __carl9170_tx_process_status(struct ar9170 *ar,
        const uint8_t cookie, const uint8_t info)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *txinfo;
        unsigned int r, t, q;
        bool success = true;

        q = ar9170_qmap[info & CARL9170_TX_STATUS_QUEUE];

        skb = carl9170_get_queued_skb(ar, cookie, &ar->tx_status[q]);
        if (!skb) {
                /*
                 * We have lost the race to another thread.
                 */
                return;
        }

        txinfo = IEEE80211_SKB_CB(skb);

        if (!(info & CARL9170_TX_STATUS_SUCCESS))
                success = false;

        r = (info & CARL9170_TX_STATUS_RIX) >> CARL9170_TX_STATUS_RIX_S;
        t = (info & CARL9170_TX_STATUS_TRIES) >> CARL9170_TX_STATUS_TRIES_S;

        carl9170_tx_fill_rateinfo(ar, r, t, txinfo);
        carl9170_tx_status(ar, skb, success);
}

void carl9170_tx_process_status(struct ar9170 *ar,
                                const struct carl9170_rsp *cmd)
{
        unsigned int i;

        for (i = 0; i < cmd->hdr.ext; i++) {
                if (WARN_ON(i > ((cmd->hdr.len / 2) + 1))) {
                        print_hex_dump_bytes("UU:", DUMP_PREFIX_NONE,
                                             (void *) cmd, cmd->hdr.len + 4);
                        break;
                }

                __carl9170_tx_process_status(ar, cmd->_tx_status[i].cookie,
                                             cmd->_tx_status[i].info);
        }
}
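/*
 * Illustrative note on the bounds check above: each tx status report
 * takes two bytes (cookie + info) in the response payload, so a
 * command with hdr.len payload bytes cannot carry much more than
 * len / 2 reports; anything beyond that indicates a malformed response
 * and is hex-dumped instead of being parsed.
 */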

static __le32 carl9170_tx_physet(struct ar9170 *ar,
        struct ieee80211_tx_info *info, struct ieee80211_tx_rate *txrate)
{
        struct ieee80211_rate *rate = NULL;
        u32 power, chains;
        __le32 tmp;

        tmp = cpu_to_le32(0);

        if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ <<
                        AR9170_TX_PHY_BW_S);
        /* this works because 40 MHz is 2 and dup is 3 */
        if (txrate->flags & IEEE80211_TX_RC_DUP_DATA)
                tmp |= cpu_to_le32(AR9170_TX_PHY_BW_40MHZ_DUP <<
                        AR9170_TX_PHY_BW_S);

        if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
                tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_GI);

        if (txrate->flags & IEEE80211_TX_RC_MCS) {
                u32 r = txrate->idx;
                u8 *txpower;

                /* heavy clip control */
                tmp |= cpu_to_le32((r & 0x7) <<
                        AR9170_TX_PHY_TX_HEAVY_CLIP_S);

                if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) {
                        if (info->band == IEEE80211_BAND_5GHZ)
                                txpower = ar->power_5G_ht40;
                        else
                                txpower = ar->power_2G_ht40;
                } else {
                        if (info->band == IEEE80211_BAND_5GHZ)
                                txpower = ar->power_5G_ht20;
                        else
                                txpower = ar->power_2G_ht20;
                }

                power = txpower[r & 7];

                /* +1 dBm for HT40 (txpower values are in 0.5 dB steps) */
                if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
                        power += 2;

                r <<= AR9170_TX_PHY_MCS_S;
                BUG_ON(r & ~AR9170_TX_PHY_MCS);

                tmp |= cpu_to_le32(r & AR9170_TX_PHY_MCS);
                tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_HT);

                /*
                 * green field preamble does not work.
                 *
                 * if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD)
                 *      tmp |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
                 */
        } else {
                u8 *txpower;
                u32 mod;
                u32 phyrate;
                u8 idx = txrate->idx;

                if (info->band != IEEE80211_BAND_2GHZ) {
                        idx += 4;
                        txpower = ar->power_5G_leg;
                        mod = AR9170_TX_PHY_MOD_OFDM;
                } else {
                        if (idx < 4) {
                                txpower = ar->power_2G_cck;
                                mod = AR9170_TX_PHY_MOD_CCK;
                        } else {
                                mod = AR9170_TX_PHY_MOD_OFDM;
                                txpower = ar->power_2G_ofdm;
                        }
                }

                rate = &__carl9170_ratetable[idx];

                phyrate = rate->hw_value & 0xF;
                power = txpower[(rate->hw_value & 0x30) >> 4];
                phyrate <<= AR9170_TX_PHY_MCS_S;

                tmp |= cpu_to_le32(mod);
                tmp |= cpu_to_le32(phyrate);

                /*
                 * short preamble seems to be broken too.
                 *
                 * if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
                 *      tmp |= cpu_to_le32(AR9170_TX_PHY_SHORT_PREAMBLE);
                 */
        }
        power <<= AR9170_TX_PHY_TX_PWR_S;
        power &= AR9170_TX_PHY_TX_PWR;
        tmp |= cpu_to_le32(power);

        /* set TX chains */
        if (ar->eeprom.tx_mask == 1) {
                chains = AR9170_TX_PHY_TXCHAIN_1;
        } else {
                chains = AR9170_TX_PHY_TXCHAIN_2;

                /* >= 36M legacy OFDM - use only one chain */
                if (rate && rate->bitrate >= 360 &&
                    !(txrate->flags & IEEE80211_TX_RC_MCS))
                        chains = AR9170_TX_PHY_TXCHAIN_1;
        }
        tmp |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_S);

        return tmp;
}

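/*
 * Worked example (illustrative, power values are made up): an HT40
 * MCS 5 frame with short GI on 2.4 GHz would or together
 * AR9170_TX_PHY_BW_40MHZ (in the BW field), AR9170_TX_PHY_SHORT_GI,
 * AR9170_TX_PHY_MOD_HT, the MCS index in the AR9170_TX_PHY_MCS field,
 * a power value from ar->power_2G_ht40 and the chain mask - all packed
 * into the single little-endian 32-bit phy_control word.
 */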
static bool carl9170_tx_rts_check(struct ar9170 *ar,
                                  struct ieee80211_tx_rate *rate,
                                  bool ampdu, bool multi)
{
        switch (ar->erp_mode) {
        case CARL9170_ERP_AUTO:
                if (ampdu)
                        break;

        case CARL9170_ERP_MAC80211:
                if (!(rate->flags & IEEE80211_TX_RC_USE_RTS_CTS))
                        break;

        case CARL9170_ERP_RTS:
                if (likely(!multi))
                        return true;

        default:
                break;
        }

        return false;
}
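/*
 * Note: the switch statements in carl9170_tx_rts_check above and
 * carl9170_tx_cts_check below rely on deliberate fall-through.
 * CARL9170_ERP_AUTO, for instance, only reaches the "use RTS" verdict
 * after also passing the CARL9170_ERP_MAC80211 rate-flag check, while
 * CARL9170_ERP_RTS enforces RTS for all unicast frames.
 */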

static bool carl9170_tx_cts_check(struct ar9170 *ar,
                                  struct ieee80211_tx_rate *rate)
{
        switch (ar->erp_mode) {
        case CARL9170_ERP_AUTO:
        case CARL9170_ERP_MAC80211:
                if (!(rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
                        break;

        case CARL9170_ERP_CTS:
                return true;

        default:
                break;
        }

        return false;
}

static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr;
        struct _carl9170_tx_superframe *txc;
        struct carl9170_vif_info *cvif;
        struct ieee80211_tx_info *info;
        struct ieee80211_tx_rate *txrate;
        struct ieee80211_sta *sta;
        struct carl9170_tx_info *arinfo;
        unsigned int hw_queue;
        int i;
        __le16 mac_tmp;
        u16 len;
        bool ampdu, no_ack;

        BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));
        BUILD_BUG_ON(sizeof(struct _carl9170_tx_superdesc) !=
                     CARL9170_TX_SUPERDESC_LEN);

        BUILD_BUG_ON(sizeof(struct _ar9170_tx_hwdesc) !=
                     AR9170_TX_HWDESC_LEN);

        BUILD_BUG_ON(IEEE80211_TX_MAX_RATES < CARL9170_TX_MAX_RATES);

        BUILD_BUG_ON(AR9170_MAX_VIRTUAL_MAC >
                     ((CARL9170_TX_SUPER_MISC_VIF_ID >>
                       CARL9170_TX_SUPER_MISC_VIF_ID_S) + 1));

        hw_queue = ar9170_qmap[carl9170_get_queue(ar, skb)];

        hdr = (void *)skb->data;
        info = IEEE80211_SKB_CB(skb);
        len = skb->len;

        /*
         * Note: If the frame was sent through a monitor interface,
         * the ieee80211_vif pointer can be NULL.
         */
        if (likely(info->control.vif))
                cvif = (void *) info->control.vif->drv_priv;
        else
                cvif = NULL;

        sta = info->control.sta;

        txc = (void *)skb_push(skb, sizeof(*txc));
        memset(txc, 0, sizeof(*txc));

        SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, txc->s.misc, hw_queue);

        if (likely(cvif))
                SET_VAL(CARL9170_TX_SUPER_MISC_VIF_ID, txc->s.misc, cvif->id);

        if (unlikely(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM))
                txc->s.misc |= CARL9170_TX_SUPER_MISC_CAB;

        if (unlikely(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
                txc->s.misc |= CARL9170_TX_SUPER_MISC_ASSIGN_SEQ;

        if (unlikely(ieee80211_is_probe_resp(hdr->frame_control)))
                txc->s.misc |= CARL9170_TX_SUPER_MISC_FILL_IN_TSF;

        mac_tmp = cpu_to_le16(AR9170_TX_MAC_HW_DURATION |
                              AR9170_TX_MAC_BACKOFF);
        mac_tmp |= cpu_to_le16((hw_queue << AR9170_TX_MAC_QOS_S) &
                               AR9170_TX_MAC_QOS);

        no_ack = !!(info->flags & IEEE80211_TX_CTL_NO_ACK);
        if (unlikely(no_ack))
                mac_tmp |= cpu_to_le16(AR9170_TX_MAC_NO_ACK);

        if (info->control.hw_key) {
                len += info->control.hw_key->icv_len;

                switch (info->control.hw_key->cipher) {
                case WLAN_CIPHER_SUITE_WEP40:
                case WLAN_CIPHER_SUITE_WEP104:
                case WLAN_CIPHER_SUITE_TKIP:
                        mac_tmp |= cpu_to_le16(AR9170_TX_MAC_ENCR_RC4);
                        break;
                case WLAN_CIPHER_SUITE_CCMP:
                        mac_tmp |= cpu_to_le16(AR9170_TX_MAC_ENCR_AES);
                        break;
                default:
                        WARN_ON(1);
                        goto err_out;
                }
        }

        ampdu = !!(info->flags & IEEE80211_TX_CTL_AMPDU);
        if (ampdu) {
                unsigned int density, factor;

                if (unlikely(!sta || !cvif))
                        goto err_out;

                factor = min_t(unsigned int, 1u, sta->ht_cap.ampdu_factor);
                density = sta->ht_cap.ampdu_density;

                if (density) {
                        /*
                         * Watch out!
                         *
                         * Otus uses slightly different density values than
                         * those from the 802.11n spec.
                         */

                        density = max_t(unsigned int, density + 1, 7u);
                }

                SET_VAL(CARL9170_TX_SUPER_AMPDU_DENSITY,
                        txc->s.ampdu_settings, density);

                SET_VAL(CARL9170_TX_SUPER_AMPDU_FACTOR,
                        txc->s.ampdu_settings, factor);

                for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
                        txrate = &info->control.rates[i];
                        if (txrate->idx >= 0) {
                                txc->s.ri[i] =
                                        CARL9170_TX_SUPER_RI_AMPDU;

                                if (WARN_ON(!(txrate->flags &
                                              IEEE80211_TX_RC_MCS))) {
                                        /*
                                         * Not sure if it's even possible
                                         * to aggregate non-ht rates with
                                         * this HW.
                                         */
                                        goto err_out;
                                }
                                continue;
                        }

                        txrate->idx = 0;
                        txrate->count = ar->hw->max_rate_tries;
                }

                mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);
        }

        /*
         * NOTE: For the first rate, the ERP & AMPDU flags are directly
         * taken from mac_control. For all fallback rates, the firmware
         * updates the mac_control flags from the rate info field.
         */
        for (i = 1; i < CARL9170_TX_MAX_RATES; i++) {
                txrate = &info->control.rates[i];
                if (txrate->idx < 0)
                        break;

                SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i],
                        txrate->count);

                if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
                        txc->s.ri[i] |= (AR9170_TX_MAC_PROT_RTS <<
                                CARL9170_TX_SUPER_RI_ERP_PROT_S);
                else if (carl9170_tx_cts_check(ar, txrate))
                        txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS <<
                                CARL9170_TX_SUPER_RI_ERP_PROT_S);

                txc->s.rr[i - 1] = carl9170_tx_physet(ar, info, txrate);
        }

        txrate = &info->control.rates[0];
        SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[0], txrate->count);

        if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
                mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
        else if (carl9170_tx_cts_check(ar, txrate))
                mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);

        txc->s.len = cpu_to_le16(skb->len);
        txc->f.length = cpu_to_le16(len + FCS_LEN);
        txc->f.mac_control = mac_tmp;
        txc->f.phy_control = carl9170_tx_physet(ar, info, txrate);

        arinfo = (void *)info->rate_driver_data;
        arinfo->timeout = jiffies;
        arinfo->ar = ar;
        kref_init(&arinfo->ref);
        return 0;

err_out:
        skb_pull(skb, sizeof(*txc));
        return -EINVAL;
}
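/*
 * Illustrative summary: after carl9170_tx_prepare() the skb starts
 * with the superframe header - the software descriptor (cookie, queue,
 * vif, rate info in txc->s) followed by the hardware descriptor
 * (length, mac_control, phy_control in txc->f) - and only then the
 * original 802.11 frame. carl9170_tx_release() strips this header
 * again before the skb is handed back to mac80211.
 */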

static void carl9170_set_immba(struct ar9170 *ar, struct sk_buff *skb)
{
        struct _carl9170_tx_superframe *super;

        super = (void *) skb->data;
        super->f.mac_control |= cpu_to_le16(AR9170_TX_MAC_IMM_BA);
}

static void carl9170_set_ampdu_params(struct ar9170 *ar, struct sk_buff *skb)
{
        struct _carl9170_tx_superframe *super;
        int tmp;

        super = (void *) skb->data;

        tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_DENSITY) <<
                CARL9170_TX_SUPER_AMPDU_DENSITY_S;

        /*
         * carl9170_tx_prepare has already filled in all ampdu spacing &
         * factor parameters. Now it's time to check whether the settings
         * have to be updated by the firmware, or if everything is still
         * the same.
         *
         * There's no sane way to handle different density values with
         * this hardware, so we may as well just do the compare in the
         * driver.
         */

        if (tmp != ar->current_density) {
                ar->current_density = tmp;
                super->s.ampdu_settings |=
                        CARL9170_TX_SUPER_AMPDU_COMMIT_DENSITY;
        }

        tmp = (super->s.ampdu_settings & CARL9170_TX_SUPER_AMPDU_FACTOR) <<
                CARL9170_TX_SUPER_AMPDU_FACTOR_S;

        if (tmp != ar->current_factor) {
                ar->current_factor = tmp;
                super->s.ampdu_settings |=
                        CARL9170_TX_SUPER_AMPDU_COMMIT_FACTOR;
        }
}

static bool carl9170_tx_rate_check(struct ar9170 *ar, struct sk_buff *_dest,
                                   struct sk_buff *_src)
{
        struct _carl9170_tx_superframe *dest, *src;

        dest = (void *) _dest->data;
        src = (void *) _src->data;

        /*
         * The mac80211 rate control algorithm expects that all MPDUs in
         * an AMPDU share the same tx vectors.
         * This is not really obvious right now, because the hardware
         * does the AMPDU setup according to its own rulebook.
         * Our nicely assembled, strictly monotonically increasing MPDU
         * chains will be broken up, mashed back together...
         */

        return (dest->f.phy_control == src->f.phy_control);
}

static void carl9170_tx_ampdu(struct ar9170 *ar)
{
        struct sk_buff_head agg;
        struct carl9170_sta_tid *tid_info;
        struct sk_buff *skb, *first;
        unsigned int i = 0, done_ampdus = 0;
        u16 seq, queue, tmpssn;

        atomic_inc(&ar->tx_ampdu_scheduler);
        ar->tx_ampdu_schedule = false;

        if (atomic_read(&ar->tx_ampdu_upload))
                return;

        if (!ar->tx_ampdu_list_len)
                return;

        __skb_queue_head_init(&agg);

        rcu_read_lock();
        tid_info = rcu_dereference(ar->tx_ampdu_iter);
        if (WARN_ON_ONCE(!tid_info)) {
                rcu_read_unlock();
                return;
        }

retry:
        list_for_each_entry_continue_rcu(tid_info, &ar->tx_ampdu_list, list) {
                i++;

                if (tid_info->state < CARL9170_TID_STATE_PROGRESS)
                        continue;

                queue = TID_TO_WME_AC(tid_info->tid);

                spin_lock_bh(&tid_info->lock);
                if (tid_info->state != CARL9170_TID_STATE_XMIT)
                        goto processed;

                tid_info->counter++;
                first = skb_peek(&tid_info->queue);
                tmpssn = carl9170_get_seq(first);
                seq = tid_info->snx;

                if (unlikely(tmpssn != seq)) {
                        tid_info->state = CARL9170_TID_STATE_IDLE;

                        goto processed;
                }

                while ((skb = skb_peek(&tid_info->queue))) {
                        /* strict 0, 1, ..., n - 1, n frame sequence order */
                        if (unlikely(carl9170_get_seq(skb) != seq))
                                break;

                        /* don't upload more than AMPDU FACTOR allows. */
                        if (unlikely(SEQ_DIFF(tid_info->snx, tid_info->bsn) >=
                            (tid_info->max - 1)))
                                break;

                        if (!carl9170_tx_rate_check(ar, skb, first))
                                break;

                        atomic_inc(&ar->tx_ampdu_upload);
                        tid_info->snx = seq = SEQ_NEXT(seq);
                        __skb_unlink(skb, &tid_info->queue);

                        __skb_queue_tail(&agg, skb);

                        if (skb_queue_len(&agg) >= CARL9170_NUM_TX_AGG_MAX)
                                break;
                }

                if (skb_queue_empty(&tid_info->queue) ||
                    carl9170_get_seq(skb_peek(&tid_info->queue)) !=
                    tid_info->snx) {
                        /*
                         * stop TID, if A-MPDU frames are still missing,
                         * or when the queue is empty.
                         */

                        tid_info->state = CARL9170_TID_STATE_IDLE;
                }
                done_ampdus++;

processed:
                spin_unlock_bh(&tid_info->lock);

                if (skb_queue_empty(&agg))
                        continue;

                /* apply ampdu spacing & factor settings */
                carl9170_set_ampdu_params(ar, skb_peek(&agg));

                /* set aggregation push bit */
                carl9170_set_immba(ar, skb_peek_tail(&agg));

                spin_lock_bh(&ar->tx_pending[queue].lock);
                skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]);
                spin_unlock_bh(&ar->tx_pending[queue].lock);
                ar->tx_schedule = true;
        }
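        /*
         * The iterator may have started anywhere in tx_ampdu_list. If
         * the pass above ran off the tail without doing any work (both
         * counters are still zero), scan the list once more from the
         * current position; the post-increments make sure this retry
         * happens at most once.
         */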
        if ((done_ampdus++ == 0) && (i++ == 0))
                goto retry;

        rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
        rcu_read_unlock();
}

static struct sk_buff *carl9170_tx_pick_skb(struct ar9170 *ar,
                                            struct sk_buff_head *queue)
{
        struct sk_buff *skb;
        struct ieee80211_tx_info *info;
        struct carl9170_tx_info *arinfo;

        BUILD_BUG_ON(sizeof(*arinfo) > sizeof(info->rate_driver_data));

        spin_lock_bh(&queue->lock);
        skb = skb_peek(queue);
        if (unlikely(!skb))
                goto err_unlock;

        if (carl9170_alloc_dev_space(ar, skb))
                goto err_unlock;

        __skb_unlink(skb, queue);
        spin_unlock_bh(&queue->lock);

        info = IEEE80211_SKB_CB(skb);
        arinfo = (void *) info->rate_driver_data;

        arinfo->timeout = jiffies;
        return skb;

err_unlock:
        spin_unlock_bh(&queue->lock);
        return NULL;
}

void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb)
{
        struct _carl9170_tx_superframe *super;
        uint8_t q = 0;

        ar->tx_dropped++;

        super = (void *)skb->data;
        SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, q,
                ar9170_qmap[carl9170_get_queue(ar, skb)]);
        __carl9170_tx_process_status(ar, super->s.cookie, q);
}

static bool carl9170_tx_ps_drop(struct ar9170 *ar, struct sk_buff *skb)
{
        struct ieee80211_sta *sta;
        struct carl9170_sta_info *sta_info;

        rcu_read_lock();
        sta = __carl9170_get_tx_sta(ar, skb);
        if (!sta)
                goto out_rcu;

        sta_info = (void *) sta->drv_priv;
        if (unlikely(sta_info->sleeping)) {
                struct ieee80211_tx_info *tx_info;

                rcu_read_unlock();

                tx_info = IEEE80211_SKB_CB(skb);
                if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
                        atomic_dec(&ar->tx_ampdu_upload);

                tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
                carl9170_tx_status(ar, skb, false);
                return true;
        }

out_rcu:
        rcu_read_unlock();
        return false;
}

static void carl9170_tx(struct ar9170 *ar)
{
        struct sk_buff *skb;
        unsigned int i, q;
        bool schedule_garbagecollector = false;

        ar->tx_schedule = false;

        if (unlikely(!IS_STARTED(ar)))
                return;

        carl9170_usb_handle_tx_err(ar);

        for (i = 0; i < ar->hw->queues; i++) {
                while (!skb_queue_empty(&ar->tx_pending[i])) {
                        skb = carl9170_tx_pick_skb(ar, &ar->tx_pending[i]);
                        if (unlikely(!skb))
                                break;

                        if (unlikely(carl9170_tx_ps_drop(ar, skb)))
                                continue;

                        atomic_inc(&ar->tx_total_pending);

                        q = __carl9170_get_queue(ar, i);
                        /*
                         * NB: tx_status[i] vs. tx_status[q],
                         * TODO: Move into pick_skb or alloc_dev_space.
                         */
                        skb_queue_tail(&ar->tx_status[q], skb);

                        /*
                         * increase ref count to "2".
                         * Ref counting is the easiest way to solve the
                         * race between the urb's completion routine:
                         *      carl9170_tx_callback
                         * and wlan tx status functions:
                         *      carl9170_tx_status/janitor.
                         */
                        carl9170_tx_get_skb(skb);

                        carl9170_usb_tx(ar, skb);
                        schedule_garbagecollector = true;
                }
        }

        if (!schedule_garbagecollector)
                return;

        ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
                                     msecs_to_jiffies(CARL9170_TX_TIMEOUT));
}

static bool carl9170_tx_ampdu_queue(struct ar9170 *ar,
        struct ieee80211_sta *sta, struct sk_buff *skb)
{
        struct _carl9170_tx_superframe *super = (void *) skb->data;
        struct carl9170_sta_info *sta_info;
        struct carl9170_sta_tid *agg;
        struct sk_buff *iter;
        u16 tid, seq, qseq, off;
        bool run = false;

        tid = carl9170_get_tid(skb);
        seq = carl9170_get_seq(skb);
        sta_info = (void *) sta->drv_priv;

        rcu_read_lock();
        agg = rcu_dereference(sta_info->agg[tid]);

        if (!agg)
                goto err_unlock_rcu;

        spin_lock_bh(&agg->lock);
        if (unlikely(agg->state < CARL9170_TID_STATE_IDLE))
                goto err_unlock;

        /* check if sequence is within the BA window */
        if (unlikely(!BAW_WITHIN(agg->bsn, CARL9170_BAW_BITS, seq)))
                goto err_unlock;

        if (WARN_ON_ONCE(!BAW_WITHIN(agg->snx, CARL9170_BAW_BITS, seq)))
                goto err_unlock;

        off = SEQ_DIFF(seq, agg->bsn);
        if (WARN_ON_ONCE(test_and_set_bit(off, agg->bitmap)))
                goto err_unlock;

        if (likely(BAW_WITHIN(agg->hsn, CARL9170_BAW_BITS, seq))) {
                __skb_queue_tail(&agg->queue, skb);
                agg->hsn = seq;
                goto queued;
        }

        skb_queue_reverse_walk(&agg->queue, iter) {
                qseq = carl9170_get_seq(iter);

                if (BAW_WITHIN(qseq, CARL9170_BAW_BITS, seq)) {
                        __skb_queue_after(&agg->queue, iter, skb);
                        goto queued;
                }
        }

        __skb_queue_head(&agg->queue, skb);
queued:

        if (unlikely(agg->state != CARL9170_TID_STATE_XMIT)) {
                if (agg->snx == carl9170_get_seq(skb_peek(&agg->queue))) {
                        agg->state = CARL9170_TID_STATE_XMIT;
                        run = true;
                }
        }

        spin_unlock_bh(&agg->lock);
        rcu_read_unlock();

        return run;

err_unlock:
        spin_unlock_bh(&agg->lock);

err_unlock_rcu:
        rcu_read_unlock();
        super->f.mac_control &= ~cpu_to_le16(AR9170_TX_MAC_AGGR);
        carl9170_tx_status(ar, skb, false);
        ar->tx_dropped++;
        return false;
}
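/*
 * Illustrative note: frames can reach carl9170_tx_ampdu_queue out of
 * order, so the reverse walk above is effectively an insertion sort by
 * sequence number: a new frame is queued behind the first entry whose
 * sequence number precedes it within the BA window, keeping
 * tid_info->queue ordered for carl9170_tx_ampdu().
 */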

void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
        struct ar9170 *ar = hw->priv;
        struct ieee80211_tx_info *info;
        struct ieee80211_sta *sta;
        bool run;

        if (unlikely(!IS_STARTED(ar)))
                goto err_free;

        info = IEEE80211_SKB_CB(skb);
        sta = info->control.sta;

        if (unlikely(carl9170_tx_prepare(ar, skb)))
                goto err_free;

        carl9170_tx_accounting(ar, skb);
        /*
         * from now on, one has to use carl9170_tx_status to free
         * all resources which are associated with the frame.
         */

        if (sta) {
                struct carl9170_sta_info *stai = (void *) sta->drv_priv;
                atomic_inc(&stai->pending_frames);
        }

        if (info->flags & IEEE80211_TX_CTL_AMPDU) {
                run = carl9170_tx_ampdu_queue(ar, sta, skb);
                if (run)
                        carl9170_tx_ampdu(ar);

        } else {
                unsigned int queue = skb_get_queue_mapping(skb);

                skb_queue_tail(&ar->tx_pending[queue], skb);
        }

        carl9170_tx(ar);
        return;

err_free:
        ar->tx_dropped++;
        dev_kfree_skb_any(skb);
}

void carl9170_tx_scheduler(struct ar9170 *ar)
{
        if (ar->tx_ampdu_schedule)
                carl9170_tx_ampdu(ar);

        if (ar->tx_schedule)
                carl9170_tx(ar);
}