author | Sujith <Sujith.Manoharan@atheros.com> | 2009-01-16 11:08:42 -0500
---|---|---
committer | John W. Linville <linville@tuxdriver.com> | 2009-01-29 16:00:39 -0500
commit | e8324357902698ffb7615d128d612c85d8e21912 (patch) |
tree | 4a3494bb750655d2312d0a7bb3cc8ca85da6f7a7 /drivers/net/wireless/ath9k/xmit.c |
parent | dd006395688cd3ce6c92de288d8db090d98dc2c7 (diff) |
ath9k: Reorganize code in xmit.c
This patch starts cleaning up all the crufty code in the transmission path,
grouping functions into logical blocks.
Signed-off-by: Sujith <Sujith.Manoharan@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/ath9k/xmit.c')
-rw-r--r-- | drivers/net/wireless/ath9k/xmit.c | 2596
1 file changed, 1179 insertions, 1417 deletions
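
The reorganization is easiest to see as a file-layout change: helpers that are now called before their definitions gain forward declarations at the top of xmit.c, and related functions are regrouped under banner comments. A minimal sketch of that layout, reconstructed from the new side of the diff below (the ellipsis comments and ordering are illustrative, not an exact excerpt):

static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
                               struct ath_atx_tid *tid,
                               struct list_head *bf_head);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
                                struct list_head *bf_q,
                                int txok, int sendbar);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
                             struct list_head *head);
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);

/*********************/
/* Aggregation logic */
/*********************/

/* ... TID queue/pause/resume, block-ack window bookkeeping, A-MPDU formation ... */

/********************/
/* Queue Management */
/********************/

/* ... queue depth, DMA stop, drain and scheduling helpers ... */

Forward-declaring these four helpers up front is what lets the function definitions themselves be reordered into the banner-comment groups without changing any call sites.
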
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index 522078d80931..5e6e5cf9b67f 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -55,94 +55,19 @@ static u32 bits_per_symbol[][2] = {
55 | 55 | ||
56 | #define IS_HT_RATE(_rate) ((_rate) & 0x80) | 56 | #define IS_HT_RATE(_rate) ((_rate) & 0x80) |
57 | 57 | ||
58 | /* | 58 | static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, |
59 | * Insert a chain of ath_buf (descriptors) on a txq and | 59 | struct ath_atx_tid *tid, |
60 | * assume the descriptors are already chained together by caller. | 60 | struct list_head *bf_head); |
61 | * NB: must be called with txq lock held | 61 | static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, |
62 | */ | 62 | struct list_head *bf_q, |
63 | 63 | int txok, int sendbar); | |
64 | static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, | 64 | static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, |
65 | struct list_head *head) | 65 | struct list_head *head); |
66 | { | 66 | static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf); |
67 | struct ath_hal *ah = sc->sc_ah; | ||
68 | struct ath_buf *bf; | ||
69 | |||
70 | /* | ||
71 | * Insert the frame on the outbound list and | ||
72 | * pass it on to the hardware. | ||
73 | */ | ||
74 | |||
75 | if (list_empty(head)) | ||
76 | return; | ||
77 | |||
78 | bf = list_first_entry(head, struct ath_buf, list); | ||
79 | |||
80 | list_splice_tail_init(head, &txq->axq_q); | ||
81 | txq->axq_depth++; | ||
82 | txq->axq_totalqueued++; | ||
83 | txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list); | ||
84 | |||
85 | DPRINTF(sc, ATH_DBG_QUEUE, | ||
86 | "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth); | ||
87 | |||
88 | if (txq->axq_link == NULL) { | ||
89 | ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); | ||
90 | DPRINTF(sc, ATH_DBG_XMIT, | ||
91 | "TXDP[%u] = %llx (%p)\n", | ||
92 | txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc); | ||
93 | } else { | ||
94 | *txq->axq_link = bf->bf_daddr; | ||
95 | DPRINTF(sc, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n", | ||
96 | txq->axq_qnum, txq->axq_link, | ||
97 | ito64(bf->bf_daddr), bf->bf_desc); | ||
98 | } | ||
99 | txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link); | ||
100 | ath9k_hw_txstart(ah, txq->axq_qnum); | ||
101 | } | ||
102 | 67 | ||
103 | static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, | 68 | /*********************/ |
104 | struct ath_xmit_status *tx_status) | 69 | /* Aggregation logic */ |
105 | { | 70 | /*********************/ |
106 | struct ieee80211_hw *hw = sc->hw; | ||
107 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); | ||
108 | struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info); | ||
109 | int hdrlen, padsize; | ||
110 | |||
111 | DPRINTF(sc, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb); | ||
112 | |||
113 | if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK || | ||
114 | tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) { | ||
115 | kfree(tx_info_priv); | ||
116 | tx_info->rate_driver_data[0] = NULL; | ||
117 | } | ||
118 | |||
119 | if (tx_status->flags & ATH_TX_BAR) { | ||
120 | tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; | ||
121 | tx_status->flags &= ~ATH_TX_BAR; | ||
122 | } | ||
123 | |||
124 | if (!(tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) { | ||
125 | /* Frame was ACKed */ | ||
126 | tx_info->flags |= IEEE80211_TX_STAT_ACK; | ||
127 | } | ||
128 | |||
129 | tx_info->status.rates[0].count = tx_status->retries + 1; | ||
130 | |||
131 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | ||
132 | padsize = hdrlen & 3; | ||
133 | if (padsize && hdrlen >= 24) { | ||
134 | /* | ||
135 | * Remove MAC header padding before giving the frame back to | ||
136 | * mac80211. | ||
137 | */ | ||
138 | memmove(skb->data + padsize, skb->data, hdrlen); | ||
139 | skb_pull(skb, padsize); | ||
140 | } | ||
141 | |||
142 | ieee80211_tx_status(hw, skb); | ||
143 | } | ||
144 | |||
145 | /* Check if it's okay to send out aggregates */ | ||
146 | 71 | ||
147 | static int ath_aggr_query(struct ath_softc *sc, struct ath_node *an, u8 tidno) | 72 | static int ath_aggr_query(struct ath_softc *sc, struct ath_node *an, u8 tidno) |
148 | { | 73 | { |
@@ -156,230 +81,19 @@ static int ath_aggr_query(struct ath_softc *sc, struct ath_node *an, u8 tidno)
156 | return 0; | 81 | return 0; |
157 | } | 82 | } |
158 | 83 | ||
159 | static void ath_get_beaconconfig(struct ath_softc *sc, int if_id, | ||
160 | struct ath_beacon_config *conf) | ||
161 | { | ||
162 | struct ieee80211_hw *hw = sc->hw; | ||
163 | |||
164 | /* fill in beacon config data */ | ||
165 | |||
166 | conf->beacon_interval = hw->conf.beacon_int; | ||
167 | conf->listen_interval = 100; | ||
168 | conf->dtim_count = 1; | ||
169 | conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval; | ||
170 | } | ||
171 | |||
172 | /* Calculate Atheros packet type from IEEE80211 packet header */ | ||
173 | |||
174 | static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb) | ||
175 | { | ||
176 | struct ieee80211_hdr *hdr; | ||
177 | enum ath9k_pkt_type htype; | ||
178 | __le16 fc; | ||
179 | |||
180 | hdr = (struct ieee80211_hdr *)skb->data; | ||
181 | fc = hdr->frame_control; | ||
182 | |||
183 | if (ieee80211_is_beacon(fc)) | ||
184 | htype = ATH9K_PKT_TYPE_BEACON; | ||
185 | else if (ieee80211_is_probe_resp(fc)) | ||
186 | htype = ATH9K_PKT_TYPE_PROBE_RESP; | ||
187 | else if (ieee80211_is_atim(fc)) | ||
188 | htype = ATH9K_PKT_TYPE_ATIM; | ||
189 | else if (ieee80211_is_pspoll(fc)) | ||
190 | htype = ATH9K_PKT_TYPE_PSPOLL; | ||
191 | else | ||
192 | htype = ATH9K_PKT_TYPE_NORMAL; | ||
193 | |||
194 | return htype; | ||
195 | } | ||
196 | |||
197 | static bool is_pae(struct sk_buff *skb) | ||
198 | { | ||
199 | struct ieee80211_hdr *hdr; | ||
200 | __le16 fc; | ||
201 | |||
202 | hdr = (struct ieee80211_hdr *)skb->data; | ||
203 | fc = hdr->frame_control; | ||
204 | |||
205 | if (ieee80211_is_data(fc)) { | ||
206 | if (ieee80211_is_nullfunc(fc) || | ||
207 | /* Port Access Entity (IEEE 802.1X) */ | ||
208 | (skb->protocol == cpu_to_be16(ETH_P_PAE))) { | ||
209 | return true; | ||
210 | } | ||
211 | } | ||
212 | |||
213 | return false; | ||
214 | } | ||
215 | |||
216 | static int get_hw_crypto_keytype(struct sk_buff *skb) | ||
217 | { | ||
218 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); | ||
219 | |||
220 | if (tx_info->control.hw_key) { | ||
221 | if (tx_info->control.hw_key->alg == ALG_WEP) | ||
222 | return ATH9K_KEY_TYPE_WEP; | ||
223 | else if (tx_info->control.hw_key->alg == ALG_TKIP) | ||
224 | return ATH9K_KEY_TYPE_TKIP; | ||
225 | else if (tx_info->control.hw_key->alg == ALG_CCMP) | ||
226 | return ATH9K_KEY_TYPE_AES; | ||
227 | } | ||
228 | |||
229 | return ATH9K_KEY_TYPE_CLEAR; | ||
230 | } | ||
231 | |||
232 | /* Called only when tx aggregation is enabled and HT is supported */ | ||
233 | |||
234 | static void assign_aggr_tid_seqno(struct sk_buff *skb, | ||
235 | struct ath_buf *bf) | ||
236 | { | ||
237 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); | ||
238 | struct ieee80211_hdr *hdr; | ||
239 | struct ath_node *an; | ||
240 | struct ath_atx_tid *tid; | ||
241 | __le16 fc; | ||
242 | u8 *qc; | ||
243 | |||
244 | if (!tx_info->control.sta) | ||
245 | return; | ||
246 | |||
247 | an = (struct ath_node *)tx_info->control.sta->drv_priv; | ||
248 | hdr = (struct ieee80211_hdr *)skb->data; | ||
249 | fc = hdr->frame_control; | ||
250 | |||
251 | /* Get tidno */ | ||
252 | |||
253 | if (ieee80211_is_data_qos(fc)) { | ||
254 | qc = ieee80211_get_qos_ctl(hdr); | ||
255 | bf->bf_tidno = qc[0] & 0xf; | ||
256 | } | ||
257 | |||
258 | /* Get seqno */ | ||
259 | /* For HT capable stations, we save tidno for later use. | ||
260 | * We also override seqno set by upper layer with the one | ||
261 | * in tx aggregation state. | ||
262 | * | ||
263 | * If fragmentation is on, the sequence number is | ||
264 | * not overridden, since it has been | ||
265 | * incremented by the fragmentation routine. | ||
266 | * | ||
267 | * FIXME: check if the fragmentation threshold exceeds | ||
268 | * IEEE80211 max. | ||
269 | */ | ||
270 | tid = ATH_AN_2_TID(an, bf->bf_tidno); | ||
271 | hdr->seq_ctrl = cpu_to_le16(tid->seq_next << | ||
272 | IEEE80211_SEQ_SEQ_SHIFT); | ||
273 | bf->bf_seqno = tid->seq_next; | ||
274 | INCR(tid->seq_next, IEEE80211_SEQ_MAX); | ||
275 | } | ||
276 | |||
277 | static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb, | ||
278 | struct ath_txq *txq) | ||
279 | { | ||
280 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); | ||
281 | int flags = 0; | ||
282 | |||
283 | flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */ | ||
284 | flags |= ATH9K_TXDESC_INTREQ; | ||
285 | |||
286 | if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) | ||
287 | flags |= ATH9K_TXDESC_NOACK; | ||
288 | if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) | ||
289 | flags |= ATH9K_TXDESC_RTSENA; | ||
290 | |||
291 | return flags; | ||
292 | } | ||
293 | |||
294 | static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc) | ||
295 | { | ||
296 | struct ath_buf *bf = NULL; | ||
297 | |||
298 | spin_lock_bh(&sc->tx.txbuflock); | ||
299 | |||
300 | if (unlikely(list_empty(&sc->tx.txbuf))) { | ||
301 | spin_unlock_bh(&sc->tx.txbuflock); | ||
302 | return NULL; | ||
303 | } | ||
304 | |||
305 | bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list); | ||
306 | list_del(&bf->list); | ||
307 | |||
308 | spin_unlock_bh(&sc->tx.txbuflock); | ||
309 | |||
310 | return bf; | ||
311 | } | ||
312 | |||
313 | /* To complete a chain of buffers associated a frame */ | ||
314 | |||
315 | static void ath_tx_complete_buf(struct ath_softc *sc, | ||
316 | struct ath_buf *bf, | ||
317 | struct list_head *bf_q, | ||
318 | int txok, int sendbar) | ||
319 | { | ||
320 | struct sk_buff *skb = bf->bf_mpdu; | ||
321 | struct ath_xmit_status tx_status; | ||
322 | unsigned long flags; | ||
323 | |||
324 | /* | ||
325 | * Set retry information. | ||
326 | * NB: Don't use the information in the descriptor, because the frame | ||
327 | * could be software retried. | ||
328 | */ | ||
329 | tx_status.retries = bf->bf_retries; | ||
330 | tx_status.flags = 0; | ||
331 | |||
332 | if (sendbar) | ||
333 | tx_status.flags = ATH_TX_BAR; | ||
334 | |||
335 | if (!txok) { | ||
336 | tx_status.flags |= ATH_TX_ERROR; | ||
337 | |||
338 | if (bf_isxretried(bf)) | ||
339 | tx_status.flags |= ATH_TX_XRETRY; | ||
340 | } | ||
341 | |||
342 | /* Unmap this frame */ | ||
343 | dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE); | ||
344 | |||
345 | /* complete this frame */ | ||
346 | ath_tx_complete(sc, skb, &tx_status); | ||
347 | |||
348 | /* | ||
349 | * Return the list of ath_buf of this mpdu to free queue | ||
350 | */ | ||
351 | spin_lock_irqsave(&sc->tx.txbuflock, flags); | ||
352 | list_splice_tail_init(bf_q, &sc->tx.txbuf); | ||
353 | spin_unlock_irqrestore(&sc->tx.txbuflock, flags); | ||
354 | } | ||
355 | |||
356 | /* | ||
357 | * queue up a dest/ac pair for tx scheduling | ||
358 | * NB: must be called with txq lock held | ||
359 | */ | ||
360 | |||
361 | static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid) | 84 | static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid) |
362 | { | 85 | { |
363 | struct ath_atx_ac *ac = tid->ac; | 86 | struct ath_atx_ac *ac = tid->ac; |
364 | 87 | ||
365 | /* | ||
366 | * if tid is paused, hold off | ||
367 | */ | ||
368 | if (tid->paused) | 88 | if (tid->paused) |
369 | return; | 89 | return; |
370 | 90 | ||
371 | /* | ||
372 | * add tid to ac atmost once | ||
373 | */ | ||
374 | if (tid->sched) | 91 | if (tid->sched) |
375 | return; | 92 | return; |
376 | 93 | ||
377 | tid->sched = true; | 94 | tid->sched = true; |
378 | list_add_tail(&tid->list, &ac->tid_q); | 95 | list_add_tail(&tid->list, &ac->tid_q); |
379 | 96 | ||
380 | /* | ||
381 | * add node ac to txq atmost once | ||
382 | */ | ||
383 | if (ac->sched) | 97 | if (ac->sched) |
384 | return; | 98 | return; |
385 | 99 | ||
@@ -387,22 +101,16 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
387 | list_add_tail(&ac->list, &txq->axq_acq); | 101 | list_add_tail(&ac->list, &txq->axq_acq); |
388 | } | 102 | } |
389 | 103 | ||
390 | /* pause a tid */ | ||
391 | |||
392 | static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid) | 104 | static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid) |
393 | { | 105 | { |
394 | struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; | 106 | struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; |
395 | 107 | ||
396 | spin_lock_bh(&txq->axq_lock); | 108 | spin_lock_bh(&txq->axq_lock); |
397 | |||
398 | tid->paused++; | 109 | tid->paused++; |
399 | |||
400 | spin_unlock_bh(&txq->axq_lock); | 110 | spin_unlock_bh(&txq->axq_lock); |
401 | } | 111 | } |
402 | 112 | ||
403 | /* resume a tid and schedule aggregate */ | 113 | static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) |
404 | |||
405 | void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) | ||
406 | { | 114 | { |
407 | struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; | 115 | struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; |
408 | 116 | ||
@@ -417,63 +125,39 @@ void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
417 | if (list_empty(&tid->buf_q)) | 125 | if (list_empty(&tid->buf_q)) |
418 | goto unlock; | 126 | goto unlock; |
419 | 127 | ||
420 | /* | ||
421 | * Add this TID to scheduler and try to send out aggregates | ||
422 | */ | ||
423 | ath_tx_queue_tid(txq, tid); | 128 | ath_tx_queue_tid(txq, tid); |
424 | ath_txq_schedule(sc, txq); | 129 | ath_txq_schedule(sc, txq); |
425 | unlock: | 130 | unlock: |
426 | spin_unlock_bh(&txq->axq_lock); | 131 | spin_unlock_bh(&txq->axq_lock); |
427 | } | 132 | } |
428 | 133 | ||
429 | /* Compute the number of bad frames */ | 134 | static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) |
430 | |||
431 | static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf, | ||
432 | int txok) | ||
433 | { | 135 | { |
434 | struct ath_buf *bf_last = bf->bf_lastbf; | 136 | struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; |
435 | struct ath_desc *ds = bf_last->bf_desc; | 137 | struct ath_buf *bf; |
436 | u16 seq_st = 0; | 138 | struct list_head bf_head; |
437 | u32 ba[WME_BA_BMP_SIZE >> 5]; | 139 | INIT_LIST_HEAD(&bf_head); |
438 | int ba_index; | ||
439 | int nbad = 0; | ||
440 | int isaggr = 0; | ||
441 | |||
442 | if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED) | ||
443 | return 0; | ||
444 | 140 | ||
445 | isaggr = bf_isaggr(bf); | 141 | ASSERT(tid->paused > 0); |
446 | if (isaggr) { | 142 | spin_lock_bh(&txq->axq_lock); |
447 | seq_st = ATH_DS_BA_SEQ(ds); | ||
448 | memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3); | ||
449 | } | ||
450 | 143 | ||
451 | while (bf) { | 144 | tid->paused--; |
452 | ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno); | ||
453 | if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index))) | ||
454 | nbad++; | ||
455 | 145 | ||
456 | bf = bf->bf_next; | 146 | if (tid->paused > 0) { |
147 | spin_unlock_bh(&txq->axq_lock); | ||
148 | return; | ||
457 | } | 149 | } |
458 | 150 | ||
459 | return nbad; | 151 | while (!list_empty(&tid->buf_q)) { |
460 | } | 152 | bf = list_first_entry(&tid->buf_q, struct ath_buf, list); |
461 | 153 | ASSERT(!bf_isretried(bf)); | |
462 | static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf) | 154 | list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list); |
463 | { | 155 | ath_tx_send_normal(sc, txq, tid, &bf_head); |
464 | struct sk_buff *skb; | 156 | } |
465 | struct ieee80211_hdr *hdr; | ||
466 | |||
467 | bf->bf_state.bf_type |= BUF_RETRY; | ||
468 | bf->bf_retries++; | ||
469 | 157 | ||
470 | skb = bf->bf_mpdu; | 158 | spin_unlock_bh(&txq->axq_lock); |
471 | hdr = (struct ieee80211_hdr *)skb->data; | ||
472 | hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY); | ||
473 | } | 159 | } |
474 | 160 | ||
475 | /* Update block ack window */ | ||
476 | |||
477 | static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, | 161 | static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, |
478 | int seqno) | 162 | int seqno) |
479 | { | 163 | { |
@@ -490,247 +174,75 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
490 | } | 174 | } |
491 | } | 175 | } |
492 | 176 | ||
493 | /* | 177 | static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid, |
494 | * ath_pkt_dur - compute packet duration (NB: not NAV) | 178 | struct ath_buf *bf) |
495 | * | ||
496 | * rix - rate index | ||
497 | * pktlen - total bytes (delims + data + fcs + pads + pad delims) | ||
498 | * width - 0 for 20 MHz, 1 for 40 MHz | ||
499 | * half_gi - to use 4us v/s 3.6 us for symbol time | ||
500 | */ | ||
501 | static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf, | ||
502 | int width, int half_gi, bool shortPreamble) | ||
503 | { | ||
504 | struct ath_rate_table *rate_table = sc->cur_rate_table; | ||
505 | u32 nbits, nsymbits, duration, nsymbols; | ||
506 | u8 rc; | ||
507 | int streams, pktlen; | ||
508 | |||
509 | pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen; | ||
510 | rc = rate_table->info[rix].ratecode; | ||
511 | |||
512 | /* for legacy rates, use old function to compute packet duration */ | ||
513 | if (!IS_HT_RATE(rc)) | ||
514 | return ath9k_hw_computetxtime(sc->sc_ah, rate_table, pktlen, | ||
515 | rix, shortPreamble); | ||
516 | |||
517 | /* find number of symbols: PLCP + data */ | ||
518 | nbits = (pktlen << 3) + OFDM_PLCP_BITS; | ||
519 | nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width]; | ||
520 | nsymbols = (nbits + nsymbits - 1) / nsymbits; | ||
521 | |||
522 | if (!half_gi) | ||
523 | duration = SYMBOL_TIME(nsymbols); | ||
524 | else | ||
525 | duration = SYMBOL_TIME_HALFGI(nsymbols); | ||
526 | |||
527 | /* addup duration for legacy/ht training and signal fields */ | ||
528 | streams = HT_RC_2_STREAMS(rc); | ||
529 | duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams); | ||
530 | |||
531 | return duration; | ||
532 | } | ||
533 | |||
534 | /* Rate module function to set rate related fields in tx descriptor */ | ||
535 | |||
536 | static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf) | ||
537 | { | 179 | { |
538 | struct ath_hal *ah = sc->sc_ah; | 180 | int index, cindex; |
539 | struct ath_rate_table *rt; | ||
540 | struct ath_desc *ds = bf->bf_desc; | ||
541 | struct ath_desc *lastds = bf->bf_lastbf->bf_desc; | ||
542 | struct ath9k_11n_rate_series series[4]; | ||
543 | struct sk_buff *skb; | ||
544 | struct ieee80211_tx_info *tx_info; | ||
545 | struct ieee80211_tx_rate *rates; | ||
546 | struct ieee80211_hdr *hdr; | ||
547 | struct ieee80211_hw *hw = sc->hw; | ||
548 | int i, flags, rtsctsena = 0, enable_g_protection = 0; | ||
549 | u32 ctsduration = 0; | ||
550 | u8 rix = 0, cix, ctsrate = 0; | ||
551 | __le16 fc; | ||
552 | |||
553 | memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4); | ||
554 | |||
555 | skb = (struct sk_buff *)bf->bf_mpdu; | ||
556 | hdr = (struct ieee80211_hdr *)skb->data; | ||
557 | fc = hdr->frame_control; | ||
558 | tx_info = IEEE80211_SKB_CB(skb); | ||
559 | rates = tx_info->control.rates; | ||
560 | |||
561 | if (ieee80211_has_morefrags(fc) || | ||
562 | (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) { | ||
563 | rates[1].count = rates[2].count = rates[3].count = 0; | ||
564 | rates[1].idx = rates[2].idx = rates[3].idx = 0; | ||
565 | rates[0].count = ATH_TXMAXTRY; | ||
566 | } | ||
567 | |||
568 | /* get the cix for the lowest valid rix */ | ||
569 | rt = sc->cur_rate_table; | ||
570 | for (i = 3; i >= 0; i--) { | ||
571 | if (rates[i].count && (rates[i].idx >= 0)) { | ||
572 | rix = rates[i].idx; | ||
573 | break; | ||
574 | } | ||
575 | } | ||
576 | |||
577 | flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)); | ||
578 | cix = rt->info[rix].ctrl_rate; | ||
579 | |||
580 | /* All protection frames are transmited at 2Mb/s for 802.11g, | ||
581 | * otherwise we transmit them at 1Mb/s */ | ||
582 | if (hw->conf.channel->band == IEEE80211_BAND_2GHZ && | ||
583 | !conf_is_ht(&hw->conf)) | ||
584 | enable_g_protection = 1; | ||
585 | |||
586 | /* | ||
587 | * If 802.11g protection is enabled, determine whether to use RTS/CTS or | ||
588 | * just CTS. Note that this is only done for OFDM/HT unicast frames. | ||
589 | */ | ||
590 | if (sc->sc_protmode != PROT_M_NONE && !(bf->bf_flags & ATH9K_TXDESC_NOACK) | ||
591 | && (rt->info[rix].phy == WLAN_RC_PHY_OFDM || | ||
592 | WLAN_RC_PHY_HT(rt->info[rix].phy))) { | ||
593 | if (sc->sc_protmode == PROT_M_RTSCTS) | ||
594 | flags = ATH9K_TXDESC_RTSENA; | ||
595 | else if (sc->sc_protmode == PROT_M_CTSONLY) | ||
596 | flags = ATH9K_TXDESC_CTSENA; | ||
597 | |||
598 | cix = rt->info[enable_g_protection].ctrl_rate; | ||
599 | rtsctsena = 1; | ||
600 | } | ||
601 | |||
602 | /* For 11n, the default behavior is to enable RTS for hw retried frames. | ||
603 | * We enable the global flag here and let rate series flags determine | ||
604 | * which rates will actually use RTS. | ||
605 | */ | ||
606 | if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) { | ||
607 | /* 802.11g protection not needed, use our default behavior */ | ||
608 | if (!rtsctsena) | ||
609 | flags = ATH9K_TXDESC_RTSENA; | ||
610 | } | ||
611 | |||
612 | /* Set protection if aggregate protection on */ | ||
613 | if (sc->sc_config.ath_aggr_prot && | ||
614 | (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) { | ||
615 | flags = ATH9K_TXDESC_RTSENA; | ||
616 | cix = rt->info[enable_g_protection].ctrl_rate; | ||
617 | rtsctsena = 1; | ||
618 | } | ||
619 | |||
620 | /* For AR5416 - RTS cannot be followed by a frame larger than 8K */ | ||
621 | if (bf_isaggr(bf) && (bf->bf_al > ah->ah_caps.rts_aggr_limit)) | ||
622 | flags &= ~(ATH9K_TXDESC_RTSENA); | ||
623 | |||
624 | /* | ||
625 | * CTS transmit rate is derived from the transmit rate by looking in the | ||
626 | * h/w rate table. We must also factor in whether or not a short | ||
627 | * preamble is to be used. NB: cix is set above where RTS/CTS is enabled | ||
628 | */ | ||
629 | ctsrate = rt->info[cix].ratecode | | ||
630 | (bf_isshpreamble(bf) ? rt->info[cix].short_preamble : 0); | ||
631 | |||
632 | for (i = 0; i < 4; i++) { | ||
633 | if (!rates[i].count || (rates[i].idx < 0)) | ||
634 | continue; | ||
635 | |||
636 | rix = rates[i].idx; | ||
637 | |||
638 | series[i].Rate = rt->info[rix].ratecode | | ||
639 | (bf_isshpreamble(bf) ? rt->info[rix].short_preamble : 0); | ||
640 | |||
641 | series[i].Tries = rates[i].count; | ||
642 | 181 | ||
643 | series[i].RateFlags = ( | 182 | if (bf_isretried(bf)) |
644 | (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) ? | 183 | return; |
645 | ATH9K_RATESERIES_RTS_CTS : 0) | | ||
646 | ((rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? | ||
647 | ATH9K_RATESERIES_2040 : 0) | | ||
648 | ((rates[i].flags & IEEE80211_TX_RC_SHORT_GI) ? | ||
649 | ATH9K_RATESERIES_HALFGI : 0); | ||
650 | 184 | ||
651 | series[i].PktDuration = ath_pkt_duration(sc, rix, bf, | 185 | index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno); |
652 | (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) != 0, | 186 | cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); |
653 | (rates[i].flags & IEEE80211_TX_RC_SHORT_GI), | ||
654 | bf_isshpreamble(bf)); | ||
655 | 187 | ||
656 | series[i].ChSel = sc->sc_tx_chainmask; | 188 | ASSERT(tid->tx_buf[cindex] == NULL); |
189 | tid->tx_buf[cindex] = bf; | ||
657 | 190 | ||
658 | if (rtsctsena) | 191 | if (index >= ((tid->baw_tail - tid->baw_head) & |
659 | series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; | 192 | (ATH_TID_MAX_BUFS - 1))) { |
193 | tid->baw_tail = cindex; | ||
194 | INCR(tid->baw_tail, ATH_TID_MAX_BUFS); | ||
660 | } | 195 | } |
661 | |||
662 | /* set dur_update_en for l-sig computation except for PS-Poll frames */ | ||
663 | ath9k_hw_set11n_ratescenario(ah, ds, lastds, !bf_ispspoll(bf), | ||
664 | ctsrate, ctsduration, | ||
665 | series, 4, flags); | ||
666 | |||
667 | if (sc->sc_config.ath_aggr_prot && flags) | ||
668 | ath9k_hw_set11n_burstduration(ah, ds, 8192); | ||
669 | } | 196 | } |
670 | 197 | ||
671 | /* | 198 | /* |
672 | * Function to send a normal HT (non-AMPDU) frame | 199 | * TODO: For frame(s) that are in the retry state, we will reuse the |
673 | * NB: must be called with txq lock held | 200 | * sequence number(s) without setting the retry bit. The |
201 | * alternative is to give up on these and BAR the receiver's window | ||
202 | * forward. | ||
674 | */ | 203 | */ |
675 | static int ath_tx_send_normal(struct ath_softc *sc, | 204 | static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq, |
676 | struct ath_txq *txq, | 205 | struct ath_atx_tid *tid) |
677 | struct ath_atx_tid *tid, | ||
678 | struct list_head *bf_head) | ||
679 | { | ||
680 | struct ath_buf *bf; | ||
681 | |||
682 | BUG_ON(list_empty(bf_head)); | ||
683 | |||
684 | bf = list_first_entry(bf_head, struct ath_buf, list); | ||
685 | bf->bf_state.bf_type &= ~BUF_AMPDU; /* regular HT frame */ | ||
686 | |||
687 | /* update starting sequence number for subsequent ADDBA request */ | ||
688 | INCR(tid->seq_start, IEEE80211_SEQ_MAX); | ||
689 | |||
690 | /* Queue to h/w without aggregation */ | ||
691 | bf->bf_nframes = 1; | ||
692 | bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */ | ||
693 | ath_buf_set_rate(sc, bf); | ||
694 | ath_tx_txqaddbuf(sc, txq, bf_head); | ||
695 | |||
696 | return 0; | ||
697 | } | ||
698 | |||
699 | /* flush tid's software queue and send frames as non-ampdu's */ | ||
700 | 206 | ||
701 | static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) | ||
702 | { | 207 | { |
703 | struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; | ||
704 | struct ath_buf *bf; | 208 | struct ath_buf *bf; |
705 | struct list_head bf_head; | 209 | struct list_head bf_head; |
706 | INIT_LIST_HEAD(&bf_head); | 210 | INIT_LIST_HEAD(&bf_head); |
707 | 211 | ||
708 | ASSERT(tid->paused > 0); | 212 | for (;;) { |
709 | spin_lock_bh(&txq->axq_lock); | 213 | if (list_empty(&tid->buf_q)) |
214 | break; | ||
215 | bf = list_first_entry(&tid->buf_q, struct ath_buf, list); | ||
710 | 216 | ||
711 | tid->paused--; | 217 | list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list); |
712 | 218 | ||
713 | if (tid->paused > 0) { | 219 | if (bf_isretried(bf)) |
714 | spin_unlock_bh(&txq->axq_lock); | 220 | ath_tx_update_baw(sc, tid, bf->bf_seqno); |
715 | return; | ||
716 | } | ||
717 | 221 | ||
718 | while (!list_empty(&tid->buf_q)) { | 222 | spin_unlock(&txq->axq_lock); |
719 | bf = list_first_entry(&tid->buf_q, struct ath_buf, list); | 223 | ath_tx_complete_buf(sc, bf, &bf_head, 0, 0); |
720 | ASSERT(!bf_isretried(bf)); | 224 | spin_lock(&txq->axq_lock); |
721 | list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list); | ||
722 | ath_tx_send_normal(sc, txq, tid, &bf_head); | ||
723 | } | 225 | } |
724 | 226 | ||
725 | spin_unlock_bh(&txq->axq_lock); | 227 | tid->seq_next = tid->seq_start; |
228 | tid->baw_tail = tid->baw_head; | ||
726 | } | 229 | } |
727 | 230 | ||
728 | /* Completion routine of an aggregate */ | 231 | static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf) |
232 | { | ||
233 | struct sk_buff *skb; | ||
234 | struct ieee80211_hdr *hdr; | ||
235 | |||
236 | bf->bf_state.bf_type |= BUF_RETRY; | ||
237 | bf->bf_retries++; | ||
238 | |||
239 | skb = bf->bf_mpdu; | ||
240 | hdr = (struct ieee80211_hdr *)skb->data; | ||
241 | hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY); | ||
242 | } | ||
729 | 243 | ||
730 | static void ath_tx_complete_aggr_rifs(struct ath_softc *sc, | 244 | static void ath_tx_complete_aggr_rifs(struct ath_softc *sc, struct ath_txq *txq, |
731 | struct ath_txq *txq, | 245 | struct ath_buf *bf, struct list_head *bf_q, |
732 | struct ath_buf *bf, | ||
733 | struct list_head *bf_q, | ||
734 | int txok) | 246 | int txok) |
735 | { | 247 | { |
736 | struct ath_node *an = NULL; | 248 | struct ath_node *an = NULL; |
@@ -757,14 +269,9 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
757 | if (isaggr) { | 269 | if (isaggr) { |
758 | if (txok) { | 270 | if (txok) { |
759 | if (ATH_DS_TX_BA(ds)) { | 271 | if (ATH_DS_TX_BA(ds)) { |
760 | /* | ||
761 | * extract starting sequence and | ||
762 | * block-ack bitmap | ||
763 | */ | ||
764 | seq_st = ATH_DS_BA_SEQ(ds); | 272 | seq_st = ATH_DS_BA_SEQ(ds); |
765 | memcpy(ba, | 273 | memcpy(ba, ATH_DS_BA_BITMAP(ds), |
766 | ATH_DS_BA_BITMAP(ds), | 274 | WME_BA_BMP_SIZE >> 3); |
767 | WME_BA_BMP_SIZE >> 3); | ||
768 | } else { | 275 | } else { |
769 | memset(ba, 0, WME_BA_BMP_SIZE >> 3); | 276 | memset(ba, 0, WME_BA_BMP_SIZE >> 3); |
770 | 277 | ||
@@ -816,31 +323,15 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
816 | txfail = 1; | 323 | txfail = 1; |
817 | } | 324 | } |
818 | } | 325 | } |
819 | /* | ||
820 | * Remove ath_buf's of this sub-frame from aggregate queue. | ||
821 | */ | ||
822 | if (bf_next == NULL) { /* last subframe in the aggregate */ | ||
823 | ASSERT(bf->bf_lastfrm == bf_last); | ||
824 | |||
825 | /* | ||
826 | * The last descriptor of the last sub frame could be | ||
827 | * a holding descriptor for h/w. If that's the case, | ||
828 | * bf->bf_lastfrm won't be in the bf_q. | ||
829 | * Make sure we handle bf_q properly here. | ||
830 | */ | ||
831 | 326 | ||
327 | if (bf_next == NULL) { | ||
328 | ASSERT(bf->bf_lastfrm == bf_last); | ||
832 | if (!list_empty(bf_q)) { | 329 | if (!list_empty(bf_q)) { |
833 | bf_lastq = list_entry(bf_q->prev, | 330 | bf_lastq = list_entry(bf_q->prev, |
834 | struct ath_buf, list); | 331 | struct ath_buf, list); |
835 | list_cut_position(&bf_head, | 332 | list_cut_position(&bf_head, |
836 | bf_q, &bf_lastq->list); | 333 | bf_q, &bf_lastq->list); |
837 | } else { | 334 | } else { |
838 | /* | ||
839 | * XXX: if the last subframe only has one | ||
840 | * descriptor which is also being used as | ||
841 | * a holding descriptor. Then the ath_buf | ||
842 | * is not in the bf_q at all. | ||
843 | */ | ||
844 | INIT_LIST_HEAD(&bf_head); | 335 | INIT_LIST_HEAD(&bf_head); |
845 | } | 336 | } |
846 | } else { | 337 | } else { |
@@ -858,18 +349,11 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
858 | ath_tx_update_baw(sc, tid, bf->bf_seqno); | 349 | ath_tx_update_baw(sc, tid, bf->bf_seqno); |
859 | spin_unlock_bh(&txq->axq_lock); | 350 | spin_unlock_bh(&txq->axq_lock); |
860 | 351 | ||
861 | /* complete this sub-frame */ | ||
862 | ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar); | 352 | ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar); |
863 | } else { | 353 | } else { |
864 | /* | 354 | /* |
865 | * retry the un-acked ones | 355 | * retry the un-acked ones |
866 | */ | 356 | */ |
867 | /* | ||
868 | * XXX: if the last descriptor is holding descriptor, | ||
869 | * in order to requeue the frame to software queue, we | ||
870 | * need to allocate a new descriptor and | ||
871 | * copy the content of holding descriptor to it. | ||
872 | */ | ||
873 | if (bf->bf_next == NULL && | 357 | if (bf->bf_next == NULL && |
874 | bf_last->bf_status & ATH_BUFSTATUS_STALE) { | 358 | bf_last->bf_status & ATH_BUFSTATUS_STALE) { |
875 | struct ath_buf *tbf; | 359 | struct ath_buf *tbf; |
@@ -950,8 +434,6 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
950 | */ | 434 | */ |
951 | if (!list_empty(&bf_pending)) { | 435 | if (!list_empty(&bf_pending)) { |
952 | spin_lock_bh(&txq->axq_lock); | 436 | spin_lock_bh(&txq->axq_lock); |
953 | /* Note: we _prepend_, we _do_not_ at to | ||
954 | * the end of the queue ! */ | ||
955 | list_splice(&bf_pending, &tid->buf_q); | 437 | list_splice(&bf_pending, &tid->buf_q); |
956 | ath_tx_queue_tid(txq, tid); | 438 | ath_tx_queue_tid(txq, tid); |
957 | spin_unlock_bh(&txq->axq_lock); | 439 | spin_unlock_bh(&txq->axq_lock); |
@@ -963,292 +445,7 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
963 | return; | 445 | return; |
964 | } | 446 | } |
965 | 447 | ||
966 | static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, int nbad) | 448 | static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, |
967 | { | ||
968 | struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu; | ||
969 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); | ||
970 | struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info); | ||
971 | |||
972 | tx_info_priv->update_rc = false; | ||
973 | if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) | ||
974 | tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; | ||
975 | |||
976 | if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 && | ||
977 | (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { | ||
978 | if (bf_isdata(bf)) { | ||
979 | memcpy(&tx_info_priv->tx, &ds->ds_txstat, | ||
980 | sizeof(tx_info_priv->tx)); | ||
981 | tx_info_priv->n_frames = bf->bf_nframes; | ||
982 | tx_info_priv->n_bad_frames = nbad; | ||
983 | tx_info_priv->update_rc = true; | ||
984 | } | ||
985 | } | ||
986 | } | ||
987 | |||
988 | /* Process completed xmit descriptors from the specified queue */ | ||
989 | |||
990 | static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) | ||
991 | { | ||
992 | struct ath_hal *ah = sc->sc_ah; | ||
993 | struct ath_buf *bf, *lastbf, *bf_held = NULL; | ||
994 | struct list_head bf_head; | ||
995 | struct ath_desc *ds; | ||
996 | int txok, nbad = 0; | ||
997 | int status; | ||
998 | |||
999 | DPRINTF(sc, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n", | ||
1000 | txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum), | ||
1001 | txq->axq_link); | ||
1002 | |||
1003 | for (;;) { | ||
1004 | spin_lock_bh(&txq->axq_lock); | ||
1005 | if (list_empty(&txq->axq_q)) { | ||
1006 | txq->axq_link = NULL; | ||
1007 | txq->axq_linkbuf = NULL; | ||
1008 | spin_unlock_bh(&txq->axq_lock); | ||
1009 | break; | ||
1010 | } | ||
1011 | bf = list_first_entry(&txq->axq_q, struct ath_buf, list); | ||
1012 | |||
1013 | /* | ||
1014 | * There is a race condition that a BH gets scheduled | ||
1015 | * after sw writes TxE and before hw re-load the last | ||
1016 | * descriptor to get the newly chained one. | ||
1017 | * Software must keep the last DONE descriptor as a | ||
1018 | * holding descriptor - software does so by marking | ||
1019 | * it with the STALE flag. | ||
1020 | */ | ||
1021 | bf_held = NULL; | ||
1022 | if (bf->bf_status & ATH_BUFSTATUS_STALE) { | ||
1023 | bf_held = bf; | ||
1024 | if (list_is_last(&bf_held->list, &txq->axq_q)) { | ||
1025 | /* FIXME: | ||
1026 | * The holding descriptor is the last | ||
1027 | * descriptor in queue. It's safe to remove | ||
1028 | * the last holding descriptor in BH context. | ||
1029 | */ | ||
1030 | spin_unlock_bh(&txq->axq_lock); | ||
1031 | break; | ||
1032 | } else { | ||
1033 | /* Lets work with the next buffer now */ | ||
1034 | bf = list_entry(bf_held->list.next, | ||
1035 | struct ath_buf, list); | ||
1036 | } | ||
1037 | } | ||
1038 | |||
1039 | lastbf = bf->bf_lastbf; | ||
1040 | ds = lastbf->bf_desc; /* NB: last decriptor */ | ||
1041 | |||
1042 | status = ath9k_hw_txprocdesc(ah, ds); | ||
1043 | if (status == -EINPROGRESS) { | ||
1044 | spin_unlock_bh(&txq->axq_lock); | ||
1045 | break; | ||
1046 | } | ||
1047 | if (bf->bf_desc == txq->axq_lastdsWithCTS) | ||
1048 | txq->axq_lastdsWithCTS = NULL; | ||
1049 | if (ds == txq->axq_gatingds) | ||
1050 | txq->axq_gatingds = NULL; | ||
1051 | |||
1052 | /* | ||
1053 | * Remove ath_buf's of the same transmit unit from txq, | ||
1054 | * however leave the last descriptor back as the holding | ||
1055 | * descriptor for hw. | ||
1056 | */ | ||
1057 | lastbf->bf_status |= ATH_BUFSTATUS_STALE; | ||
1058 | INIT_LIST_HEAD(&bf_head); | ||
1059 | |||
1060 | if (!list_is_singular(&lastbf->list)) | ||
1061 | list_cut_position(&bf_head, | ||
1062 | &txq->axq_q, lastbf->list.prev); | ||
1063 | |||
1064 | txq->axq_depth--; | ||
1065 | |||
1066 | if (bf_isaggr(bf)) | ||
1067 | txq->axq_aggr_depth--; | ||
1068 | |||
1069 | txok = (ds->ds_txstat.ts_status == 0); | ||
1070 | |||
1071 | spin_unlock_bh(&txq->axq_lock); | ||
1072 | |||
1073 | if (bf_held) { | ||
1074 | list_del(&bf_held->list); | ||
1075 | spin_lock_bh(&sc->tx.txbuflock); | ||
1076 | list_add_tail(&bf_held->list, &sc->tx.txbuf); | ||
1077 | spin_unlock_bh(&sc->tx.txbuflock); | ||
1078 | } | ||
1079 | |||
1080 | if (!bf_isampdu(bf)) { | ||
1081 | /* | ||
1082 | * This frame is sent out as a single frame. | ||
1083 | * Use hardware retry status for this frame. | ||
1084 | */ | ||
1085 | bf->bf_retries = ds->ds_txstat.ts_longretry; | ||
1086 | if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) | ||
1087 | bf->bf_state.bf_type |= BUF_XRETRY; | ||
1088 | nbad = 0; | ||
1089 | } else { | ||
1090 | nbad = ath_tx_num_badfrms(sc, bf, txok); | ||
1091 | } | ||
1092 | |||
1093 | ath_tx_rc_status(bf, ds, nbad); | ||
1094 | |||
1095 | /* | ||
1096 | * Complete this transmit unit | ||
1097 | */ | ||
1098 | if (bf_isampdu(bf)) | ||
1099 | ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok); | ||
1100 | else | ||
1101 | ath_tx_complete_buf(sc, bf, &bf_head, txok, 0); | ||
1102 | |||
1103 | /* Wake up mac80211 queue */ | ||
1104 | |||
1105 | spin_lock_bh(&txq->axq_lock); | ||
1106 | if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <= | ||
1107 | (ATH_TXBUF - 20)) { | ||
1108 | int qnum; | ||
1109 | qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc); | ||
1110 | if (qnum != -1) { | ||
1111 | ieee80211_wake_queue(sc->hw, qnum); | ||
1112 | txq->stopped = 0; | ||
1113 | } | ||
1114 | |||
1115 | } | ||
1116 | |||
1117 | /* | ||
1118 | * schedule any pending packets if aggregation is enabled | ||
1119 | */ | ||
1120 | if (sc->sc_flags & SC_OP_TXAGGR) | ||
1121 | ath_txq_schedule(sc, txq); | ||
1122 | spin_unlock_bh(&txq->axq_lock); | ||
1123 | } | ||
1124 | } | ||
1125 | |||
1126 | static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) | ||
1127 | { | ||
1128 | struct ath_hal *ah = sc->sc_ah; | ||
1129 | |||
1130 | (void) ath9k_hw_stoptxdma(ah, txq->axq_qnum); | ||
1131 | DPRINTF(sc, ATH_DBG_XMIT, "tx queue [%u] %x, link %p\n", | ||
1132 | txq->axq_qnum, ath9k_hw_gettxbuf(ah, txq->axq_qnum), | ||
1133 | txq->axq_link); | ||
1134 | } | ||
1135 | |||
1136 | /* Drain only the data queues */ | ||
1137 | |||
1138 | static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx) | ||
1139 | { | ||
1140 | struct ath_hal *ah = sc->sc_ah; | ||
1141 | int i, npend = 0; | ||
1142 | |||
1143 | if (!(sc->sc_flags & SC_OP_INVALID)) { | ||
1144 | for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { | ||
1145 | if (ATH_TXQ_SETUP(sc, i)) { | ||
1146 | ath_tx_stopdma(sc, &sc->tx.txq[i]); | ||
1147 | /* The TxDMA may not really be stopped. | ||
1148 | * Double check the hal tx pending count */ | ||
1149 | npend += ath9k_hw_numtxpending(ah, | ||
1150 | sc->tx.txq[i].axq_qnum); | ||
1151 | } | ||
1152 | } | ||
1153 | } | ||
1154 | |||
1155 | if (npend) { | ||
1156 | int r; | ||
1157 | /* TxDMA not stopped, reset the hal */ | ||
1158 | DPRINTF(sc, ATH_DBG_XMIT, "Unable to stop TxDMA. Reset HAL!\n"); | ||
1159 | |||
1160 | spin_lock_bh(&sc->sc_resetlock); | ||
1161 | r = ath9k_hw_reset(ah, sc->sc_ah->ah_curchan, true); | ||
1162 | if (r) | ||
1163 | DPRINTF(sc, ATH_DBG_FATAL, | ||
1164 | "Unable to reset hardware; reset status %u\n", | ||
1165 | r); | ||
1166 | spin_unlock_bh(&sc->sc_resetlock); | ||
1167 | } | ||
1168 | |||
1169 | for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { | ||
1170 | if (ATH_TXQ_SETUP(sc, i)) | ||
1171 | ath_tx_draintxq(sc, &sc->tx.txq[i], retry_tx); | ||
1172 | } | ||
1173 | } | ||
1174 | |||
1175 | /* Add a sub-frame to block ack window */ | ||
1176 | |||
1177 | static void ath_tx_addto_baw(struct ath_softc *sc, | ||
1178 | struct ath_atx_tid *tid, | ||
1179 | struct ath_buf *bf) | ||
1180 | { | ||
1181 | int index, cindex; | ||
1182 | |||
1183 | if (bf_isretried(bf)) | ||
1184 | return; | ||
1185 | |||
1186 | index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno); | ||
1187 | cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); | ||
1188 | |||
1189 | ASSERT(tid->tx_buf[cindex] == NULL); | ||
1190 | tid->tx_buf[cindex] = bf; | ||
1191 | |||
1192 | if (index >= ((tid->baw_tail - tid->baw_head) & | ||
1193 | (ATH_TID_MAX_BUFS - 1))) { | ||
1194 | tid->baw_tail = cindex; | ||
1195 | INCR(tid->baw_tail, ATH_TID_MAX_BUFS); | ||
1196 | } | ||
1197 | } | ||
1198 | |||
1199 | /* | ||
1200 | * Function to send an A-MPDU | ||
1201 | * NB: must be called with txq lock held | ||
1202 | */ | ||
1203 | static int ath_tx_send_ampdu(struct ath_softc *sc, | ||
1204 | struct ath_atx_tid *tid, | ||
1205 | struct list_head *bf_head, | ||
1206 | struct ath_tx_control *txctl) | ||
1207 | { | ||
1208 | struct ath_buf *bf; | ||
1209 | |||
1210 | BUG_ON(list_empty(bf_head)); | ||
1211 | |||
1212 | bf = list_first_entry(bf_head, struct ath_buf, list); | ||
1213 | bf->bf_state.bf_type |= BUF_AMPDU; | ||
1214 | |||
1215 | /* | ||
1216 | * Do not queue to h/w when any of the following conditions is true: | ||
1217 | * - there are pending frames in software queue | ||
1218 | * - the TID is currently paused for ADDBA/BAR request | ||
1219 | * - seqno is not within block-ack window | ||
1220 | * - h/w queue depth exceeds low water mark | ||
1221 | */ | ||
1222 | if (!list_empty(&tid->buf_q) || tid->paused || | ||
1223 | !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) || | ||
1224 | txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) { | ||
1225 | /* | ||
1226 | * Add this frame to software queue for scheduling later | ||
1227 | * for aggregation. | ||
1228 | */ | ||
1229 | list_splice_tail_init(bf_head, &tid->buf_q); | ||
1230 | ath_tx_queue_tid(txctl->txq, tid); | ||
1231 | return 0; | ||
1232 | } | ||
1233 | |||
1234 | /* Add sub-frame to BAW */ | ||
1235 | ath_tx_addto_baw(sc, tid, bf); | ||
1236 | |||
1237 | /* Queue to h/w without aggregation */ | ||
1238 | bf->bf_nframes = 1; | ||
1239 | bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */ | ||
1240 | ath_buf_set_rate(sc, bf); | ||
1241 | ath_tx_txqaddbuf(sc, txctl->txq, bf_head); | ||
1242 | |||
1243 | return 0; | ||
1244 | } | ||
1245 | |||
1246 | /* | ||
1247 | * looks up the rate | ||
1248 | * returns aggr limit based on lowest of the rates | ||
1249 | */ | ||
1250 | static u32 ath_lookup_rate(struct ath_softc *sc, | ||
1251 | struct ath_buf *bf, | ||
1252 | struct ath_atx_tid *tid) | 449 | struct ath_atx_tid *tid) |
1253 | { | 450 | { |
1254 | struct ath_rate_table *rate_table = sc->cur_rate_table; | 451 | struct ath_rate_table *rate_table = sc->cur_rate_table; |
@@ -1314,10 +511,8 @@ static u32 ath_lookup_rate(struct ath_softc *sc,
1314 | * meet the minimum required mpdudensity. | 511 | * meet the minimum required mpdudensity. |
1315 | * caller should make sure that the rate is HT rate . | 512 | * caller should make sure that the rate is HT rate . |
1316 | */ | 513 | */ |
1317 | static int ath_compute_num_delims(struct ath_softc *sc, | 514 | static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid, |
1318 | struct ath_atx_tid *tid, | 515 | struct ath_buf *bf, u16 frmlen) |
1319 | struct ath_buf *bf, | ||
1320 | u16 frmlen) | ||
1321 | { | 516 | { |
1322 | struct ath_rate_table *rt = sc->cur_rate_table; | 517 | struct ath_rate_table *rt = sc->cur_rate_table; |
1323 | struct sk_buff *skb = bf->bf_mpdu; | 518 | struct sk_buff *skb = bf->bf_mpdu; |
@@ -1381,16 +576,10 @@ static int ath_compute_num_delims(struct ath_softc *sc,
1381 | return ndelim; | 576 | return ndelim; |
1382 | } | 577 | } |
1383 | 578 | ||
1384 | /* | ||
1385 | * For aggregation from software buffer queue. | ||
1386 | * NB: must be called with txq lock held | ||
1387 | */ | ||
1388 | static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, | 579 | static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, |
1389 | struct ath_atx_tid *tid, | 580 | struct ath_atx_tid *tid, struct list_head *bf_q, |
1390 | struct list_head *bf_q, | 581 | struct ath_buf **bf_last, struct aggr_rifs_param *param, |
1391 | struct ath_buf **bf_last, | 582 | int *prev_frames) |
1392 | struct aggr_rifs_param *param, | ||
1393 | int *prev_frames) | ||
1394 | { | 583 | { |
1395 | #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) | 584 | #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) |
1396 | struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL; | 585 | struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL; |
@@ -1485,15 +674,6 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
1485 | } | 674 | } |
1486 | bf_prev = bf; | 675 | bf_prev = bf; |
1487 | 676 | ||
1488 | #ifdef AGGR_NOSHORT | ||
1489 | /* | ||
1490 | * terminate aggregation on a small packet boundary | ||
1491 | */ | ||
1492 | if (bf->bf_frmlen < ATH_AGGR_MINPLEN) { | ||
1493 | status = ATH_AGGR_SHORTPKT; | ||
1494 | break; | ||
1495 | } | ||
1496 | #endif | ||
1497 | } while (!list_empty(&tid->buf_q)); | 677 | } while (!list_empty(&tid->buf_q)); |
1498 | 678 | ||
1499 | bf_first->bf_al = al; | 679 | bf_first->bf_al = al; |
@@ -1503,12 +683,8 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
1503 | #undef PADBYTES | 683 | #undef PADBYTES |
1504 | } | 684 | } |
1505 | 685 | ||
1506 | /* | 686 | static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, |
1507 | * process pending frames possibly doing a-mpdu aggregation | 687 | struct ath_atx_tid *tid) |
1508 | * NB: must be called with txq lock held | ||
1509 | */ | ||
1510 | static void ath_tx_sched_aggr(struct ath_softc *sc, | ||
1511 | struct ath_txq *txq, struct ath_atx_tid *tid) | ||
1512 | { | 688 | { |
1513 | struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL; | 689 | struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL; |
1514 | enum ATH_AGGR_STATUS status; | 690 | enum ATH_AGGR_STATUS status; |
@@ -1588,313 +764,190 @@ static void ath_tx_sched_aggr(struct ath_softc *sc,
1588 | status != ATH_AGGR_BAW_CLOSED); | 764 | status != ATH_AGGR_BAW_CLOSED); |
1589 | } | 765 | } |
1590 | 766 | ||
1591 | /* Called with txq lock held */ | 767 | int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, |
768 | u16 tid, u16 *ssn) | ||
769 | { | ||
770 | struct ath_atx_tid *txtid; | ||
771 | struct ath_node *an; | ||
1592 | 772 | ||
1593 | static void ath_tid_drain(struct ath_softc *sc, | 773 | an = (struct ath_node *)sta->drv_priv; |
1594 | struct ath_txq *txq, | 774 | |
1595 | struct ath_atx_tid *tid) | 775 | if (sc->sc_flags & SC_OP_TXAGGR) { |
776 | txtid = ATH_AN_2_TID(an, tid); | ||
777 | txtid->state |= AGGR_ADDBA_PROGRESS; | ||
778 | ath_tx_pause_tid(sc, txtid); | ||
779 | } | ||
1596 | 780 | ||
781 | return 0; | ||
782 | } | ||
783 | |||
784 | int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) | ||
1597 | { | 785 | { |
786 | struct ath_node *an = (struct ath_node *)sta->drv_priv; | ||
787 | struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); | ||
788 | struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum]; | ||
1598 | struct ath_buf *bf; | 789 | struct ath_buf *bf; |
1599 | struct list_head bf_head; | 790 | struct list_head bf_head; |
1600 | INIT_LIST_HEAD(&bf_head); | 791 | INIT_LIST_HEAD(&bf_head); |
1601 | 792 | ||
1602 | for (;;) { | 793 | if (txtid->state & AGGR_CLEANUP) |
1603 | if (list_empty(&tid->buf_q)) | 794 | return 0; |
1604 | break; | ||
1605 | bf = list_first_entry(&tid->buf_q, struct ath_buf, list); | ||
1606 | |||
1607 | list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list); | ||
1608 | 795 | ||
1609 | /* update baw for software retried frame */ | 796 | if (!(txtid->state & AGGR_ADDBA_COMPLETE)) { |
1610 | if (bf_isretried(bf)) | 797 | txtid->addba_exchangeattempts = 0; |
1611 | ath_tx_update_baw(sc, tid, bf->bf_seqno); | 798 | return 0; |
799 | } | ||
1612 | 800 | ||
1613 | /* | 801 | ath_tx_pause_tid(sc, txtid); |
1614 | * do not indicate packets while holding txq spinlock. | ||
1615 | * unlock is intentional here | ||
1616 | */ | ||
1617 | spin_unlock(&txq->axq_lock); | ||
1618 | 802 | ||
1619 | /* complete this sub-frame */ | 803 | /* drop all software retried frames and mark this TID */ |
804 | spin_lock_bh(&txq->axq_lock); | ||
805 | while (!list_empty(&txtid->buf_q)) { | ||
806 | bf = list_first_entry(&txtid->buf_q, struct ath_buf, list); | ||
807 | if (!bf_isretried(bf)) { | ||
808 | /* | ||
809 | * NB: it's based on the assumption that | ||
810 | * software retried frame will always stay | ||
811 | * at the head of software queue. | ||
812 | */ | ||
813 | break; | ||
814 | } | ||
815 | list_cut_position(&bf_head, | ||
816 | &txtid->buf_q, &bf->bf_lastfrm->list); | ||
817 | ath_tx_update_baw(sc, txtid, bf->bf_seqno); | ||
1620 | ath_tx_complete_buf(sc, bf, &bf_head, 0, 0); | 818 | ath_tx_complete_buf(sc, bf, &bf_head, 0, 0); |
819 | } | ||
1621 | 820 | ||
1622 | spin_lock(&txq->axq_lock); | 821 | if (txtid->baw_head != txtid->baw_tail) { |
822 | spin_unlock_bh(&txq->axq_lock); | ||
823 | txtid->state |= AGGR_CLEANUP; | ||
824 | } else { | ||
825 | txtid->state &= ~AGGR_ADDBA_COMPLETE; | ||
826 | txtid->addba_exchangeattempts = 0; | ||
827 | spin_unlock_bh(&txq->axq_lock); | ||
828 | ath_tx_flush_tid(sc, txtid); | ||
1623 | } | 829 | } |
1624 | 830 | ||
1625 | /* | 831 | return 0; |
1626 | * TODO: For frame(s) that are in the retry state, we will reuse the | ||
1627 | * sequence number(s) without setting the retry bit. The | ||
1628 | * alternative is to give up on these and BAR the receiver's window | ||
1629 | * forward. | ||
1630 | */ | ||
1631 | tid->seq_next = tid->seq_start; | ||
1632 | tid->baw_tail = tid->baw_head; | ||
1633 | } | 832 | } |
1634 | 833 | ||
1635 | /* | 834 | void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) |
1636 | * Drain all pending buffers | ||
1637 | * NB: must be called with txq lock held | ||
1638 | */ | ||
1639 | static void ath_txq_drain_pending_buffers(struct ath_softc *sc, | ||
1640 | struct ath_txq *txq) | ||
1641 | { | 835 | { |
1642 | struct ath_atx_ac *ac, *ac_tmp; | 836 | struct ath_atx_tid *txtid; |
1643 | struct ath_atx_tid *tid, *tid_tmp; | 837 | struct ath_node *an; |
1644 | 838 | ||
1645 | list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) { | 839 | an = (struct ath_node *)sta->drv_priv; |
1646 | list_del(&ac->list); | 840 | |
1647 | ac->sched = false; | 841 | if (sc->sc_flags & SC_OP_TXAGGR) { |
1648 | list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) { | 842 | txtid = ATH_AN_2_TID(an, tid); |
1649 | list_del(&tid->list); | 843 | txtid->baw_size = |
1650 | tid->sched = false; | 844 | IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor; |
1651 | ath_tid_drain(sc, txq, tid); | 845 | txtid->state |= AGGR_ADDBA_COMPLETE; |
1652 | } | 846 | txtid->state &= ~AGGR_ADDBA_PROGRESS; |
847 | ath_tx_resume_tid(sc, txtid); | ||
1653 | } | 848 | } |
1654 | } | 849 | } |
1655 | 850 | ||
1656 | static int ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf, | 851 | bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno) |
1657 | struct sk_buff *skb, | ||
1658 | struct ath_tx_control *txctl) | ||
1659 | { | 852 | { |
1660 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); | 853 | struct ath_atx_tid *txtid; |
1661 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
1662 | struct ath_tx_info_priv *tx_info_priv; | ||
1663 | int hdrlen; | ||
1664 | __le16 fc; | ||
1665 | |||
1666 | tx_info_priv = kzalloc(sizeof(*tx_info_priv), GFP_ATOMIC); | ||
1667 | if (unlikely(!tx_info_priv)) | ||
1668 | return -ENOMEM; | ||
1669 | tx_info->rate_driver_data[0] = tx_info_priv; | ||
1670 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | ||
1671 | fc = hdr->frame_control; | ||
1672 | |||
1673 | ATH_TXBUF_RESET(bf); | ||
1674 | |||
1675 | /* Frame type */ | ||
1676 | |||
1677 | bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3); | ||
1678 | |||
1679 | ieee80211_is_data(fc) ? | ||
1680 | (bf->bf_state.bf_type |= BUF_DATA) : | ||
1681 | (bf->bf_state.bf_type &= ~BUF_DATA); | ||
1682 | ieee80211_is_back_req(fc) ? | ||
1683 | (bf->bf_state.bf_type |= BUF_BAR) : | ||
1684 | (bf->bf_state.bf_type &= ~BUF_BAR); | ||
1685 | ieee80211_is_pspoll(fc) ? | ||
1686 | (bf->bf_state.bf_type |= BUF_PSPOLL) : | ||
1687 | (bf->bf_state.bf_type &= ~BUF_PSPOLL); | ||
1688 | (sc->sc_flags & SC_OP_PREAMBLE_SHORT) ? | ||
1689 | (bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) : | ||
1690 | (bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE); | ||
1691 | (conf_is_ht(&sc->hw->conf) && !is_pae(skb) && | ||
1692 | (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) ? | ||
1693 | (bf->bf_state.bf_type |= BUF_HT) : | ||
1694 | (bf->bf_state.bf_type &= ~BUF_HT); | ||
1695 | |||
1696 | bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq); | ||
1697 | 854 | ||
1698 | /* Crypto */ | 855 | if (!(sc->sc_flags & SC_OP_TXAGGR)) |
856 | return false; | ||
1699 | 857 | ||
1700 | bf->bf_keytype = get_hw_crypto_keytype(skb); | 858 | txtid = ATH_AN_2_TID(an, tidno); |
1701 | 859 | ||
1702 | if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) { | 860 | if (!(txtid->state & AGGR_ADDBA_COMPLETE)) { |
1703 | bf->bf_frmlen += tx_info->control.hw_key->icv_len; | 861 | if (!(txtid->state & AGGR_ADDBA_PROGRESS) && |
1704 | bf->bf_keyix = tx_info->control.hw_key->hw_key_idx; | 862 | (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) { |
1705 | } else { | 863 | txtid->addba_exchangeattempts++; |
1706 | bf->bf_keyix = ATH9K_TXKEYIX_INVALID; | 864 | return true; |
865 | } | ||
1707 | } | 866 | } |
1708 | 867 | ||
1709 | /* Assign seqno, tidno */ | 868 | return false; |
1710 | 869 | } | |
1711 | if (ieee80211_is_data_qos(fc) && (sc->sc_flags & SC_OP_TXAGGR)) | ||
1712 | assign_aggr_tid_seqno(skb, bf); | ||
1713 | |||
1714 | /* DMA setup */ | ||
1715 | bf->bf_mpdu = skb; | ||
1716 | 870 | ||
1717 | bf->bf_dmacontext = dma_map_single(sc->dev, skb->data, | 871 | /********************/ |
1718 | skb->len, DMA_TO_DEVICE); | 872 | /* Queue Management */ |
1719 | if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) { | 873 | /********************/ |
1720 | bf->bf_mpdu = NULL; | ||
1721 | DPRINTF(sc, ATH_DBG_CONFIG, | ||
1722 | "dma_mapping_error() on TX\n"); | ||
1723 | return -ENOMEM; | ||
1724 | } | ||
1725 | 874 | ||
1726 | bf->bf_buf_addr = bf->bf_dmacontext; | 875 | static u32 ath_txq_depth(struct ath_softc *sc, int qnum) |
1727 | return 0; | 876 | { |
877 | return sc->tx.txq[qnum].axq_depth; | ||
1728 | } | 878 | } |
1729 | 879 | ||
1730 | /* FIXME: tx power */ | 880 | static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) |
1731 | static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, | ||
1732 | struct ath_tx_control *txctl) | ||
1733 | { | 881 | { |
1734 | struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu; | ||
1735 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); | ||
1736 | struct ath_node *an = NULL; | ||
1737 | struct list_head bf_head; | ||
1738 | struct ath_desc *ds; | ||
1739 | struct ath_atx_tid *tid; | ||
1740 | struct ath_hal *ah = sc->sc_ah; | 882 | struct ath_hal *ah = sc->sc_ah; |
1741 | int frm_type; | 883 | (void) ath9k_hw_stoptxdma(ah, txq->axq_qnum); |
1742 | |||
1743 | frm_type = get_hw_packet_type(skb); | ||
1744 | |||
1745 | INIT_LIST_HEAD(&bf_head); | ||
1746 | list_add_tail(&bf->list, &bf_head); | ||
1747 | |||
1748 | /* setup descriptor */ | ||
1749 | |||
1750 | ds = bf->bf_desc; | ||
1751 | ds->ds_link = 0; | ||
1752 | ds->ds_data = bf->bf_buf_addr; | ||
1753 | |||
1754 | /* Formulate first tx descriptor with tx controls */ | ||
1755 | |||
1756 | ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER, | ||
1757 | bf->bf_keyix, bf->bf_keytype, bf->bf_flags); | ||
1758 | |||
1759 | ath9k_hw_filltxdesc(ah, ds, | ||
1760 | skb->len, /* segment length */ | ||
1761 | true, /* first segment */ | ||
1762 | true, /* last segment */ | ||
1763 | ds); /* first descriptor */ | ||
1764 | |||
1765 | bf->bf_lastfrm = bf; | ||
1766 | |||
1767 | spin_lock_bh(&txctl->txq->axq_lock); | ||
1768 | |||
1769 | if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) && | ||
1770 | tx_info->control.sta) { | ||
1771 | an = (struct ath_node *)tx_info->control.sta->drv_priv; | ||
1772 | tid = ATH_AN_2_TID(an, bf->bf_tidno); | ||
1773 | |||
1774 | if (ath_aggr_query(sc, an, bf->bf_tidno)) { | ||
1775 | /* | ||
1776 | * Try aggregation if it's a unicast data frame | ||
1777 | * and the destination is HT capable. | ||
1778 | */ | ||
1779 | ath_tx_send_ampdu(sc, tid, &bf_head, txctl); | ||
1780 | } else { | ||
1781 | /* | ||
1782 | * Send this frame as regular when ADDBA | ||
1783 | * exchange is neither complete nor pending. | ||
1784 | */ | ||
1785 | ath_tx_send_normal(sc, txctl->txq, | ||
1786 | tid, &bf_head); | ||
1787 | } | ||
1788 | } else { | ||
1789 | bf->bf_lastbf = bf; | ||
1790 | bf->bf_nframes = 1; | ||
1791 | |||
1792 | ath_buf_set_rate(sc, bf); | ||
1793 | ath_tx_txqaddbuf(sc, txctl->txq, &bf_head); | ||
1794 | } | ||
1795 | |||
1796 | spin_unlock_bh(&txctl->txq->axq_lock); | ||
1797 | } | 884 | } |
1798 | 885 | ||
1799 | /* Upon failure caller should free skb */ | 886 | static void ath_get_beaconconfig(struct ath_softc *sc, int if_id, |
1800 | int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb, | 887 | struct ath_beacon_config *conf) |
1801 | struct ath_tx_control *txctl) | ||
1802 | { | 888 | { |
1803 | struct ath_buf *bf; | 889 | struct ieee80211_hw *hw = sc->hw; |
1804 | int r; | ||
1805 | |||
1806 | /* Check if a tx buffer is available */ | ||
1807 | |||
1808 | bf = ath_tx_get_buffer(sc); | ||
1809 | if (!bf) { | ||
1810 | DPRINTF(sc, ATH_DBG_XMIT, "TX buffers are full\n"); | ||
1811 | return -1; | ||
1812 | } | ||
1813 | |||
1814 | r = ath_tx_setup_buffer(sc, bf, skb, txctl); | ||
1815 | if (unlikely(r)) { | ||
1816 | struct ath_txq *txq = txctl->txq; | ||
1817 | |||
1818 | DPRINTF(sc, ATH_DBG_FATAL, "TX mem alloc failure\n"); | ||
1819 | |||
1820 | /* upon ath_tx_processq() this TX queue will be resumed, we | ||
1821 | * guarantee this will happen by knowing beforehand that | ||
1822 | * we will at least have to run TX completion on one buffer | ||
1823 | * on the queue */ | ||
1824 | spin_lock_bh(&txq->axq_lock); | ||
1825 | if (ath_txq_depth(sc, txq->axq_qnum) > 1) { | ||
1826 | ieee80211_stop_queue(sc->hw, | ||
1827 | skb_get_queue_mapping(skb)); | ||
1828 | txq->stopped = 1; | ||
1829 | } | ||
1830 | spin_unlock_bh(&txq->axq_lock); | ||
1831 | |||
1832 | spin_lock_bh(&sc->tx.txbuflock); | ||
1833 | list_add_tail(&bf->list, &sc->tx.txbuf); | ||
1834 | spin_unlock_bh(&sc->tx.txbuflock); | ||
1835 | |||
1836 | return r; | ||
1837 | } | ||
1838 | 890 | ||
1839 | ath_tx_start_dma(sc, bf, txctl); | 891 | /* fill in beacon config data */ |
1840 | 892 | ||
1841 | return 0; | 893 | conf->beacon_interval = hw->conf.beacon_int; |
894 | conf->listen_interval = 100; | ||
895 | conf->dtim_count = 1; | ||
896 | conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval; | ||
1842 | } | 897 | } |
1843 | 898 | ||
1844 | /* Initialize TX queue and h/w */ | 899 | static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx) |
1845 | |||
1846 | int ath_tx_init(struct ath_softc *sc, int nbufs) | ||
1847 | { | 900 | { |
1848 | int error = 0; | 901 | struct ath_hal *ah = sc->sc_ah; |
1849 | 902 | int i, npend = 0; | |
1850 | do { | ||
1851 | spin_lock_init(&sc->tx.txbuflock); | ||
1852 | 903 | ||
1853 | /* Setup tx descriptors */ | 904 | if (!(sc->sc_flags & SC_OP_INVALID)) { |
1854 | error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf, | 905 | for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { |
1855 | "tx", nbufs, 1); | 906 | if (ATH_TXQ_SETUP(sc, i)) { |
1856 | if (error != 0) { | 907 | ath_tx_stopdma(sc, &sc->tx.txq[i]); |
1857 | DPRINTF(sc, ATH_DBG_FATAL, | 908 | npend += ath9k_hw_numtxpending(ah, |
1858 | "Failed to allocate tx descriptors: %d\n", | 909 | sc->tx.txq[i].axq_qnum); |
1859 | error); | 910 | } |
1860 | break; | ||
1861 | } | 911 | } |
912 | } | ||
1862 | 913 | ||
1863 | /* XXX allocate beacon state together with vap */ | 914 | if (npend) { |
1864 | error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf, | 915 | int r; |
1865 | "beacon", ATH_BCBUF, 1); | ||
1866 | if (error != 0) { | ||
1867 | DPRINTF(sc, ATH_DBG_FATAL, | ||
1868 | "Failed to allocate beacon descriptors: %d\n", | ||
1869 | error); | ||
1870 | break; | ||
1871 | } | ||
1872 | 916 | ||
1873 | } while (0); | 917 | DPRINTF(sc, ATH_DBG_XMIT, "Unable to stop TxDMA. Reset HAL!\n"); |
1874 | 918 | ||
1875 | if (error != 0) | 919 | spin_lock_bh(&sc->sc_resetlock); |
1876 | ath_tx_cleanup(sc); | 920 | r = ath9k_hw_reset(ah, sc->sc_ah->ah_curchan, true); |
921 | if (r) | ||
922 | DPRINTF(sc, ATH_DBG_FATAL, | ||
923 | "Unable to reset hardware; reset status %u\n", | ||
924 | r); | ||
925 | spin_unlock_bh(&sc->sc_resetlock); | ||
926 | } | ||
1877 | 927 | ||
1878 | return error; | 928 | for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { |
929 | if (ATH_TXQ_SETUP(sc, i)) | ||
930 | ath_tx_draintxq(sc, &sc->tx.txq[i], retry_tx); | ||
931 | } | ||
1879 | } | 932 | } |
1880 | 933 | ||
1881 | /* Reclaim all tx queue resources */ | 934 | static void ath_txq_drain_pending_buffers(struct ath_softc *sc, |
1882 | 935 | struct ath_txq *txq) | |
1883 | int ath_tx_cleanup(struct ath_softc *sc) | ||
1884 | { | 936 | { |
1885 | /* cleanup beacon descriptors */ | 937 | struct ath_atx_ac *ac, *ac_tmp; |
1886 | if (sc->beacon.bdma.dd_desc_len != 0) | 938 | struct ath_atx_tid *tid, *tid_tmp; |
1887 | ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf); | ||
1888 | |||
1889 | /* cleanup tx descriptors */ | ||
1890 | if (sc->tx.txdma.dd_desc_len != 0) | ||
1891 | ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf); | ||
1892 | 939 | ||
1893 | return 0; | 940 | list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) { |
941 | list_del(&ac->list); | ||
942 | ac->sched = false; | ||
943 | list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q, list) { | ||
944 | list_del(&tid->list); | ||
945 | tid->sched = false; | ||
946 | ath_tid_drain(sc, txq, tid); | ||
947 | } | ||
948 | } | ||
1894 | } | 949 | } |
1895 | 950 | ||
1896 | /* Setup a h/w transmit queue */ | ||
1897 | |||
1898 | struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) | 951 | struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) |
1899 | { | 952 | { |
1900 | struct ath_hal *ah = sc->sc_ah; | 953 | struct ath_hal *ah = sc->sc_ah; |
@@ -1960,43 +1013,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) | |||
1960 | return &sc->tx.txq[qnum]; | 1013 | return &sc->tx.txq[qnum]; |
1961 | } | 1014 | } |
1962 | 1015 | ||
1963 | /* Reclaim resources for a setup queue */ | 1016 | static int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype) |
1964 | |||
1965 | void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) | ||
1966 | { | ||
1967 | ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum); | ||
1968 | sc->tx.txqsetup &= ~(1<<txq->axq_qnum); | ||
1969 | } | ||
1970 | |||
1971 | /* | ||
1972 | * Setup a hardware data transmit queue for the specified | ||
1973 | * access control. The hal may not support all requested | ||
1974 | * queues in which case it will return a reference to a | ||
1975 | * previously setup queue. We record the mapping from ac's | ||
1976 | * to h/w queues for use by ath_tx_start and also track | ||
1977 | * the set of h/w queues being used to optimize work in the | ||
1978 | * transmit interrupt handler and related routines. | ||
1979 | */ | ||
1980 | |||
1981 | int ath_tx_setup(struct ath_softc *sc, int haltype) | ||
1982 | { | ||
1983 | struct ath_txq *txq; | ||
1984 | |||
1985 | if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) { | ||
1986 | DPRINTF(sc, ATH_DBG_FATAL, | ||
1987 | "HAL AC %u out of range, max %zu!\n", | ||
1988 | haltype, ARRAY_SIZE(sc->tx.hwq_map)); | ||
1989 | return 0; | ||
1990 | } | ||
1991 | txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype); | ||
1992 | if (txq != NULL) { | ||
1993 | sc->tx.hwq_map[haltype] = txq->axq_qnum; | ||
1994 | return 1; | ||
1995 | } else | ||
1996 | return 0; | ||
1997 | } | ||
1998 | |||
1999 | int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype) | ||
2000 | { | 1017 | { |
2001 | int qnum; | 1018 | int qnum; |
2002 | 1019 | ||
@@ -2022,8 +1039,6 @@ int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype) | |||
2022 | return qnum; | 1039 | return qnum; |
2023 | } | 1040 | } |
2024 | 1041 | ||
2025 | /* Get a transmit queue, if available */ | ||
2026 | |||
2027 | struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb) | 1042 | struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb) |
2028 | { | 1043 | { |
2029 | struct ath_txq *txq = NULL; | 1044 | struct ath_txq *txq = NULL; |
@@ -2034,7 +1049,6 @@ struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb) | |||
2034 | 1049 | ||
2035 | spin_lock_bh(&txq->axq_lock); | 1050 | spin_lock_bh(&txq->axq_lock); |
2036 | 1051 | ||
2037 | /* Try to avoid running out of descriptors */ | ||
2038 | if (txq->axq_depth >= (ATH_TXBUF - 20)) { | 1052 | if (txq->axq_depth >= (ATH_TXBUF - 20)) { |
2039 | DPRINTF(sc, ATH_DBG_FATAL, | 1053 | DPRINTF(sc, ATH_DBG_FATAL, |
2040 | "TX queue: %d is full, depth: %d\n", | 1054 | "TX queue: %d is full, depth: %d\n", |
@@ -2050,8 +1064,6 @@ struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb) | |||
2050 | return txq; | 1064 | return txq; |
2051 | } | 1065 | } |
2052 | 1066 | ||
2053 | /* Update parameters for a transmit queue */ | ||
2054 | |||
2055 | int ath_txq_update(struct ath_softc *sc, int qnum, | 1067 | int ath_txq_update(struct ath_softc *sc, int qnum, |
2056 | struct ath9k_tx_queue_info *qinfo) | 1068 | struct ath9k_tx_queue_info *qinfo) |
2057 | { | 1069 | { |
@@ -2083,7 +1095,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum, | |||
2083 | "Unable to update hardware queue %u!\n", qnum); | 1095 | "Unable to update hardware queue %u!\n", qnum); |
2084 | error = -EIO; | 1096 | error = -EIO; |
2085 | } else { | 1097 | } else { |
2086 | ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */ | 1098 | ath9k_hw_resettxqueue(ah, qnum); |
2087 | } | 1099 | } |
2088 | 1100 | ||
2089 | return error; | 1101 | return error; |
@@ -2112,26 +1124,7 @@ int ath_cabq_update(struct ath_softc *sc) | |||
2112 | return 0; | 1124 | return 0; |
2113 | } | 1125 | } |
2114 | 1126 | ||
2115 | /* Deferred processing of transmit interrupt */ | 1127 | void ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx) |
2116 | |||
2117 | void ath_tx_tasklet(struct ath_softc *sc) | ||
2118 | { | ||
2119 | int i; | ||
2120 | u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1); | ||
2121 | |||
2122 | ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask); | ||
2123 | |||
2124 | /* | ||
2125 | * Process each active queue. | ||
2126 | */ | ||
2127 | for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { | ||
2128 | if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i))) | ||
2129 | ath_tx_processq(sc, &sc->tx.txq[i]); | ||
2130 | } | ||
2131 | } | ||
2132 | |||
2133 | void ath_tx_draintxq(struct ath_softc *sc, | ||
2134 | struct ath_txq *txq, bool retry_tx) | ||
2135 | { | 1128 | { |
2136 | struct ath_buf *bf, *lastbf; | 1129 | struct ath_buf *bf, *lastbf; |
2137 | struct list_head bf_head; | 1130 | struct list_head bf_head; |
@@ -2191,44 +1184,245 @@ void ath_tx_draintxq(struct ath_softc *sc, | |||
2191 | } | 1184 | } |
2192 | } | 1185 | } |
2193 | 1186 | ||
2194 | /* Drain the transmit queues and reclaim resources */ | 1187 | void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) |
1188 | { | ||
1189 | ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum); | ||
1190 | sc->tx.txqsetup &= ~(1<<txq->axq_qnum); | ||
1191 | } | ||
2195 | 1192 | ||
2196 | void ath_draintxq(struct ath_softc *sc, bool retry_tx) | 1193 | void ath_draintxq(struct ath_softc *sc, bool retry_tx) |
2197 | { | 1194 | { |
2198 | /* stop beacon queue. The beacon will be freed when | 1195 | if (!(sc->sc_flags & SC_OP_INVALID)) |
2199 | * we go to INIT state */ | ||
2200 | if (!(sc->sc_flags & SC_OP_INVALID)) { | ||
2201 | (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); | 1196 | (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); |
2202 | DPRINTF(sc, ATH_DBG_XMIT, "beacon queue %x\n", | ||
2203 | ath9k_hw_gettxbuf(sc->sc_ah, sc->beacon.beaconq)); | ||
2204 | } | ||
2205 | 1197 | ||
2206 | ath_drain_txdataq(sc, retry_tx); | 1198 | ath_drain_txdataq(sc, retry_tx); |
2207 | } | 1199 | } |
2208 | 1200 | ||
2209 | u32 ath_txq_depth(struct ath_softc *sc, int qnum) | 1201 | void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) |
2210 | { | 1202 | { |
2211 | return sc->tx.txq[qnum].axq_depth; | 1203 | struct ath_atx_ac *ac; |
1204 | struct ath_atx_tid *tid; | ||
1205 | |||
1206 | if (list_empty(&txq->axq_acq)) | ||
1207 | return; | ||
1208 | |||
1209 | ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); | ||
1210 | list_del(&ac->list); | ||
1211 | ac->sched = false; | ||
1212 | |||
1213 | do { | ||
1214 | if (list_empty(&ac->tid_q)) | ||
1215 | return; | ||
1216 | |||
1217 | tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list); | ||
1218 | list_del(&tid->list); | ||
1219 | tid->sched = false; | ||
1220 | |||
1221 | if (tid->paused) | ||
1222 | continue; | ||
1223 | |||
1224 | if ((txq->axq_depth % 2) == 0) | ||
1225 | ath_tx_sched_aggr(sc, txq, tid); | ||
1226 | |||
1227 | /* | ||
1228 | * add tid to round-robin queue if more frames | ||
1229 | * are pending for the tid | ||
1230 | */ | ||
1231 | if (!list_empty(&tid->buf_q)) | ||
1232 | ath_tx_queue_tid(txq, tid); | ||
1233 | |||
1234 | break; | ||
1235 | } while (!list_empty(&ac->tid_q)); | ||
1236 | |||
1237 | if (!list_empty(&ac->tid_q)) { | ||
1238 | if (!ac->sched) { | ||
1239 | ac->sched = true; | ||
1240 | list_add_tail(&ac->list, &txq->axq_acq); | ||
1241 | } | ||
1242 | } | ||
1243 | } | ||
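ath_txq_schedule() above pops the first AC off the queue's service list, serves one of its TIDs, and puts TID and AC back on their lists only while frames remain. A simplified standalone model of that re-queue-while-pending round robin (flattened to a single list of TIDs; the array stands in for the kernel list_head machinery and all names are illustrative only):

#include <stdio.h>

struct tid {
        const char *name;
        int pending;                    /* frames still queued for this TID */
};

int main(void)
{
        struct tid tids[3] = { { "tid0", 2 }, { "tid4", 1 }, { "tid6", 3 } };
        struct tid *rr[16];             /* FIFO standing in for axq_acq/tid_q */
        int head = 0, tail = 0, i;

        for (i = 0; i < 3; i++)
                rr[tail++] = &tids[i];

        while (head != tail) {
                struct tid *t = rr[head++];     /* list_first_entry + list_del */

                t->pending--;                   /* one scheduling turn         */
                printf("served %s, %d left\n", t->name, t->pending);

                if (t->pending)                 /* re-queue while work remains */
                        rr[tail++] = t;
        }
        return 0;
}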
1244 | |||
1245 | int ath_tx_setup(struct ath_softc *sc, int haltype) | ||
1246 | { | ||
1247 | struct ath_txq *txq; | ||
1248 | |||
1249 | if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) { | ||
1250 | DPRINTF(sc, ATH_DBG_FATAL, | ||
1251 | "HAL AC %u out of range, max %zu!\n", | ||
1252 | haltype, ARRAY_SIZE(sc->tx.hwq_map)); | ||
1253 | return 0; | ||
1254 | } | ||
1255 | txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype); | ||
1256 | if (txq != NULL) { | ||
1257 | sc->tx.hwq_map[haltype] = txq->axq_qnum; | ||
1258 | return 1; | ||
1259 | } else | ||
1260 | return 0; | ||
2212 | } | 1261 | } |
2213 | 1262 | ||
2214 | u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum) | 1263 | /***********/ |
1264 | /* TX, DMA */ | ||
1265 | /***********/ | ||
1266 | |||
1267 | /* | ||
1268 | * Insert a chain of ath_buf (descriptors) on a txq and | ||
1269 | * assume the descriptors are already chained together by caller. | ||
1270 | */ | ||
1271 | static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, | ||
1272 | struct list_head *head) | ||
2215 | { | 1273 | { |
2216 | return sc->tx.txq[qnum].axq_aggr_depth; | 1274 | struct ath_hal *ah = sc->sc_ah; |
1275 | struct ath_buf *bf; | ||
1276 | |||
1277 | /* | ||
1278 | * Insert the frame on the outbound list and | ||
1279 | * pass it on to the hardware. | ||
1280 | */ | ||
1281 | |||
1282 | if (list_empty(head)) | ||
1283 | return; | ||
1284 | |||
1285 | bf = list_first_entry(head, struct ath_buf, list); | ||
1286 | |||
1287 | list_splice_tail_init(head, &txq->axq_q); | ||
1288 | txq->axq_depth++; | ||
1289 | txq->axq_totalqueued++; | ||
1290 | txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list); | ||
1291 | |||
1292 | DPRINTF(sc, ATH_DBG_QUEUE, | ||
1293 | "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth); | ||
1294 | |||
1295 | if (txq->axq_link == NULL) { | ||
1296 | ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); | ||
1297 | DPRINTF(sc, ATH_DBG_XMIT, | ||
1298 | "TXDP[%u] = %llx (%p)\n", | ||
1299 | txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc); | ||
1300 | } else { | ||
1301 | *txq->axq_link = bf->bf_daddr; | ||
1302 | DPRINTF(sc, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n", | ||
1303 | txq->axq_qnum, txq->axq_link, | ||
1304 | ito64(bf->bf_daddr), bf->bf_desc); | ||
1305 | } | ||
1306 | txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link); | ||
1307 | ath9k_hw_txstart(ah, txq->axq_qnum); | ||
2217 | } | 1308 | } |
2218 | 1309 | ||
2219 | bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno) | 1310 | static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc) |
2220 | { | 1311 | { |
2221 | struct ath_atx_tid *txtid; | 1312 | struct ath_buf *bf = NULL; |
2222 | 1313 | ||
2223 | if (!(sc->sc_flags & SC_OP_TXAGGR)) | 1314 | spin_lock_bh(&sc->tx.txbuflock); |
2224 | return false; | ||
2225 | 1315 | ||
2226 | txtid = ATH_AN_2_TID(an, tidno); | 1316 | if (unlikely(list_empty(&sc->tx.txbuf))) { |
1317 | spin_unlock_bh(&sc->tx.txbuflock); | ||
1318 | return NULL; | ||
1319 | } | ||
2227 | 1320 | ||
2228 | if (!(txtid->state & AGGR_ADDBA_COMPLETE)) { | 1321 | bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list); |
2229 | if (!(txtid->state & AGGR_ADDBA_PROGRESS) && | 1322 | list_del(&bf->list); |
2230 | (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) { | 1323 | |
2231 | txtid->addba_exchangeattempts++; | 1324 | spin_unlock_bh(&sc->tx.txbuflock); |
1325 | |||
1326 | return bf; | ||
1327 | } | ||
1328 | |||
1329 | static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, | ||
1330 | struct list_head *bf_head, | ||
1331 | struct ath_tx_control *txctl) | ||
1332 | { | ||
1333 | struct ath_buf *bf; | ||
1334 | |||
1335 | BUG_ON(list_empty(bf_head)); | ||
1336 | |||
1337 | bf = list_first_entry(bf_head, struct ath_buf, list); | ||
1338 | bf->bf_state.bf_type |= BUF_AMPDU; | ||
1339 | |||
1340 | /* | ||
1341 | * Do not queue to h/w when any of the following conditions is true: | ||
1342 | * - there are pending frames in software queue | ||
1343 | * - the TID is currently paused for ADDBA/BAR request | ||
1344 | * - seqno is not within block-ack window | ||
1345 | * - h/w queue depth exceeds low water mark | ||
1346 | */ | ||
1347 | if (!list_empty(&tid->buf_q) || tid->paused || | ||
1348 | !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) || | ||
1349 | txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) { | ||
1350 | /* | ||
1351 | * Add this frame to software queue for scheduling later | ||
1352 | * for aggregation. | ||
1353 | */ | ||
1354 | list_splice_tail_init(bf_head, &tid->buf_q); | ||
1355 | ath_tx_queue_tid(txctl->txq, tid); | ||
1356 | return; | ||
1357 | } | ||
1358 | |||
1359 | /* Add sub-frame to BAW */ | ||
1360 | ath_tx_addto_baw(sc, tid, bf); | ||
1361 | |||
1362 | /* Queue to h/w without aggregation */ | ||
1363 | bf->bf_nframes = 1; | ||
1364 | bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */ | ||
1365 | ath_buf_set_rate(sc, bf); | ||
1366 | ath_tx_txqaddbuf(sc, txctl->txq, bf_head); | ||
1367 | |||
1368 | return; | ||
1369 | } | ||
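The BAW_WITHIN() check above decides whether bf->bf_seqno still falls inside the block-ack window that starts at tid->seq_start. A standalone sketch of that modular window test, assuming the usual 12-bit 802.11 sequence space (the mask and the helper name are illustrative, not copied from the driver headers):

#include <stdio.h>
#include <stdbool.h>

/* 12-bit sequence numbers: distances wrap modulo 4096 */
static bool baw_within(unsigned int start, unsigned int bawsz, unsigned int seqno)
{
        return ((seqno - start) & 4095) < bawsz;
}

int main(void)
{
        /* a 64-frame window starting at 4090 covers 4090..4095 and 0..57 */
        printf("%d\n", baw_within(4090, 64, 4095));  /* 1: inside, before wrap */
        printf("%d\n", baw_within(4090, 64, 10));    /* 1: inside, after wrap  */
        printf("%d\n", baw_within(4090, 64, 60));    /* 0: outside the window  */
        return 0;
}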
1370 | |||
1371 | static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, | ||
1372 | struct ath_atx_tid *tid, | ||
1373 | struct list_head *bf_head) | ||
1374 | { | ||
1375 | struct ath_buf *bf; | ||
1376 | |||
1377 | BUG_ON(list_empty(bf_head)); | ||
1378 | |||
1379 | bf = list_first_entry(bf_head, struct ath_buf, list); | ||
1380 | bf->bf_state.bf_type &= ~BUF_AMPDU; | ||
1381 | |||
1382 | /* update starting sequence number for subsequent ADDBA request */ | ||
1383 | INCR(tid->seq_start, IEEE80211_SEQ_MAX); | ||
1384 | |||
1385 | bf->bf_nframes = 1; | ||
1386 | bf->bf_lastbf = bf->bf_lastfrm; | ||
1387 | ath_buf_set_rate(sc, bf); | ||
1388 | ath_tx_txqaddbuf(sc, txq, bf_head); | ||
1389 | } | ||
1390 | |||
1391 | static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb) | ||
1392 | { | ||
1393 | struct ieee80211_hdr *hdr; | ||
1394 | enum ath9k_pkt_type htype; | ||
1395 | __le16 fc; | ||
1396 | |||
1397 | hdr = (struct ieee80211_hdr *)skb->data; | ||
1398 | fc = hdr->frame_control; | ||
1399 | |||
1400 | if (ieee80211_is_beacon(fc)) | ||
1401 | htype = ATH9K_PKT_TYPE_BEACON; | ||
1402 | else if (ieee80211_is_probe_resp(fc)) | ||
1403 | htype = ATH9K_PKT_TYPE_PROBE_RESP; | ||
1404 | else if (ieee80211_is_atim(fc)) | ||
1405 | htype = ATH9K_PKT_TYPE_ATIM; | ||
1406 | else if (ieee80211_is_pspoll(fc)) | ||
1407 | htype = ATH9K_PKT_TYPE_PSPOLL; | ||
1408 | else | ||
1409 | htype = ATH9K_PKT_TYPE_NORMAL; | ||
1410 | |||
1411 | return htype; | ||
1412 | } | ||
1413 | |||
1414 | static bool is_pae(struct sk_buff *skb) | ||
1415 | { | ||
1416 | struct ieee80211_hdr *hdr; | ||
1417 | __le16 fc; | ||
1418 | |||
1419 | hdr = (struct ieee80211_hdr *)skb->data; | ||
1420 | fc = hdr->frame_control; | ||
1421 | |||
1422 | if (ieee80211_is_data(fc)) { | ||
1423 | if (ieee80211_is_nullfunc(fc) || | ||
1424 | /* Port Access Entity (IEEE 802.1X) */ | ||
1425 | (skb->protocol == cpu_to_be16(ETH_P_PAE))) { | ||
2232 | return true; | 1426 | return true; |
2233 | } | 1427 | } |
2234 | } | 1428 | } |
@@ -2236,175 +1430,801 @@ bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno) | |||
2236 | return false; | 1430 | return false; |
2237 | } | 1431 | } |
2238 | 1432 | ||
2239 | /* Start TX aggregation */ | 1433 | static int get_hw_crypto_keytype(struct sk_buff *skb) |
1434 | { | ||
1435 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); | ||
2240 | 1436 | ||
2241 | int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, | 1437 | if (tx_info->control.hw_key) { |
2242 | u16 tid, u16 *ssn) | 1438 | if (tx_info->control.hw_key->alg == ALG_WEP) |
1439 | return ATH9K_KEY_TYPE_WEP; | ||
1440 | else if (tx_info->control.hw_key->alg == ALG_TKIP) | ||
1441 | return ATH9K_KEY_TYPE_TKIP; | ||
1442 | else if (tx_info->control.hw_key->alg == ALG_CCMP) | ||
1443 | return ATH9K_KEY_TYPE_AES; | ||
1444 | } | ||
1445 | |||
1446 | return ATH9K_KEY_TYPE_CLEAR; | ||
1447 | } | ||
1448 | |||
1449 | static void assign_aggr_tid_seqno(struct sk_buff *skb, | ||
1450 | struct ath_buf *bf) | ||
2243 | { | 1451 | { |
2244 | struct ath_atx_tid *txtid; | 1452 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); |
1453 | struct ieee80211_hdr *hdr; | ||
2245 | struct ath_node *an; | 1454 | struct ath_node *an; |
1455 | struct ath_atx_tid *tid; | ||
1456 | __le16 fc; | ||
1457 | u8 *qc; | ||
2246 | 1458 | ||
2247 | an = (struct ath_node *)sta->drv_priv; | 1459 | if (!tx_info->control.sta) |
1460 | return; | ||
2248 | 1461 | ||
2249 | if (sc->sc_flags & SC_OP_TXAGGR) { | 1462 | an = (struct ath_node *)tx_info->control.sta->drv_priv; |
2250 | txtid = ATH_AN_2_TID(an, tid); | 1463 | hdr = (struct ieee80211_hdr *)skb->data; |
2251 | txtid->state |= AGGR_ADDBA_PROGRESS; | 1464 | fc = hdr->frame_control; |
2252 | ath_tx_pause_tid(sc, txtid); | 1465 | |
1466 | if (ieee80211_is_data_qos(fc)) { | ||
1467 | qc = ieee80211_get_qos_ctl(hdr); | ||
1468 | bf->bf_tidno = qc[0] & 0xf; | ||
2253 | } | 1469 | } |
2254 | 1470 | ||
2255 | return 0; | 1471 | /* |
1472 | * For HT capable stations, we save tidno for later use. | ||
1473 | * We also override seqno set by upper layer with the one | ||
1474 | * in tx aggregation state. | ||
1475 | * | ||
1476 | * If fragmentation is on, the sequence number is | ||
1477 | * not overridden, since it has been | ||
1478 | * incremented by the fragmentation routine. | ||
1479 | * | ||
1480 | * FIXME: check if the fragmentation threshold exceeds | ||
1481 | * IEEE80211 max. | ||
1482 | */ | ||
1483 | tid = ATH_AN_2_TID(an, bf->bf_tidno); | ||
1484 | hdr->seq_ctrl = cpu_to_le16(tid->seq_next << | ||
1485 | IEEE80211_SEQ_SEQ_SHIFT); | ||
1486 | bf->bf_seqno = tid->seq_next; | ||
1487 | INCR(tid->seq_next, IEEE80211_SEQ_MAX); | ||
2256 | } | 1488 | } |
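assign_aggr_tid_seqno() above replaces the sequence number chosen by mac80211 with the per-TID counter and advances that counter modulo the 12-bit sequence space. A self-contained sketch of the seq_ctrl packing and the wrap performed by INCR(); the constants are the standard 802.11 values, assumed here rather than taken from the driver:

#include <stdio.h>
#include <stdint.h>

#define SEQ_MAX        4096     /* 12-bit sequence number space       */
#define SEQ_SEQ_SHIFT  4        /* bits 0-3 carry the fragment number */

/* hand out the current per-TID sequence number and wrap at 4096 */
static uint16_t next_seq(uint16_t *seq_next)
{
        uint16_t cur = *seq_next;

        *seq_next = (*seq_next + 1) & (SEQ_MAX - 1);
        return cur;
}

int main(void)
{
        uint16_t seq_next = 4094;
        int i;

        for (i = 0; i < 4; i++) {
                uint16_t seq_ctrl = next_seq(&seq_next) << SEQ_SEQ_SHIFT;

                printf("seq_ctrl=0x%04x seqno=%u\n",
                       seq_ctrl, seq_ctrl >> SEQ_SEQ_SHIFT);
        }
        return 0;       /* prints seqnos 4094, 4095, 0, 1: the 12-bit wrap */
}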
2257 | 1489 | ||
2258 | /* Stop tx aggregation */ | 1490 | static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb, |
2259 | 1491 | struct ath_txq *txq) | |
2260 | int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) | ||
2261 | { | 1492 | { |
2262 | struct ath_node *an = (struct ath_node *)sta->drv_priv; | 1493 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); |
1494 | int flags = 0; | ||
2263 | 1495 | ||
2264 | ath_tx_aggr_teardown(sc, an, tid); | 1496 | flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */ |
2265 | return 0; | 1497 | flags |= ATH9K_TXDESC_INTREQ; |
1498 | |||
1499 | if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) | ||
1500 | flags |= ATH9K_TXDESC_NOACK; | ||
1501 | if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) | ||
1502 | flags |= ATH9K_TXDESC_RTSENA; | ||
1503 | |||
1504 | return flags; | ||
2266 | } | 1505 | } |
2267 | 1506 | ||
2268 | /* Resume tx aggregation */ | 1507 | /* |
1508 | * rix - rate index | ||
1509 | * pktlen - total bytes (delims + data + fcs + pads + pad delims) | ||
1510 | * width - 0 for 20 MHz, 1 for 40 MHz | ||
1511 | * half_gi - use 3.6 us (short GI) instead of 4 us symbol time | ||
1512 | */ | ||
1513 | static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf, | ||
1514 | int width, int half_gi, bool shortPreamble) | ||
1515 | { | ||
1516 | struct ath_rate_table *rate_table = sc->cur_rate_table; | ||
1517 | u32 nbits, nsymbits, duration, nsymbols; | ||
1518 | u8 rc; | ||
1519 | int streams, pktlen; | ||
2269 | 1520 | ||
2270 | void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) | 1521 | pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen; |
1522 | rc = rate_table->info[rix].ratecode; | ||
1523 | |||
1524 | /* for legacy rates, use old function to compute packet duration */ | ||
1525 | if (!IS_HT_RATE(rc)) | ||
1526 | return ath9k_hw_computetxtime(sc->sc_ah, rate_table, pktlen, | ||
1527 | rix, shortPreamble); | ||
1528 | |||
1529 | /* find number of symbols: PLCP + data */ | ||
1530 | nbits = (pktlen << 3) + OFDM_PLCP_BITS; | ||
1531 | nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width]; | ||
1532 | nsymbols = (nbits + nsymbits - 1) / nsymbits; | ||
1533 | |||
1534 | if (!half_gi) | ||
1535 | duration = SYMBOL_TIME(nsymbols); | ||
1536 | else | ||
1537 | duration = SYMBOL_TIME_HALFGI(nsymbols); | ||
1538 | |||
1539 | /* add up duration for legacy/ht training and signal fields */ | ||
1540 | streams = HT_RC_2_STREAMS(rc); | ||
1541 | duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams); | ||
1542 | |||
1543 | return duration; | ||
1544 | } | ||
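The HT branch of ath_pkt_duration() is a ceiling division of the PLCP-plus-payload bit count by the bits carried per OFDM symbol, multiplied by the symbol time. A worked standalone sketch of just that data-symbol part (the training/signal-field add-on from the last lines of the function is left out); the 22 PLCP service/tail bits, 4 us symbol time, short-GI rounding and the MCS7 figure of 260 bits per symbol are assumed standard values, not taken from this patch:

#include <stdio.h>

#define PLCP_BITS   22          /* OFDM service + tail bits (assumed) */

static unsigned int ht_data_duration(unsigned int pktlen,
                                     unsigned int bits_per_sym, int half_gi)
{
        unsigned int nbits = (pktlen << 3) + PLCP_BITS;
        unsigned int nsymbols = (nbits + bits_per_sym - 1) / bits_per_sym;

        if (!half_gi)
                return nsymbols * 4;                 /* 4 us per symbol    */
        return (nsymbols * 18 + 4) / 5;              /* 3.6 us, rounded up */
}

int main(void)
{
        /* 1500-byte MPDU at MCS7, 20 MHz: 260 data bits per symbol */
        printf("full GI: %u us\n", ht_data_duration(1500, 260, 0));  /* 188 */
        printf("half GI: %u us\n", ht_data_duration(1500, 260, 1));  /* 170 */
        return 0;
}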
1545 | |||
1546 | static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf) | ||
2271 | { | 1547 | { |
2272 | struct ath_atx_tid *txtid; | 1548 | struct ath_hal *ah = sc->sc_ah; |
2273 | struct ath_node *an; | 1549 | struct ath_rate_table *rt; |
1550 | struct ath_desc *ds = bf->bf_desc; | ||
1551 | struct ath_desc *lastds = bf->bf_lastbf->bf_desc; | ||
1552 | struct ath9k_11n_rate_series series[4]; | ||
1553 | struct sk_buff *skb; | ||
1554 | struct ieee80211_tx_info *tx_info; | ||
1555 | struct ieee80211_tx_rate *rates; | ||
1556 | struct ieee80211_hdr *hdr; | ||
1557 | struct ieee80211_hw *hw = sc->hw; | ||
1558 | int i, flags, rtsctsena = 0, enable_g_protection = 0; | ||
1559 | u32 ctsduration = 0; | ||
1560 | u8 rix = 0, cix, ctsrate = 0; | ||
1561 | __le16 fc; | ||
2274 | 1562 | ||
2275 | an = (struct ath_node *)sta->drv_priv; | 1563 | memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4); |
2276 | 1564 | ||
2277 | if (sc->sc_flags & SC_OP_TXAGGR) { | 1565 | skb = (struct sk_buff *)bf->bf_mpdu; |
2278 | txtid = ATH_AN_2_TID(an, tid); | 1566 | hdr = (struct ieee80211_hdr *)skb->data; |
2279 | txtid->baw_size = | 1567 | fc = hdr->frame_control; |
2280 | IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor; | 1568 | tx_info = IEEE80211_SKB_CB(skb); |
2281 | txtid->state |= AGGR_ADDBA_COMPLETE; | 1569 | rates = tx_info->control.rates; |
2282 | txtid->state &= ~AGGR_ADDBA_PROGRESS; | 1570 | |
2283 | ath_tx_resume_tid(sc, txtid); | 1571 | if (ieee80211_has_morefrags(fc) || |
1572 | (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) { | ||
1573 | rates[1].count = rates[2].count = rates[3].count = 0; | ||
1574 | rates[1].idx = rates[2].idx = rates[3].idx = 0; | ||
1575 | rates[0].count = ATH_TXMAXTRY; | ||
1576 | } | ||
1577 | |||
1578 | /* get the cix for the lowest valid rix */ | ||
1579 | rt = sc->cur_rate_table; | ||
1580 | for (i = 3; i >= 0; i--) { | ||
1581 | if (rates[i].count && (rates[i].idx >= 0)) { | ||
1582 | rix = rates[i].idx; | ||
1583 | break; | ||
1584 | } | ||
1585 | } | ||
1586 | |||
1587 | flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)); | ||
1588 | cix = rt->info[rix].ctrl_rate; | ||
1589 | |||
1590 | /* All protection frames are transmitted at 2Mb/s for 802.11g, | ||
1591 | * otherwise we transmit them at 1Mb/s */ | ||
1592 | if (hw->conf.channel->band == IEEE80211_BAND_2GHZ && | ||
1593 | !conf_is_ht(&hw->conf)) | ||
1594 | enable_g_protection = 1; | ||
1595 | |||
1596 | /* | ||
1597 | * If 802.11g protection is enabled, determine whether to use RTS/CTS or | ||
1598 | * just CTS. Note that this is only done for OFDM/HT unicast frames. | ||
1599 | */ | ||
1600 | if (sc->sc_protmode != PROT_M_NONE && !(bf->bf_flags & ATH9K_TXDESC_NOACK) | ||
1601 | && (rt->info[rix].phy == WLAN_RC_PHY_OFDM || | ||
1602 | WLAN_RC_PHY_HT(rt->info[rix].phy))) { | ||
1603 | if (sc->sc_protmode == PROT_M_RTSCTS) | ||
1604 | flags = ATH9K_TXDESC_RTSENA; | ||
1605 | else if (sc->sc_protmode == PROT_M_CTSONLY) | ||
1606 | flags = ATH9K_TXDESC_CTSENA; | ||
1607 | |||
1608 | cix = rt->info[enable_g_protection].ctrl_rate; | ||
1609 | rtsctsena = 1; | ||
2284 | } | 1610 | } |
1611 | |||
1612 | /* For 11n, the default behavior is to enable RTS for hw retried frames. | ||
1613 | * We enable the global flag here and let rate series flags determine | ||
1614 | * which rates will actually use RTS. | ||
1615 | */ | ||
1616 | if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) { | ||
1617 | /* 802.11g protection not needed, use our default behavior */ | ||
1618 | if (!rtsctsena) | ||
1619 | flags = ATH9K_TXDESC_RTSENA; | ||
1620 | } | ||
1621 | |||
1622 | /* Set protection if aggregate protection on */ | ||
1623 | if (sc->sc_config.ath_aggr_prot && | ||
1624 | (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) { | ||
1625 | flags = ATH9K_TXDESC_RTSENA; | ||
1626 | cix = rt->info[enable_g_protection].ctrl_rate; | ||
1627 | rtsctsena = 1; | ||
1628 | } | ||
1629 | |||
1630 | /* For AR5416 - RTS cannot be followed by a frame larger than 8K */ | ||
1631 | if (bf_isaggr(bf) && (bf->bf_al > ah->ah_caps.rts_aggr_limit)) | ||
1632 | flags &= ~(ATH9K_TXDESC_RTSENA); | ||
1633 | |||
1634 | /* | ||
1635 | * CTS transmit rate is derived from the transmit rate by looking in the | ||
1636 | * h/w rate table. We must also factor in whether or not a short | ||
1637 | * preamble is to be used. NB: cix is set above where RTS/CTS is enabled | ||
1638 | */ | ||
1639 | ctsrate = rt->info[cix].ratecode | | ||
1640 | (bf_isshpreamble(bf) ? rt->info[cix].short_preamble : 0); | ||
1641 | |||
1642 | for (i = 0; i < 4; i++) { | ||
1643 | if (!rates[i].count || (rates[i].idx < 0)) | ||
1644 | continue; | ||
1645 | |||
1646 | rix = rates[i].idx; | ||
1647 | |||
1648 | series[i].Rate = rt->info[rix].ratecode | | ||
1649 | (bf_isshpreamble(bf) ? rt->info[rix].short_preamble : 0); | ||
1650 | |||
1651 | series[i].Tries = rates[i].count; | ||
1652 | |||
1653 | series[i].RateFlags = ( | ||
1654 | (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) ? | ||
1655 | ATH9K_RATESERIES_RTS_CTS : 0) | | ||
1656 | ((rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ? | ||
1657 | ATH9K_RATESERIES_2040 : 0) | | ||
1658 | ((rates[i].flags & IEEE80211_TX_RC_SHORT_GI) ? | ||
1659 | ATH9K_RATESERIES_HALFGI : 0); | ||
1660 | |||
1661 | series[i].PktDuration = ath_pkt_duration(sc, rix, bf, | ||
1662 | (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) != 0, | ||
1663 | (rates[i].flags & IEEE80211_TX_RC_SHORT_GI), | ||
1664 | bf_isshpreamble(bf)); | ||
1665 | |||
1666 | series[i].ChSel = sc->sc_tx_chainmask; | ||
1667 | |||
1668 | if (rtsctsena) | ||
1669 | series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; | ||
1670 | } | ||
1671 | |||
1672 | /* set dur_update_en for l-sig computation except for PS-Poll frames */ | ||
1673 | ath9k_hw_set11n_ratescenario(ah, ds, lastds, !bf_ispspoll(bf), | ||
1674 | ctsrate, ctsduration, | ||
1675 | series, 4, flags); | ||
1676 | |||
1677 | if (sc->sc_config.ath_aggr_prot && flags) | ||
1678 | ath9k_hw_set11n_burstduration(ah, ds, 8192); | ||
2285 | } | 1679 | } |
2286 | 1680 | ||
2287 | /* | 1681 | static int ath_tx_setup_buffer(struct ath_softc *sc, struct ath_buf *bf, |
2288 | * Performs transmit side cleanup when TID changes from aggregated to | 1682 | struct sk_buff *skb, |
2289 | * unaggregated. | 1683 | struct ath_tx_control *txctl) |
2290 | * - Pause the TID and mark cleanup in progress | 1684 | { |
2291 | * - Discard all retry frames from the s/w queue. | 1685 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); |
2292 | */ | 1686 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; |
1687 | struct ath_tx_info_priv *tx_info_priv; | ||
1688 | int hdrlen; | ||
1689 | __le16 fc; | ||
1690 | |||
1691 | tx_info_priv = kzalloc(sizeof(*tx_info_priv), GFP_ATOMIC); | ||
1692 | if (unlikely(!tx_info_priv)) | ||
1693 | return -ENOMEM; | ||
1694 | tx_info->rate_driver_data[0] = tx_info_priv; | ||
1695 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | ||
1696 | fc = hdr->frame_control; | ||
1697 | |||
1698 | ATH_TXBUF_RESET(bf); | ||
1699 | |||
1700 | bf->bf_frmlen = skb->len + FCS_LEN - (hdrlen & 3); | ||
2293 | 1701 | ||
2294 | void ath_tx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tid) | 1702 | ieee80211_is_data(fc) ? |
1703 | (bf->bf_state.bf_type |= BUF_DATA) : | ||
1704 | (bf->bf_state.bf_type &= ~BUF_DATA); | ||
1705 | ieee80211_is_back_req(fc) ? | ||
1706 | (bf->bf_state.bf_type |= BUF_BAR) : | ||
1707 | (bf->bf_state.bf_type &= ~BUF_BAR); | ||
1708 | ieee80211_is_pspoll(fc) ? | ||
1709 | (bf->bf_state.bf_type |= BUF_PSPOLL) : | ||
1710 | (bf->bf_state.bf_type &= ~BUF_PSPOLL); | ||
1711 | (sc->sc_flags & SC_OP_PREAMBLE_SHORT) ? | ||
1712 | (bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) : | ||
1713 | (bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE); | ||
1714 | (conf_is_ht(&sc->hw->conf) && !is_pae(skb) && | ||
1715 | (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) ? | ||
1716 | (bf->bf_state.bf_type |= BUF_HT) : | ||
1717 | (bf->bf_state.bf_type &= ~BUF_HT); | ||
1718 | |||
1719 | bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq); | ||
1720 | |||
1721 | bf->bf_keytype = get_hw_crypto_keytype(skb); | ||
1722 | |||
1723 | if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) { | ||
1724 | bf->bf_frmlen += tx_info->control.hw_key->icv_len; | ||
1725 | bf->bf_keyix = tx_info->control.hw_key->hw_key_idx; | ||
1726 | } else { | ||
1727 | bf->bf_keyix = ATH9K_TXKEYIX_INVALID; | ||
1728 | } | ||
1729 | |||
1730 | if (ieee80211_is_data_qos(fc) && (sc->sc_flags & SC_OP_TXAGGR)) | ||
1731 | assign_aggr_tid_seqno(skb, bf); | ||
1732 | |||
1733 | bf->bf_mpdu = skb; | ||
1734 | |||
1735 | bf->bf_dmacontext = dma_map_single(sc->dev, skb->data, | ||
1736 | skb->len, DMA_TO_DEVICE); | ||
1737 | if (unlikely(dma_mapping_error(sc->dev, bf->bf_dmacontext))) { | ||
1738 | bf->bf_mpdu = NULL; | ||
1739 | DPRINTF(sc, ATH_DBG_CONFIG, | ||
1740 | "dma_mapping_error() on TX\n"); | ||
1741 | return -ENOMEM; | ||
1742 | } | ||
1743 | |||
1744 | bf->bf_buf_addr = bf->bf_dmacontext; | ||
1745 | return 0; | ||
1746 | } | ||
1747 | |||
1748 | /* FIXME: tx power */ | ||
1749 | static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf, | ||
1750 | struct ath_tx_control *txctl) | ||
2295 | { | 1751 | { |
2296 | struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); | 1752 | struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu; |
2297 | struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum]; | 1753 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); |
2298 | struct ath_buf *bf; | 1754 | struct ath_node *an = NULL; |
2299 | struct list_head bf_head; | 1755 | struct list_head bf_head; |
1756 | struct ath_desc *ds; | ||
1757 | struct ath_atx_tid *tid; | ||
1758 | struct ath_hal *ah = sc->sc_ah; | ||
1759 | int frm_type; | ||
1760 | |||
1761 | frm_type = get_hw_packet_type(skb); | ||
1762 | |||
2300 | INIT_LIST_HEAD(&bf_head); | 1763 | INIT_LIST_HEAD(&bf_head); |
1764 | list_add_tail(&bf->list, &bf_head); | ||
2301 | 1765 | ||
2302 | if (txtid->state & AGGR_CLEANUP) /* cleanup is in progress */ | 1766 | ds = bf->bf_desc; |
2303 | return; | 1767 | ds->ds_link = 0; |
1768 | ds->ds_data = bf->bf_buf_addr; | ||
2304 | 1769 | ||
2305 | if (!(txtid->state & AGGR_ADDBA_COMPLETE)) { | 1770 | ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER, |
2306 | txtid->addba_exchangeattempts = 0; | 1771 | bf->bf_keyix, bf->bf_keytype, bf->bf_flags); |
2307 | return; | ||
2308 | } | ||
2309 | 1772 | ||
2310 | /* TID must be paused first */ | 1773 | ath9k_hw_filltxdesc(ah, ds, |
2311 | ath_tx_pause_tid(sc, txtid); | 1774 | skb->len, /* segment length */ |
1775 | true, /* first segment */ | ||
1776 | true, /* last segment */ | ||
1777 | ds); /* first descriptor */ | ||
2312 | 1778 | ||
2313 | /* drop all software retried frames and mark this TID */ | 1779 | bf->bf_lastfrm = bf; |
2314 | spin_lock_bh(&txq->axq_lock); | 1780 | |
2315 | while (!list_empty(&txtid->buf_q)) { | 1781 | spin_lock_bh(&txctl->txq->axq_lock); |
2316 | bf = list_first_entry(&txtid->buf_q, struct ath_buf, list); | 1782 | |
2317 | if (!bf_isretried(bf)) { | 1783 | if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) && |
1784 | tx_info->control.sta) { | ||
1785 | an = (struct ath_node *)tx_info->control.sta->drv_priv; | ||
1786 | tid = ATH_AN_2_TID(an, bf->bf_tidno); | ||
1787 | |||
1788 | if (ath_aggr_query(sc, an, bf->bf_tidno)) { | ||
2318 | /* | 1789 | /* |
2319 | * NB: it's based on the assumption that | 1790 | * Try aggregation if it's a unicast data frame |
2320 | * software retried frame will always stay | 1791 | * and the destination is HT capable. |
2321 | * at the head of software queue. | ||
2322 | */ | 1792 | */ |
2323 | break; | 1793 | ath_tx_send_ampdu(sc, tid, &bf_head, txctl); |
1794 | } else { | ||
1795 | /* | ||
1796 | * Send this frame as regular when ADDBA | ||
1797 | * exchange is neither complete nor pending. | ||
1798 | */ | ||
1799 | ath_tx_send_normal(sc, txctl->txq, | ||
1800 | tid, &bf_head); | ||
2324 | } | 1801 | } |
2325 | list_cut_position(&bf_head, | 1802 | } else { |
2326 | &txtid->buf_q, &bf->bf_lastfrm->list); | 1803 | bf->bf_lastbf = bf; |
2327 | ath_tx_update_baw(sc, txtid, bf->bf_seqno); | 1804 | bf->bf_nframes = 1; |
2328 | 1805 | ||
2329 | /* complete this sub-frame */ | 1806 | ath_buf_set_rate(sc, bf); |
2330 | ath_tx_complete_buf(sc, bf, &bf_head, 0, 0); | 1807 | ath_tx_txqaddbuf(sc, txctl->txq, &bf_head); |
2331 | } | 1808 | } |
2332 | 1809 | ||
2333 | if (txtid->baw_head != txtid->baw_tail) { | 1810 | spin_unlock_bh(&txctl->txq->axq_lock); |
2334 | spin_unlock_bh(&txq->axq_lock); | 1811 | } |
2335 | txtid->state |= AGGR_CLEANUP; | 1812 | |
2336 | } else { | 1813 | /* Upon failure caller should free skb */ |
2337 | txtid->state &= ~AGGR_ADDBA_COMPLETE; | 1814 | int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb, |
2338 | txtid->addba_exchangeattempts = 0; | 1815 | struct ath_tx_control *txctl) |
1816 | { | ||
1817 | struct ath_buf *bf; | ||
1818 | int r; | ||
1819 | |||
1820 | bf = ath_tx_get_buffer(sc); | ||
1821 | if (!bf) { | ||
1822 | DPRINTF(sc, ATH_DBG_XMIT, "TX buffers are full\n"); | ||
1823 | return -1; | ||
1824 | } | ||
1825 | |||
1826 | r = ath_tx_setup_buffer(sc, bf, skb, txctl); | ||
1827 | if (unlikely(r)) { | ||
1828 | struct ath_txq *txq = txctl->txq; | ||
1829 | |||
1830 | DPRINTF(sc, ATH_DBG_FATAL, "TX mem alloc failure\n"); | ||
1831 | |||
1832 | /* upon ath_tx_processq() this TX queue will be resumed, we | ||
1833 | * guarantee this will happen by knowing beforehand that | ||
1834 | * we will at least have to run TX completion on one buffer | ||
1835 | * on the queue */ | ||
1836 | spin_lock_bh(&txq->axq_lock); | ||
1837 | if (ath_txq_depth(sc, txq->axq_qnum) > 1) { | ||
1838 | ieee80211_stop_queue(sc->hw, | ||
1839 | skb_get_queue_mapping(skb)); | ||
1840 | txq->stopped = 1; | ||
1841 | } | ||
2339 | spin_unlock_bh(&txq->axq_lock); | 1842 | spin_unlock_bh(&txq->axq_lock); |
2340 | ath_tx_flush_tid(sc, txtid); | 1843 | |
1844 | spin_lock_bh(&sc->tx.txbuflock); | ||
1845 | list_add_tail(&bf->list, &sc->tx.txbuf); | ||
1846 | spin_unlock_bh(&sc->tx.txbuflock); | ||
1847 | |||
1848 | return r; | ||
2341 | } | 1849 | } |
2342 | } | ||
2343 | 1850 | ||
2344 | /* | 1851 | ath_tx_start_dma(sc, bf, txctl); |
2345 | * Tx scheduling logic | ||
2346 | * NB: must be called with txq lock held | ||
2347 | */ | ||
2348 | 1852 | ||
2349 | void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq) | 1853 | return 0; |
1854 | } | ||
1855 | |||
1856 | void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb) | ||
2350 | { | 1857 | { |
2351 | struct ath_atx_ac *ac; | 1858 | int hdrlen, padsize; |
2352 | struct ath_atx_tid *tid; | 1859 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); |
1860 | struct ath_tx_control txctl; | ||
2353 | 1861 | ||
2354 | /* nothing to schedule */ | 1862 | memset(&txctl, 0, sizeof(struct ath_tx_control)); |
2355 | if (list_empty(&txq->axq_acq)) | ||
2356 | return; | ||
2357 | /* | ||
2358 | * get the first node/ac pair on the queue | ||
2359 | */ | ||
2360 | ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list); | ||
2361 | list_del(&ac->list); | ||
2362 | ac->sched = false; | ||
2363 | 1863 | ||
2364 | /* | 1864 | /* |
2365 | * process a single tid per destination | 1865 | * As a temporary workaround, assign seq# here; this will likely need |
1866 | * to be cleaned up to work better with Beacon transmission and virtual | ||
1867 | * BSSes. | ||
2366 | */ | 1868 | */ |
2367 | do { | 1869 | if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { |
2368 | /* nothing to schedule */ | 1870 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
2369 | if (list_empty(&ac->tid_q)) | 1871 | if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) |
1872 | sc->tx.seq_no += 0x10; | ||
1873 | hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); | ||
1874 | hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no); | ||
1875 | } | ||
1876 | |||
1877 | /* Add the padding after the header if this is not already done */ | ||
1878 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | ||
1879 | if (hdrlen & 3) { | ||
1880 | padsize = hdrlen % 4; | ||
1881 | if (skb_headroom(skb) < padsize) { | ||
1882 | DPRINTF(sc, ATH_DBG_XMIT, "TX CABQ padding failed\n"); | ||
1883 | dev_kfree_skb_any(skb); | ||
2370 | return; | 1884 | return; |
1885 | } | ||
1886 | skb_push(skb, padsize); | ||
1887 | memmove(skb->data, skb->data + padsize, hdrlen); | ||
1888 | } | ||
2371 | 1889 | ||
2372 | tid = list_first_entry(&ac->tid_q, struct ath_atx_tid, list); | 1890 | txctl.txq = sc->beacon.cabq; |
2373 | list_del(&tid->list); | ||
2374 | tid->sched = false; | ||
2375 | 1891 | ||
2376 | if (tid->paused) /* check next tid to keep h/w busy */ | 1892 | DPRINTF(sc, ATH_DBG_XMIT, "transmitting CABQ packet, skb: %p\n", skb); |
2377 | continue; | ||
2378 | 1893 | ||
2379 | if ((txq->axq_depth % 2) == 0) | 1894 | if (ath_tx_start(sc, skb, &txctl) != 0) { |
2380 | ath_tx_sched_aggr(sc, txq, tid); | 1895 | DPRINTF(sc, ATH_DBG_XMIT, "CABQ TX failed\n"); |
1896 | goto exit; | ||
1897 | } | ||
2381 | 1898 | ||
1899 | return; | ||
1900 | exit: | ||
1901 | dev_kfree_skb_any(skb); | ||
1902 | } | ||
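ath_tx_cabq() shifts the 802.11 header forward so the payload handed down by mac80211 stays put and 4-byte aligned before DMA, and ath_tx_complete() later undoes exactly that shift before the frame goes back to mac80211. A toy model of the round trip on a flat buffer (skb handling is simplified away; lengths and names are illustrative):

#include <stdio.h>
#include <string.h>

#define HEADROOM 8

int main(void)
{
        unsigned char frame[HEADROOM + 64];
        unsigned char *data = frame + HEADROOM;  /* plays the role of skb->data */
        unsigned int hdrlen = 26;                /* QoS header, not 4-aligned   */
        unsigned int padsize = hdrlen % 4;       /* 2 pad bytes needed          */

        memset(frame, 0, sizeof(frame));
        memset(data, 'H', hdrlen);               /* fake 802.11 header          */
        memcpy(data + hdrlen, "PAYLOAD", 7);     /* payload must never move     */

        /* TX path: skb_push(skb, padsize); memmove(data, data + padsize, hdrlen); */
        data -= padsize;
        memmove(data, data + padsize, hdrlen);

        /* completion path: memmove(data + padsize, data, hdrlen); skb_pull(); */
        memmove(data + padsize, data, hdrlen);
        data += padsize;

        printf("%s\n", data + hdrlen);           /* payload back where it was   */
        return 0;
}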
1903 | |||
1904 | /*****************/ | ||
1905 | /* TX Completion */ | ||
1906 | /*****************/ | ||
1907 | |||
1908 | static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, | ||
1909 | struct ath_xmit_status *tx_status) | ||
1910 | { | ||
1911 | struct ieee80211_hw *hw = sc->hw; | ||
1912 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); | ||
1913 | struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info); | ||
1914 | int hdrlen, padsize; | ||
1915 | |||
1916 | DPRINTF(sc, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb); | ||
1917 | |||
1918 | if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK || | ||
1919 | tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) { | ||
1920 | kfree(tx_info_priv); | ||
1921 | tx_info->rate_driver_data[0] = NULL; | ||
1922 | } | ||
1923 | |||
1924 | if (tx_status->flags & ATH_TX_BAR) { | ||
1925 | tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; | ||
1926 | tx_status->flags &= ~ATH_TX_BAR; | ||
1927 | } | ||
1928 | |||
1929 | if (!(tx_status->flags & (ATH_TX_ERROR | ATH_TX_XRETRY))) { | ||
1930 | /* Frame was ACKed */ | ||
1931 | tx_info->flags |= IEEE80211_TX_STAT_ACK; | ||
1932 | } | ||
1933 | |||
1934 | tx_info->status.rates[0].count = tx_status->retries + 1; | ||
1935 | |||
1936 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | ||
1937 | padsize = hdrlen & 3; | ||
1938 | if (padsize && hdrlen >= 24) { | ||
2382 | /* | 1939 | /* |
2383 | * add tid to round-robin queue if more frames | 1940 | * Remove MAC header padding before giving the frame back to |
2384 | * are pending for the tid | 1941 | * mac80211. |
2385 | */ | 1942 | */ |
2386 | if (!list_empty(&tid->buf_q)) | 1943 | memmove(skb->data + padsize, skb->data, hdrlen); |
2387 | ath_tx_queue_tid(txq, tid); | 1944 | skb_pull(skb, padsize); |
1945 | } | ||
2388 | 1946 | ||
2389 | /* only schedule one TID at a time */ | 1947 | ieee80211_tx_status(hw, skb); |
2390 | break; | 1948 | } |
2391 | } while (!list_empty(&ac->tid_q)); | 1949 | |
1950 | static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, | ||
1951 | struct list_head *bf_q, | ||
1952 | int txok, int sendbar) | ||
1953 | { | ||
1954 | struct sk_buff *skb = bf->bf_mpdu; | ||
1955 | struct ath_xmit_status tx_status; | ||
1956 | unsigned long flags; | ||
2392 | 1957 | ||
2393 | /* | 1958 | /* |
2394 | * schedule AC if more TIDs need processing | 1959 | * Set retry information. |
1960 | * NB: Don't use the information in the descriptor, because the frame | ||
1961 | * could be software retried. | ||
2395 | */ | 1962 | */ |
2396 | if (!list_empty(&ac->tid_q)) { | 1963 | tx_status.retries = bf->bf_retries; |
1964 | tx_status.flags = 0; | ||
1965 | |||
1966 | if (sendbar) | ||
1967 | tx_status.flags = ATH_TX_BAR; | ||
1968 | |||
1969 | if (!txok) { | ||
1970 | tx_status.flags |= ATH_TX_ERROR; | ||
1971 | |||
1972 | if (bf_isxretried(bf)) | ||
1973 | tx_status.flags |= ATH_TX_XRETRY; | ||
1974 | } | ||
1975 | |||
1976 | dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE); | ||
1977 | ath_tx_complete(sc, skb, &tx_status); | ||
1978 | |||
1979 | /* | ||
1980 | * Return the list of ath_buf of this mpdu to free queue | ||
1981 | */ | ||
1982 | spin_lock_irqsave(&sc->tx.txbuflock, flags); | ||
1983 | list_splice_tail_init(bf_q, &sc->tx.txbuf); | ||
1984 | spin_unlock_irqrestore(&sc->tx.txbuflock, flags); | ||
1985 | } | ||
1986 | |||
1987 | static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf, | ||
1988 | int txok) | ||
1989 | { | ||
1990 | struct ath_buf *bf_last = bf->bf_lastbf; | ||
1991 | struct ath_desc *ds = bf_last->bf_desc; | ||
1992 | u16 seq_st = 0; | ||
1993 | u32 ba[WME_BA_BMP_SIZE >> 5]; | ||
1994 | int ba_index; | ||
1995 | int nbad = 0; | ||
1996 | int isaggr = 0; | ||
1997 | |||
1998 | if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED) | ||
1999 | return 0; | ||
2000 | |||
2001 | isaggr = bf_isaggr(bf); | ||
2002 | if (isaggr) { | ||
2003 | seq_st = ATH_DS_BA_SEQ(ds); | ||
2004 | memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3); | ||
2005 | } | ||
2006 | |||
2007 | while (bf) { | ||
2008 | ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno); | ||
2009 | if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index))) | ||
2010 | nbad++; | ||
2011 | |||
2012 | bf = bf->bf_next; | ||
2013 | } | ||
2014 | |||
2015 | return nbad; | ||
2016 | } | ||
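ath_tx_num_badfrms() above counts the sub-frames of an aggregate whose bit is missing from the hardware's block-ack bitmap. A minimal model of that lookup; the 64-entry window and the index/mask arithmetic mirror what the usual ATH_BA_INDEX/ATH_BA_ISSET helpers do and are assumptions here, not copied from the headers:

#include <stdio.h>
#include <stdint.h>

#define BA_BMP_SIZE 64          /* block-ack window of 64 sub-frames */

/* test one bit of the BA bitmap, indexed relative to the window start */
static int ba_isset(const uint32_t ba[2], unsigned int seq_st, unsigned int seqno)
{
        unsigned int idx = (seqno - seq_st) & 4095;   /* 12-bit wrap */

        if (idx >= BA_BMP_SIZE)
                return 0;
        return !!(ba[idx >> 5] & (1u << (idx & 31)));
}

int main(void)
{
        uint32_t ba[2] = { 0x7, 0x0 };   /* receiver acked the first three only */
        unsigned int seq_st = 100, seqno, nbad = 0;

        for (seqno = 100; seqno < 105; seqno++)   /* 5 sub-frames were sent */
                if (!ba_isset(ba, seq_st, seqno))
                        nbad++;

        printf("bad sub-frames: %u\n", nbad);     /* 2: seqnos 103 and 104  */
        return 0;
}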
2017 | |||
2018 | static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, int nbad) | ||
2019 | { | ||
2020 | struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu; | ||
2021 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); | ||
2022 | struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info); | ||
2023 | |||
2024 | tx_info_priv->update_rc = false; | ||
2025 | if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) | ||
2026 | tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; | ||
2027 | |||
2028 | if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 && | ||
2029 | (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) { | ||
2030 | if (bf_isdata(bf)) { | ||
2031 | memcpy(&tx_info_priv->tx, &ds->ds_txstat, | ||
2032 | sizeof(tx_info_priv->tx)); | ||
2033 | tx_info_priv->n_frames = bf->bf_nframes; | ||
2034 | tx_info_priv->n_bad_frames = nbad; | ||
2035 | tx_info_priv->update_rc = true; | ||
2036 | } | ||
2037 | } | ||
2038 | } | ||
2039 | |||
2040 | static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) | ||
2041 | { | ||
2042 | struct ath_hal *ah = sc->sc_ah; | ||
2043 | struct ath_buf *bf, *lastbf, *bf_held = NULL; | ||
2044 | struct list_head bf_head; | ||
2045 | struct ath_desc *ds; | ||
2046 | int txok, nbad = 0; | ||
2047 | int status; | ||
2048 | |||
2049 | DPRINTF(sc, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n", | ||
2050 | txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum), | ||
2051 | txq->axq_link); | ||
2052 | |||
2053 | for (;;) { | ||
2054 | spin_lock_bh(&txq->axq_lock); | ||
2055 | if (list_empty(&txq->axq_q)) { | ||
2056 | txq->axq_link = NULL; | ||
2057 | txq->axq_linkbuf = NULL; | ||
2058 | spin_unlock_bh(&txq->axq_lock); | ||
2059 | break; | ||
2060 | } | ||
2061 | bf = list_first_entry(&txq->axq_q, struct ath_buf, list); | ||
2062 | |||
2397 | /* | 2063 | /* |
2398 | * add dest ac to txq if not already added | 2064 | * There is a race condition that a BH gets scheduled |
2065 | * after sw writes TxE and before hw re-load the last | ||
2066 | * descriptor to get the newly chained one. | ||
2067 | * Software must keep the last DONE descriptor as a | ||
2068 | * holding descriptor - software does so by marking | ||
2069 | * it with the STALE flag. | ||
2399 | */ | 2070 | */ |
2400 | if (!ac->sched) { | 2071 | bf_held = NULL; |
2401 | ac->sched = true; | 2072 | if (bf->bf_status & ATH_BUFSTATUS_STALE) { |
2402 | list_add_tail(&ac->list, &txq->axq_acq); | 2073 | bf_held = bf; |
2074 | if (list_is_last(&bf_held->list, &txq->axq_q)) { | ||
2075 | /* FIXME: | ||
2076 | * The holding descriptor is the last | ||
2077 | * descriptor in queue. It's safe to remove | ||
2078 | * the last holding descriptor in BH context. | ||
2079 | */ | ||
2080 | spin_unlock_bh(&txq->axq_lock); | ||
2081 | break; | ||
2082 | } else { | ||
2083 | bf = list_entry(bf_held->list.next, | ||
2084 | struct ath_buf, list); | ||
2085 | } | ||
2086 | } | ||
2087 | |||
2088 | lastbf = bf->bf_lastbf; | ||
2089 | ds = lastbf->bf_desc; | ||
2090 | |||
2091 | status = ath9k_hw_txprocdesc(ah, ds); | ||
2092 | if (status == -EINPROGRESS) { | ||
2093 | spin_unlock_bh(&txq->axq_lock); | ||
2094 | break; | ||
2403 | } | 2095 | } |
2096 | if (bf->bf_desc == txq->axq_lastdsWithCTS) | ||
2097 | txq->axq_lastdsWithCTS = NULL; | ||
2098 | if (ds == txq->axq_gatingds) | ||
2099 | txq->axq_gatingds = NULL; | ||
2100 | |||
2101 | /* | ||
2102 | * Remove ath_buf's of the same transmit unit from txq, | ||
2103 | * however leave the last descriptor back as the holding | ||
2104 | * descriptor for hw. | ||
2105 | */ | ||
2106 | lastbf->bf_status |= ATH_BUFSTATUS_STALE; | ||
2107 | INIT_LIST_HEAD(&bf_head); | ||
2108 | |||
2109 | if (!list_is_singular(&lastbf->list)) | ||
2110 | list_cut_position(&bf_head, | ||
2111 | &txq->axq_q, lastbf->list.prev); | ||
2112 | |||
2113 | txq->axq_depth--; | ||
2114 | |||
2115 | if (bf_isaggr(bf)) | ||
2116 | txq->axq_aggr_depth--; | ||
2117 | |||
2118 | txok = (ds->ds_txstat.ts_status == 0); | ||
2119 | |||
2120 | spin_unlock_bh(&txq->axq_lock); | ||
2121 | |||
2122 | if (bf_held) { | ||
2123 | list_del(&bf_held->list); | ||
2124 | spin_lock_bh(&sc->tx.txbuflock); | ||
2125 | list_add_tail(&bf_held->list, &sc->tx.txbuf); | ||
2126 | spin_unlock_bh(&sc->tx.txbuflock); | ||
2127 | } | ||
2128 | |||
2129 | if (!bf_isampdu(bf)) { | ||
2130 | /* | ||
2131 | * This frame is sent out as a single frame. | ||
2132 | * Use hardware retry status for this frame. | ||
2133 | */ | ||
2134 | bf->bf_retries = ds->ds_txstat.ts_longretry; | ||
2135 | if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) | ||
2136 | bf->bf_state.bf_type |= BUF_XRETRY; | ||
2137 | nbad = 0; | ||
2138 | } else { | ||
2139 | nbad = ath_tx_num_badfrms(sc, bf, txok); | ||
2140 | } | ||
2141 | |||
2142 | ath_tx_rc_status(bf, ds, nbad); | ||
2143 | |||
2144 | if (bf_isampdu(bf)) | ||
2145 | ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok); | ||
2146 | else | ||
2147 | ath_tx_complete_buf(sc, bf, &bf_head, txok, 0); | ||
2148 | |||
2149 | spin_lock_bh(&txq->axq_lock); | ||
2150 | if (txq->stopped && ath_txq_depth(sc, txq->axq_qnum) <= | ||
2151 | (ATH_TXBUF - 20)) { | ||
2152 | int qnum; | ||
2153 | qnum = ath_get_mac80211_qnum(txq->axq_qnum, sc); | ||
2154 | if (qnum != -1) { | ||
2155 | ieee80211_wake_queue(sc->hw, qnum); | ||
2156 | txq->stopped = 0; | ||
2157 | } | ||
2158 | |||
2159 | } | ||
2160 | |||
2161 | if (sc->sc_flags & SC_OP_TXAGGR) | ||
2162 | ath_txq_schedule(sc, txq); | ||
2163 | spin_unlock_bh(&txq->axq_lock); | ||
2404 | } | 2164 | } |
2405 | } | 2165 | } |
2406 | 2166 | ||
2407 | /* Initialize per-node transmit state */ | 2167 | |
2168 | void ath_tx_tasklet(struct ath_softc *sc) | ||
2169 | { | ||
2170 | int i; | ||
2171 | u32 qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1); | ||
2172 | |||
2173 | ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask); | ||
2174 | |||
2175 | for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { | ||
2176 | if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i))) | ||
2177 | ath_tx_processq(sc, &sc->tx.txq[i]); | ||
2178 | } | ||
2179 | } | ||
2180 | |||
2181 | /*****************/ | ||
2182 | /* Init, Cleanup */ | ||
2183 | /*****************/ | ||
2184 | |||
2185 | int ath_tx_init(struct ath_softc *sc, int nbufs) | ||
2186 | { | ||
2187 | int error = 0; | ||
2188 | |||
2189 | do { | ||
2190 | spin_lock_init(&sc->tx.txbuflock); | ||
2191 | |||
2192 | error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf, | ||
2193 | "tx", nbufs, 1); | ||
2194 | if (error != 0) { | ||
2195 | DPRINTF(sc, ATH_DBG_FATAL, | ||
2196 | "Failed to allocate tx descriptors: %d\n", | ||
2197 | error); | ||
2198 | break; | ||
2199 | } | ||
2200 | |||
2201 | error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf, | ||
2202 | "beacon", ATH_BCBUF, 1); | ||
2203 | if (error != 0) { | ||
2204 | DPRINTF(sc, ATH_DBG_FATAL, | ||
2205 | "Failed to allocate beacon descriptors: %d\n", | ||
2206 | error); | ||
2207 | break; | ||
2208 | } | ||
2209 | |||
2210 | } while (0); | ||
2211 | |||
2212 | if (error != 0) | ||
2213 | ath_tx_cleanup(sc); | ||
2214 | |||
2215 | return error; | ||
2216 | } | ||
2217 | |||
2218 | int ath_tx_cleanup(struct ath_softc *sc) | ||
2219 | { | ||
2220 | if (sc->beacon.bdma.dd_desc_len != 0) | ||
2221 | ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf); | ||
2222 | |||
2223 | if (sc->tx.txdma.dd_desc_len != 0) | ||
2224 | ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf); | ||
2225 | |||
2226 | return 0; | ||
2227 | } | ||
2408 | 2228 | ||
2409 | void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) | 2229 | void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) |
2410 | { | 2230 | { |
@@ -2412,9 +2232,6 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) | |||
2412 | struct ath_atx_ac *ac; | 2232 | struct ath_atx_ac *ac; |
2413 | int tidno, acno; | 2233 | int tidno, acno; |
2414 | 2234 | ||
2415 | /* | ||
2416 | * Init per tid tx state | ||
2417 | */ | ||
2418 | for (tidno = 0, tid = &an->tid[tidno]; | 2235 | for (tidno = 0, tid = &an->tid[tidno]; |
2419 | tidno < WME_NUM_TID; | 2236 | tidno < WME_NUM_TID; |
2420 | tidno++, tid++) { | 2237 | tidno++, tid++) { |
@@ -2424,22 +2241,16 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) | |||
2424 | tid->baw_size = WME_MAX_BA; | 2241 | tid->baw_size = WME_MAX_BA; |
2425 | tid->baw_head = tid->baw_tail = 0; | 2242 | tid->baw_head = tid->baw_tail = 0; |
2426 | tid->sched = false; | 2243 | tid->sched = false; |
2427 | tid->paused = false; | 2244 | tid->paused = false; |
2428 | tid->state &= ~AGGR_CLEANUP; | 2245 | tid->state &= ~AGGR_CLEANUP; |
2429 | INIT_LIST_HEAD(&tid->buf_q); | 2246 | INIT_LIST_HEAD(&tid->buf_q); |
2430 | |||
2431 | acno = TID_TO_WME_AC(tidno); | 2247 | acno = TID_TO_WME_AC(tidno); |
2432 | tid->ac = &an->ac[acno]; | 2248 | tid->ac = &an->ac[acno]; |
2433 | |||
2434 | /* ADDBA state */ | ||
2435 | tid->state &= ~AGGR_ADDBA_COMPLETE; | 2249 | tid->state &= ~AGGR_ADDBA_COMPLETE; |
2436 | tid->state &= ~AGGR_ADDBA_PROGRESS; | 2250 | tid->state &= ~AGGR_ADDBA_PROGRESS; |
2437 | tid->addba_exchangeattempts = 0; | 2251 | tid->addba_exchangeattempts = 0; |
2438 | } | 2252 | } |
2439 | 2253 | ||
2440 | /* | ||
2441 | * Init per ac tx state | ||
2442 | */ | ||
2443 | for (acno = 0, ac = &an->ac[acno]; | 2254 | for (acno = 0, ac = &an->ac[acno]; |
2444 | acno < WME_NUM_AC; acno++, ac++) { | 2255 | acno < WME_NUM_AC; acno++, ac++) { |
2445 | ac->sched = false; | 2256 | ac->sched = false; |
@@ -2466,14 +2277,13 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) | |||
2466 | } | 2277 | } |
2467 | } | 2278 | } |
2468 | 2279 | ||
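Each of the WME_NUM_TID TIDs above is bound to one of the WME_NUM_AC access categories through TID_TO_WME_AC(tidno). The macro itself lives in the driver headers, not in this hunk; the sketch below shows the conventional 802.11e TID-to-AC mapping it is expected to encode, with WME_AC_BE/BK/VI/VO taken to be the driver's access-category indices.

/* Assumed form of the TID -> access-category mapping used above;
 * the real macro and the WME_AC_* constants come from the ath9k headers.
 */
#define TID_TO_WME_AC_SKETCH(_tid)				\
	((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE :		\
	 (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK :		\
	 (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI :		\
	 WME_AC_VO)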
2469 | /* Cleanup the pending buffers for the node. */ | ||
2470 | |||
2471 | void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) | 2280 | void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) |
2472 | { | 2281 | { |
2473 | int i; | 2282 | int i; |
2474 | struct ath_atx_ac *ac, *ac_tmp; | 2283 | struct ath_atx_ac *ac, *ac_tmp; |
2475 | struct ath_atx_tid *tid, *tid_tmp; | 2284 | struct ath_atx_tid *tid, *tid_tmp; |
2476 | struct ath_txq *txq; | 2285 | struct ath_txq *txq; |
2286 | |||
2477 | for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { | 2287 | for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { |
2478 | if (ATH_TXQ_SETUP(sc, i)) { | 2288 | if (ATH_TXQ_SETUP(sc, i)) { |
2479 | txq = &sc->tx.txq[i]; | 2289 | txq = &sc->tx.txq[i]; |
@@ -2504,51 +2314,3 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) | |||
2504 | } | 2314 | } |
2505 | } | 2315 | } |
2506 | } | 2316 | } |
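Both ath_tx_tasklet() and ath_tx_node_cleanup() skip hardware queues that were never claimed by checking ATH_TXQ_SETUP(sc, i). The macro is defined elsewhere in the driver; the sketch below shows the per-queue bitmask test it is assumed to perform.

/* Assumed form of the guard used above: one "configured" bit per
 * hardware queue, kept in a bitmask on the softc.
 */
#define ATH_TXQ_SETUP_SKETCH(sc, i)	((sc)->tx.txqsetup & (1 << (i)))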
2507 | |||
2508 | void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb) | ||
2509 | { | ||
2510 | int hdrlen, padsize; | ||
2511 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
2512 | struct ath_tx_control txctl; | ||
2513 | |||
2514 | memset(&txctl, 0, sizeof(struct ath_tx_control)); | ||
2515 | |||
2516 | /* | ||
2517 | * As a temporary workaround, assign seq# here; this will likely need | ||
2518 | * to be cleaned up to work better with Beacon transmission and virtual | ||
2519 | * BSSes. | ||
2520 | */ | ||
2521 | if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { | ||
2522 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | ||
2523 | if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) | ||
2524 | sc->tx.seq_no += 0x10; | ||
2525 | hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); | ||
2526 | hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no); | ||
2527 | } | ||
2528 | |||
2529 | /* Add the padding after the header if this is not already done */ | ||
2530 | hdrlen = ieee80211_get_hdrlen_from_skb(skb); | ||
2531 | if (hdrlen & 3) { | ||
2532 | padsize = hdrlen % 4; | ||
2533 | if (skb_headroom(skb) < padsize) { | ||
2534 | DPRINTF(sc, ATH_DBG_XMIT, "TX CABQ padding failed\n"); | ||
2535 | dev_kfree_skb_any(skb); | ||
2536 | return; | ||
2537 | } | ||
2538 | skb_push(skb, padsize); | ||
2539 | memmove(skb->data, skb->data + padsize, hdrlen); | ||
2540 | } | ||
2541 | |||
2542 | txctl.txq = sc->beacon.cabq; | ||
2543 | |||
2544 | DPRINTF(sc, ATH_DBG_XMIT, "transmitting CABQ packet, skb: %p\n", skb); | ||
2545 | |||
2546 | if (ath_tx_start(sc, skb, &txctl) != 0) { | ||
2547 | DPRINTF(sc, ATH_DBG_XMIT, "CABQ TX failed\n"); | ||
2548 | goto exit; | ||
2549 | } | ||
2550 | |||
2551 | return; | ||
2552 | exit: | ||
2553 | dev_kfree_skb_any(skb); | ||
2554 | } | ||
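The ath_tx_cabq() body removed here (the patch regroups code, so the function reappears elsewhere in the file) pushes hdrlen % 4 bytes of headroom and slides the 802.11 header forward, leaving a pad between header and payload so the payload stays 4-byte aligned. The standalone sketch below only checks that arithmetic for the usual header lengths; it is an illustration, not driver code.

/* Standalone sketch of the padding arithmetic used above.
 * Build with: cc -o pad pad.c
 */
#include <stdio.h>

int main(void)
{
	/* Typical 802.11 header lengths: 24 (data), 26 (QoS data),
	 * 30 (4-address), 32 (4-address QoS).
	 */
	int hdrlens[] = { 24, 26, 30, 32 };
	size_t i;

	for (i = 0; i < sizeof(hdrlens) / sizeof(hdrlens[0]); i++) {
		int hdrlen = hdrlens[i];
		int padsize = (hdrlen & 3) ? hdrlen % 4 : 0;

		printf("hdrlen %2d -> pad %d -> payload offset %2d (%% 4 == %d)\n",
		       hdrlen, padsize, hdrlen + padsize,
		       (hdrlen + padsize) % 4);
	}
	return 0;
}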