author    | Sujith <Sujith.Manoharan@atheros.com>     | 2009-01-16 11:08:53 -0500
committer | John W. Linville <linville@tuxdriver.com> | 2009-01-29 16:00:42 -0500
commit    | d43f301520aa64bb331736a4568d435762f980b0 (patch)
tree      | f67454cd6057a071501d2a8c5ec2bad05737a0f2 /drivers/net/wireless/ath9k/xmit.c
parent    | 6ef9b13db24757a9856f2feb1e571f34938567c9 (diff)
ath9k: Revamp TX aggregation
This patch cleans up the convoluted buffer management
logic for TX aggregation. Both aggregation creation and
completion are addressed.
Signed-off-by: Sujith <Sujith.Manoharan@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/ath9k/xmit.c')
-rw-r--r-- | drivers/net/wireless/ath9k/xmit.c | 305 |
1 file changed, 93 insertions(+), 212 deletions(-)
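For orientation before reading the diff: the cleanup moves the TX path to a strict one-ath_buf-per-frame model, so the old bf_lastfrm sub-chains and the list_cut_position() calls that carved them out of the software queue collapse into single list_move_tail() moves. The sketch below is illustrative only (example_flush_tid is a hypothetical name; locking and the retry assertion are omitted), but it shows the pattern the patch converges on:

```c
/*
 * Minimal sketch of the simplified queue handling, assuming one
 * ath_buf per frame.  Not part of the patch; names are hypothetical.
 */
static void example_flush_tid(struct ath_softc *sc, struct ath_txq *txq,
			      struct ath_atx_tid *tid)
{
	struct ath_buf *bf;
	LIST_HEAD(bf_head);		/* temporary single-frame list */

	while (!list_empty(&tid->buf_q)) {
		bf = list_first_entry(&tid->buf_q, struct ath_buf, list);

		/* old code: list_cut_position(&bf_head, &tid->buf_q,
		 *                              &bf->bf_lastfrm->list); */
		list_move_tail(&bf->list, &bf_head);

		/* hand the one-buffer list to the normal TX path */
		ath_tx_send_normal(sc, txq, tid, &bf_head);
	}
}
```

The descriptor duplication that the old software-retry path open-coded is likewise factored out into the new ath_clone_txbuf() helper introduced in the first large hunk.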
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index d7cec0fee34c..0d05a7f8903f 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -151,7 +151,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
151 | while (!list_empty(&tid->buf_q)) { | 151 | while (!list_empty(&tid->buf_q)) { |
152 | bf = list_first_entry(&tid->buf_q, struct ath_buf, list); | 152 | bf = list_first_entry(&tid->buf_q, struct ath_buf, list); |
153 | ASSERT(!bf_isretried(bf)); | 153 | ASSERT(!bf_isretried(bf)); |
154 | list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list); | 154 | list_move_tail(&bf->list, &bf_head); |
155 | ath_tx_send_normal(sc, txq, tid, &bf_head); | 155 | ath_tx_send_normal(sc, txq, tid, &bf_head); |
156 | } | 156 | } |
157 | 157 | ||
@@ -212,9 +212,9 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
212 | for (;;) { | 212 | for (;;) { |
213 | if (list_empty(&tid->buf_q)) | 213 | if (list_empty(&tid->buf_q)) |
214 | break; | 214 | break; |
215 | bf = list_first_entry(&tid->buf_q, struct ath_buf, list); | ||
216 | 215 | ||
217 | list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list); | 216 | bf = list_first_entry(&tid->buf_q, struct ath_buf, list); |
217 | list_move_tail(&bf->list, &bf_head); | ||
218 | 218 | ||
219 | if (bf_isretried(bf)) | 219 | if (bf_isretried(bf)) |
220 | ath_tx_update_baw(sc, tid, bf->bf_seqno); | 220 | ath_tx_update_baw(sc, tid, bf->bf_seqno); |
@@ -241,17 +241,37 @@ static void ath_tx_set_retry(struct ath_softc *sc, struct ath_buf *bf)
241 | hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY); | 241 | hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY); |
242 | } | 242 | } |
243 | 243 | ||
244 | static void ath_tx_complete_aggr_rifs(struct ath_softc *sc, struct ath_txq *txq, | 244 | static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf) |
245 | struct ath_buf *bf, struct list_head *bf_q, | 245 | { |
246 | int txok) | 246 | struct ath_buf *tbf; |
247 | |||
248 | spin_lock_bh(&sc->tx.txbuflock); | ||
249 | ASSERT(!list_empty((&sc->tx.txbuf))); | ||
250 | tbf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list); | ||
251 | list_del(&tbf->list); | ||
252 | spin_unlock_bh(&sc->tx.txbuflock); | ||
253 | |||
254 | ATH_TXBUF_RESET(tbf); | ||
255 | |||
256 | tbf->bf_mpdu = bf->bf_mpdu; | ||
257 | tbf->bf_buf_addr = bf->bf_buf_addr; | ||
258 | *(tbf->bf_desc) = *(bf->bf_desc); | ||
259 | tbf->bf_state = bf->bf_state; | ||
260 | tbf->bf_dmacontext = bf->bf_dmacontext; | ||
261 | |||
262 | return tbf; | ||
263 | } | ||
264 | |||
265 | static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, | ||
266 | struct ath_buf *bf, struct list_head *bf_q, | ||
267 | int txok) | ||
247 | { | 268 | { |
248 | struct ath_node *an = NULL; | 269 | struct ath_node *an = NULL; |
249 | struct sk_buff *skb; | 270 | struct sk_buff *skb; |
250 | struct ieee80211_tx_info *tx_info; | 271 | struct ieee80211_tx_info *tx_info; |
251 | struct ath_atx_tid *tid = NULL; | 272 | struct ath_atx_tid *tid = NULL; |
252 | struct ath_buf *bf_last = bf->bf_lastbf; | 273 | struct ath_buf *bf_next, *bf_last = bf->bf_lastbf; |
253 | struct ath_desc *ds = bf_last->bf_desc; | 274 | struct ath_desc *ds = bf_last->bf_desc; |
254 | struct ath_buf *bf_next, *bf_lastq = NULL; | ||
255 | struct list_head bf_head, bf_pending; | 275 | struct list_head bf_head, bf_pending; |
256 | u16 seq_st = 0; | 276 | u16 seq_st = 0; |
257 | u32 ba[WME_BA_BMP_SIZE >> 5]; | 277 | u32 ba[WME_BA_BMP_SIZE >> 5]; |
@@ -266,28 +286,23 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc, struct ath_txq *txq,
266 | } | 286 | } |
267 | 287 | ||
268 | isaggr = bf_isaggr(bf); | 288 | isaggr = bf_isaggr(bf); |
269 | if (isaggr) { | 289 | memset(ba, 0, WME_BA_BMP_SIZE >> 3); |
270 | if (txok) { | ||
271 | if (ATH_DS_TX_BA(ds)) { | ||
272 | seq_st = ATH_DS_BA_SEQ(ds); | ||
273 | memcpy(ba, ATH_DS_BA_BITMAP(ds), | ||
274 | WME_BA_BMP_SIZE >> 3); | ||
275 | } else { | ||
276 | memset(ba, 0, WME_BA_BMP_SIZE >> 3); | ||
277 | 290 | ||
278 | /* | 291 | if (isaggr && txok) { |
279 | * AR5416 can become deaf/mute when BA | 292 | if (ATH_DS_TX_BA(ds)) { |
280 | * issue happens. Chip needs to be reset. | 293 | seq_st = ATH_DS_BA_SEQ(ds); |
281 | * But AP code may have sychronization issues | 294 | memcpy(ba, ATH_DS_BA_BITMAP(ds), |
282 | * when perform internal reset in this routine. | 295 | WME_BA_BMP_SIZE >> 3); |
283 | * Only enable reset in STA mode for now. | ||
284 | */ | ||
285 | if (sc->sc_ah->ah_opmode == | ||
286 | NL80211_IFTYPE_STATION) | ||
287 | needreset = 1; | ||
288 | } | ||
289 | } else { | 296 | } else { |
290 | memset(ba, 0, WME_BA_BMP_SIZE >> 3); | 297 | /* |
298 | * AR5416 can become deaf/mute when BA | ||
299 | * issue happens. Chip needs to be reset. | ||
300 | * But AP code may have sychronization issues | ||
301 | * when perform internal reset in this routine. | ||
302 | * Only enable reset in STA mode for now. | ||
303 | */ | ||
304 | if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_STATION) | ||
305 | needreset = 1; | ||
291 | } | 306 | } |
292 | } | 307 | } |
293 | 308 | ||
@@ -304,7 +319,6 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc, struct ath_txq *txq,
304 | } else if (!isaggr && txok) { | 319 | } else if (!isaggr && txok) { |
305 | /* transmit completion */ | 320 | /* transmit completion */ |
306 | } else { | 321 | } else { |
307 | |||
308 | if (!(tid->state & AGGR_CLEANUP) && | 322 | if (!(tid->state & AGGR_CLEANUP) && |
309 | ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) { | 323 | ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) { |
310 | if (bf->bf_retries < ATH_MAX_SW_RETRIES) { | 324 | if (bf->bf_retries < ATH_MAX_SW_RETRIES) { |
@@ -325,19 +339,10 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc, struct ath_txq *txq,
325 | } | 339 | } |
326 | 340 | ||
327 | if (bf_next == NULL) { | 341 | if (bf_next == NULL) { |
328 | ASSERT(bf->bf_lastfrm == bf_last); | 342 | INIT_LIST_HEAD(&bf_head); |
329 | if (!list_empty(bf_q)) { | ||
330 | bf_lastq = list_entry(bf_q->prev, | ||
331 | struct ath_buf, list); | ||
332 | list_cut_position(&bf_head, | ||
333 | bf_q, &bf_lastq->list); | ||
334 | } else { | ||
335 | INIT_LIST_HEAD(&bf_head); | ||
336 | } | ||
337 | } else { | 343 | } else { |
338 | ASSERT(!list_empty(bf_q)); | 344 | ASSERT(!list_empty(bf_q)); |
339 | list_cut_position(&bf_head, | 345 | list_move_tail(&bf->list, &bf_head); |
340 | bf_q, &bf->bf_lastfrm->list); | ||
341 | } | 346 | } |
342 | 347 | ||
343 | if (!txpending) { | 348 | if (!txpending) { |
@@ -351,53 +356,20 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc, struct ath_txq *txq,
351 | 356 | ||
352 | ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar); | 357 | ath_tx_complete_buf(sc, bf, &bf_head, !txfail, sendbar); |
353 | } else { | 358 | } else { |
354 | /* | 359 | /* retry the un-acked ones */ |
355 | * retry the un-acked ones | ||
356 | */ | ||
357 | if (bf->bf_next == NULL && | 360 | if (bf->bf_next == NULL && |
358 | bf_last->bf_status & ATH_BUFSTATUS_STALE) { | 361 | bf_last->bf_status & ATH_BUFSTATUS_STALE) { |
359 | struct ath_buf *tbf; | 362 | struct ath_buf *tbf; |
360 | 363 | ||
361 | /* allocate new descriptor */ | 364 | tbf = ath_clone_txbuf(sc, bf_last); |
362 | spin_lock_bh(&sc->tx.txbuflock); | 365 | ath9k_hw_cleartxdesc(sc->sc_ah, tbf->bf_desc); |
363 | ASSERT(!list_empty((&sc->tx.txbuf))); | ||
364 | tbf = list_first_entry(&sc->tx.txbuf, | ||
365 | struct ath_buf, list); | ||
366 | list_del(&tbf->list); | ||
367 | spin_unlock_bh(&sc->tx.txbuflock); | ||
368 | |||
369 | ATH_TXBUF_RESET(tbf); | ||
370 | |||
371 | /* copy descriptor content */ | ||
372 | tbf->bf_mpdu = bf_last->bf_mpdu; | ||
373 | tbf->bf_buf_addr = bf_last->bf_buf_addr; | ||
374 | *(tbf->bf_desc) = *(bf_last->bf_desc); | ||
375 | |||
376 | /* link it to the frame */ | ||
377 | if (bf_lastq) { | ||
378 | bf_lastq->bf_desc->ds_link = | ||
379 | tbf->bf_daddr; | ||
380 | bf->bf_lastfrm = tbf; | ||
381 | ath9k_hw_cleartxdesc(sc->sc_ah, | ||
382 | bf->bf_lastfrm->bf_desc); | ||
383 | } else { | ||
384 | tbf->bf_state = bf_last->bf_state; | ||
385 | tbf->bf_lastfrm = tbf; | ||
386 | ath9k_hw_cleartxdesc(sc->sc_ah, | ||
387 | tbf->bf_lastfrm->bf_desc); | ||
388 | |||
389 | /* copy the DMA context */ | ||
390 | tbf->bf_dmacontext = | ||
391 | bf_last->bf_dmacontext; | ||
392 | } | ||
393 | list_add_tail(&tbf->list, &bf_head); | 366 | list_add_tail(&tbf->list, &bf_head); |
394 | } else { | 367 | } else { |
395 | /* | 368 | /* |
396 | * Clear descriptor status words for | 369 | * Clear descriptor status words for |
397 | * software retry | 370 | * software retry |
398 | */ | 371 | */ |
399 | ath9k_hw_cleartxdesc(sc->sc_ah, | 372 | ath9k_hw_cleartxdesc(sc->sc_ah, bf->bf_desc); |
400 | bf->bf_lastfrm->bf_desc); | ||
401 | } | 373 | } |
402 | 374 | ||
403 | /* | 375 | /* |
@@ -411,27 +383,18 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc, struct ath_txq *txq,
411 | } | 383 | } |
412 | 384 | ||
413 | if (tid->state & AGGR_CLEANUP) { | 385 | if (tid->state & AGGR_CLEANUP) { |
414 | /* check to see if we're done with cleaning the h/w queue */ | ||
415 | spin_lock_bh(&txq->axq_lock); | ||
416 | |||
417 | if (tid->baw_head == tid->baw_tail) { | 386 | if (tid->baw_head == tid->baw_tail) { |
418 | tid->state &= ~AGGR_ADDBA_COMPLETE; | 387 | tid->state &= ~AGGR_ADDBA_COMPLETE; |
419 | tid->addba_exchangeattempts = 0; | 388 | tid->addba_exchangeattempts = 0; |
420 | spin_unlock_bh(&txq->axq_lock); | ||
421 | |||
422 | tid->state &= ~AGGR_CLEANUP; | 389 | tid->state &= ~AGGR_CLEANUP; |
423 | 390 | ||
424 | /* send buffered frames as singles */ | 391 | /* send buffered frames as singles */ |
425 | ath_tx_flush_tid(sc, tid); | 392 | ath_tx_flush_tid(sc, tid); |
426 | } else | 393 | } |
427 | spin_unlock_bh(&txq->axq_lock); | ||
428 | |||
429 | return; | 394 | return; |
430 | } | 395 | } |
431 | 396 | ||
432 | /* | 397 | /* prepend un-acked frames to the beginning of the pending frame queue */ |
433 | * prepend un-acked frames to the beginning of the pending frame queue | ||
434 | */ | ||
435 | if (!list_empty(&bf_pending)) { | 398 | if (!list_empty(&bf_pending)) { |
436 | spin_lock_bh(&txq->axq_lock); | 399 | spin_lock_bh(&txq->axq_lock); |
437 | list_splice(&bf_pending, &tid->buf_q); | 400 | list_splice(&bf_pending, &tid->buf_q); |
@@ -441,8 +404,6 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc, struct ath_txq *txq,
441 | 404 | ||
442 | if (needreset) | 405 | if (needreset) |
443 | ath_reset(sc, false); | 406 | ath_reset(sc, false); |
444 | |||
445 | return; | ||
446 | } | 407 | } |
447 | 408 | ||
448 | static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, | 409 | static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, |
@@ -453,15 +414,14 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
453 | struct ieee80211_tx_info *tx_info; | 414 | struct ieee80211_tx_info *tx_info; |
454 | struct ieee80211_tx_rate *rates; | 415 | struct ieee80211_tx_rate *rates; |
455 | struct ath_tx_info_priv *tx_info_priv; | 416 | struct ath_tx_info_priv *tx_info_priv; |
456 | u32 max_4ms_framelen, frame_length; | 417 | u32 max_4ms_framelen, frmlen; |
457 | u16 aggr_limit, legacy = 0, maxampdu; | 418 | u16 aggr_limit, legacy = 0, maxampdu; |
458 | int i; | 419 | int i; |
459 | 420 | ||
460 | skb = (struct sk_buff *)bf->bf_mpdu; | 421 | skb = (struct sk_buff *)bf->bf_mpdu; |
461 | tx_info = IEEE80211_SKB_CB(skb); | 422 | tx_info = IEEE80211_SKB_CB(skb); |
462 | rates = tx_info->control.rates; | 423 | rates = tx_info->control.rates; |
463 | tx_info_priv = | 424 | tx_info_priv = (struct ath_tx_info_priv *)tx_info->rate_driver_data[0]; |
464 | (struct ath_tx_info_priv *)tx_info->rate_driver_data[0]; | ||
465 | 425 | ||
466 | /* | 426 | /* |
467 | * Find the lowest frame length among the rate series that will have a | 427 | * Find the lowest frame length among the rate series that will have a |
@@ -477,9 +437,8 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
477 | break; | 437 | break; |
478 | } | 438 | } |
479 | 439 | ||
480 | frame_length = | 440 | frmlen = rate_table->info[rates[i].idx].max_4ms_framelen; |
481 | rate_table->info[rates[i].idx].max_4ms_framelen; | 441 | max_4ms_framelen = min(max_4ms_framelen, frmlen); |
482 | max_4ms_framelen = min(max_4ms_framelen, frame_length); | ||
483 | } | 442 | } |
484 | } | 443 | } |
485 | 444 | ||
@@ -491,8 +450,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
491 | if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy) | 450 | if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy) |
492 | return 0; | 451 | return 0; |
493 | 452 | ||
494 | aggr_limit = min(max_4ms_framelen, | 453 | aggr_limit = min(max_4ms_framelen, (u32)ATH_AMPDU_LIMIT_DEFAULT); |
495 | (u32)ATH_AMPDU_LIMIT_DEFAULT); | ||
496 | 454 | ||
497 | /* | 455 | /* |
498 | * h/w can accept aggregates upto 16 bit lengths (65535). | 456 | * h/w can accept aggregates upto 16 bit lengths (65535). |
@@ -507,9 +465,9 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
507 | } | 465 | } |
508 | 466 | ||
509 | /* | 467 | /* |
510 | * returns the number of delimiters to be added to | 468 | * Returns the number of delimiters to be added to |
511 | * meet the minimum required mpdudensity. | 469 | * meet the minimum required mpdudensity. |
512 | * caller should make sure that the rate is HT rate . | 470 | * caller should make sure that the rate is HT rate . |
513 | */ | 471 | */ |
514 | static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid, | 472 | static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid, |
515 | struct ath_buf *bf, u16 frmlen) | 473 | struct ath_buf *bf, u16 frmlen) |
@@ -566,9 +524,7 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid, | |||
566 | nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width]; | 524 | nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width]; |
567 | minlen = (nsymbols * nsymbits) / BITS_PER_BYTE; | 525 | minlen = (nsymbols * nsymbits) / BITS_PER_BYTE; |
568 | 526 | ||
569 | /* Is frame shorter than required minimum length? */ | ||
570 | if (frmlen < minlen) { | 527 | if (frmlen < minlen) { |
571 | /* Get the minimum number of delimiters required. */ | ||
572 | mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ; | 528 | mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ; |
573 | ndelim = max(mindelim, ndelim); | 529 | ndelim = max(mindelim, ndelim); |
574 | } | 530 | } |
@@ -577,30 +533,22 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
577 | } | 533 | } |
578 | 534 | ||
579 | static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, | 535 | static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, |
580 | struct ath_atx_tid *tid, struct list_head *bf_q, | 536 | struct ath_atx_tid *tid, |
581 | struct ath_buf **bf_last, struct aggr_rifs_param *param, | 537 | struct list_head *bf_q) |
582 | int *prev_frames) | ||
583 | { | 538 | { |
584 | #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) | 539 | #define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) |
585 | struct ath_buf *bf, *tbf, *bf_first, *bf_prev = NULL; | 540 | struct ath_buf *bf, *bf_first, *bf_prev = NULL; |
586 | struct list_head bf_head; | 541 | int rl = 0, nframes = 0, ndelim, prev_al = 0; |
587 | int rl = 0, nframes = 0, ndelim; | ||
588 | u16 aggr_limit = 0, al = 0, bpad = 0, | 542 | u16 aggr_limit = 0, al = 0, bpad = 0, |
589 | al_delta, h_baw = tid->baw_size / 2; | 543 | al_delta, h_baw = tid->baw_size / 2; |
590 | enum ATH_AGGR_STATUS status = ATH_AGGR_DONE; | 544 | enum ATH_AGGR_STATUS status = ATH_AGGR_DONE; |
591 | int prev_al = 0; | ||
592 | INIT_LIST_HEAD(&bf_head); | ||
593 | |||
594 | BUG_ON(list_empty(&tid->buf_q)); | ||
595 | 545 | ||
596 | bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list); | 546 | bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list); |
597 | 547 | ||
598 | do { | 548 | do { |
599 | bf = list_first_entry(&tid->buf_q, struct ath_buf, list); | 549 | bf = list_first_entry(&tid->buf_q, struct ath_buf, list); |
600 | 550 | ||
601 | /* | 551 | /* do not step over block-ack window */ |
602 | * do not step over block-ack window | ||
603 | */ | ||
604 | if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) { | 552 | if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) { |
605 | status = ATH_AGGR_BAW_CLOSED; | 553 | status = ATH_AGGR_BAW_CLOSED; |
606 | break; | 554 | break; |
@@ -611,29 +559,23 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
611 | rl = 1; | 559 | rl = 1; |
612 | } | 560 | } |
613 | 561 | ||
614 | /* | 562 | /* do not exceed aggregation limit */ |
615 | * do not exceed aggregation limit | ||
616 | */ | ||
617 | al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen; | 563 | al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen; |
618 | 564 | ||
619 | if (nframes && (aggr_limit < | 565 | if (nframes && |
620 | (al + bpad + al_delta + prev_al))) { | 566 | (aggr_limit < (al + bpad + al_delta + prev_al))) { |
621 | status = ATH_AGGR_LIMITED; | 567 | status = ATH_AGGR_LIMITED; |
622 | break; | 568 | break; |
623 | } | 569 | } |
624 | 570 | ||
625 | /* | 571 | /* do not exceed subframe limit */ |
626 | * do not exceed subframe limit | 572 | if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) { |
627 | */ | ||
628 | if ((nframes + *prev_frames) >= | ||
629 | min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) { | ||
630 | status = ATH_AGGR_LIMITED; | 573 | status = ATH_AGGR_LIMITED; |
631 | break; | 574 | break; |
632 | } | 575 | } |
576 | nframes++; | ||
633 | 577 | ||
634 | /* | 578 | /* add padding for previous frame to aggregation length */ |
635 | * add padding for previous frame to aggregation length | ||
636 | */ | ||
637 | al += bpad + al_delta; | 579 | al += bpad + al_delta; |
638 | 580 | ||
639 | /* | 581 | /* |
@@ -641,44 +583,25 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
641 | * density for this node. | 583 | * density for this node. |
642 | */ | 584 | */ |
643 | ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen); | 585 | ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen); |
644 | |||
645 | bpad = PADBYTES(al_delta) + (ndelim << 2); | 586 | bpad = PADBYTES(al_delta) + (ndelim << 2); |
646 | 587 | ||
647 | bf->bf_next = NULL; | 588 | bf->bf_next = NULL; |
648 | bf->bf_lastfrm->bf_desc->ds_link = 0; | 589 | bf->bf_desc->ds_link = 0; |
649 | 590 | ||
650 | /* | 591 | /* link buffers of this frame to the aggregate */ |
651 | * this packet is part of an aggregate | ||
652 | * - remove all descriptors belonging to this frame from | ||
653 | * software queue | ||
654 | * - add it to block ack window | ||
655 | * - set up descriptors for aggregation | ||
656 | */ | ||
657 | list_cut_position(&bf_head, &tid->buf_q, &bf->bf_lastfrm->list); | ||
658 | ath_tx_addto_baw(sc, tid, bf); | 592 | ath_tx_addto_baw(sc, tid, bf); |
659 | 593 | ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim); | |
660 | list_for_each_entry(tbf, &bf_head, list) { | 594 | list_move_tail(&bf->list, bf_q); |
661 | ath9k_hw_set11n_aggr_middle(sc->sc_ah, | ||
662 | tbf->bf_desc, ndelim); | ||
663 | } | ||
664 | |||
665 | /* | ||
666 | * link buffers of this frame to the aggregate | ||
667 | */ | ||
668 | list_splice_tail_init(&bf_head, bf_q); | ||
669 | nframes++; | ||
670 | |||
671 | if (bf_prev) { | 595 | if (bf_prev) { |
672 | bf_prev->bf_next = bf; | 596 | bf_prev->bf_next = bf; |
673 | bf_prev->bf_lastfrm->bf_desc->ds_link = bf->bf_daddr; | 597 | bf_prev->bf_desc->ds_link = bf->bf_daddr; |
674 | } | 598 | } |
675 | bf_prev = bf; | 599 | bf_prev = bf; |
676 | |||
677 | } while (!list_empty(&tid->buf_q)); | 600 | } while (!list_empty(&tid->buf_q)); |
678 | 601 | ||
679 | bf_first->bf_al = al; | 602 | bf_first->bf_al = al; |
680 | bf_first->bf_nframes = nframes; | 603 | bf_first->bf_nframes = nframes; |
681 | *bf_last = bf_prev; | 604 | |
682 | return status; | 605 | return status; |
683 | #undef PADBYTES | 606 | #undef PADBYTES |
684 | } | 607 | } |
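A note on the length accounting in ath_tx_form_aggr() above: each subframe contributes a delimiter plus its frame length (al_delta), while bpad, charged against the next subframe, is the padding to the next 4-byte boundary plus any extra delimiters demanded by the peer's minimum MPDU density (ndelim). A worked example with made-up values, assuming ATH_AGGR_DELIM_SZ is the 4-byte A-MPDU delimiter:

```c
/* Illustrative only: values are invented, the macro mirrors the patch. */
#include <stdio.h>

#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)	/* pad up to a 4-byte boundary */

int main(void)
{
	unsigned int delim_sz = 4;	/* assumed ATH_AGGR_DELIM_SZ */
	unsigned int frmlen   = 1534;	/* hypothetical subframe length */
	unsigned int ndelim   = 2;	/* as returned by ath_compute_num_delims() */
	unsigned int al = 0, bpad = 0;

	unsigned int al_delta = delim_sz + frmlen;	/* 1538 */

	al += bpad + al_delta;				/* running aggregate length: 1538 */
	bpad = PADBYTES(al_delta) + (ndelim << 2);	/* 2 + 8 = 10, charged to the next subframe */

	printf("al=%u bpad=%u\n", al, bpad);
	return 0;
}
```

The aggr_limit check and the h_baw/ATH_AMPDU_SUBFRAME_DEFAULT subframe cap in the hunk then bound how far this accumulation may run.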
@@ -686,11 +609,9 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
686 | static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, | 609 | static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq, |
687 | struct ath_atx_tid *tid) | 610 | struct ath_atx_tid *tid) |
688 | { | 611 | { |
689 | struct ath_buf *bf, *tbf, *bf_last, *bf_lastaggr = NULL; | 612 | struct ath_buf *bf; |
690 | enum ATH_AGGR_STATUS status; | 613 | enum ATH_AGGR_STATUS status; |
691 | struct list_head bf_q; | 614 | struct list_head bf_q; |
692 | struct aggr_rifs_param param = {0, 0, 0, 0, NULL}; | ||
693 | int prev_frames = 0; | ||
694 | 615 | ||
695 | do { | 616 | do { |
696 | if (list_empty(&tid->buf_q)) | 617 | if (list_empty(&tid->buf_q)) |
@@ -698,66 +619,36 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
698 | 619 | ||
699 | INIT_LIST_HEAD(&bf_q); | 620 | INIT_LIST_HEAD(&bf_q); |
700 | 621 | ||
701 | status = ath_tx_form_aggr(sc, tid, &bf_q, &bf_lastaggr, ¶m, | 622 | status = ath_tx_form_aggr(sc, tid, &bf_q); |
702 | &prev_frames); | ||
703 | 623 | ||
704 | /* | 624 | /* |
705 | * no frames picked up to be aggregated; block-ack | 625 | * no frames picked up to be aggregated; |
706 | * window is not open | 626 | * block-ack window is not open. |
707 | */ | 627 | */ |
708 | if (list_empty(&bf_q)) | 628 | if (list_empty(&bf_q)) |
709 | break; | 629 | break; |
710 | 630 | ||
711 | bf = list_first_entry(&bf_q, struct ath_buf, list); | 631 | bf = list_first_entry(&bf_q, struct ath_buf, list); |
712 | bf_last = list_entry(bf_q.prev, struct ath_buf, list); | 632 | bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list); |
713 | bf->bf_lastbf = bf_last; | ||
714 | 633 | ||
715 | /* | 634 | /* if only one frame, send as non-aggregate */ |
716 | * if only one frame, send as non-aggregate | ||
717 | */ | ||
718 | if (bf->bf_nframes == 1) { | 635 | if (bf->bf_nframes == 1) { |
719 | ASSERT(bf->bf_lastfrm == bf_last); | ||
720 | |||
721 | bf->bf_state.bf_type &= ~BUF_AGGR; | 636 | bf->bf_state.bf_type &= ~BUF_AGGR; |
722 | /* | 637 | ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc); |
723 | * clear aggr bits for every descriptor | ||
724 | * XXX TODO: is there a way to optimize it? | ||
725 | */ | ||
726 | list_for_each_entry(tbf, &bf_q, list) { | ||
727 | ath9k_hw_clr11n_aggr(sc->sc_ah, tbf->bf_desc); | ||
728 | } | ||
729 | |||
730 | ath_buf_set_rate(sc, bf); | 638 | ath_buf_set_rate(sc, bf); |
731 | ath_tx_txqaddbuf(sc, txq, &bf_q); | 639 | ath_tx_txqaddbuf(sc, txq, &bf_q); |
732 | continue; | 640 | continue; |
733 | } | 641 | } |
734 | 642 | ||
735 | /* | 643 | /* setup first desc of aggregate */ |
736 | * setup first desc with rate and aggr info | ||
737 | */ | ||
738 | bf->bf_state.bf_type |= BUF_AGGR; | 644 | bf->bf_state.bf_type |= BUF_AGGR; |
739 | ath_buf_set_rate(sc, bf); | 645 | ath_buf_set_rate(sc, bf); |
740 | ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al); | 646 | ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al); |
741 | 647 | ||
742 | /* | 648 | /* anchor last desc of aggregate */ |
743 | * anchor last frame of aggregate correctly | 649 | ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc); |
744 | */ | ||
745 | ASSERT(bf_lastaggr); | ||
746 | ASSERT(bf_lastaggr->bf_lastfrm == bf_last); | ||
747 | tbf = bf_lastaggr; | ||
748 | ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc); | ||
749 | |||
750 | /* XXX: We don't enter into this loop, consider removing this */ | ||
751 | while (!list_empty(&bf_q) && !list_is_last(&tbf->list, &bf_q)) { | ||
752 | tbf = list_entry(tbf->list.next, struct ath_buf, list); | ||
753 | ath9k_hw_set11n_aggr_last(sc->sc_ah, tbf->bf_desc); | ||
754 | } | ||
755 | 650 | ||
756 | txq->axq_aggr_depth++; | 651 | txq->axq_aggr_depth++; |
757 | |||
758 | /* | ||
759 | * Normal aggregate, queue to hardware | ||
760 | */ | ||
761 | ath_tx_txqaddbuf(sc, txq, &bf_q); | 652 | ath_tx_txqaddbuf(sc, txq, &bf_q); |
762 | 653 | ||
763 | } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH && | 654 | } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH && |
@@ -812,19 +703,17 @@ int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
812 | */ | 703 | */ |
813 | break; | 704 | break; |
814 | } | 705 | } |
815 | list_cut_position(&bf_head, | 706 | list_move_tail(&bf->list, &bf_head); |
816 | &txtid->buf_q, &bf->bf_lastfrm->list); | ||
817 | ath_tx_update_baw(sc, txtid, bf->bf_seqno); | 707 | ath_tx_update_baw(sc, txtid, bf->bf_seqno); |
818 | ath_tx_complete_buf(sc, bf, &bf_head, 0, 0); | 708 | ath_tx_complete_buf(sc, bf, &bf_head, 0, 0); |
819 | } | 709 | } |
710 | spin_unlock_bh(&txq->axq_lock); | ||
820 | 711 | ||
821 | if (txtid->baw_head != txtid->baw_tail) { | 712 | if (txtid->baw_head != txtid->baw_tail) { |
822 | spin_unlock_bh(&txq->axq_lock); | ||
823 | txtid->state |= AGGR_CLEANUP; | 713 | txtid->state |= AGGR_CLEANUP; |
824 | } else { | 714 | } else { |
825 | txtid->state &= ~AGGR_ADDBA_COMPLETE; | 715 | txtid->state &= ~AGGR_ADDBA_COMPLETE; |
826 | txtid->addba_exchangeattempts = 0; | 716 | txtid->addba_exchangeattempts = 0; |
827 | spin_unlock_bh(&txq->axq_lock); | ||
828 | ath_tx_flush_tid(sc, txtid); | 717 | ath_tx_flush_tid(sc, txtid); |
829 | } | 718 | } |
830 | 719 | ||
@@ -1130,7 +1019,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1130 | spin_unlock_bh(&txq->axq_lock); | 1019 | spin_unlock_bh(&txq->axq_lock); |
1131 | 1020 | ||
1132 | if (bf_isampdu(bf)) | 1021 | if (bf_isampdu(bf)) |
1133 | ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, 0); | 1022 | ath_tx_complete_aggr(sc, txq, bf, &bf_head, 0); |
1134 | else | 1023 | else |
1135 | ath_tx_complete_buf(sc, bf, &bf_head, 0, 0); | 1024 | ath_tx_complete_buf(sc, bf, &bf_head, 0, 0); |
1136 | } | 1025 | } |
@@ -1326,8 +1215,6 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1326 | { | 1215 | { |
1327 | struct ath_buf *bf; | 1216 | struct ath_buf *bf; |
1328 | 1217 | ||
1329 | BUG_ON(list_empty(bf_head)); | ||
1330 | |||
1331 | bf = list_first_entry(bf_head, struct ath_buf, list); | 1218 | bf = list_first_entry(bf_head, struct ath_buf, list); |
1332 | bf->bf_state.bf_type |= BUF_AMPDU; | 1219 | bf->bf_state.bf_type |= BUF_AMPDU; |
1333 | 1220 | ||
@@ -1345,7 +1232,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1345 | * Add this frame to software queue for scheduling later | 1232 | * Add this frame to software queue for scheduling later |
1346 | * for aggregation. | 1233 | * for aggregation. |
1347 | */ | 1234 | */ |
1348 | list_splice_tail_init(bf_head, &tid->buf_q); | 1235 | list_move_tail(&bf->list, &tid->buf_q); |
1349 | ath_tx_queue_tid(txctl->txq, tid); | 1236 | ath_tx_queue_tid(txctl->txq, tid); |
1350 | return; | 1237 | return; |
1351 | } | 1238 | } |
@@ -1355,11 +1242,9 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1355 | 1242 | ||
1356 | /* Queue to h/w without aggregation */ | 1243 | /* Queue to h/w without aggregation */ |
1357 | bf->bf_nframes = 1; | 1244 | bf->bf_nframes = 1; |
1358 | bf->bf_lastbf = bf->bf_lastfrm; /* one single frame */ | 1245 | bf->bf_lastbf = bf; |
1359 | ath_buf_set_rate(sc, bf); | 1246 | ath_buf_set_rate(sc, bf); |
1360 | ath_tx_txqaddbuf(sc, txctl->txq, bf_head); | 1247 | ath_tx_txqaddbuf(sc, txctl->txq, bf_head); |
1361 | |||
1362 | return; | ||
1363 | } | 1248 | } |
1364 | 1249 | ||
1365 | static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, | 1250 | static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq, |
@@ -1368,8 +1253,6 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1368 | { | 1253 | { |
1369 | struct ath_buf *bf; | 1254 | struct ath_buf *bf; |
1370 | 1255 | ||
1371 | BUG_ON(list_empty(bf_head)); | ||
1372 | |||
1373 | bf = list_first_entry(bf_head, struct ath_buf, list); | 1256 | bf = list_first_entry(bf_head, struct ath_buf, list); |
1374 | bf->bf_state.bf_type &= ~BUF_AMPDU; | 1257 | bf->bf_state.bf_type &= ~BUF_AMPDU; |
1375 | 1258 | ||
@@ -1377,7 +1260,7 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1377 | INCR(tid->seq_start, IEEE80211_SEQ_MAX); | 1260 | INCR(tid->seq_start, IEEE80211_SEQ_MAX); |
1378 | 1261 | ||
1379 | bf->bf_nframes = 1; | 1262 | bf->bf_nframes = 1; |
1380 | bf->bf_lastbf = bf->bf_lastfrm; | 1263 | bf->bf_lastbf = bf; |
1381 | ath_buf_set_rate(sc, bf); | 1264 | ath_buf_set_rate(sc, bf); |
1382 | ath_tx_txqaddbuf(sc, txq, bf_head); | 1265 | ath_tx_txqaddbuf(sc, txq, bf_head); |
1383 | } | 1266 | } |
@@ -1770,8 +1653,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1770 | true, /* last segment */ | 1653 | true, /* last segment */ |
1771 | ds); /* first descriptor */ | 1654 | ds); /* first descriptor */ |
1772 | 1655 | ||
1773 | bf->bf_lastfrm = bf; | ||
1774 | |||
1775 | spin_lock_bh(&txctl->txq->axq_lock); | 1656 | spin_lock_bh(&txctl->txq->axq_lock); |
1776 | 1657 | ||
1777 | if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) && | 1658 | if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) && |
@@ -2155,7 +2036,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2155 | ath_tx_rc_status(bf, ds, nbad); | 2036 | ath_tx_rc_status(bf, ds, nbad); |
2156 | 2037 | ||
2157 | if (bf_isampdu(bf)) | 2038 | if (bf_isampdu(bf)) |
2158 | ath_tx_complete_aggr_rifs(sc, txq, bf, &bf_head, txok); | 2039 | ath_tx_complete_aggr(sc, txq, bf, &bf_head, txok); |
2159 | else | 2040 | else |
2160 | ath_tx_complete_buf(sc, bf, &bf_head, txok, 0); | 2041 | ath_tx_complete_buf(sc, bf, &bf_head, txok, 0); |
2161 | 2042 | ||