author		Sujith <Sujith.Manoharan@atheros.com>		2008-12-07 11:14:03 -0500
committer	John W. Linville <linville@tuxdriver.com>	2008-12-12 14:02:17 -0500
commit		b77f483fcf0579de28873828897f53371a33a0ea (patch)
tree		a08d0f942d4b5d0cd8a7893753f9b7554ebc89e4 /drivers/net/wireless/ath9k
parent		59651e89187293e88863891b821c7379391ef75c (diff)
ath9k: Refactor struct ath_softc
Split ath_softc into smaller structures for rx, tx and beacon
handling.
Signed-off-by: Sujith <Sujith.Manoharan@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/ath9k')
-rw-r--r--	drivers/net/wireless/ath9k/beacon.c	 94
-rw-r--r--	drivers/net/wireless/ath9k/core.h	134
-rw-r--r--	drivers/net/wireless/ath9k/main.c	 60
-rw-r--r--	drivers/net/wireless/ath9k/recv.c	107
-rw-r--r--	drivers/net/wireless/ath9k/xmit.c	126
5 files changed, 254 insertions, 267 deletions
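The patch moves per-subsystem fields of struct ath_softc into three new aggregates (struct ath_tx, struct ath_rx and struct ath_beacon, defined in the core.h hunk below) and rewrites call sites from flat sc_* members to sc->tx.*, sc->rx.* and sc->beacon.*. As a rough orientation only, a minimal, self-contained sketch of that pattern follows; the struct and field names here are simplified stand-ins, not the driver's real definitions:

/* Illustrative only: mirrors the grouping pattern, not the real ath9k types. */
#include <stdio.h>

struct ath_beacon {		/* grouped beacon state (was a flat list of sc_b* fields) */
	unsigned int beaconq;	/* formerly sc->sc_bhalq */
	unsigned int bmisscnt;	/* formerly sc->sc_bmisscount */
};

struct ath_softc {
	struct ath_beacon beacon;	/* one member per subsystem: tx, rx, beacon */
};

int main(void)
{
	struct ath_softc sc = { .beacon = { .beaconq = 1, .bmisscnt = 0 } };

	/* Call sites change from sc.sc_bhalq to sc.beacon.beaconq: */
	printf("beacon queue %u, misses %u\n", sc.beacon.beaconq, sc.beacon.bmisscnt);
	return 0;
}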
diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c
index 9e5c0c0446b6..3ab0b43aaf93 100644
--- a/drivers/net/wireless/ath9k/beacon.c
+++ b/drivers/net/wireless/ath9k/beacon.c
@@ -26,7 +26,7 @@ static int ath_beaconq_config(struct ath_softc *sc)
 	struct ath_hal *ah = sc->sc_ah;
 	struct ath9k_tx_queue_info qi;
 
-	ath9k_hw_get_txq_props(ah, sc->sc_bhalq, &qi);
+	ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi);
 	if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_AP) {
 		/* Always burst out beacon and CAB traffic. */
 		qi.tqi_aifs = 1;
@@ -34,17 +34,17 @@ static int ath_beaconq_config(struct ath_softc *sc)
 		qi.tqi_cwmax = 0;
 	} else {
 		/* Adhoc mode; important thing is to use 2x cwmin. */
-		qi.tqi_aifs = sc->sc_beacon_qi.tqi_aifs;
-		qi.tqi_cwmin = 2*sc->sc_beacon_qi.tqi_cwmin;
-		qi.tqi_cwmax = sc->sc_beacon_qi.tqi_cwmax;
+		qi.tqi_aifs = sc->beacon.beacon_qi.tqi_aifs;
+		qi.tqi_cwmin = 2*sc->beacon.beacon_qi.tqi_cwmin;
+		qi.tqi_cwmax = sc->beacon.beacon_qi.tqi_cwmax;
 	}
 
-	if (!ath9k_hw_set_txq_props(ah, sc->sc_bhalq, &qi)) {
+	if (!ath9k_hw_set_txq_props(ah, sc->beacon.beaconq, &qi)) {
 		DPRINTF(sc, ATH_DBG_FATAL,
 			"unable to update h/w beacon queue parameters\n");
 		return 0;
 	} else {
-		ath9k_hw_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
+		ath9k_hw_resettxqueue(ah, sc->beacon.beaconq); /* push to h/w */
 		return 1;
 	}
 }
@@ -53,7 +53,7 @@ static void ath_bstuck_process(struct ath_softc *sc)
 {
 	DPRINTF(sc, ATH_DBG_BEACON,
 		"stuck beacon; resetting (bmiss count %u)\n",
-		sc->sc_bmisscount);
+		sc->beacon.bmisscnt);
 	ath_reset(sc, false);
 }
 
@@ -96,7 +96,7 @@ static void ath_beacon_setup(struct ath_softc *sc,
 		 * SWBA's
 		 * XXX assumes two antenna
 		 */
-		antenna = ((sc->ast_be_xmit / sc->sc_nbcnvaps) & 1 ? 2 : 1);
+		antenna = ((sc->beacon.ast_be_xmit / sc->sc_nbcnvaps) & 1 ? 2 : 1);
 	}
 
 	ds->ds_data = bf->bf_buf_addr;
@@ -153,7 +153,7 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
 	ASSERT(vif);
 
 	avp = (void *)vif->drv_priv;
-	cabq = sc->sc_cabq;
+	cabq = sc->beacon.cabq;
 
 	if (avp->av_bcbuf == NULL) {
 		DPRINTF(sc, ATH_DBG_BEACON, "avp=%p av_bcbuf=%p\n",
@@ -182,9 +182,9 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
 		 * TX frames)
 		 */
 		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-		sc->seq_no += 0x10;
+		sc->tx.seq_no += 0x10;
 		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
-		hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
+		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
 	}
 
 	bf->bf_buf_addr = bf->bf_dmacontext =
@@ -270,10 +270,10 @@ static void ath_beacon_start_adhoc(struct ath_softc *sc, int if_id)
 	ath_beacon_setup(sc, avp, bf);
 
 	/* NB: caller is known to have already stopped tx dma */
-	ath9k_hw_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr);
-	ath9k_hw_txstart(ah, sc->sc_bhalq);
+	ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bf->bf_daddr);
+	ath9k_hw_txstart(ah, sc->beacon.beaconq);
 	DPRINTF(sc, ATH_DBG_BEACON, "TXDP%u = %llx (%p)\n",
-		sc->sc_bhalq, ito64(bf->bf_daddr), bf->bf_desc);
+		sc->beacon.beaconq, ito64(bf->bf_daddr), bf->bf_desc);
 }
 
 int ath_beaconq_setup(struct ath_hal *ah)
@@ -306,7 +306,7 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
 	if (!avp->av_bcbuf) {
 		/* Allocate beacon state for hostap/ibss.  We know
 		 * a buffer is available. */
-		avp->av_bcbuf = list_first_entry(&sc->sc_bbuf,
+		avp->av_bcbuf = list_first_entry(&sc->beacon.bbuf,
 				struct ath_buf, list);
 		list_del(&avp->av_bcbuf->list);
 
@@ -319,13 +319,13 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
 			 */
 			avp->av_bslot = 0;
 			for (slot = 0; slot < ATH_BCBUF; slot++)
-				if (sc->sc_bslot[slot] == ATH_IF_ID_ANY) {
+				if (sc->beacon.bslot[slot] == ATH_IF_ID_ANY) {
 					/*
 					 * XXX hack, space out slots to better
 					 * deal with misses
 					 */
 					if (slot+1 < ATH_BCBUF &&
-					    sc->sc_bslot[slot+1] ==
+					    sc->beacon.bslot[slot+1] ==
 						ATH_IF_ID_ANY) {
 						avp->av_bslot = slot+1;
 						break;
@@ -333,8 +333,8 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
 					avp->av_bslot = slot;
 					/* NB: keep looking for a double slot */
 				}
-			BUG_ON(sc->sc_bslot[avp->av_bslot] != ATH_IF_ID_ANY);
-			sc->sc_bslot[avp->av_bslot] = if_id;
+			BUG_ON(sc->beacon.bslot[avp->av_bslot] != ATH_IF_ID_ANY);
+			sc->beacon.bslot[avp->av_bslot] = if_id;
 			sc->sc_nbcnvaps++;
 		}
 	}
@@ -362,7 +362,7 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
 	}
 
 	tstamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
-	sc->bc_tstamp = le64_to_cpu(tstamp);
+	sc->beacon.bc_tstamp = le64_to_cpu(tstamp);
 
 	/*
 	 * Calculate a TSF adjustment factor required for
@@ -422,7 +422,7 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp)
 		struct ath_buf *bf;
 
 		if (avp->av_bslot != -1) {
-			sc->sc_bslot[avp->av_bslot] = ATH_IF_ID_ANY;
+			sc->beacon.bslot[avp->av_bslot] = ATH_IF_ID_ANY;
 			sc->sc_nbcnvaps--;
 		}
 
@@ -435,7 +435,7 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp)
 			dev_kfree_skb_any(skb);
 			bf->bf_mpdu = NULL;
 		}
-		list_add_tail(&bf->list, &sc->sc_bbuf);
+		list_add_tail(&bf->list, &sc->beacon.bbuf);
 
 		avp->av_bcbuf = NULL;
 	}
@@ -469,18 +469,18 @@ void ath9k_beacon_tasklet(unsigned long data)
 	 *
 	 * FIXME: Clean up this mess !!
 	 */
-	if (ath9k_hw_numtxpending(ah, sc->sc_bhalq) != 0) {
-		sc->sc_bmisscount++;
+	if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) {
+		sc->beacon.bmisscnt++;
 		/* XXX: doth needs the chanchange IE countdown decremented.
 		 * We should consider adding a mac80211 call to indicate
 		 * a beacon miss so appropriate action could be taken
 		 * (in that layer).
 		 */
-		if (sc->sc_bmisscount < BSTUCK_THRESH) {
+		if (sc->beacon.bmisscnt < BSTUCK_THRESH) {
 			if (sc->sc_flags & SC_OP_NO_RESET) {
 				DPRINTF(sc, ATH_DBG_BEACON,
 					"missed %u consecutive beacons\n",
-					sc->sc_bmisscount);
+					sc->beacon.bmisscnt);
 				if (show_cycles) {
 					/*
 					 * Display cycle counter stats from HW
@@ -499,11 +499,11 @@ void ath9k_beacon_tasklet(unsigned long data)
 			} else {
 				DPRINTF(sc, ATH_DBG_BEACON,
 					"missed %u consecutive beacons\n",
-					sc->sc_bmisscount);
+					sc->beacon.bmisscnt);
 			}
-		} else if (sc->sc_bmisscount >= BSTUCK_THRESH) {
+		} else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
 			if (sc->sc_flags & SC_OP_NO_RESET) {
-				if (sc->sc_bmisscount == BSTUCK_THRESH) {
+				if (sc->beacon.bmisscnt == BSTUCK_THRESH) {
 					DPRINTF(sc, ATH_DBG_BEACON,
 						"beacon is officially "
 						"stuck\n");
@@ -517,17 +517,17 @@ void ath9k_beacon_tasklet(unsigned long data)
 		return;
 	}
 
-	if (sc->sc_bmisscount != 0) {
+	if (sc->beacon.bmisscnt != 0) {
 		if (sc->sc_flags & SC_OP_NO_RESET) {
 			DPRINTF(sc, ATH_DBG_BEACON,
 				"resume beacon xmit after %u misses\n",
-				sc->sc_bmisscount);
+				sc->beacon.bmisscnt);
 		} else {
 			DPRINTF(sc, ATH_DBG_BEACON,
 				"resume beacon xmit after %u misses\n",
-				sc->sc_bmisscount);
+				sc->beacon.bmisscnt);
 		}
-		sc->sc_bmisscount = 0;
+		sc->beacon.bmisscnt = 0;
 	}
 
 	/*
@@ -542,7 +542,7 @@ void ath9k_beacon_tasklet(unsigned long data)
 	tsf = ath9k_hw_gettsf64(ah);
 	tsftu = TSF_TO_TU(tsf>>32, tsf);
 	slot = ((tsftu % intval) * ATH_BCBUF) / intval;
-	if_id = sc->sc_bslot[(slot + 1) % ATH_BCBUF];
+	if_id = sc->beacon.bslot[(slot + 1) % ATH_BCBUF];
 
 	DPRINTF(sc, ATH_DBG_BEACON,
 		"slot %d [tsf %llu tsftu %u intval %u] if_id %d\n",
@@ -574,12 +574,12 @@ void ath9k_beacon_tasklet(unsigned long data)
 	 * set to ATH_BCBUF so this check is a noop.
 	 */
 	/* XXX locking */
-	if (sc->sc_updateslot == UPDATE) {
-		sc->sc_updateslot = COMMIT; /* commit next beacon */
-		sc->sc_slotupdate = slot;
-	} else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot) {
-		ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime);
-		sc->sc_updateslot = OK;
+	if (sc->beacon.updateslot == UPDATE) {
+		sc->beacon.updateslot = COMMIT; /* commit next beacon */
+		sc->beacon.slotupdate = slot;
+	} else if (sc->beacon.updateslot == COMMIT && sc->beacon.slotupdate == slot) {
+		ath9k_hw_setslottime(sc->sc_ah, sc->beacon.slottime);
+		sc->beacon.updateslot = OK;
 	}
 	if (bfaddr != 0) {
 		/*
@@ -587,17 +587,17 @@ void ath9k_beacon_tasklet(unsigned long data)
 		 * This should never fail since we check above that no frames
 		 * are still pending on the queue.
 		 */
-		if (!ath9k_hw_stoptxdma(ah, sc->sc_bhalq)) {
+		if (!ath9k_hw_stoptxdma(ah, sc->beacon.beaconq)) {
 			DPRINTF(sc, ATH_DBG_FATAL,
-				"beacon queue %u did not stop?\n", sc->sc_bhalq);
+				"beacon queue %u did not stop?\n", sc->beacon.beaconq);
 			/* NB: the HAL still stops DMA, so proceed */
 		}
 
 		/* NB: cabq traffic should already be queued and primed */
-		ath9k_hw_puttxbuf(ah, sc->sc_bhalq, bfaddr);
-		ath9k_hw_txstart(ah, sc->sc_bhalq);
+		ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bfaddr);
+		ath9k_hw_txstart(ah, sc->beacon.beaconq);
 
-		sc->ast_be_xmit += bc;	/* XXX per-vap? */
+		sc->beacon.ast_be_xmit += bc;	/* XXX per-vap? */
 	}
 }
 
@@ -644,7 +644,7 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
 	conf.bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf.beacon_interval;
 
 	/* extract tstamp from last beacon and convert to TU */
-	nexttbtt = TSF_TO_TU(sc->bc_tstamp >> 32, sc->bc_tstamp);
+	nexttbtt = TSF_TO_TU(sc->beacon.bc_tstamp >> 32, sc->beacon.bc_tstamp);
 
 	/* XXX conditionalize multi-bss support? */
 	if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_AP) {
@@ -831,7 +831,7 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
 		ath_beaconq_config(sc);
 	}
 	ath9k_hw_beaconinit(ah, nexttbtt, intval);
-	sc->sc_bmisscount = 0;
+	sc->beacon.bmisscnt = 0;
 	ath9k_hw_set_interrupts(ah, sc->sc_imask);
 	/*
 	 * When using a self-linked beacon descriptor in
diff --git a/drivers/net/wireless/ath9k/core.h b/drivers/net/wireless/ath9k/core.h
index 41a87b99deaa..e38f0331cfd5 100644
--- a/drivers/net/wireless/ath9k/core.h
+++ b/drivers/net/wireless/ath9k/core.h
@@ -61,7 +61,7 @@ struct ath_node;
 #define TSF_TO_TU(_h,_l) \
 	((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
 
-#define ATH_TXQ_SETUP(sc, i) ((sc)->sc_txqsetup & (1<<i))
+#define ATH_TXQ_SETUP(sc, i) ((sc)->tx.txqsetup & (1<<i))
 
 static const u8 ath_bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
 
@@ -453,6 +453,28 @@ struct ath_node {
 	u8 mpdudensity;
 };
 
+struct ath_tx {
+	u16 seq_no;
+	u32 txqsetup;
+	int hwq_map[ATH9K_WME_AC_VO+1];
+	spinlock_t txbuflock;
+	struct list_head txbuf;
+	struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
+	struct ath_descdma txdma;
+};
+
+struct ath_rx {
+	u8 defant;
+	u8 rxotherant;
+	u32 *rxlink;
+	int bufsize;
+	unsigned int rxfilter;
+	spinlock_t rxflushlock;
+	spinlock_t rxbuflock;
+	struct list_head rxbuf;
+	struct ath_descdma rxdma;
+};
+
 int ath_startrecv(struct ath_softc *sc);
 bool ath_stoprecv(struct ath_softc *sc);
 void ath_flushrecv(struct ath_softc *sc);
@@ -540,6 +562,26 @@ struct ath_beacon_config {
 	} u; /* last received beacon/probe response timestamp of this BSS. */
 };
 
+struct ath_beacon {
+	enum {
+		OK,		/* no change needed */
+		UPDATE,		/* update pending */
+		COMMIT		/* beacon sent, commit change */
+	} updateslot;		/* slot time update fsm */
+
+	u32 beaconq;
+	u32 bmisscnt;
+	u32 ast_be_xmit;
+	u64 bc_tstamp;
+	int bslot[ATH_BCBUF];
+	int slottime;
+	int slotupdate;
+	struct ath9k_tx_queue_info beacon_qi;
+	struct ath_descdma bdma;
+	struct ath_txq *cabq;
+	struct list_head bbuf;
+};
+
 void ath9k_beacon_tasklet(unsigned long data);
 void ath_beacon_config(struct ath_softc *sc, int if_id);
 int ath_beaconq_setup(struct ath_hal *ah);
@@ -610,7 +652,7 @@ struct ath_rfkill {
 #define DEFAULT_CACHELINE       32
 #define ATH_DEFAULT_NOISE_FLOOR -95
 #define ATH_REGCLASSIDS_MAX     10
 #define ATH_CABQ_READY_TIME     80      /* % of beacon interval */
 #define ATH_MAX_SW_RETRIES      10
 #define ATH_CHAN_MAX            255
 #define IEEE80211_WEP_NKID      4       /* number of key ids */
@@ -623,7 +665,7 @@ struct ath_rfkill {
  * Different parts have different size key caches. We handle
  * up to ATH_KEYMAX entries (could dynamically allocate state).
  */
 #define ATH_KEYMAX              128     /* max key cache size we handle */
 
 #define ATH_IF_ID_ANY           0xff
 #define ATH_TXPOWER_MAX         100     /* .5 dBm units */
@@ -656,105 +698,51 @@ struct ath_softc {
 	struct pci_dev *pdev;
 	struct tasklet_struct intr_tq;
 	struct tasklet_struct bcon_tasklet;
-	struct ath_config sc_config;
 	struct ath_hal *sc_ah;
 	void __iomem *mem;
+	spinlock_t sc_resetlock;
 
 	u8 sc_curbssid[ETH_ALEN];
 	u8 sc_myaddr[ETH_ALEN];
 	u8 sc_bssidmask[ETH_ALEN];
-
-#ifdef CONFIG_ATH9K_DEBUG
-	struct ath9k_debug sc_debug;
-#endif
 	u32 sc_intrstatus;
 	u32 sc_flags; /* SC_OP_* */
-	unsigned int rx_filter;
 	u16 sc_curtxpow;
 	u16 sc_curaid;
 	u16 sc_cachelsz;
-	int sc_slotupdate;		/* slot to next advance fsm */
-	int sc_slottime;
-	int sc_bslot[ATH_BCBUF];
+	u8 sc_nbcnvaps;
+	u16 sc_nvaps;
 	u8 sc_tx_chainmask;
 	u8 sc_rx_chainmask;
+	u32 sc_keymax;
+	DECLARE_BITMAP(sc_keymap, ATH_KEYMAX);
+	u8 sc_splitmic;
+	u8 sc_protrix;
 	enum ath9k_int sc_imask;
 	enum PROT_MODE sc_protmode;
-
-	u8 sc_nbcnvaps;
-	u16 sc_nvaps;
-	struct ieee80211_vif *sc_vaps[ATH_BCBUF];
-
-	u8 sc_mcastantenna;
-	u8 sc_defant;
-	u8 sc_rxotherant;
-
-	struct ath9k_node_stats sc_halstats;
 	enum ath9k_ht_extprotspacing sc_ht_extprotspacing;
 	enum ath9k_ht_macmode tx_chan_width;
 
-	enum {
-		OK,		/* no change needed */
-		UPDATE,		/* update pending */
-		COMMIT		/* beacon sent, commit change */
-	} sc_updateslot;	/* slot time update fsm */
-
-	/* Crypto */
-	u32 sc_keymax;
-	DECLARE_BITMAP(sc_keymap, ATH_KEYMAX);
-	u8 sc_splitmic;		/* split TKIP MIC keys */
-
-	/* RX */
-	struct list_head sc_rxbuf;
-	struct ath_descdma sc_rxdma;
-	int sc_rxbufsize;
-	u32 *sc_rxlink;
-
-	/* TX */
-	struct list_head sc_txbuf;
-	struct ath_txq sc_txq[ATH9K_NUM_TX_QUEUES];
-	struct ath_descdma sc_txdma;
-	u32 sc_txqsetup;
-	int sc_haltype2q[ATH9K_WME_AC_VO+1];
-	u16 seq_no;		/* TX sequence number */
-
-	/* Beacon */
-	struct ath9k_tx_queue_info sc_beacon_qi;
-	struct ath_descdma sc_bdma;
-	struct ath_txq *sc_cabq;
-	struct list_head sc_bbuf;
-	u32 sc_bhalq;
-	u32 sc_bmisscount;
-	u32 ast_be_xmit;
-	u64 bc_tstamp;
-
-	/* Rate */
+	struct ath_config sc_config;
+	struct ath_rx rx;
+	struct ath_tx tx;
+	struct ath_beacon beacon;
+	struct ieee80211_vif *sc_vaps[ATH_BCBUF];
 	struct ieee80211_rate rates[IEEE80211_NUM_BANDS][ATH_RATE_MAX];
 	struct ath_rate_table *hw_rate_table[ATH9K_MODE_MAX];
 	struct ath_rate_table *cur_rate_table;
-	u8 sc_protrix;
-
-	/* Channel, Band */
 	struct ieee80211_channel channels[IEEE80211_NUM_BANDS][ATH_CHAN_MAX];
 	struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
-
-	/* Locks */
-	spinlock_t sc_rxflushlock;
-	spinlock_t sc_rxbuflock;
-	spinlock_t sc_txbuflock;
-	spinlock_t sc_resetlock;
-
-	/* LEDs */
 	struct ath_led radio_led;
 	struct ath_led assoc_led;
 	struct ath_led tx_led;
 	struct ath_led rx_led;
-
-	/* Rfkill */
 	struct ath_rfkill rf_kill;
-
-	/* ANI */
 	struct ath_ani sc_ani;
+	struct ath9k_node_stats sc_halstats;
+#ifdef CONFIG_ATH9K_DEBUG
+	struct ath9k_debug sc_debug;
+#endif
 };
 
 int ath_reset(struct ath_softc *sc, bool retry_tx);
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
index 9455a6939876..02e1771bb274 100644
--- a/drivers/net/wireless/ath9k/main.c
+++ b/drivers/net/wireless/ath9k/main.c
@@ -348,7 +348,7 @@ static void ath_ani_calibrate(unsigned long data)
 	 * don't calibrate when we're scanning.
 	 * we are most likely not on our home channel.
 	 */
-	if (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)
+	if (sc->rx.rxfilter & FIF_BCN_PRBRESP_PROMISC)
 		return;
 
 	/* Long calibration runs independently of short calibration. */
@@ -487,9 +487,9 @@ static void ath9k_tasklet(unsigned long data)
 
 	if (status &
 	    (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
-		spin_lock_bh(&sc->sc_rxflushlock);
+		spin_lock_bh(&sc->rx.rxflushlock);
 		ath_rx_tasklet(sc, 0);
-		spin_unlock_bh(&sc->sc_rxflushlock);
+		spin_unlock_bh(&sc->rx.rxflushlock);
 	}
 	/* XXX: optimize this */
 	if (status & ATH9K_INT_TX)
@@ -1306,7 +1306,7 @@ static void ath_detach(struct ath_softc *sc)
 	/* cleanup tx queues */
 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
 		if (ATH_TXQ_SETUP(sc, i))
-			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
+			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
 
 	ath9k_hw_detach(sc->sc_ah);
 	ath9k_exit_debug(sc);
@@ -1397,15 +1397,15 @@ static int ath_init(u16 devid, struct ath_softc *sc)
 	 * priority.  Note that the hal handles reseting
 	 * these queues at the needed time.
 	 */
-	sc->sc_bhalq = ath_beaconq_setup(ah);
-	if (sc->sc_bhalq == -1) {
+	sc->beacon.beaconq = ath_beaconq_setup(ah);
+	if (sc->beacon.beaconq == -1) {
 		DPRINTF(sc, ATH_DBG_FATAL,
 			"Unable to setup a beacon xmit queue\n");
 		error = -EIO;
 		goto bad2;
 	}
-	sc->sc_cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
-	if (sc->sc_cabq == NULL) {
+	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
+	if (sc->beacon.cabq == NULL) {
 		DPRINTF(sc, ATH_DBG_FATAL,
 			"Unable to setup CAB xmit queue\n");
 		error = -EIO;
@@ -1415,8 +1415,8 @@ static int ath_init(u16 devid, struct ath_softc *sc)
 	sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
 	ath_cabq_update(sc);
 
-	for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
-		sc->sc_haltype2q[i] = -1;
+	for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
+		sc->tx.hwq_map[i] = -1;
 
 	/* Setup data queues */
 	/* NB: ensure BK queue is the lowest priority h/w queue */
@@ -1496,7 +1496,7 @@ static int ath_init(u16 devid, struct ath_softc *sc)
 	sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;
 
 	ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
-	sc->sc_defant = ath9k_hw_getdefantenna(ah);
+	sc->rx.defant = ath9k_hw_getdefantenna(ah);
 
 	ath9k_hw_getmac(ah, sc->sc_myaddr);
 	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
@@ -1505,11 +1505,11 @@ static int ath_init(u16 devid, struct ath_softc *sc)
 		ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
 	}
 
-	sc->sc_slottime = ATH9K_SLOT_TIME_9;	/* default to short slot time */
+	sc->beacon.slottime = ATH9K_SLOT_TIME_9;	/* default to short slot time */
 
 	/* initialize beacon slots */
-	for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
-		sc->sc_bslot[i] = ATH_IF_ID_ANY;
+	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
+		sc->beacon.bslot[i] = ATH_IF_ID_ANY;
 
 	/* save MISC configurations */
 	sc->sc_config.swBeaconProcess = 1;
@@ -1535,7 +1535,7 @@ bad2:
 	/* cleanup tx queues */
 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
 		if (ATH_TXQ_SETUP(sc, i))
-			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
+			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
 bad:
 	if (ah)
 		ath9k_hw_detach(ah);
@@ -1673,9 +1673,9 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
 		int i;
 		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
 			if (ATH_TXQ_SETUP(sc, i)) {
-				spin_lock_bh(&sc->sc_txq[i].axq_lock);
-				ath_txq_schedule(sc, &sc->sc_txq[i]);
-				spin_unlock_bh(&sc->sc_txq[i].axq_lock);
+				spin_lock_bh(&sc->tx.txq[i].axq_lock);
+				ath_txq_schedule(sc, &sc->tx.txq[i]);
+				spin_unlock_bh(&sc->tx.txq[i].axq_lock);
 			}
 		}
 	}
@@ -1810,19 +1810,19 @@ int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
 
 	switch (queue) {
 	case 0:
-		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
+		qnum = sc->tx.hwq_map[ATH9K_WME_AC_VO];
 		break;
 	case 1:
-		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
+		qnum = sc->tx.hwq_map[ATH9K_WME_AC_VI];
 		break;
 	case 2:
-		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
+		qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
 		break;
 	case 3:
-		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
+		qnum = sc->tx.hwq_map[ATH9K_WME_AC_BK];
 		break;
 	default:
-		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
+		qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
 		break;
 	}
 
@@ -1993,9 +1993,9 @@ static int ath9k_tx(struct ieee80211_hw *hw,
 	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
 		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
-			sc->seq_no += 0x10;
+			sc->tx.seq_no += 0x10;
 		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
-		hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
+		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
 	}
 
 	/* Add the padding after the header if this is not already done */
@@ -2049,7 +2049,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
 		ath_stoprecv(sc);
 		ath9k_hw_phy_disable(sc->sc_ah);
 	} else
-		sc->sc_rxlink = NULL;
+		sc->rx.rxlink = NULL;
 
 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
 	if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
@@ -2131,7 +2131,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
 	/* Reclaim beacon resources */
 	if (sc->sc_ah->ah_opmode == NL80211_IFTYPE_AP ||
 	    sc->sc_ah->ah_opmode == NL80211_IFTYPE_ADHOC) {
-		ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
+		ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
 		ath_beacon_return(sc, avp);
 	}
 
@@ -2250,7 +2250,7 @@ static int ath9k_config_interface(struct ieee80211_hw *hw,
 		 * causes reconfiguration; we may be called
 		 * with beacon transmission active.
 		 */
-		ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
+		ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
 
 		error = ath_beacon_alloc(sc, 0);
 		if (error != 0)
@@ -2296,7 +2296,7 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
 	changed_flags &= SUPPORTED_FILTERS;
 	*total_flags &= SUPPORTED_FILTERS;
 
-	sc->rx_filter = *total_flags;
+	sc->rx.rxfilter = *total_flags;
 	rfilt = ath_calcrxfilter(sc);
 	ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
 
@@ -2305,7 +2305,7 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
 		ath9k_hw_write_associd(sc->sc_ah, ath_bcast_mac, 0);
 	}
 
-	DPRINTF(sc, ATH_DBG_CONFIG, "Set HW RX filter: 0x%x\n", sc->rx_filter);
+	DPRINTF(sc, ATH_DBG_CONFIG, "Set HW RX filter: 0x%x\n", sc->rx.rxfilter);
 }
 
 static void ath9k_sta_notify(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
index b182ef570f88..cb449f0b4171 100644
--- a/drivers/net/wireless/ath9k/recv.c
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -41,20 +41,19 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
 	ASSERT(skb != NULL);
 	ds->ds_vdata = skb->data;
 
-	/* setup rx descriptors. The sc_rxbufsize here tells the harware
+	/* setup rx descriptors. The rx.bufsize here tells the harware
 	 * how much data it can DMA to us and that we are prepared
 	 * to process */
-	ath9k_hw_setuprxdesc(ah,
-			     ds,
-			     sc->sc_rxbufsize,
+	ath9k_hw_setuprxdesc(ah, ds,
+			     sc->rx.bufsize,
 			     0);
 
-	if (sc->sc_rxlink == NULL)
+	if (sc->rx.rxlink == NULL)
 		ath9k_hw_putrxbuf(ah, bf->bf_daddr);
 	else
-		*sc->sc_rxlink = bf->bf_daddr;
+		*sc->rx.rxlink = bf->bf_daddr;
 
-	sc->sc_rxlink = &ds->ds_link;
+	sc->rx.rxlink = &ds->ds_link;
 	ath9k_hw_rxena(ah);
 }
 
@@ -62,8 +61,8 @@ static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
 {
 	/* XXX block beacon interrupts */
 	ath9k_hw_setantenna(sc->sc_ah, antenna);
-	sc->sc_defant = antenna;
-	sc->sc_rxotherant = 0;
+	sc->rx.defant = antenna;
+	sc->rx.rxotherant = 0;
 }
 
 /*
@@ -272,20 +271,20 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
 	int error = 0;
 
 	do {
-		spin_lock_init(&sc->sc_rxflushlock);
+		spin_lock_init(&sc->rx.rxflushlock);
 		sc->sc_flags &= ~SC_OP_RXFLUSH;
-		spin_lock_init(&sc->sc_rxbuflock);
+		spin_lock_init(&sc->rx.rxbuflock);
 
-		sc->sc_rxbufsize = roundup(IEEE80211_MAX_MPDU_LEN,
+		sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
					   min(sc->sc_cachelsz,
					       (u16)64));
 
 		DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
-			sc->sc_cachelsz, sc->sc_rxbufsize);
+			sc->sc_cachelsz, sc->rx.bufsize);
 
 		/* Initialize rx descriptors */
 
-		error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
+		error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
					  "rx", nbufs, 1);
 		if (error != 0) {
 			DPRINTF(sc, ATH_DBG_FATAL,
@@ -293,8 +292,8 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
 			break;
 		}
 
-		list_for_each_entry(bf, &sc->sc_rxbuf, list) {
-			skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
+		list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+			skb = ath_rxbuf_alloc(sc, sc->rx.bufsize);
 			if (skb == NULL) {
 				error = -ENOMEM;
 				break;
@@ -302,8 +301,8 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
 
 			bf->bf_mpdu = skb;
 			bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data,
-					 sc->sc_rxbufsize,
+					 sc->rx.bufsize,
					 PCI_DMA_FROMDEVICE);
 			if (unlikely(pci_dma_mapping_error(sc->pdev,
				  bf->bf_buf_addr))) {
				dev_kfree_skb_any(skb);
@@ -315,7 +314,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
 			}
 			bf->bf_dmacontext = bf->bf_buf_addr;
 		}
-		sc->sc_rxlink = NULL;
+		sc->rx.rxlink = NULL;
 
 	} while (0);
 
@@ -330,14 +329,14 @@ void ath_rx_cleanup(struct ath_softc *sc)
 	struct sk_buff *skb;
 	struct ath_buf *bf;
 
-	list_for_each_entry(bf, &sc->sc_rxbuf, list) {
+	list_for_each_entry(bf, &sc->rx.rxbuf, list) {
 		skb = bf->bf_mpdu;
 		if (skb)
 			dev_kfree_skb(skb);
 	}
 
-	if (sc->sc_rxdma.dd_desc_len != 0)
-		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
+	if (sc->rx.rxdma.dd_desc_len != 0)
+		ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
 }
 
 /*
@@ -375,7 +374,7 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
 
 	/* Can't set HOSTAP into promiscous mode */
 	if (((sc->sc_ah->ah_opmode != NL80211_IFTYPE_AP) &&
-	     (sc->rx_filter & FIF_PROMISC_IN_BSS)) ||
+	     (sc->rx.rxfilter & FIF_PROMISC_IN_BSS)) ||
 	    (sc->sc_ah->ah_opmode == NL80211_IFTYPE_MONITOR)) {
 		rfilt |= ATH9K_RX_FILTER_PROM;
 		/* ??? To prevent from sending ACK */
@@ -401,25 +400,25 @@ int ath_startrecv(struct ath_softc *sc)
 	struct ath_hal *ah = sc->sc_ah;
 	struct ath_buf *bf, *tbf;
 
-	spin_lock_bh(&sc->sc_rxbuflock);
-	if (list_empty(&sc->sc_rxbuf))
+	spin_lock_bh(&sc->rx.rxbuflock);
+	if (list_empty(&sc->rx.rxbuf))
 		goto start_recv;
 
-	sc->sc_rxlink = NULL;
-	list_for_each_entry_safe(bf, tbf, &sc->sc_rxbuf, list) {
+	sc->rx.rxlink = NULL;
+	list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
 		ath_rx_buf_link(sc, bf);
 	}
 
 	/* We could have deleted elements so the list may be empty now */
-	if (list_empty(&sc->sc_rxbuf))
+	if (list_empty(&sc->rx.rxbuf))
 		goto start_recv;
 
-	bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
+	bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
 	ath9k_hw_putrxbuf(ah, bf->bf_daddr);
 	ath9k_hw_rxena(ah);
 
 start_recv:
-	spin_unlock_bh(&sc->sc_rxbuflock);
+	spin_unlock_bh(&sc->rx.rxbuflock);
 	ath_opmode_init(sc);
 	ath9k_hw_startpcureceive(ah);
 
@@ -435,25 +434,25 @@ bool ath_stoprecv(struct ath_softc *sc)
 	ath9k_hw_setrxfilter(ah, 0);
 	stopped = ath9k_hw_stopdmarecv(ah);
 	mdelay(3); /* 3ms is long enough for 1 frame */
-	sc->sc_rxlink = NULL;
+	sc->rx.rxlink = NULL;
 
 	return stopped;
 }
 
 void ath_flushrecv(struct ath_softc *sc)
 {
-	spin_lock_bh(&sc->sc_rxflushlock);
+	spin_lock_bh(&sc->rx.rxflushlock);
 	sc->sc_flags |= SC_OP_RXFLUSH;
 	ath_rx_tasklet(sc, 1);
 	sc->sc_flags &= ~SC_OP_RXFLUSH;
-	spin_unlock_bh(&sc->sc_rxflushlock);
+	spin_unlock_bh(&sc->rx.rxflushlock);
 }
 
 int ath_rx_tasklet(struct ath_softc *sc, int flush)
 {
 #define PA2DESC(_sc, _pa)                                               \
-	((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc +	\
-			     ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr)))
+	((struct ath_desc *)((caddr_t)(_sc)->rx.rxdma.dd_desc +	\
			     ((_pa) - (_sc)->rx.rxdma.dd_desc_paddr)))
 
 	struct ath_buf *bf;
 	struct ath_desc *ds;
@@ -465,19 +464,19 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 	bool decrypt_error = false;
 	u8 keyix;
 
-	spin_lock_bh(&sc->sc_rxbuflock);
+	spin_lock_bh(&sc->rx.rxbuflock);
 
 	do {
 		/* If handling rx interrupt and flush is in progress => exit */
 		if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
 			break;
 
-		if (list_empty(&sc->sc_rxbuf)) {
-			sc->sc_rxlink = NULL;
+		if (list_empty(&sc->rx.rxbuf)) {
+			sc->rx.rxlink = NULL;
 			break;
 		}
 
-		bf = list_first_entry(&sc->sc_rxbuf, struct ath_buf, list);
+		bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
 		ds = bf->bf_desc;
 
 		/*
@@ -499,8 +498,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 			struct ath_buf *tbf;
 			struct ath_desc *tds;
 
-			if (list_is_last(&bf->list, &sc->sc_rxbuf)) {
-				sc->sc_rxlink = NULL;
+			if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
+				sc->rx.rxlink = NULL;
 				break;
 			}
 
@@ -540,7 +539,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 			goto requeue;
 
 		/* The status portion of the descriptor could get corrupted. */
-		if (sc->sc_rxbufsize < ds->ds_rxstat.rs_datalen)
+		if (sc->rx.bufsize < ds->ds_rxstat.rs_datalen)
 			goto requeue;
 
 		if (!ath_rx_prepare(skb, ds, &rx_status, &decrypt_error, sc))
@@ -548,21 +547,21 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 
 		/* Ensure we always have an skb to requeue once we are done
 		 * processing the current buffer's skb */
-		requeue_skb = ath_rxbuf_alloc(sc, sc->sc_rxbufsize);
+		requeue_skb = ath_rxbuf_alloc(sc, sc->rx.bufsize);
 
 		/* If there is no memory we ignore the current RX'd frame,
 		 * tell hardware it can give us a new frame using the old
-		 * skb and put it at the tail of the sc->sc_rxbuf list for
+		 * skb and put it at the tail of the sc->rx.rxbuf list for
 		 * processing. */
 		if (!requeue_skb)
 			goto requeue;
 
-		pci_dma_sync_single_for_cpu(sc->pdev,
-					    bf->bf_buf_addr,
-					    sc->sc_rxbufsize,
+		/* Sync and unmap the frame */
+		pci_dma_sync_single_for_cpu(sc->pdev, bf->bf_buf_addr,
+					    sc->rx.bufsize,
					    PCI_DMA_FROMDEVICE);
 		pci_unmap_single(sc->pdev, bf->bf_buf_addr,
-				 sc->sc_rxbufsize,
+				 sc->rx.bufsize,
				 PCI_DMA_FROMDEVICE);
 
 		skb_put(skb, ds->ds_rxstat.rs_datalen);
@@ -596,7 +595,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 		/* We will now give hardware our shiny new allocated skb */
 		bf->bf_mpdu = requeue_skb;
 		bf->bf_buf_addr = pci_map_single(sc->pdev, requeue_skb->data,
-					 sc->sc_rxbufsize,
+					 sc->rx.bufsize,
					 PCI_DMA_FROMDEVICE);
 		if (unlikely(pci_dma_mapping_error(sc->pdev,
			  bf->bf_buf_addr))) {
@@ -612,18 +611,18 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
 		 * change the default rx antenna if rx diversity chooses the
 		 * other antenna 3 times in a row.
 		 */
-		if (sc->sc_defant != ds->ds_rxstat.rs_antenna) {
-			if (++sc->sc_rxotherant >= 3)
+		if (sc->rx.defant != ds->ds_rxstat.rs_antenna) {
+			if (++sc->rx.rxotherant >= 3)
 				ath_setdefantenna(sc, ds->ds_rxstat.rs_antenna);
 		} else {
-			sc->sc_rxotherant = 0;
+			sc->rx.rxotherant = 0;
 		}
 requeue:
-		list_move_tail(&bf->list, &sc->sc_rxbuf);
+		list_move_tail(&bf->list, &sc->rx.rxbuf);
 		ath_rx_buf_link(sc, bf);
 	} while (1);
 
-	spin_unlock_bh(&sc->sc_rxbuflock);
+	spin_unlock_bh(&sc->rx.rxbuflock);
 
 	return 0;
 #undef PA2DESC
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index e2e847db0891..f9c309ed3a2d 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -286,17 +286,17 @@ static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
 {
 	struct ath_buf *bf = NULL;
 
-	spin_lock_bh(&sc->sc_txbuflock);
+	spin_lock_bh(&sc->tx.txbuflock);
 
-	if (unlikely(list_empty(&sc->sc_txbuf))) {
-		spin_unlock_bh(&sc->sc_txbuflock);
+	if (unlikely(list_empty(&sc->tx.txbuf))) {
+		spin_unlock_bh(&sc->tx.txbuflock);
 		return NULL;
 	}
 
-	bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
+	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
 	list_del(&bf->list);
 
-	spin_unlock_bh(&sc->sc_txbuflock);
+	spin_unlock_bh(&sc->tx.txbuflock);
 
 	return bf;
 }
@@ -341,9 +341,9 @@ static void ath_tx_complete_buf(struct ath_softc *sc,
 	/*
 	 * Return the list of ath_buf of this mpdu to free queue
 	 */
-	spin_lock_irqsave(&sc->sc_txbuflock, flags);
-	list_splice_tail_init(bf_q, &sc->sc_txbuf);
-	spin_unlock_irqrestore(&sc->sc_txbuflock, flags);
+	spin_lock_irqsave(&sc->tx.txbuflock, flags);
+	list_splice_tail_init(bf_q, &sc->tx.txbuf);
+	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
 }
 
 /*
@@ -384,7 +384,7 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
 
 static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
+	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
 
 	spin_lock_bh(&txq->axq_lock);
 
@@ -397,7 +397,7 @@ static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 
 void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
+	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
 
 	ASSERT(tid->paused > 0);
 	spin_lock_bh(&txq->axq_lock);
@@ -686,7 +686,7 @@ static int ath_tx_send_normal(struct ath_softc *sc,
 
 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
+	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
 	struct ath_buf *bf;
 	struct list_head bf_head;
 	INIT_LIST_HEAD(&bf_head);
@@ -861,12 +861,12 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
 				struct ath_buf *tbf;
 
 				/* allocate new descriptor */
-				spin_lock_bh(&sc->sc_txbuflock);
-				ASSERT(!list_empty((&sc->sc_txbuf)));
-				tbf = list_first_entry(&sc->sc_txbuf,
+				spin_lock_bh(&sc->tx.txbuflock);
+				ASSERT(!list_empty((&sc->tx.txbuf)));
+				tbf = list_first_entry(&sc->tx.txbuf,
						struct ath_buf, list);
 				list_del(&tbf->list);
-				spin_unlock_bh(&sc->sc_txbuflock);
+				spin_unlock_bh(&sc->tx.txbuflock);
 
 				ATH_TXBUF_RESET(tbf);
 
@@ -1058,9 +1058,9 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 
 		if (bf_held) {
 			list_del(&bf_held->list);
-			spin_lock_bh(&sc->sc_txbuflock);
-			list_add_tail(&bf_held->list, &sc->sc_txbuf);
-			spin_unlock_bh(&sc->sc_txbuflock);
+			spin_lock_bh(&sc->tx.txbuflock);
+			list_add_tail(&bf_held->list, &sc->tx.txbuf);
+			spin_unlock_bh(&sc->tx.txbuflock);
 		}
 
 		if (!bf_isampdu(bf)) {
@@ -1129,11 +1129,11 @@ static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
 	if (!(sc->sc_flags & SC_OP_INVALID)) {
 		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
 			if (ATH_TXQ_SETUP(sc, i)) {
-				ath_tx_stopdma(sc, &sc->sc_txq[i]);
+				ath_tx_stopdma(sc, &sc->tx.txq[i]);
 				/* The TxDMA may not really be stopped.
 				 * Double check the hal tx pending count */
 				npend += ath9k_hw_numtxpending(ah,
-					sc->sc_txq[i].axq_qnum);
+					sc->tx.txq[i].axq_qnum);
 			}
 		}
 	}
@@ -1158,7 +1158,7 @@ static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
 
 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
 		if (ATH_TXQ_SETUP(sc, i))
-			ath_tx_draintxq(sc, &sc->sc_txq[i], retry_tx);
+			ath_tx_draintxq(sc, &sc->tx.txq[i], retry_tx);
 	}
 }
 
@@ -1820,9 +1820,9 @@ int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
 	}
 	spin_unlock_bh(&txq->axq_lock);
 
-	spin_lock_bh(&sc->sc_txbuflock);
-	list_add_tail(&bf->list, &sc->sc_txbuf);
-	spin_unlock_bh(&sc->sc_txbuflock);
+	spin_lock_bh(&sc->tx.txbuflock);
+	list_add_tail(&bf->list, &sc->tx.txbuf);
+	spin_unlock_bh(&sc->tx.txbuflock);
 
 	return r;
 }
@@ -1839,10 +1839,10 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
 	int error = 0;
 
 	do {
-		spin_lock_init(&sc->sc_txbuflock);
+		spin_lock_init(&sc->tx.txbuflock);
 
 		/* Setup tx descriptors */
-		error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
+		error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
					  "tx", nbufs, 1);
 		if (error != 0) {
 			DPRINTF(sc, ATH_DBG_FATAL,
@@ -1852,7 +1852,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
 		}
 
 		/* XXX allocate beacon state together with vap */
-		error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
+		error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
1856 | "beacon", ATH_BCBUF, 1); | 1856 | "beacon", ATH_BCBUF, 1); |
1857 | if (error != 0) { | 1857 | if (error != 0) { |
1858 | DPRINTF(sc, ATH_DBG_FATAL, | 1858 | DPRINTF(sc, ATH_DBG_FATAL, |
@@ -1874,12 +1874,12 @@ int ath_tx_init(struct ath_softc *sc, int nbufs) | |||
1874 | int ath_tx_cleanup(struct ath_softc *sc) | 1874 | int ath_tx_cleanup(struct ath_softc *sc) |
1875 | { | 1875 | { |
1876 | /* cleanup beacon descriptors */ | 1876 | /* cleanup beacon descriptors */ |
1877 | if (sc->sc_bdma.dd_desc_len != 0) | 1877 | if (sc->beacon.bdma.dd_desc_len != 0) |
1878 | ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); | 1878 | ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf); |
1879 | 1879 | ||
1880 | /* cleanup tx descriptors */ | 1880 | /* cleanup tx descriptors */ |
1881 | if (sc->sc_txdma.dd_desc_len != 0) | 1881 | if (sc->tx.txdma.dd_desc_len != 0) |
1882 | ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); | 1882 | ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf); |
1883 | 1883 | ||
1884 | return 0; | 1884 | return 0; |
1885 | } | 1885 | } |
@@ -1927,15 +1927,15 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) | |||
1927 | */ | 1927 | */ |
1928 | return NULL; | 1928 | return NULL; |
1929 | } | 1929 | } |
1930 | if (qnum >= ARRAY_SIZE(sc->sc_txq)) { | 1930 | if (qnum >= ARRAY_SIZE(sc->tx.txq)) { |
1931 | DPRINTF(sc, ATH_DBG_FATAL, | 1931 | DPRINTF(sc, ATH_DBG_FATAL, |
1932 | "qnum %u out of range, max %u!\n", | 1932 | "qnum %u out of range, max %u!\n", |
1933 | qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq)); | 1933 | qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq)); |
1934 | ath9k_hw_releasetxqueue(ah, qnum); | 1934 | ath9k_hw_releasetxqueue(ah, qnum); |
1935 | return NULL; | 1935 | return NULL; |
1936 | } | 1936 | } |
1937 | if (!ATH_TXQ_SETUP(sc, qnum)) { | 1937 | if (!ATH_TXQ_SETUP(sc, qnum)) { |
1938 | struct ath_txq *txq = &sc->sc_txq[qnum]; | 1938 | struct ath_txq *txq = &sc->tx.txq[qnum]; |
1939 | 1939 | ||
1940 | txq->axq_qnum = qnum; | 1940 | txq->axq_qnum = qnum; |
1941 | txq->axq_link = NULL; | 1941 | txq->axq_link = NULL; |
@@ -1946,9 +1946,9 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) | |||
1946 | txq->axq_aggr_depth = 0; | 1946 | txq->axq_aggr_depth = 0; |
1947 | txq->axq_totalqueued = 0; | 1947 | txq->axq_totalqueued = 0; |
1948 | txq->axq_linkbuf = NULL; | 1948 | txq->axq_linkbuf = NULL; |
1949 | sc->sc_txqsetup |= 1<<qnum; | 1949 | sc->tx.txqsetup |= 1<<qnum; |
1950 | } | 1950 | } |
1951 | return &sc->sc_txq[qnum]; | 1951 | return &sc->tx.txq[qnum]; |
1952 | } | 1952 | } |
1953 | 1953 | ||
1954 | /* Reclaim resources for a setup queue */ | 1954 | /* Reclaim resources for a setup queue */ |
@@ -1956,7 +1956,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) | |||
1956 | void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) | 1956 | void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) |
1957 | { | 1957 | { |
1958 | ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum); | 1958 | ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum); |
1959 | sc->sc_txqsetup &= ~(1<<txq->axq_qnum); | 1959 | sc->tx.txqsetup &= ~(1<<txq->axq_qnum); |
1960 | } | 1960 | } |
1961 | 1961 | ||
1962 | /* | 1962 | /* |
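sc->tx.txqsetup (previously sc->sc_txqsetup) is a per-queue bitmask: ath_txq_setup() above sets bit qnum once the hardware queue is initialised, ath_tx_cleanupq() clears it again, and the ATH_TXQ_SETUP(sc, i) checks used throughout this file test it. A tiny self-contained sketch of that bookkeeping, with hypothetical helper names in place of the driver functions:

#include <assert.h>
#include <stdio.h>

/* Stand-in for sc->tx.txqsetup: one bit per hardware TX queue. */
static unsigned int txqsetup;

/* Same idea as the ATH_TXQ_SETUP() test. */
#define TXQ_SETUP(mask, qnum) ((mask) & (1U << (qnum)))

static void txq_mark_setup(int qnum)   { txqsetup |= 1U << qnum;    } /* cf. ath_txq_setup()   */
static void txq_mark_cleanup(int qnum) { txqsetup &= ~(1U << qnum); } /* cf. ath_tx_cleanupq() */

int main(void)
{
	txq_mark_setup(2);
	assert(TXQ_SETUP(txqsetup, 2));
	assert(!TXQ_SETUP(txqsetup, 3));

	txq_mark_cleanup(2);
	assert(!TXQ_SETUP(txqsetup, 2));

	printf("final mask: 0x%x\n", txqsetup);
	return 0;
}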
@@ -1973,15 +1973,15 @@ int ath_tx_setup(struct ath_softc *sc, int haltype) | |||
1973 | { | 1973 | { |
1974 | struct ath_txq *txq; | 1974 | struct ath_txq *txq; |
1975 | 1975 | ||
1976 | if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) { | 1976 | if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) { |
1977 | DPRINTF(sc, ATH_DBG_FATAL, | 1977 | DPRINTF(sc, ATH_DBG_FATAL, |
1978 | "HAL AC %u out of range, max %zu!\n", | 1978 | "HAL AC %u out of range, max %zu!\n", |
1979 | haltype, ARRAY_SIZE(sc->sc_haltype2q)); | 1979 | haltype, ARRAY_SIZE(sc->tx.hwq_map)); |
1980 | return 0; | 1980 | return 0; |
1981 | } | 1981 | } |
1982 | txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype); | 1982 | txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype); |
1983 | if (txq != NULL) { | 1983 | if (txq != NULL) { |
1984 | sc->sc_haltype2q[haltype] = txq->axq_qnum; | 1984 | sc->tx.hwq_map[haltype] = txq->axq_qnum; |
1985 | return 1; | 1985 | return 1; |
1986 | } else | 1986 | } else |
1987 | return 0; | 1987 | return 0; |
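sc->tx.hwq_map (previously sc->sc_haltype2q) records, for each HAL access category, the hardware queue number that ath_txq_setup() returned; ath_tx_get_qnum() in the next hunk reads the same table, and both sides bounds-check the index against ARRAY_SIZE() before touching it. A short standalone sketch of that mapping; the four-entry array size and helper names are illustrative only:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))  /* same idea as the kernel macro */

/* Stand-in for sc->tx.hwq_map: HAL access category -> hardware queue number. */
static int hwq_map[4] = { -1, -1, -1, -1 };

/* Record the queue picked for an access category (cf. ath_tx_setup()). */
static int map_ac_to_queue(unsigned int haltype, int qnum)
{
	if (haltype >= ARRAY_SIZE(hwq_map))
		return 0;               /* out of range, as the DPRINTF path above reports */
	hwq_map[haltype] = qnum;
	return 1;
}

/* Look the queue back up (cf. the ATH9K_TX_QUEUE_DATA case in ath_tx_get_qnum()). */
static int map_queue_for_ac(unsigned int haltype)
{
	if (haltype >= ARRAY_SIZE(hwq_map))
		return -1;
	return hwq_map[haltype];
}

int main(void)
{
	map_ac_to_queue(1, 3);
	printf("AC 1 -> queue %d\n", map_queue_for_ac(1));  /* 3  */
	printf("AC 9 -> queue %d\n", map_queue_for_ac(9));  /* -1 */
	return 0;
}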
@@ -1993,19 +1993,19 @@ int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype) | |||
1993 | 1993 | ||
1994 | switch (qtype) { | 1994 | switch (qtype) { |
1995 | case ATH9K_TX_QUEUE_DATA: | 1995 | case ATH9K_TX_QUEUE_DATA: |
1996 | if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) { | 1996 | if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) { |
1997 | DPRINTF(sc, ATH_DBG_FATAL, | 1997 | DPRINTF(sc, ATH_DBG_FATAL, |
1998 | "HAL AC %u out of range, max %zu!\n", | 1998 | "HAL AC %u out of range, max %zu!\n", |
1999 | haltype, ARRAY_SIZE(sc->sc_haltype2q)); | 1999 | haltype, ARRAY_SIZE(sc->tx.hwq_map)); |
2000 | return -1; | 2000 | return -1; |
2001 | } | 2001 | } |
2002 | qnum = sc->sc_haltype2q[haltype]; | 2002 | qnum = sc->tx.hwq_map[haltype]; |
2003 | break; | 2003 | break; |
2004 | case ATH9K_TX_QUEUE_BEACON: | 2004 | case ATH9K_TX_QUEUE_BEACON: |
2005 | qnum = sc->sc_bhalq; | 2005 | qnum = sc->beacon.beaconq; |
2006 | break; | 2006 | break; |
2007 | case ATH9K_TX_QUEUE_CAB: | 2007 | case ATH9K_TX_QUEUE_CAB: |
2008 | qnum = sc->sc_cabq->axq_qnum; | 2008 | qnum = sc->beacon.cabq->axq_qnum; |
2009 | break; | 2009 | break; |
2010 | default: | 2010 | default: |
2011 | qnum = -1; | 2011 | qnum = -1; |
@@ -2021,7 +2021,7 @@ struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb) | |||
2021 | int qnum; | 2021 | int qnum; |
2022 | 2022 | ||
2023 | qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc); | 2023 | qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc); |
2024 | txq = &sc->sc_txq[qnum]; | 2024 | txq = &sc->tx.txq[qnum]; |
2025 | 2025 | ||
2026 | spin_lock_bh(&txq->axq_lock); | 2026 | spin_lock_bh(&txq->axq_lock); |
2027 | 2027 | ||
@@ -2050,17 +2050,17 @@ int ath_txq_update(struct ath_softc *sc, int qnum, | |||
2050 | int error = 0; | 2050 | int error = 0; |
2051 | struct ath9k_tx_queue_info qi; | 2051 | struct ath9k_tx_queue_info qi; |
2052 | 2052 | ||
2053 | if (qnum == sc->sc_bhalq) { | 2053 | if (qnum == sc->beacon.beaconq) { |
2054 | /* | 2054 | /* |
2055 | * XXX: for beacon queue, we just save the parameter. | 2055 | * XXX: for beacon queue, we just save the parameter. |
2056 | * It will be picked up by ath_beaconq_config when | 2056 | * It will be picked up by ath_beaconq_config when |
2057 | * it's necessary. | 2057 | * it's necessary. |
2058 | */ | 2058 | */ |
2059 | sc->sc_beacon_qi = *qinfo; | 2059 | sc->beacon.beacon_qi = *qinfo; |
2060 | return 0; | 2060 | return 0; |
2061 | } | 2061 | } |
2062 | 2062 | ||
2063 | ASSERT(sc->sc_txq[qnum].axq_qnum == qnum); | 2063 | ASSERT(sc->tx.txq[qnum].axq_qnum == qnum); |
2064 | 2064 | ||
2065 | ath9k_hw_get_txq_props(ah, qnum, &qi); | 2065 | ath9k_hw_get_txq_props(ah, qnum, &qi); |
2066 | qi.tqi_aifs = qinfo->tqi_aifs; | 2066 | qi.tqi_aifs = qinfo->tqi_aifs; |
@@ -2083,7 +2083,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum, | |||
2083 | int ath_cabq_update(struct ath_softc *sc) | 2083 | int ath_cabq_update(struct ath_softc *sc) |
2084 | { | 2084 | { |
2085 | struct ath9k_tx_queue_info qi; | 2085 | struct ath9k_tx_queue_info qi; |
2086 | int qnum = sc->sc_cabq->axq_qnum; | 2086 | int qnum = sc->beacon.cabq->axq_qnum; |
2087 | struct ath_beacon_config conf; | 2087 | struct ath_beacon_config conf; |
2088 | 2088 | ||
2089 | ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi); | 2089 | ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi); |
@@ -2117,7 +2117,7 @@ void ath_tx_tasklet(struct ath_softc *sc) | |||
2117 | */ | 2117 | */ |
2118 | for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { | 2118 | for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { |
2119 | if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i))) | 2119 | if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i))) |
2120 | ath_tx_processq(sc, &sc->sc_txq[i]); | 2120 | ath_tx_processq(sc, &sc->tx.txq[i]); |
2121 | } | 2121 | } |
2122 | } | 2122 | } |
2123 | 2123 | ||
@@ -2149,9 +2149,9 @@ void ath_tx_draintxq(struct ath_softc *sc, | |||
2149 | list_del(&bf->list); | 2149 | list_del(&bf->list); |
2150 | spin_unlock_bh(&txq->axq_lock); | 2150 | spin_unlock_bh(&txq->axq_lock); |
2151 | 2151 | ||
2152 | spin_lock_bh(&sc->sc_txbuflock); | 2152 | spin_lock_bh(&sc->tx.txbuflock); |
2153 | list_add_tail(&bf->list, &sc->sc_txbuf); | 2153 | list_add_tail(&bf->list, &sc->tx.txbuf); |
2154 | spin_unlock_bh(&sc->sc_txbuflock); | 2154 | spin_unlock_bh(&sc->tx.txbuflock); |
2155 | continue; | 2155 | continue; |
2156 | } | 2156 | } |
2157 | 2157 | ||
@@ -2189,9 +2189,9 @@ void ath_draintxq(struct ath_softc *sc, bool retry_tx) | |||
2189 | /* stop beacon queue. The beacon will be freed when | 2189 | /* stop beacon queue. The beacon will be freed when |
2190 | * we go to INIT state */ | 2190 | * we go to INIT state */ |
2191 | if (!(sc->sc_flags & SC_OP_INVALID)) { | 2191 | if (!(sc->sc_flags & SC_OP_INVALID)) { |
2192 | (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq); | 2192 | (void) ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq); |
2193 | DPRINTF(sc, ATH_DBG_XMIT, "beacon queue %x\n", | 2193 | DPRINTF(sc, ATH_DBG_XMIT, "beacon queue %x\n", |
2194 | ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq)); | 2194 | ath9k_hw_gettxbuf(sc->sc_ah, sc->beacon.beaconq)); |
2195 | } | 2195 | } |
2196 | 2196 | ||
2197 | ath_drain_txdataq(sc, retry_tx); | 2197 | ath_drain_txdataq(sc, retry_tx); |
@@ -2199,12 +2199,12 @@ void ath_draintxq(struct ath_softc *sc, bool retry_tx) | |||
2199 | 2199 | ||
2200 | u32 ath_txq_depth(struct ath_softc *sc, int qnum) | 2200 | u32 ath_txq_depth(struct ath_softc *sc, int qnum) |
2201 | { | 2201 | { |
2202 | return sc->sc_txq[qnum].axq_depth; | 2202 | return sc->tx.txq[qnum].axq_depth; |
2203 | } | 2203 | } |
2204 | 2204 | ||
2205 | u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum) | 2205 | u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum) |
2206 | { | 2206 | { |
2207 | return sc->sc_txq[qnum].axq_aggr_depth; | 2207 | return sc->tx.txq[qnum].axq_aggr_depth; |
2208 | } | 2208 | } |
2209 | 2209 | ||
2210 | bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno) | 2210 | bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno) |
@@ -2285,7 +2285,7 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid | |||
2285 | void ath_tx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tid) | 2285 | void ath_tx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tid) |
2286 | { | 2286 | { |
2287 | struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); | 2287 | struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); |
2288 | struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum]; | 2288 | struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum]; |
2289 | struct ath_buf *bf; | 2289 | struct ath_buf *bf; |
2290 | struct list_head bf_head; | 2290 | struct list_head bf_head; |
2291 | INIT_LIST_HEAD(&bf_head); | 2291 | INIT_LIST_HEAD(&bf_head); |
@@ -2467,7 +2467,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) | |||
2467 | struct ath_txq *txq; | 2467 | struct ath_txq *txq; |
2468 | for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { | 2468 | for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { |
2469 | if (ATH_TXQ_SETUP(sc, i)) { | 2469 | if (ATH_TXQ_SETUP(sc, i)) { |
2470 | txq = &sc->sc_txq[i]; | 2470 | txq = &sc->tx.txq[i]; |
2471 | 2471 | ||
2472 | spin_lock(&txq->axq_lock); | 2472 | spin_lock(&txq->axq_lock); |
2473 | 2473 | ||
@@ -2512,9 +2512,9 @@ void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb) | |||
2512 | if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { | 2512 | if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) { |
2513 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; | 2513 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; |
2514 | if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) | 2514 | if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) |
2515 | sc->seq_no += 0x10; | 2515 | sc->tx.seq_no += 0x10; |
2516 | hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); | 2516 | hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); |
2517 | hdr->seq_ctrl |= cpu_to_le16(sc->seq_no); | 2517 | hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no); |
2518 | } | 2518 | } |
2519 | 2519 | ||
2520 | /* Add the padding after the header if this is not already done */ | 2520 | /* Add the padding after the header if this is not already done */ |
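In the CAB-queue path above, sc->tx.seq_no (previously sc->seq_no) is the driver-maintained 802.11 sequence counter. The Sequence Control field carries the fragment number in its low 4 bits and the sequence number in the upper 12, so adding 0x10 advances the sequence number by one; the counter is bumped only for the first fragment of an MSDU, so later fragments keep the same number. Masking hdr->seq_ctrl with IEEE80211_SCTL_FRAG keeps just the fragment bits before the counter is ORed in, and cpu_to_le16() is used because the field is little-endian on the wire. A standalone sketch of the arithmetic on a host-order value (endian conversion omitted):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SCTL_FRAG 0x000F  /* fragment-number mask, cf. IEEE80211_SCTL_FRAG */
#define SCTL_SEQ  0xFFF0  /* sequence-number mask, cf. IEEE80211_SCTL_SEQ  */

int main(void)
{
	uint16_t seq_no = 0;        /* driver counter, like sc->tx.seq_no       */
	uint16_t seq_ctrl = 0x0003; /* example header field: seq 0, fragment 3  */

	seq_no += 0x10;             /* first fragment of a new MSDU: seq 0 -> 1 */

	seq_ctrl &= SCTL_FRAG;      /* keep only the fragment number            */
	seq_ctrl |= seq_no;         /* stamp in the driver's sequence number    */

	assert((seq_ctrl & SCTL_FRAG) == 3);         /* fragment unchanged       */
	assert(((seq_ctrl & SCTL_SEQ) >> 4) == 1);   /* sequence number is now 1 */

	printf("seq_ctrl = 0x%04x\n", seq_ctrl);
	return 0;
}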
@@ -2530,7 +2530,7 @@ void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb) | |||
2530 | memmove(skb->data, skb->data + padsize, hdrlen); | 2530 | memmove(skb->data, skb->data + padsize, hdrlen); |
2531 | } | 2531 | } |
2532 | 2532 | ||
2533 | txctl.txq = sc->sc_cabq; | 2533 | txctl.txq = sc->beacon.cabq; |
2534 | 2534 | ||
2535 | DPRINTF(sc, ATH_DBG_XMIT, "transmitting CABQ packet, skb: %p\n", skb); | 2535 | DPRINTF(sc, ATH_DBG_XMIT, "transmitting CABQ packet, skb: %p\n", skb); |
2536 | 2536 | ||