aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorSujith <Sujith.Manoharan@atheros.com>2008-11-24 01:37:55 -0500
committerJohn W. Linville <linville@tuxdriver.com>2008-11-26 09:47:49 -0500
commitff37e337beb838d4c2540fa93b2c4c632ee17750 (patch)
tree649d6dfedaef70558b222cc75e952193147449ed /drivers
parentbf8c1ac6d81ba8c0e4dc2215f84f5e2a3c8227e8 (diff)
ath9k: Code scrub
Merge core.c and base.c Remove Antenna Diversity (unused now). Remove unused chainmask handling code. Comment, indentation scrub. Signed-off-by: Sujith <Sujith.Manoharan@atheros.com> Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/net/wireless/ath9k/Makefile3
-rw-r--r--drivers/net/wireless/ath9k/beacon.c80
-rw-r--r--drivers/net/wireless/ath9k/core.c1610
-rw-r--r--drivers/net/wireless/ath9k/core.h155
-rw-r--r--drivers/net/wireless/ath9k/main.c1195
-rw-r--r--drivers/net/wireless/ath9k/rc.c9
-rw-r--r--drivers/net/wireless/ath9k/recv.c22
-rw-r--r--drivers/net/wireless/ath9k/xmit.c22
8 files changed, 1231 insertions, 1865 deletions
diff --git a/drivers/net/wireless/ath9k/Makefile b/drivers/net/wireless/ath9k/Makefile
index c58cfdeb49c9..c741e8d34748 100644
--- a/drivers/net/wireless/ath9k/Makefile
+++ b/drivers/net/wireless/ath9k/Makefile
@@ -9,7 +9,6 @@ ath9k-y += hw.o \
9 main.o \ 9 main.o \
10 recv.o \ 10 recv.o \
11 xmit.o \ 11 xmit.o \
12 rc.o \ 12 rc.o
13 core.o
14 13
15obj-$(CONFIG_ATH9K) += ath9k.o 14obj-$(CONFIG_ATH9K) += ath9k.o
diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c
index dcf23834194c..377d2df05316 100644
--- a/drivers/net/wireless/ath9k/beacon.c
+++ b/drivers/net/wireless/ath9k/beacon.c
@@ -14,13 +14,9 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17 /* Implementation of beacon processing. */
18
19#include "core.h" 17#include "core.h"
20 18
21/* 19/*
22 * Configure parameters for the beacon queue
23 *
24 * This function will modify certain transmit queue properties depending on 20 * This function will modify certain transmit queue properties depending on
25 * the operating mode of the station (AP or AdHoc). Parameters are AIFS 21 * the operating mode of the station (AP or AdHoc). Parameters are AIFS
26 * settings and channel width min/max 22 * settings and channel width min/max
@@ -54,9 +50,15 @@ static int ath_beaconq_config(struct ath_softc *sc)
54 } 50 }
55} 51}
56 52
53static void ath_bstuck_process(struct ath_softc *sc)
54{
55 DPRINTF(sc, ATH_DBG_BEACON,
56 "%s: stuck beacon; resetting (bmiss count %u)\n",
57 __func__, sc->sc_bmisscount);
58 ath_reset(sc, false);
59}
60
57/* 61/*
58 * Setup the beacon frame for transmit.
59 *
60 * Associates the beacon frame buffer with a transmit descriptor. Will set 62 * Associates the beacon frame buffer with a transmit descriptor. Will set
61 * up all required antenna switch parameters, rate codes, and channel flags. 63 * up all required antenna switch parameters, rate codes, and channel flags.
62 * Beacons are always sent out at the lowest rate, and are not retried. 64 * Beacons are always sent out at the lowest rate, and are not retried.
@@ -138,14 +140,7 @@ static void ath_beacon_setup(struct ath_softc *sc,
138 ctsrate, ctsduration, series, 4, 0); 140 ctsrate, ctsduration, series, 4, 0);
139} 141}
140 142
141/* 143/* Generate beacon frame and queue cab data for a vap */
142 * Generate beacon frame and queue cab data for a vap.
143 *
144 * Updates the contents of the beacon frame. It is assumed that the buffer for
145 * the beacon frame has been allocated in the ATH object, and simply needs to
146 * be filled for this cycle. Also, any CAB (crap after beacon?) traffic will
147 * be added to the beacon frame at this point.
148*/
149static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id) 144static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
150{ 145{
151 struct ath_buf *bf; 146 struct ath_buf *bf;
@@ -275,14 +270,6 @@ static void ath_beacon_start_adhoc(struct ath_softc *sc, int if_id)
275 sc->sc_bhalq, ito64(bf->bf_daddr), bf->bf_desc); 270 sc->sc_bhalq, ito64(bf->bf_daddr), bf->bf_desc);
276} 271}
277 272
278/*
279 * Setup a h/w transmit queue for beacons.
280 *
281 * This function allocates an information structure (struct ath9k_txq_info)
282 * on the stack, sets some specific parameters (zero out channel width
283 * min/max, and enable aifs). The info structure does not need to be
284 * persistant.
285*/
286int ath_beaconq_setup(struct ath_hal *ah) 273int ath_beaconq_setup(struct ath_hal *ah)
287{ 274{
288 struct ath9k_tx_queue_info qi; 275 struct ath9k_tx_queue_info qi;
@@ -295,14 +282,6 @@ int ath_beaconq_setup(struct ath_hal *ah)
295 return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi); 282 return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
296} 283}
297 284
298
299/*
300 * Allocate and setup an initial beacon frame.
301 *
302 * Allocate a beacon state variable for a specific VAP instance created on
303 * the ATH interface. This routine also calculates the beacon "slot" for
304 * staggared beacons in the mBSSID case.
305*/
306int ath_beacon_alloc(struct ath_softc *sc, int if_id) 285int ath_beacon_alloc(struct ath_softc *sc, int if_id)
307{ 286{
308 struct ieee80211_vif *vif; 287 struct ieee80211_vif *vif;
@@ -321,7 +300,6 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
321 if (!avp->av_bcbuf) { 300 if (!avp->av_bcbuf) {
322 /* Allocate beacon state for hostap/ibss. We know 301 /* Allocate beacon state for hostap/ibss. We know
323 * a buffer is available. */ 302 * a buffer is available. */
324
325 avp->av_bcbuf = list_first_entry(&sc->sc_bbuf, 303 avp->av_bcbuf = list_first_entry(&sc->sc_bbuf,
326 struct ath_buf, list); 304 struct ath_buf, list);
327 list_del(&avp->av_bcbuf->list); 305 list_del(&avp->av_bcbuf->list);
@@ -427,12 +405,6 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
427 return 0; 405 return 0;
428} 406}
429 407
430/*
431 * Reclaim beacon resources and return buffer to the pool.
432 *
433 * Checks the VAP to put the beacon frame buffer back to the ATH object
434 * queue, and de-allocates any skbs that were sent as CAB traffic.
435*/
436void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp) 408void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp)
437{ 409{
438 if (avp->av_bcbuf != NULL) { 410 if (avp->av_bcbuf != NULL) {
@@ -458,13 +430,6 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp)
458 } 430 }
459} 431}
460 432
461/*
462 * Tasklet for Sending Beacons
463 *
464 * Transmit one or more beacon frames at SWBA. Dynamic updates to the frame
465 * contents are done as needed and the slot time is also adjusted based on
466 * current state.
467*/
468void ath9k_beacon_tasklet(unsigned long data) 433void ath9k_beacon_tasklet(unsigned long data)
469{ 434{
470 struct ath_softc *sc = (struct ath_softc *)data; 435 struct ath_softc *sc = (struct ath_softc *)data;
@@ -481,9 +446,7 @@ void ath9k_beacon_tasklet(unsigned long data)
481 446
482 if (sc->sc_flags & SC_OP_NO_RESET) { 447 if (sc->sc_flags & SC_OP_NO_RESET) {
483 show_cycles = ath9k_hw_GetMibCycleCountsPct(ah, 448 show_cycles = ath9k_hw_GetMibCycleCountsPct(ah,
484 &rx_clear, 449 &rx_clear, &rx_frame, &tx_frame);
485 &rx_frame,
486 &tx_frame);
487 } 450 }
488 451
489 /* 452 /*
@@ -605,9 +568,10 @@ void ath9k_beacon_tasklet(unsigned long data)
605 if (sc->sc_updateslot == UPDATE) { 568 if (sc->sc_updateslot == UPDATE) {
606 sc->sc_updateslot = COMMIT; /* commit next beacon */ 569 sc->sc_updateslot = COMMIT; /* commit next beacon */
607 sc->sc_slotupdate = slot; 570 sc->sc_slotupdate = slot;
608 } else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot) 571 } else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot) {
609 ath_setslottime(sc); /* commit change to hardware */ 572 ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime);
610 573 sc->sc_updateslot = OK;
574 }
611 if (bfaddr != 0) { 575 if (bfaddr != 0) {
612 /* 576 /*
613 * Stop any current dma and put the new frame(s) on the queue. 577 * Stop any current dma and put the new frame(s) on the queue.
@@ -630,20 +594,6 @@ void ath9k_beacon_tasklet(unsigned long data)
630} 594}
631 595
632/* 596/*
633 * Tasklet for Beacon Stuck processing
634 *
635 * Processing for Beacon Stuck.
636 * Basically resets the chip.
637*/
638void ath_bstuck_process(struct ath_softc *sc)
639{
640 DPRINTF(sc, ATH_DBG_BEACON,
641 "%s: stuck beacon; resetting (bmiss count %u)\n",
642 __func__, sc->sc_bmisscount);
643 ath_reset(sc, false);
644}
645
646/*
647 * Configure the beacon and sleep timers. 597 * Configure the beacon and sleep timers.
648 * 598 *
649 * When operating as an AP this resets the TSF and sets 599 * When operating as an AP this resets the TSF and sets
@@ -886,8 +836,6 @@ void ath_beacon_config(struct ath_softc *sc, int if_id)
886 } 836 }
887} 837}
888 838
889/* Function to collect beacon rssi data and resync beacon if necessary */
890
891void ath_beacon_sync(struct ath_softc *sc, int if_id) 839void ath_beacon_sync(struct ath_softc *sc, int if_id)
892{ 840{
893 /* 841 /*
diff --git a/drivers/net/wireless/ath9k/core.c b/drivers/net/wireless/ath9k/core.c
deleted file mode 100644
index fb6a013f3f31..000000000000
--- a/drivers/net/wireless/ath9k/core.c
+++ /dev/null
@@ -1,1610 +0,0 @@
1/*
2 * Copyright (c) 2008, Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "regd.h"
19
20static u32 ath_chainmask_sel_up_rssi_thres =
21 ATH_CHAINMASK_SEL_UP_RSSI_THRES;
22static u32 ath_chainmask_sel_down_rssi_thres =
23 ATH_CHAINMASK_SEL_DOWN_RSSI_THRES;
24static u32 ath_chainmask_sel_period =
25 ATH_CHAINMASK_SEL_TIMEOUT;
26
27/* return bus cachesize in 4B word units */
28
29static void bus_read_cachesize(struct ath_softc *sc, int *csz)
30{
31 u8 u8tmp;
32
33 pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, (u8 *)&u8tmp);
34 *csz = (int)u8tmp;
35
36 /*
37 * This check was put in to avoid "unplesant" consequences if
38 * the bootrom has not fully initialized all PCI devices.
39 * Sometimes the cache line size register is not set
40 */
41
42 if (*csz == 0)
43 *csz = DEFAULT_CACHELINE >> 2; /* Use the default size */
44}
45
46static u8 parse_mpdudensity(u8 mpdudensity)
47{
48 /*
49 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
50 * 0 for no restriction
51 * 1 for 1/4 us
52 * 2 for 1/2 us
53 * 3 for 1 us
54 * 4 for 2 us
55 * 5 for 4 us
56 * 6 for 8 us
57 * 7 for 16 us
58 */
59 switch (mpdudensity) {
60 case 0:
61 return 0;
62 case 1:
63 case 2:
64 case 3:
65 /* Our lower layer calculations limit our precision to
66 1 microsecond */
67 return 1;
68 case 4:
69 return 2;
70 case 5:
71 return 4;
72 case 6:
73 return 8;
74 case 7:
75 return 16;
76 default:
77 return 0;
78 }
79}
80
81/*
82 * Set current operating mode
83*/
84static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
85{
86 sc->sc_curmode = mode;
87 /*
88 * All protection frames are transmited at 2Mb/s for
89 * 11g, otherwise at 1Mb/s.
90 * XXX select protection rate index from rate table.
91 */
92 sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
93}
94
95/*
96 * Set up rate table (legacy rates)
97 */
98static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
99{
100 struct ath_rate_table *rate_table = NULL;
101 struct ieee80211_supported_band *sband;
102 struct ieee80211_rate *rate;
103 int i, maxrates;
104
105 switch (band) {
106 case IEEE80211_BAND_2GHZ:
107 rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
108 break;
109 case IEEE80211_BAND_5GHZ:
110 rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
111 break;
112 default:
113 break;
114 }
115
116 if (rate_table == NULL)
117 return;
118
119 sband = &sc->sbands[band];
120 rate = sc->rates[band];
121
122 if (rate_table->rate_cnt > ATH_RATE_MAX)
123 maxrates = ATH_RATE_MAX;
124 else
125 maxrates = rate_table->rate_cnt;
126
127 for (i = 0; i < maxrates; i++) {
128 rate[i].bitrate = rate_table->info[i].ratekbps / 100;
129 rate[i].hw_value = rate_table->info[i].ratecode;
130 sband->n_bitrates++;
131 DPRINTF(sc, ATH_DBG_CONFIG,
132 "%s: Rate: %2dMbps, ratecode: %2d\n",
133 __func__,
134 rate[i].bitrate / 10,
135 rate[i].hw_value);
136 }
137}
138
139/*
140 * Set up channel list
141 */
142static int ath_setup_channels(struct ath_softc *sc)
143{
144 struct ath_hal *ah = sc->sc_ah;
145 int nchan, i, a = 0, b = 0;
146 u8 regclassids[ATH_REGCLASSIDS_MAX];
147 u32 nregclass = 0;
148 struct ieee80211_supported_band *band_2ghz;
149 struct ieee80211_supported_band *band_5ghz;
150 struct ieee80211_channel *chan_2ghz;
151 struct ieee80211_channel *chan_5ghz;
152 struct ath9k_channel *c;
153
154 /* Fill in ah->ah_channels */
155 if (!ath9k_regd_init_channels(ah, ATH_CHAN_MAX, (u32 *)&nchan,
156 regclassids, ATH_REGCLASSIDS_MAX,
157 &nregclass, CTRY_DEFAULT, false, 1)) {
158 u32 rd = ah->ah_currentRD;
159 DPRINTF(sc, ATH_DBG_FATAL,
160 "%s: unable to collect channel list; "
161 "regdomain likely %u country code %u\n",
162 __func__, rd, CTRY_DEFAULT);
163 return -EINVAL;
164 }
165
166 band_2ghz = &sc->sbands[IEEE80211_BAND_2GHZ];
167 band_5ghz = &sc->sbands[IEEE80211_BAND_5GHZ];
168 chan_2ghz = sc->channels[IEEE80211_BAND_2GHZ];
169 chan_5ghz = sc->channels[IEEE80211_BAND_5GHZ];
170
171 for (i = 0; i < nchan; i++) {
172 c = &ah->ah_channels[i];
173 if (IS_CHAN_2GHZ(c)) {
174 chan_2ghz[a].band = IEEE80211_BAND_2GHZ;
175 chan_2ghz[a].center_freq = c->channel;
176 chan_2ghz[a].max_power = c->maxTxPower;
177
178 if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
179 chan_2ghz[a].flags |= IEEE80211_CHAN_NO_IBSS;
180 if (c->channelFlags & CHANNEL_PASSIVE)
181 chan_2ghz[a].flags |= IEEE80211_CHAN_PASSIVE_SCAN;
182
183 band_2ghz->n_channels = ++a;
184
185 DPRINTF(sc, ATH_DBG_CONFIG,
186 "%s: 2MHz channel: %d, "
187 "channelFlags: 0x%x\n",
188 __func__, c->channel, c->channelFlags);
189 } else if (IS_CHAN_5GHZ(c)) {
190 chan_5ghz[b].band = IEEE80211_BAND_5GHZ;
191 chan_5ghz[b].center_freq = c->channel;
192 chan_5ghz[b].max_power = c->maxTxPower;
193
194 if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
195 chan_5ghz[b].flags |= IEEE80211_CHAN_NO_IBSS;
196 if (c->channelFlags & CHANNEL_PASSIVE)
197 chan_5ghz[b].flags |= IEEE80211_CHAN_PASSIVE_SCAN;
198
199 band_5ghz->n_channels = ++b;
200
201 DPRINTF(sc, ATH_DBG_CONFIG,
202 "%s: 5MHz channel: %d, "
203 "channelFlags: 0x%x\n",
204 __func__, c->channel, c->channelFlags);
205 }
206 }
207
208 return 0;
209}
210
211/*
212 * Determine mode from channel flags
213 *
214 * This routine will provide the enumerated WIRELESSS_MODE value based
215 * on the settings of the channel flags. If no valid set of flags
216 * exist, the lowest mode (11b) is selected.
217*/
218
219static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan)
220{
221 if (chan->chanmode == CHANNEL_A)
222 return ATH9K_MODE_11A;
223 else if (chan->chanmode == CHANNEL_G)
224 return ATH9K_MODE_11G;
225 else if (chan->chanmode == CHANNEL_B)
226 return ATH9K_MODE_11B;
227 else if (chan->chanmode == CHANNEL_A_HT20)
228 return ATH9K_MODE_11NA_HT20;
229 else if (chan->chanmode == CHANNEL_G_HT20)
230 return ATH9K_MODE_11NG_HT20;
231 else if (chan->chanmode == CHANNEL_A_HT40PLUS)
232 return ATH9K_MODE_11NA_HT40PLUS;
233 else if (chan->chanmode == CHANNEL_A_HT40MINUS)
234 return ATH9K_MODE_11NA_HT40MINUS;
235 else if (chan->chanmode == CHANNEL_G_HT40PLUS)
236 return ATH9K_MODE_11NG_HT40PLUS;
237 else if (chan->chanmode == CHANNEL_G_HT40MINUS)
238 return ATH9K_MODE_11NG_HT40MINUS;
239
240 WARN_ON(1); /* should not get here */
241
242 return ATH9K_MODE_11B;
243}
244
245/*
246 * Set the current channel
247 *
248 * Set/change channels. If the channel is really being changed, it's done
249 * by reseting the chip. To accomplish this we must first cleanup any pending
250 * DMA, then restart stuff after a la ath_init.
251*/
252int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
253{
254 struct ath_hal *ah = sc->sc_ah;
255 bool fastcc = true, stopped;
256
257 if (sc->sc_flags & SC_OP_INVALID) /* the device is invalid or removed */
258 return -EIO;
259
260 DPRINTF(sc, ATH_DBG_CONFIG,
261 "%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
262 __func__,
263 ath9k_hw_mhz2ieee(ah, sc->sc_ah->ah_curchan->channel,
264 sc->sc_ah->ah_curchan->channelFlags),
265 sc->sc_ah->ah_curchan->channel,
266 ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
267 hchan->channel, hchan->channelFlags);
268
269 if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
270 hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
271 (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
272 (sc->sc_flags & SC_OP_FULL_RESET)) {
273 int status;
274 /*
275 * This is only performed if the channel settings have
276 * actually changed.
277 *
278 * To switch channels clear any pending DMA operations;
279 * wait long enough for the RX fifo to drain, reset the
280 * hardware at the new frequency, and then re-enable
281 * the relevant bits of the h/w.
282 */
283 ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
284 ath_draintxq(sc, false); /* clear pending tx frames */
285 stopped = ath_stoprecv(sc); /* turn off frame recv */
286
287 /* XXX: do not flush receive queue here. We don't want
288 * to flush data frames already in queue because of
289 * changing channel. */
290
291 if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
292 fastcc = false;
293
294 spin_lock_bh(&sc->sc_resetlock);
295 if (!ath9k_hw_reset(ah, hchan,
296 sc->sc_ht_info.tx_chan_width,
297 sc->sc_tx_chainmask,
298 sc->sc_rx_chainmask,
299 sc->sc_ht_extprotspacing,
300 fastcc, &status)) {
301 DPRINTF(sc, ATH_DBG_FATAL,
302 "%s: unable to reset channel %u (%uMhz) "
303 "flags 0x%x hal status %u\n", __func__,
304 ath9k_hw_mhz2ieee(ah, hchan->channel,
305 hchan->channelFlags),
306 hchan->channel, hchan->channelFlags, status);
307 spin_unlock_bh(&sc->sc_resetlock);
308 return -EIO;
309 }
310 spin_unlock_bh(&sc->sc_resetlock);
311
312 sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
313 sc->sc_flags &= ~SC_OP_FULL_RESET;
314
315 /* Re-enable rx framework */
316 if (ath_startrecv(sc) != 0) {
317 DPRINTF(sc, ATH_DBG_FATAL,
318 "%s: unable to restart recv logic\n", __func__);
319 return -EIO;
320 }
321 /*
322 * Change channels and update the h/w rate map
323 * if we're switching; e.g. 11a to 11b/g.
324 */
325 ath_setcurmode(sc, ath_chan2mode(hchan));
326
327 ath_update_txpow(sc); /* update tx power state */
328 /*
329 * Re-enable interrupts.
330 */
331 ath9k_hw_set_interrupts(ah, sc->sc_imask);
332 }
333 return 0;
334}
335
336/**********************/
337/* Chainmask Handling */
338/**********************/
339
340static void ath_chainmask_sel_timertimeout(unsigned long data)
341{
342 struct ath_chainmask_sel *cm = (struct ath_chainmask_sel *)data;
343 cm->switch_allowed = 1;
344}
345
346/* Start chainmask select timer */
347static void ath_chainmask_sel_timerstart(struct ath_chainmask_sel *cm)
348{
349 cm->switch_allowed = 0;
350 mod_timer(&cm->timer, ath_chainmask_sel_period);
351}
352
353/* Stop chainmask select timer */
354static void ath_chainmask_sel_timerstop(struct ath_chainmask_sel *cm)
355{
356 cm->switch_allowed = 0;
357 del_timer_sync(&cm->timer);
358}
359
360static void ath_chainmask_sel_init(struct ath_softc *sc, struct ath_node *an)
361{
362 struct ath_chainmask_sel *cm = &an->an_chainmask_sel;
363
364 memset(cm, 0, sizeof(struct ath_chainmask_sel));
365
366 cm->cur_tx_mask = sc->sc_tx_chainmask;
367 cm->cur_rx_mask = sc->sc_rx_chainmask;
368 cm->tx_avgrssi = ATH_RSSI_DUMMY_MARKER;
369 setup_timer(&cm->timer,
370 ath_chainmask_sel_timertimeout, (unsigned long) cm);
371}
372
373int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an)
374{
375 struct ath_chainmask_sel *cm = &an->an_chainmask_sel;
376
377 /*
378 * Disable auto-swtiching in one of the following if conditions.
379 * sc_chainmask_auto_sel is used for internal global auto-switching
380 * enabled/disabled setting
381 */
382 if (sc->sc_ah->ah_caps.tx_chainmask != ATH_CHAINMASK_SEL_3X3) {
383 cm->cur_tx_mask = sc->sc_tx_chainmask;
384 return cm->cur_tx_mask;
385 }
386
387 if (cm->tx_avgrssi == ATH_RSSI_DUMMY_MARKER)
388 return cm->cur_tx_mask;
389
390 if (cm->switch_allowed) {
391 /* Switch down from tx 3 to tx 2. */
392 if (cm->cur_tx_mask == ATH_CHAINMASK_SEL_3X3 &&
393 ATH_RSSI_OUT(cm->tx_avgrssi) >=
394 ath_chainmask_sel_down_rssi_thres) {
395 cm->cur_tx_mask = sc->sc_tx_chainmask;
396
397 /* Don't let another switch happen until
398 * this timer expires */
399 ath_chainmask_sel_timerstart(cm);
400 }
401 /* Switch up from tx 2 to 3. */
402 else if (cm->cur_tx_mask == sc->sc_tx_chainmask &&
403 ATH_RSSI_OUT(cm->tx_avgrssi) <=
404 ath_chainmask_sel_up_rssi_thres) {
405 cm->cur_tx_mask = ATH_CHAINMASK_SEL_3X3;
406
407 /* Don't let another switch happen
408 * until this timer expires */
409 ath_chainmask_sel_timerstart(cm);
410 }
411 }
412
413 return cm->cur_tx_mask;
414}
415
416/*
417 * Update tx/rx chainmask. For legacy association,
418 * hard code chainmask to 1x1, for 11n association, use
419 * the chainmask configuration.
420 */
421
422void ath_update_chainmask(struct ath_softc *sc, int is_ht)
423{
424 sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
425 if (is_ht) {
426 sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
427 sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
428 } else {
429 sc->sc_tx_chainmask = 1;
430 sc->sc_rx_chainmask = 1;
431 }
432
433 DPRINTF(sc, ATH_DBG_CONFIG, "%s: tx chmask: %d, rx chmask: %d\n",
434 __func__, sc->sc_tx_chainmask, sc->sc_rx_chainmask);
435}
436
437/*******/
438/* ANI */
439/*******/
440
441/*
442 * This routine performs the periodic noise floor calibration function
443 * that is used to adjust and optimize the chip performance. This
444 * takes environmental changes (location, temperature) into account.
445 * When the task is complete, it reschedules itself depending on the
446 * appropriate interval that was calculated.
447 */
448
449static void ath_ani_calibrate(unsigned long data)
450{
451 struct ath_softc *sc;
452 struct ath_hal *ah;
453 bool longcal = false;
454 bool shortcal = false;
455 bool aniflag = false;
456 unsigned int timestamp = jiffies_to_msecs(jiffies);
457 u32 cal_interval;
458
459 sc = (struct ath_softc *)data;
460 ah = sc->sc_ah;
461
462 /*
463 * don't calibrate when we're scanning.
464 * we are most likely not on our home channel.
465 */
466 if (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)
467 return;
468
469 /* Long calibration runs independently of short calibration. */
470 if ((timestamp - sc->sc_ani.sc_longcal_timer) >= ATH_LONG_CALINTERVAL) {
471 longcal = true;
472 DPRINTF(sc, ATH_DBG_ANI, "%s: longcal @%lu\n",
473 __func__, jiffies);
474 sc->sc_ani.sc_longcal_timer = timestamp;
475 }
476
477 /* Short calibration applies only while sc_caldone is false */
478 if (!sc->sc_ani.sc_caldone) {
479 if ((timestamp - sc->sc_ani.sc_shortcal_timer) >=
480 ATH_SHORT_CALINTERVAL) {
481 shortcal = true;
482 DPRINTF(sc, ATH_DBG_ANI, "%s: shortcal @%lu\n",
483 __func__, jiffies);
484 sc->sc_ani.sc_shortcal_timer = timestamp;
485 sc->sc_ani.sc_resetcal_timer = timestamp;
486 }
487 } else {
488 if ((timestamp - sc->sc_ani.sc_resetcal_timer) >=
489 ATH_RESTART_CALINTERVAL) {
490 ath9k_hw_reset_calvalid(ah, ah->ah_curchan,
491 &sc->sc_ani.sc_caldone);
492 if (sc->sc_ani.sc_caldone)
493 sc->sc_ani.sc_resetcal_timer = timestamp;
494 }
495 }
496
497 /* Verify whether we must check ANI */
498 if ((timestamp - sc->sc_ani.sc_checkani_timer) >=
499 ATH_ANI_POLLINTERVAL) {
500 aniflag = true;
501 sc->sc_ani.sc_checkani_timer = timestamp;
502 }
503
504 /* Skip all processing if there's nothing to do. */
505 if (longcal || shortcal || aniflag) {
506 /* Call ANI routine if necessary */
507 if (aniflag)
508 ath9k_hw_ani_monitor(ah, &sc->sc_halstats,
509 ah->ah_curchan);
510
511 /* Perform calibration if necessary */
512 if (longcal || shortcal) {
513 bool iscaldone = false;
514
515 if (ath9k_hw_calibrate(ah, ah->ah_curchan,
516 sc->sc_rx_chainmask, longcal,
517 &iscaldone)) {
518 if (longcal)
519 sc->sc_ani.sc_noise_floor =
520 ath9k_hw_getchan_noise(ah,
521 ah->ah_curchan);
522
523 DPRINTF(sc, ATH_DBG_ANI,
524 "%s: calibrate chan %u/%x nf: %d\n",
525 __func__,
526 ah->ah_curchan->channel,
527 ah->ah_curchan->channelFlags,
528 sc->sc_ani.sc_noise_floor);
529 } else {
530 DPRINTF(sc, ATH_DBG_ANY,
531 "%s: calibrate chan %u/%x failed\n",
532 __func__,
533 ah->ah_curchan->channel,
534 ah->ah_curchan->channelFlags);
535 }
536 sc->sc_ani.sc_caldone = iscaldone;
537 }
538 }
539
540 /*
541 * Set timer interval based on previous results.
542 * The interval must be the shortest necessary to satisfy ANI,
543 * short calibration and long calibration.
544 */
545
546 cal_interval = ATH_ANI_POLLINTERVAL;
547 if (!sc->sc_ani.sc_caldone)
548 cal_interval = min(cal_interval, (u32)ATH_SHORT_CALINTERVAL);
549
550 mod_timer(&sc->sc_ani.timer, jiffies + msecs_to_jiffies(cal_interval));
551}
552
553/********/
554/* Core */
555/********/
556
557int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
558{
559 struct ath_hal *ah = sc->sc_ah;
560 int status;
561 int error = 0;
562
563 DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n",
564 __func__, sc->sc_ah->ah_opmode);
565
566 /* Reset SERDES registers */
567 ath9k_hw_configpcipowersave(ah, 0);
568
569 /*
570 * The basic interface to setting the hardware in a good
571 * state is ``reset''. On return the hardware is known to
572 * be powered up and with interrupts disabled. This must
573 * be followed by initialization of the appropriate bits
574 * and then setup of the interrupt mask.
575 */
576
577 spin_lock_bh(&sc->sc_resetlock);
578 if (!ath9k_hw_reset(ah, initial_chan,
579 sc->sc_ht_info.tx_chan_width,
580 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
581 sc->sc_ht_extprotspacing, false, &status)) {
582 DPRINTF(sc, ATH_DBG_FATAL,
583 "%s: unable to reset hardware; hal status %u "
584 "(freq %u flags 0x%x)\n", __func__, status,
585 initial_chan->channel, initial_chan->channelFlags);
586 error = -EIO;
587 spin_unlock_bh(&sc->sc_resetlock);
588 goto done;
589 }
590 spin_unlock_bh(&sc->sc_resetlock);
591
592 /*
593 * This is needed only to setup initial state
594 * but it's best done after a reset.
595 */
596 ath_update_txpow(sc);
597
598 /*
599 * Setup the hardware after reset:
600 * The receive engine is set going.
601 * Frame transmit is handled entirely
602 * in the frame output path; there's nothing to do
603 * here except setup the interrupt mask.
604 */
605 if (ath_startrecv(sc) != 0) {
606 DPRINTF(sc, ATH_DBG_FATAL,
607 "%s: unable to start recv logic\n", __func__);
608 error = -EIO;
609 goto done;
610 }
611
612 /* Setup our intr mask. */
613 sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX
614 | ATH9K_INT_RXEOL | ATH9K_INT_RXORN
615 | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
616
617 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
618 sc->sc_imask |= ATH9K_INT_GTT;
619
620 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
621 sc->sc_imask |= ATH9K_INT_CST;
622
623 /*
624 * Enable MIB interrupts when there are hardware phy counters.
625 * Note we only do this (at the moment) for station mode.
626 */
627 if (ath9k_hw_phycounters(ah) &&
628 ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
629 (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
630 sc->sc_imask |= ATH9K_INT_MIB;
631 /*
632 * Some hardware processes the TIM IE and fires an
633 * interrupt when the TIM bit is set. For hardware
634 * that does, if not overridden by configuration,
635 * enable the TIM interrupt when operating as station.
636 */
637 if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
638 (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
639 !sc->sc_config.swBeaconProcess)
640 sc->sc_imask |= ATH9K_INT_TIM;
641
642 ath_setcurmode(sc, ath_chan2mode(initial_chan));
643
644 sc->sc_flags &= ~SC_OP_INVALID;
645
646 /* Disable BMISS interrupt when we're not associated */
647 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
648 ath9k_hw_set_interrupts(sc->sc_ah,sc->sc_imask);
649
650 ieee80211_wake_queues(sc->hw);
651done:
652 return error;
653}
654
655void ath_stop(struct ath_softc *sc)
656{
657 struct ath_hal *ah = sc->sc_ah;
658
659 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Cleaning up\n", __func__);
660
661 ieee80211_stop_queues(sc->hw);
662
663 /* make sure h/w will not generate any interrupt
664 * before setting the invalid flag. */
665 ath9k_hw_set_interrupts(ah, 0);
666
667 if (!(sc->sc_flags & SC_OP_INVALID)) {
668 ath_draintxq(sc, false);
669 ath_stoprecv(sc);
670 ath9k_hw_phy_disable(ah);
671 } else
672 sc->sc_rxlink = NULL;
673
674#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
675 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
676 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
677#endif
678 /* disable HAL and put h/w to sleep */
679 ath9k_hw_disable(sc->sc_ah);
680 ath9k_hw_configpcipowersave(sc->sc_ah, 1);
681
682 sc->sc_flags |= SC_OP_INVALID;
683}
684
685int ath_reset(struct ath_softc *sc, bool retry_tx)
686{
687 struct ath_hal *ah = sc->sc_ah;
688 int status;
689 int error = 0;
690
691 ath9k_hw_set_interrupts(ah, 0);
692 ath_draintxq(sc, retry_tx);
693 ath_stoprecv(sc);
694 ath_flushrecv(sc);
695
696 /* Reset chip */
697 spin_lock_bh(&sc->sc_resetlock);
698 if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan,
699 sc->sc_ht_info.tx_chan_width,
700 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
701 sc->sc_ht_extprotspacing, false, &status)) {
702 DPRINTF(sc, ATH_DBG_FATAL,
703 "%s: unable to reset hardware; hal status %u\n",
704 __func__, status);
705 error = -EIO;
706 }
707 spin_unlock_bh(&sc->sc_resetlock);
708
709 if (ath_startrecv(sc) != 0)
710 DPRINTF(sc, ATH_DBG_FATAL,
711 "%s: unable to start recv logic\n", __func__);
712
713 /*
714 * We may be doing a reset in response to a request
715 * that changes the channel so update any state that
716 * might change as a result.
717 */
718 ath_setcurmode(sc, ath_chan2mode(sc->sc_ah->ah_curchan));
719
720 ath_update_txpow(sc);
721
722 if (sc->sc_flags & SC_OP_BEACONS)
723 ath_beacon_config(sc, ATH_IF_ID_ANY); /* restart beacons */
724
725 ath9k_hw_set_interrupts(ah, sc->sc_imask);
726
727 /* Restart the txq */
728 if (retry_tx) {
729 int i;
730 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
731 if (ATH_TXQ_SETUP(sc, i)) {
732 spin_lock_bh(&sc->sc_txq[i].axq_lock);
733 ath_txq_schedule(sc, &sc->sc_txq[i]);
734 spin_unlock_bh(&sc->sc_txq[i].axq_lock);
735 }
736 }
737 }
738
739 return error;
740}
741
742/* Interrupt handler. Most of the actual processing is deferred.
743 * It's the caller's responsibility to ensure the chip is awake. */
744
irqreturn_t ath_isr(int irq, void *dev)
{
	struct ath_softc *sc = dev;
	struct ath_hal *ah = sc->sc_ah;
	enum ath9k_int status;
	bool sched = false;

	/* The do { } while (0) wrapper gives the shared-IRQ bail-out
	 * paths a 'return' exit while funnelling all other paths to the
	 * single scheduling decision below. */
	do {
		if (sc->sc_flags & SC_OP_INVALID) {
			/*
			 * The hardware is not ready/present, don't
			 * touch anything. Note this can happen early
			 * on if the IRQ is shared.
			 */
			return IRQ_NONE;
		}
		if (!ath9k_hw_intrpend(ah)) { /* shared irq, not for us */
			return IRQ_NONE;
		}

		/*
		 * Figure out the reason(s) for the interrupt. Note
		 * that the hal returns a pseudo-ISR that may include
		 * bits we haven't explicitly enabled so we mask the
		 * value to insure we only process bits we requested.
		 */
		ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */

		status &= sc->sc_imask;	/* discard unasked-for bits */

		/*
		 * If there are no status bits set, then this interrupt was not
		 * for me (should have been caught above).
		 */

		if (!status)
			return IRQ_NONE;

		/* Latch the status word for ath9k_tasklet() to consume. */
		sc->sc_intrstatus = status;

		if (status & ATH9K_INT_FATAL) {
			/* need a chip reset */
			sched = true;
		} else if (status & ATH9K_INT_RXORN) {
			/* need a chip reset */
			sched = true;
		} else {
			if (status & ATH9K_INT_SWBA) {
				/* schedule a tasklet for beacon handling */
				tasklet_schedule(&sc->bcon_tasklet);
			}
			if (status & ATH9K_INT_RXEOL) {
				/*
				 * NB: the hardware should re-read the link when
				 * RXE bit is written, but it doesn't work
				 * at least on older hardware revs.
				 */
				sched = true;
			}

			if (status & ATH9K_INT_TXURN)
				/* bump tx trigger level */
				ath9k_hw_updatetxtriglevel(ah, true);
			/* XXX: optimize this */
			if (status & ATH9K_INT_RX)
				sched = true;
			if (status & ATH9K_INT_TX)
				sched = true;
			if (status & ATH9K_INT_BMISS)
				sched = true;
			/* carrier sense timeout */
			if (status & ATH9K_INT_CST)
				sched = true;
			if (status & ATH9K_INT_MIB) {
				/*
				 * Disable interrupts until we service the MIB
				 * interrupt; otherwise it will continue to
				 * fire.
				 */
				ath9k_hw_set_interrupts(ah, 0);
				/*
				 * Let the hal handle the event. We assume
				 * it will clear whatever condition caused
				 * the interrupt.
				 */
				ath9k_hw_procmibevent(ah, &sc->sc_halstats);
				ath9k_hw_set_interrupts(ah, sc->sc_imask);
			}
			if (status & ATH9K_INT_TIM_TIMER) {
				if (!(ah->ah_caps.hw_caps &
				      ATH9K_HW_CAP_AUTOSLEEP)) {
					/* Clear RxAbort bit so that we can
					 * receive frames */
					ath9k_hw_setrxabort(ah, 0);
					sched = true;
				}
			}
		}
	} while (0);

	if (sched) {
		/* Defer the heavy lifting to ath9k_tasklet();
		 * turn off every interrupt except SWBA */
		ath9k_hw_set_interrupts(ah, (sc->sc_imask & ATH9K_INT_SWBA));
		tasklet_schedule(&sc->intr_tq);
	}

	return IRQ_HANDLED;
}
853
854/* Deferred interrupt processing */
855
856static void ath9k_tasklet(unsigned long data)
857{
858 struct ath_softc *sc = (struct ath_softc *)data;
859 u32 status = sc->sc_intrstatus;
860
861 if (status & ATH9K_INT_FATAL) {
862 /* need a chip reset */
863 ath_reset(sc, false);
864 return;
865 } else {
866
867 if (status &
868 (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
869 /* XXX: fill me in */
870 /*
871 if (status & ATH9K_INT_RXORN) {
872 }
873 if (status & ATH9K_INT_RXEOL) {
874 }
875 */
876 spin_lock_bh(&sc->sc_rxflushlock);
877 ath_rx_tasklet(sc, 0);
878 spin_unlock_bh(&sc->sc_rxflushlock);
879 }
880 /* XXX: optimize this */
881 if (status & ATH9K_INT_TX)
882 ath_tx_tasklet(sc);
883 /* XXX: fill me in */
884 /*
885 if (status & ATH9K_INT_BMISS) {
886 }
887 if (status & (ATH9K_INT_TIM | ATH9K_INT_DTIMSYNC)) {
888 if (status & ATH9K_INT_TIM) {
889 }
890 if (status & ATH9K_INT_DTIMSYNC) {
891 }
892 }
893 */
894 }
895
896 /* re-enable hardware interrupt */
897 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
898}
899
/*
 * One-time driver/hardware attach: attaches the HAL for 'devid',
 * resets the key cache, builds the channel and rate tables, allocates
 * the beacon/CAB/data tx queues, arms the ANI timer and records the
 * 11n capability flags.  Returns 0 on success or a negative errno;
 * on failure everything acquired so far is torn down.
 */
int ath_init(u16 devid, struct ath_softc *sc)
{
	struct ath_hal *ah = NULL;
	int status;
	int error = 0, i;
	int csz = 0;

	/* XXX: hardware will not be ready until ath_open() being called */
	sc->sc_flags |= SC_OP_INVALID;
	sc->sc_debug = DBG_DEFAULT;

	spin_lock_init(&sc->sc_resetlock);
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	bus_read_cachesize(sc, &csz);
	/* XXX assert csz is non-zero */
	sc->sc_cachelsz = csz << 2;	/* convert to bytes */

	ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
	if (ah == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to attach hardware; HAL status %u\n",
			__func__, status);
		error = -ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;

	/* Get the hardware key cache size. */
	sc->sc_keymax = ah->ah_caps.keycache_size;
	if (sc->sc_keymax > ATH_KEYMAX) {
		DPRINTF(sc, ATH_DBG_KEYCACHE,
			"%s: Warning, using only %u entries in %u key cache\n",
			__func__, ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath9k_hw_keyreset(ah, (u16) i);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use. If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 * XXX only for splitmic.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		set_bit(i, sc->sc_keymap);
		set_bit(i + 32, sc->sc_keymap);
		set_bit(i + 64, sc->sc_keymap);
		set_bit(i + 32 + 64, sc->sc_keymap);
	}

	/* Collect the channel list using the default country code */

	error = ath_setup_channels(sc);
	if (error)
		goto bad;

	/* default to MONITOR mode */
	sc->sc_ah->ah_opmode = ATH9K_M_MONITOR;

	/* Setup rate tables */

	ath_rate_attach(sc);
	ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
	ath_setup_rates(sc, IEEE80211_BAND_5GHZ);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority. Note that the hal handles reseting
	 * these queues at the needed time.
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == -1) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup a beacon xmit queue\n", __func__);
		error = -EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup CAB xmit queue\n", __func__);
		error = -EIO;
		goto bad2;
	}

	sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	/* No AC-to-h/w-queue mappings yet; filled in by ath_tx_setup(). */
	for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
		sc->sc_haltype2q[i] = -1;

	/* Setup data queues */
	/* NB: ensure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for BK traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}

	if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for BE traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for VI traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for VO traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}

	/* Initializes the noise floor to a reasonable default value.
	 * Later on this will be updated during ANI processing. */

	sc->sc_ani.sc_noise_floor = ATH_DEFAULT_NOISE_FLOOR;
	setup_timer(&sc->sc_ani.timer, ath_ani_calibrate, (unsigned long)sc);

	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)) {
		/*
		 * Whether we should enable h/w TKIP MIC.
		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
		 * report WMM capable, so it's always safe to turn on
		 * TKIP MIC in this case.
		 */
		ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
				       0, 1, NULL);
	}

	/*
	 * Check whether the separate key cache entries
	 * are required to handle both tx+rx MIC keys.
	 * With split mic keys the number of stations is limited
	 * to 27 otherwise 59.
	 */
	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				      ATH9K_CIPHER_MIC, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
				      0, NULL))
		sc->sc_splitmic = 1;

	/* turn on mcast key search if possible */
	if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
		(void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
					     1, NULL);

	sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
	sc->sc_config.txpowlimit_override = 0;

	/* 11n Capabilities */
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
	sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;

	ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
	sc->sc_defant = ath9k_hw_getdefantenna(ah);

	ath9k_hw_getmac(ah, sc->sc_myaddr);
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
		ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
		ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
		ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
	}

	sc->sc_slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */

	/* initialize beacon slots */
	for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
		sc->sc_bslot[i] = ATH_IF_ID_ANY;

	/* save MISC configurations */
	sc->sc_config.swBeaconProcess = 1;

#ifdef CONFIG_SLOW_ANT_DIV
	/* range is 40 - 255, we use something in the middle */
	ath_slow_ant_div_init(&sc->sc_antdiv, sc, 0x127);
#endif

	/* setup channels and rates */

	sc->sbands[IEEE80211_BAND_2GHZ].channels =
		sc->channels[IEEE80211_BAND_2GHZ];
	sc->sbands[IEEE80211_BAND_2GHZ].bitrates =
		sc->rates[IEEE80211_BAND_2GHZ];
	sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;

	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->ah_caps.wireless_modes)) {
		sc->sbands[IEEE80211_BAND_5GHZ].channels =
			sc->channels[IEEE80211_BAND_5GHZ];
		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
			sc->rates[IEEE80211_BAND_5GHZ];
		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
	}

	return 0;
bad2:
	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
bad:
	if (ah)
		ath9k_hw_detach(ah);

	return error;
}
1137
1138/*******************/
1139/* Node Management */
1140/*******************/
1141
1142void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
1143{
1144 struct ath_node *an;
1145
1146 an = (struct ath_node *)sta->drv_priv;
1147
1148 if (sc->sc_flags & SC_OP_TXAGGR)
1149 ath_tx_node_init(sc, an);
1150
1151 an->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
1152 sta->ht_cap.ampdu_factor);
1153 an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);
1154
1155 ath_chainmask_sel_init(sc, an);
1156 ath_chainmask_sel_timerstart(&an->an_chainmask_sel);
1157}
1158
1159void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
1160{
1161 struct ath_node *an = (struct ath_node *)sta->drv_priv;
1162
1163 ath_chainmask_sel_timerstop(&an->an_chainmask_sel);
1164
1165 if (sc->sc_flags & SC_OP_TXAGGR)
1166 ath_tx_node_cleanup(sc, an);
1167}
1168
1169/*
1170 * Set up New Node
1171 *
1172 * Setup driver-specific state for a newly associated node. This routine
1173 * really only applies if compression or XR are enabled, there is no code
1174 * covering any other cases.
1175*/
1176
1177void ath_newassoc(struct ath_softc *sc,
1178 struct ath_node *an, int isnew, int isuapsd)
1179{
1180 int tidno;
1181
1182 /* if station reassociates, tear down the aggregation state. */
1183 if (!isnew) {
1184 for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
1185 if (sc->sc_flags & SC_OP_TXAGGR)
1186 ath_tx_aggr_teardown(sc, an, tidno);
1187 }
1188 }
1189}
1190
1191/**************/
1192/* Encryption */
1193/**************/
1194
1195void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot)
1196{
1197 ath9k_hw_keyreset(sc->sc_ah, keyix);
1198 if (freeslot)
1199 clear_bit(keyix, sc->sc_keymap);
1200}
1201
1202int ath_keyset(struct ath_softc *sc,
1203 u16 keyix,
1204 struct ath9k_keyval *hk,
1205 const u8 mac[ETH_ALEN])
1206{
1207 bool status;
1208
1209 status = ath9k_hw_set_keycache_entry(sc->sc_ah,
1210 keyix, hk, mac, false);
1211
1212 return status != false;
1213}
1214
1215/***********************/
1216/* TX Power/Regulatory */
1217/***********************/
1218
1219/*
1220 * Set Transmit power in HAL
1221 *
1222 * This routine makes the actual HAL calls to set the new transmit power
1223 * limit.
1224*/
1225
1226void ath_update_txpow(struct ath_softc *sc)
1227{
1228 struct ath_hal *ah = sc->sc_ah;
1229 u32 txpow;
1230
1231 if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
1232 ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
1233 /* read back in case value is clamped */
1234 ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
1235 sc->sc_curtxpow = txpow;
1236 }
1237}
1238
1239/**************************/
1240/* Slow Antenna Diversity */
1241/**************************/
1242
1243void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
1244 struct ath_softc *sc,
1245 int32_t rssitrig)
1246{
1247 int trig;
1248
1249 /* antdivf_rssitrig can range from 40 - 0xff */
1250 trig = (rssitrig > 0xff) ? 0xff : rssitrig;
1251 trig = (rssitrig < 40) ? 40 : rssitrig;
1252
1253 antdiv->antdiv_sc = sc;
1254 antdiv->antdivf_rssitrig = trig;
1255}
1256
1257void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
1258 u8 num_antcfg,
1259 const u8 *bssid)
1260{
1261 antdiv->antdiv_num_antcfg =
1262 num_antcfg < ATH_ANT_DIV_MAX_CFG ?
1263 num_antcfg : ATH_ANT_DIV_MAX_CFG;
1264 antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
1265 antdiv->antdiv_curcfg = 0;
1266 antdiv->antdiv_bestcfg = 0;
1267 antdiv->antdiv_laststatetsf = 0;
1268
1269 memcpy(antdiv->antdiv_bssid, bssid, sizeof(antdiv->antdiv_bssid));
1270
1271 antdiv->antdiv_start = 1;
1272}
1273
void ath_slow_ant_div_stop(struct ath_antdiv *antdiv)
{
	/* Clearing the start flag makes ath_slow_ant_div() ignore further
	 * beacons; no other state needs tearing down here. */
	antdiv->antdiv_start = 0;
}
1278
1279static int32_t ath_find_max_val(int32_t *val,
1280 u8 num_val, u8 *max_index)
1281{
1282 u32 MaxVal = *val++;
1283 u32 cur_index = 0;
1284
1285 *max_index = 0;
1286 while (++cur_index < num_val) {
1287 if (*val > MaxVal) {
1288 MaxVal = *val;
1289 *max_index = cur_index;
1290 }
1291
1292 val++;
1293 }
1294
1295 return MaxVal;
1296}
1297
/*
 * Slow antenna diversity state machine, driven by beacons received
 * from our own BSS.  In IDLE the beacon RSSI of the current antenna
 * configuration is monitored; once it falls below the trigger and the
 * minimum idle dwell time has elapsed, the next configuration is
 * selected and the machine enters SCAN.  In SCAN each configuration
 * is sampled in turn, and after a full cycle the one with the best
 * recorded beacon RSSI is chosen and the machine returns to IDLE.
 */
void ath_slow_ant_div(struct ath_antdiv *antdiv,
		      struct ieee80211_hdr *hdr,
		      struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = antdiv->antdiv_sc;
	struct ath_hal *ah = sc->sc_ah;
	u64 curtsf = 0;
	u8 bestcfg, curcfg = antdiv->antdiv_curcfg;
	__le16 fc = hdr->frame_control;

	/* Only beacons from our BSSID (addr3) are used as RSSI samples,
	 * and only while diversity has been started. */
	if (antdiv->antdiv_start && ieee80211_is_beacon(fc)
	    && !compare_ether_addr(hdr->addr3, antdiv->antdiv_bssid)) {
		antdiv->antdiv_lastbrssi[curcfg] = rx_stats->rs_rssi;
		antdiv->antdiv_lastbtsf[curcfg] = ath9k_hw_gettsf64(sc->sc_ah);
		curtsf = antdiv->antdiv_lastbtsf[curcfg];
	} else {
		return;
	}

	switch (antdiv->antdiv_state) {
	case ATH_ANT_DIV_IDLE:
		/* Start scanning only when RSSI is below the trigger and
		 * we have dwelled in IDLE at least the minimum idle time. */
		if ((antdiv->antdiv_lastbrssi[curcfg] <
		     antdiv->antdivf_rssitrig)
		    && ((curtsf - antdiv->antdiv_laststatetsf) >
			ATH_ANT_DIV_MIN_IDLE_US)) {

			curcfg++;
			if (curcfg == antdiv->antdiv_num_antcfg)
				curcfg = 0;

			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_bestcfg = antdiv->antdiv_curcfg;
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}
		break;

	case ATH_ANT_DIV_SCAN:
		/* Dwell on each configuration for the minimum scan time. */
		if ((curtsf - antdiv->antdiv_laststatetsf) <
		    ATH_ANT_DIV_MIN_SCAN_US)
			break;

		curcfg++;
		if (curcfg == antdiv->antdiv_num_antcfg)
			curcfg = 0;

		if (curcfg == antdiv->antdiv_bestcfg) {
			/* Full cycle complete: switch to the configuration
			 * with the best recorded beacon RSSI. */
			ath_find_max_val(antdiv->antdiv_lastbrssi,
					 antdiv->antdiv_num_antcfg, &bestcfg);
			if (!ath9k_hw_select_antconfig(ah, bestcfg)) {
				antdiv->antdiv_bestcfg = bestcfg;
				antdiv->antdiv_curcfg = bestcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
			}
		} else {
			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}

		break;
	}
}
1366
1367/***********************/
1368/* Descriptor Handling */
1369/***********************/
1370
1371/*
1372 * Set up DMA descriptors
1373 *
1374 * This function will allocate both the DMA descriptor structure, and the
1375 * buffers it contains. These are used to contain the descriptors used
1376 * by the system.
1377*/
1378
1379int ath_descdma_setup(struct ath_softc *sc,
1380 struct ath_descdma *dd,
1381 struct list_head *head,
1382 const char *name,
1383 int nbuf,
1384 int ndesc)
1385{
1386#define DS2PHYS(_dd, _ds) \
1387 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
1388#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
1389#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
1390
1391 struct ath_desc *ds;
1392 struct ath_buf *bf;
1393 int i, bsize, error;
1394
1395 DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA: %u buffers %u desc/buf\n",
1396 __func__, name, nbuf, ndesc);
1397
1398 /* ath_desc must be a multiple of DWORDs */
1399 if ((sizeof(struct ath_desc) % 4) != 0) {
1400 DPRINTF(sc, ATH_DBG_FATAL, "%s: ath_desc not DWORD aligned\n",
1401 __func__);
1402 ASSERT((sizeof(struct ath_desc) % 4) == 0);
1403 error = -ENOMEM;
1404 goto fail;
1405 }
1406
1407 dd->dd_name = name;
1408 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
1409
1410 /*
1411 * Need additional DMA memory because we can't use
1412 * descriptors that cross the 4K page boundary. Assume
1413 * one skipped descriptor per 4K page.
1414 */
1415 if (!(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1416 u32 ndesc_skipped =
1417 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
1418 u32 dma_len;
1419
1420 while (ndesc_skipped) {
1421 dma_len = ndesc_skipped * sizeof(struct ath_desc);
1422 dd->dd_desc_len += dma_len;
1423
1424 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
1425 };
1426 }
1427
1428 /* allocate descriptors */
1429 dd->dd_desc = pci_alloc_consistent(sc->pdev,
1430 dd->dd_desc_len,
1431 &dd->dd_desc_paddr);
1432 if (dd->dd_desc == NULL) {
1433 error = -ENOMEM;
1434 goto fail;
1435 }
1436 ds = dd->dd_desc;
1437 DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
1438 __func__, dd->dd_name, ds, (u32) dd->dd_desc_len,
1439 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
1440
1441 /* allocate buffers */
1442 bsize = sizeof(struct ath_buf) * nbuf;
1443 bf = kmalloc(bsize, GFP_KERNEL);
1444 if (bf == NULL) {
1445 error = -ENOMEM;
1446 goto fail2;
1447 }
1448 memset(bf, 0, bsize);
1449 dd->dd_bufptr = bf;
1450
1451 INIT_LIST_HEAD(head);
1452 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
1453 bf->bf_desc = ds;
1454 bf->bf_daddr = DS2PHYS(dd, ds);
1455
1456 if (!(sc->sc_ah->ah_caps.hw_caps &
1457 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1458 /*
1459 * Skip descriptor addresses which can cause 4KB
1460 * boundary crossing (addr + length) with a 32 dword
1461 * descriptor fetch.
1462 */
1463 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
1464 ASSERT((caddr_t) bf->bf_desc <
1465 ((caddr_t) dd->dd_desc +
1466 dd->dd_desc_len));
1467
1468 ds += ndesc;
1469 bf->bf_desc = ds;
1470 bf->bf_daddr = DS2PHYS(dd, ds);
1471 }
1472 }
1473 list_add_tail(&bf->list, head);
1474 }
1475 return 0;
1476fail2:
1477 pci_free_consistent(sc->pdev,
1478 dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
1479fail:
1480 memset(dd, 0, sizeof(*dd));
1481 return error;
1482#undef ATH_DESC_4KB_BOUND_CHECK
1483#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
1484#undef DS2PHYS
1485}
1486
1487/*
1488 * Cleanup DMA descriptors
1489 *
1490 * This function will free the DMA block that was allocated for the descriptor
1491 * pool. Since this was allocated as one "chunk", it is freed in the same
1492 * manner.
1493*/
1494
1495void ath_descdma_cleanup(struct ath_softc *sc,
1496 struct ath_descdma *dd,
1497 struct list_head *head)
1498{
1499 /* Free memory associated with descriptors */
1500 pci_free_consistent(sc->pdev,
1501 dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
1502
1503 INIT_LIST_HEAD(head);
1504 kfree(dd->dd_bufptr);
1505 memset(dd, 0, sizeof(*dd));
1506}
1507
1508/*************/
1509/* Utilities */
1510/*************/
1511
1512int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
1513{
1514 int qnum;
1515
1516 switch (queue) {
1517 case 0:
1518 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
1519 break;
1520 case 1:
1521 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
1522 break;
1523 case 2:
1524 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1525 break;
1526 case 3:
1527 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
1528 break;
1529 default:
1530 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1531 break;
1532 }
1533
1534 return qnum;
1535}
1536
1537int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
1538{
1539 int qnum;
1540
1541 switch (queue) {
1542 case ATH9K_WME_AC_VO:
1543 qnum = 0;
1544 break;
1545 case ATH9K_WME_AC_VI:
1546 qnum = 1;
1547 break;
1548 case ATH9K_WME_AC_BE:
1549 qnum = 2;
1550 break;
1551 case ATH9K_WME_AC_BK:
1552 qnum = 3;
1553 break;
1554 default:
1555 qnum = -1;
1556 break;
1557 }
1558
1559 return qnum;
1560}
1561
1562
1563/*
1564 * Expand time stamp to TSF
1565 *
1566 * Extend 15-bit time stamp from rx descriptor to
1567 * a full 64-bit TSF using the current h/w TSF.
1568*/
1569
1570u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
1571{
1572 u64 tsf;
1573
1574 tsf = ath9k_hw_gettsf64(sc->sc_ah);
1575 if ((tsf & 0x7fff) < rstamp)
1576 tsf -= 0x8000;
1577 return (tsf & ~0x7fff) | rstamp;
1578}
1579
1580/*
1581 * Set Default Antenna
1582 *
1583 * Call into the HAL to set the default antenna to use. Not really valid for
1584 * MIMO technology.
1585*/
1586
1587void ath_setdefantenna(void *context, u32 antenna)
1588{
1589 struct ath_softc *sc = (struct ath_softc *)context;
1590 struct ath_hal *ah = sc->sc_ah;
1591
1592 /* XXX block beacon interrupts */
1593 ath9k_hw_setantenna(ah, antenna);
1594 sc->sc_defant = antenna;
1595 sc->sc_rxotherant = 0;
1596}
1597
1598/*
1599 * Set Slot Time
1600 *
1601 * This will wake up the chip if required, and set the slot time for the
1602 * frame (maximum transmit time). Slot time is assumed to be already set
1603 * in the ATH object member sc_slottime
1604*/
1605
void ath_setslottime(struct ath_softc *sc)
{
	/* Push the already-configured sc_slottime to the hardware and
	 * mark the pending slot-time update as completed. */
	ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime);
	sc->sc_updateslot = OK;
}
diff --git a/drivers/net/wireless/ath9k/core.h b/drivers/net/wireless/ath9k/core.h
index e6725567109d..21ee0c240528 100644
--- a/drivers/net/wireless/ath9k/core.h
+++ b/drivers/net/wireless/ath9k/core.h
@@ -47,10 +47,6 @@
47 47
48struct ath_node; 48struct ath_node;
49 49
50/******************/
51/* Utility macros */
52/******************/
53
54/* Macro to expand scalars to 64-bit objects */ 50/* Macro to expand scalars to 64-bit objects */
55 51
56#define ito64(x) (sizeof(x) == 8) ? \ 52#define ito64(x) (sizeof(x) == 8) ? \
@@ -86,11 +82,6 @@ struct ath_node;
86 82
87#define ATH_TXQ_SETUP(sc, i) ((sc)->sc_txqsetup & (1<<i)) 83#define ATH_TXQ_SETUP(sc, i) ((sc)->sc_txqsetup & (1<<i))
88 84
89static inline unsigned long get_timestamp(void)
90{
91 return ((jiffies / HZ) * 1000) + (jiffies % HZ) * (1000 / HZ);
92}
93
94static const u8 ath_bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 85static const u8 ath_bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
95 86
96/*************/ 87/*************/
@@ -141,34 +132,6 @@ struct ath_config {
141 u8 swBeaconProcess; /* Process received beacons in SW (vs HW) */ 132 u8 swBeaconProcess; /* Process received beacons in SW (vs HW) */
142}; 133};
143 134
144/***********************/
145/* Chainmask Selection */
146/***********************/
147
148#define ATH_CHAINMASK_SEL_TIMEOUT 6000
149/* Default - Number of last RSSI values that is used for
150 * chainmask selection */
151#define ATH_CHAINMASK_SEL_RSSI_CNT 10
152/* Means use 3x3 chainmask instead of configured chainmask */
153#define ATH_CHAINMASK_SEL_3X3 7
154/* Default - Rssi threshold below which we have to switch to 3x3 */
155#define ATH_CHAINMASK_SEL_UP_RSSI_THRES 20
156/* Default - Rssi threshold above which we have to switch to
157 * user configured values */
158#define ATH_CHAINMASK_SEL_DOWN_RSSI_THRES 35
159/* Struct to store the chainmask select related info */
160struct ath_chainmask_sel {
161 struct timer_list timer;
162 int cur_tx_mask; /* user configured or 3x3 */
163 int cur_rx_mask; /* user configured or 3x3 */
164 int tx_avgrssi;
165 u8 switch_allowed:1, /* timer will set this */
166 cm_sel_enabled : 1;
167};
168
169int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an);
170void ath_update_chainmask(struct ath_softc *sc, int is_ht);
171
172/*************************/ 135/*************************/
173/* Descriptor Management */ 136/* Descriptor Management */
174/*************************/ 137/*************************/
@@ -240,7 +203,6 @@ struct ath_buf {
240 an aggregate) */ 203 an aggregate) */
241 struct ath_buf *bf_lastfrm; /* last buf of this frame */ 204 struct ath_buf *bf_lastfrm; /* last buf of this frame */
242 struct ath_buf *bf_next; /* next subframe in the aggregate */ 205 struct ath_buf *bf_next; /* next subframe in the aggregate */
243 struct ath_buf *bf_rifslast; /* last buf for RIFS burst */
244 void *bf_mpdu; /* enclosing frame structure */ 206 void *bf_mpdu; /* enclosing frame structure */
245 struct ath_desc *bf_desc; /* virtual addr of desc */ 207 struct ath_desc *bf_desc; /* virtual addr of desc */
246 dma_addr_t bf_daddr; /* physical addr of desc */ 208 dma_addr_t bf_daddr; /* physical addr of desc */
@@ -278,16 +240,10 @@ struct ath_descdma {
278 dma_addr_t dd_dmacontext; 240 dma_addr_t dd_dmacontext;
279}; 241};
280 242
281int ath_descdma_setup(struct ath_softc *sc, 243int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
282 struct ath_descdma *dd, 244 struct list_head *head, const char *name,
283 struct list_head *head, 245 int nbuf, int ndesc);
284 const char *name, 246void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
285 int nbuf,
286 int ndesc);
287int ath_desc_alloc(struct ath_softc *sc);
288void ath_desc_free(struct ath_softc *sc);
289void ath_descdma_cleanup(struct ath_softc *sc,
290 struct ath_descdma *dd,
291 struct list_head *head); 247 struct list_head *head);
292 248
293/***********/ 249/***********/
@@ -452,7 +408,6 @@ int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
452void ath_tx_tasklet(struct ath_softc *sc); 408void ath_tx_tasklet(struct ath_softc *sc);
453u32 ath_txq_depth(struct ath_softc *sc, int qnum); 409u32 ath_txq_depth(struct ath_softc *sc, int qnum);
454u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum); 410u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum);
455void ath_notify_txq_status(struct ath_softc *sc, u16 queue_depth);
456void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb); 411void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb);
457 412
458/**********************/ 413/**********************/
@@ -517,25 +472,18 @@ struct ath_node_aggr {
517/* driver-specific node state */ 472/* driver-specific node state */
518struct ath_node { 473struct ath_node {
519 struct ath_softc *an_sc; 474 struct ath_softc *an_sc;
520 struct ath_chainmask_sel an_chainmask_sel;
521 struct ath_node_aggr an_aggr; 475 struct ath_node_aggr an_aggr;
522 u16 maxampdu; 476 u16 maxampdu;
523 u8 mpdudensity; 477 u8 mpdudensity;
524}; 478};
525 479
526void ath_tx_resume_tid(struct ath_softc *sc, 480void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid);
527 struct ath_atx_tid *tid);
528bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno); 481bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno);
529void ath_tx_aggr_teardown(struct ath_softc *sc, 482void ath_tx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tidno);
530 struct ath_node *an, u8 tidno);
531int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, 483int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
532 u16 tid, u16 *ssn); 484 u16 tid, u16 *ssn);
533int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid); 485int ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
534void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid); 486void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
535void ath_newassoc(struct ath_softc *sc,
536 struct ath_node *node, int isnew, int isuapsd);
537void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta);
538void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta);
539 487
540/********/ 488/********/
541/* VAPs */ 489/* VAPs */
@@ -593,49 +541,8 @@ void ath9k_beacon_tasklet(unsigned long data);
593void ath_beacon_config(struct ath_softc *sc, int if_id); 541void ath_beacon_config(struct ath_softc *sc, int if_id);
594int ath_beaconq_setup(struct ath_hal *ah); 542int ath_beaconq_setup(struct ath_hal *ah);
595int ath_beacon_alloc(struct ath_softc *sc, int if_id); 543int ath_beacon_alloc(struct ath_softc *sc, int if_id);
596void ath_bstuck_process(struct ath_softc *sc);
597void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp); 544void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp);
598void ath_beacon_sync(struct ath_softc *sc, int if_id); 545void ath_beacon_sync(struct ath_softc *sc, int if_id);
599void ath_get_beaconconfig(struct ath_softc *sc,
600 int if_id,
601 struct ath_beacon_config *conf);
602/*********************/
603/* Antenna diversity */
604/*********************/
605
606#define ATH_ANT_DIV_MAX_CFG 2
607#define ATH_ANT_DIV_MIN_IDLE_US 1000000 /* us */
608#define ATH_ANT_DIV_MIN_SCAN_US 50000 /* us */
609
610enum ATH_ANT_DIV_STATE{
611 ATH_ANT_DIV_IDLE,
612 ATH_ANT_DIV_SCAN, /* evaluating antenna */
613};
614
615struct ath_antdiv {
616 struct ath_softc *antdiv_sc;
617 u8 antdiv_start;
618 enum ATH_ANT_DIV_STATE antdiv_state;
619 u8 antdiv_num_antcfg;
620 u8 antdiv_curcfg;
621 u8 antdiv_bestcfg;
622 int32_t antdivf_rssitrig;
623 int32_t antdiv_lastbrssi[ATH_ANT_DIV_MAX_CFG];
624 u64 antdiv_lastbtsf[ATH_ANT_DIV_MAX_CFG];
625 u64 antdiv_laststatetsf;
626 u8 antdiv_bssid[ETH_ALEN];
627};
628
629void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
630 struct ath_softc *sc, int32_t rssitrig);
631void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
632 u8 num_antcfg,
633 const u8 *bssid);
634void ath_slow_ant_div_stop(struct ath_antdiv *antdiv);
635void ath_slow_ant_div(struct ath_antdiv *antdiv,
636 struct ieee80211_hdr *wh,
637 struct ath_rx_status *rx_stats);
638void ath_setdefantenna(void *sc, u32 antenna);
639 546
640/*******/ 547/*******/
641/* ANI */ 548/* ANI */
@@ -717,30 +624,8 @@ struct ath_rfkill {
717 624
718#define ATH_IF_ID_ANY 0xff 625#define ATH_IF_ID_ANY 0xff
719#define ATH_TXPOWER_MAX 100 /* .5 dBm units */ 626#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
720 627#define ATH_RSSI_DUMMY_MARKER 0x127
721#define RSSI_LPF_THRESHOLD -20 628#define ATH_RATE_DUMMY_MARKER 0
722#define ATH_RSSI_EP_MULTIPLIER (1<<7) /* pow2 to optimize out * and / */
723#define ATH_RATE_DUMMY_MARKER 0
724#define ATH_RSSI_LPF_LEN 10
725#define ATH_RSSI_DUMMY_MARKER 0x127
726
727#define ATH_EP_MUL(x, mul) ((x) * (mul))
728#define ATH_EP_RND(x, mul) \
729 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul))
730#define ATH_RSSI_OUT(x) \
731 (((x) != ATH_RSSI_DUMMY_MARKER) ? \
732 (ATH_EP_RND((x), ATH_RSSI_EP_MULTIPLIER)) : ATH_RSSI_DUMMY_MARKER)
733#define ATH_RSSI_IN(x) \
734 (ATH_EP_MUL((x), ATH_RSSI_EP_MULTIPLIER))
735#define ATH_LPF_RSSI(x, y, len) \
736 ((x != ATH_RSSI_DUMMY_MARKER) ? \
737 (((x) * ((len) - 1) + (y)) / (len)) : (y))
738#define ATH_RSSI_LPF(x, y) do { \
739 if ((y) >= RSSI_LPF_THRESHOLD) \
740 x = ATH_LPF_RSSI((x), \
741 ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN); \
742 } while (0)
743
744 629
745enum PROT_MODE { 630enum PROT_MODE {
746 PROT_M_NONE = 0, 631 PROT_M_NONE = 0,
@@ -748,12 +633,6 @@ enum PROT_MODE {
748 PROT_M_CTSONLY 633 PROT_M_CTSONLY
749}; 634};
750 635
751enum RATE_TYPE {
752 NORMAL_RATE = 0,
753 HALF_RATE,
754 QUARTER_RATE
755};
756
757struct ath_ht_info { 636struct ath_ht_info {
758 enum ath9k_ht_macmode tx_chan_width; 637 enum ath9k_ht_macmode tx_chan_width;
759 u8 ext_chan_offset; 638 u8 ext_chan_offset;
@@ -881,27 +760,9 @@ struct ath_softc {
881 struct ath_ani sc_ani; 760 struct ath_ani sc_ani;
882}; 761};
883 762
884int ath_init(u16 devid, struct ath_softc *sc);
885int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan);
886void ath_stop(struct ath_softc *sc);
887irqreturn_t ath_isr(int irq, void *dev);
888int ath_reset(struct ath_softc *sc, bool retry_tx); 763int ath_reset(struct ath_softc *sc, bool retry_tx);
889int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan);
890
891/*********************/
892/* Utility Functions */
893/*********************/
894
895void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot);
896int ath_keyset(struct ath_softc *sc,
897 u16 keyix,
898 struct ath9k_keyval *hk,
899 const u8 mac[ETH_ALEN]);
900int ath_get_hal_qnum(u16 queue, struct ath_softc *sc); 764int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
901int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc); 765int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
902void ath_setslottime(struct ath_softc *sc);
903void ath_update_txpow(struct ath_softc *sc);
904int ath_cabq_update(struct ath_softc *); 766int ath_cabq_update(struct ath_softc *);
905u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp);
906 767
907#endif /* CORE_H */ 768#endif /* CORE_H */
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
index 54d89abce478..f226a4daef75 100644
--- a/drivers/net/wireless/ath9k/main.c
+++ b/drivers/net/wireless/ath9k/main.c
@@ -14,8 +14,6 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17/* mac80211 and PCI callbacks */
18
19#include <linux/nl80211.h> 17#include <linux/nl80211.h>
20#include "core.h" 18#include "core.h"
21#include "reg.h" 19#include "reg.h"
@@ -40,6 +38,580 @@ static struct pci_device_id ath_pci_id_table[] __devinitdata = {
40 38
41static void ath_detach(struct ath_softc *sc); 39static void ath_detach(struct ath_softc *sc);
42 40
41/* return bus cachesize in 4B word units */
42
43static void bus_read_cachesize(struct ath_softc *sc, int *csz)
44{
45 u8 u8tmp;
46
47 pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, (u8 *)&u8tmp);
48 *csz = (int)u8tmp;
49
50 /*
51 * This check was put in to avoid "unplesant" consequences if
52 * the bootrom has not fully initialized all PCI devices.
53 * Sometimes the cache line size register is not set
54 */
55
56 if (*csz == 0)
57 *csz = DEFAULT_CACHELINE >> 2; /* Use the default size */
58}
59
60static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
61{
62 sc->sc_curmode = mode;
63 /*
64 * All protection frames are transmited at 2Mb/s for
65 * 11g, otherwise at 1Mb/s.
66 * XXX select protection rate index from rate table.
67 */
68 sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
69}
70
71static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan)
72{
73 if (chan->chanmode == CHANNEL_A)
74 return ATH9K_MODE_11A;
75 else if (chan->chanmode == CHANNEL_G)
76 return ATH9K_MODE_11G;
77 else if (chan->chanmode == CHANNEL_B)
78 return ATH9K_MODE_11B;
79 else if (chan->chanmode == CHANNEL_A_HT20)
80 return ATH9K_MODE_11NA_HT20;
81 else if (chan->chanmode == CHANNEL_G_HT20)
82 return ATH9K_MODE_11NG_HT20;
83 else if (chan->chanmode == CHANNEL_A_HT40PLUS)
84 return ATH9K_MODE_11NA_HT40PLUS;
85 else if (chan->chanmode == CHANNEL_A_HT40MINUS)
86 return ATH9K_MODE_11NA_HT40MINUS;
87 else if (chan->chanmode == CHANNEL_G_HT40PLUS)
88 return ATH9K_MODE_11NG_HT40PLUS;
89 else if (chan->chanmode == CHANNEL_G_HT40MINUS)
90 return ATH9K_MODE_11NG_HT40MINUS;
91
92 WARN_ON(1); /* should not get here */
93
94 return ATH9K_MODE_11B;
95}
96
97static void ath_update_txpow(struct ath_softc *sc)
98{
99 struct ath_hal *ah = sc->sc_ah;
100 u32 txpow;
101
102 if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
103 ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
104 /* read back in case value is clamped */
105 ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
106 sc->sc_curtxpow = txpow;
107 }
108}
109
110static u8 parse_mpdudensity(u8 mpdudensity)
111{
112 /*
113 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
114 * 0 for no restriction
115 * 1 for 1/4 us
116 * 2 for 1/2 us
117 * 3 for 1 us
118 * 4 for 2 us
119 * 5 for 4 us
120 * 6 for 8 us
121 * 7 for 16 us
122 */
123 switch (mpdudensity) {
124 case 0:
125 return 0;
126 case 1:
127 case 2:
128 case 3:
129 /* Our lower layer calculations limit our precision to
130 1 microsecond */
131 return 1;
132 case 4:
133 return 2;
134 case 5:
135 return 4;
136 case 6:
137 return 8;
138 case 7:
139 return 16;
140 default:
141 return 0;
142 }
143}
144
145static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
146{
147 struct ath_rate_table *rate_table = NULL;
148 struct ieee80211_supported_band *sband;
149 struct ieee80211_rate *rate;
150 int i, maxrates;
151
152 switch (band) {
153 case IEEE80211_BAND_2GHZ:
154 rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
155 break;
156 case IEEE80211_BAND_5GHZ:
157 rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
158 break;
159 default:
160 break;
161 }
162
163 if (rate_table == NULL)
164 return;
165
166 sband = &sc->sbands[band];
167 rate = sc->rates[band];
168
169 if (rate_table->rate_cnt > ATH_RATE_MAX)
170 maxrates = ATH_RATE_MAX;
171 else
172 maxrates = rate_table->rate_cnt;
173
174 for (i = 0; i < maxrates; i++) {
175 rate[i].bitrate = rate_table->info[i].ratekbps / 100;
176 rate[i].hw_value = rate_table->info[i].ratecode;
177 sband->n_bitrates++;
178 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Rate: %2dMbps, ratecode: %2d\n",
179 __func__, rate[i].bitrate / 10, rate[i].hw_value);
180 }
181}
182
183static int ath_setup_channels(struct ath_softc *sc)
184{
185 struct ath_hal *ah = sc->sc_ah;
186 int nchan, i, a = 0, b = 0;
187 u8 regclassids[ATH_REGCLASSIDS_MAX];
188 u32 nregclass = 0;
189 struct ieee80211_supported_band *band_2ghz;
190 struct ieee80211_supported_band *band_5ghz;
191 struct ieee80211_channel *chan_2ghz;
192 struct ieee80211_channel *chan_5ghz;
193 struct ath9k_channel *c;
194
195 /* Fill in ah->ah_channels */
196 if (!ath9k_regd_init_channels(ah, ATH_CHAN_MAX, (u32 *)&nchan,
197 regclassids, ATH_REGCLASSIDS_MAX,
198 &nregclass, CTRY_DEFAULT, false, 1)) {
199 u32 rd = ah->ah_currentRD;
200 DPRINTF(sc, ATH_DBG_FATAL,
201 "%s: unable to collect channel list; "
202 "regdomain likely %u country code %u\n",
203 __func__, rd, CTRY_DEFAULT);
204 return -EINVAL;
205 }
206
207 band_2ghz = &sc->sbands[IEEE80211_BAND_2GHZ];
208 band_5ghz = &sc->sbands[IEEE80211_BAND_5GHZ];
209 chan_2ghz = sc->channels[IEEE80211_BAND_2GHZ];
210 chan_5ghz = sc->channels[IEEE80211_BAND_5GHZ];
211
212 for (i = 0; i < nchan; i++) {
213 c = &ah->ah_channels[i];
214 if (IS_CHAN_2GHZ(c)) {
215 chan_2ghz[a].band = IEEE80211_BAND_2GHZ;
216 chan_2ghz[a].center_freq = c->channel;
217 chan_2ghz[a].max_power = c->maxTxPower;
218
219 if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
220 chan_2ghz[a].flags |= IEEE80211_CHAN_NO_IBSS;
221 if (c->channelFlags & CHANNEL_PASSIVE)
222 chan_2ghz[a].flags |= IEEE80211_CHAN_PASSIVE_SCAN;
223
224 band_2ghz->n_channels = ++a;
225
226 DPRINTF(sc, ATH_DBG_CONFIG, "%s: 2MHz channel: %d, "
227 "channelFlags: 0x%x\n",
228 __func__, c->channel, c->channelFlags);
229 } else if (IS_CHAN_5GHZ(c)) {
230 chan_5ghz[b].band = IEEE80211_BAND_5GHZ;
231 chan_5ghz[b].center_freq = c->channel;
232 chan_5ghz[b].max_power = c->maxTxPower;
233
234 if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
235 chan_5ghz[b].flags |= IEEE80211_CHAN_NO_IBSS;
236 if (c->channelFlags & CHANNEL_PASSIVE)
237 chan_5ghz[b].flags |= IEEE80211_CHAN_PASSIVE_SCAN;
238
239 band_5ghz->n_channels = ++b;
240
241 DPRINTF(sc, ATH_DBG_CONFIG, "%s: 5MHz channel: %d, "
242 "channelFlags: 0x%x\n",
243 __func__, c->channel, c->channelFlags);
244 }
245 }
246
247 return 0;
248}
249
250/*
251 * Set/change channels. If the channel is really being changed, it's done
252 * by reseting the chip. To accomplish this we must first cleanup any pending
253 * DMA, then restart stuff.
254*/
255static int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
256{
257 struct ath_hal *ah = sc->sc_ah;
258 bool fastcc = true, stopped;
259
260 if (sc->sc_flags & SC_OP_INVALID)
261 return -EIO;
262
263 DPRINTF(sc, ATH_DBG_CONFIG,
264 "%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
265 __func__,
266 ath9k_hw_mhz2ieee(ah, sc->sc_ah->ah_curchan->channel,
267 sc->sc_ah->ah_curchan->channelFlags),
268 sc->sc_ah->ah_curchan->channel,
269 ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
270 hchan->channel, hchan->channelFlags);
271
272 if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
273 hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
274 (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
275 (sc->sc_flags & SC_OP_FULL_RESET)) {
276 int status;
277 /*
278 * This is only performed if the channel settings have
279 * actually changed.
280 *
281 * To switch channels clear any pending DMA operations;
282 * wait long enough for the RX fifo to drain, reset the
283 * hardware at the new frequency, and then re-enable
284 * the relevant bits of the h/w.
285 */
286 ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
287 ath_draintxq(sc, false); /* clear pending tx frames */
288 stopped = ath_stoprecv(sc); /* turn off frame recv */
289
290 /* XXX: do not flush receive queue here. We don't want
291 * to flush data frames already in queue because of
292 * changing channel. */
293
294 if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
295 fastcc = false;
296
297 spin_lock_bh(&sc->sc_resetlock);
298 if (!ath9k_hw_reset(ah, hchan, sc->sc_ht_info.tx_chan_width,
299 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
300 sc->sc_ht_extprotspacing, fastcc, &status)) {
301 DPRINTF(sc, ATH_DBG_FATAL,
302 "%s: unable to reset channel %u (%uMhz) "
303 "flags 0x%x hal status %u\n", __func__,
304 ath9k_hw_mhz2ieee(ah, hchan->channel,
305 hchan->channelFlags),
306 hchan->channel, hchan->channelFlags, status);
307 spin_unlock_bh(&sc->sc_resetlock);
308 return -EIO;
309 }
310 spin_unlock_bh(&sc->sc_resetlock);
311
312 sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
313 sc->sc_flags &= ~SC_OP_FULL_RESET;
314
315 if (ath_startrecv(sc) != 0) {
316 DPRINTF(sc, ATH_DBG_FATAL,
317 "%s: unable to restart recv logic\n", __func__);
318 return -EIO;
319 }
320
321 ath_setcurmode(sc, ath_chan2mode(hchan));
322 ath_update_txpow(sc);
323 ath9k_hw_set_interrupts(ah, sc->sc_imask);
324 }
325 return 0;
326}
327
328/*
329 * This routine performs the periodic noise floor calibration function
330 * that is used to adjust and optimize the chip performance. This
331 * takes environmental changes (location, temperature) into account.
332 * When the task is complete, it reschedules itself depending on the
333 * appropriate interval that was calculated.
334 */
335static void ath_ani_calibrate(unsigned long data)
336{
337 struct ath_softc *sc;
338 struct ath_hal *ah;
339 bool longcal = false;
340 bool shortcal = false;
341 bool aniflag = false;
342 unsigned int timestamp = jiffies_to_msecs(jiffies);
343 u32 cal_interval;
344
345 sc = (struct ath_softc *)data;
346 ah = sc->sc_ah;
347
348 /*
349 * don't calibrate when we're scanning.
350 * we are most likely not on our home channel.
351 */
352 if (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)
353 return;
354
355 /* Long calibration runs independently of short calibration. */
356 if ((timestamp - sc->sc_ani.sc_longcal_timer) >= ATH_LONG_CALINTERVAL) {
357 longcal = true;
358 DPRINTF(sc, ATH_DBG_ANI, "%s: longcal @%lu\n",
359 __func__, jiffies);
360 sc->sc_ani.sc_longcal_timer = timestamp;
361 }
362
363 /* Short calibration applies only while sc_caldone is false */
364 if (!sc->sc_ani.sc_caldone) {
365 if ((timestamp - sc->sc_ani.sc_shortcal_timer) >=
366 ATH_SHORT_CALINTERVAL) {
367 shortcal = true;
368 DPRINTF(sc, ATH_DBG_ANI, "%s: shortcal @%lu\n",
369 __func__, jiffies);
370 sc->sc_ani.sc_shortcal_timer = timestamp;
371 sc->sc_ani.sc_resetcal_timer = timestamp;
372 }
373 } else {
374 if ((timestamp - sc->sc_ani.sc_resetcal_timer) >=
375 ATH_RESTART_CALINTERVAL) {
376 ath9k_hw_reset_calvalid(ah, ah->ah_curchan,
377 &sc->sc_ani.sc_caldone);
378 if (sc->sc_ani.sc_caldone)
379 sc->sc_ani.sc_resetcal_timer = timestamp;
380 }
381 }
382
383 /* Verify whether we must check ANI */
384 if ((timestamp - sc->sc_ani.sc_checkani_timer) >=
385 ATH_ANI_POLLINTERVAL) {
386 aniflag = true;
387 sc->sc_ani.sc_checkani_timer = timestamp;
388 }
389
390 /* Skip all processing if there's nothing to do. */
391 if (longcal || shortcal || aniflag) {
392 /* Call ANI routine if necessary */
393 if (aniflag)
394 ath9k_hw_ani_monitor(ah, &sc->sc_halstats,
395 ah->ah_curchan);
396
397 /* Perform calibration if necessary */
398 if (longcal || shortcal) {
399 bool iscaldone = false;
400
401 if (ath9k_hw_calibrate(ah, ah->ah_curchan,
402 sc->sc_rx_chainmask, longcal,
403 &iscaldone)) {
404 if (longcal)
405 sc->sc_ani.sc_noise_floor =
406 ath9k_hw_getchan_noise(ah,
407 ah->ah_curchan);
408
409 DPRINTF(sc, ATH_DBG_ANI,
410 "%s: calibrate chan %u/%x nf: %d\n",
411 __func__,
412 ah->ah_curchan->channel,
413 ah->ah_curchan->channelFlags,
414 sc->sc_ani.sc_noise_floor);
415 } else {
416 DPRINTF(sc, ATH_DBG_ANY,
417 "%s: calibrate chan %u/%x failed\n",
418 __func__,
419 ah->ah_curchan->channel,
420 ah->ah_curchan->channelFlags);
421 }
422 sc->sc_ani.sc_caldone = iscaldone;
423 }
424 }
425
426 /*
427 * Set timer interval based on previous results.
428 * The interval must be the shortest necessary to satisfy ANI,
429 * short calibration and long calibration.
430 */
431
432 cal_interval = ATH_ANI_POLLINTERVAL;
433 if (!sc->sc_ani.sc_caldone)
434 cal_interval = min(cal_interval, (u32)ATH_SHORT_CALINTERVAL);
435
436 mod_timer(&sc->sc_ani.timer, jiffies + msecs_to_jiffies(cal_interval));
437}
438
439/*
440 * Update tx/rx chainmask. For legacy association,
441 * hard code chainmask to 1x1, for 11n association, use
442 * the chainmask configuration.
443 */
444static void ath_update_chainmask(struct ath_softc *sc, int is_ht)
445{
446 sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
447 if (is_ht) {
448 sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
449 sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
450 } else {
451 sc->sc_tx_chainmask = 1;
452 sc->sc_rx_chainmask = 1;
453 }
454
455 DPRINTF(sc, ATH_DBG_CONFIG, "%s: tx chmask: %d, rx chmask: %d\n",
456 __func__, sc->sc_tx_chainmask, sc->sc_rx_chainmask);
457}
458
459static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
460{
461 struct ath_node *an;
462
463 an = (struct ath_node *)sta->drv_priv;
464
465 if (sc->sc_flags & SC_OP_TXAGGR)
466 ath_tx_node_init(sc, an);
467
468 an->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
469 sta->ht_cap.ampdu_factor);
470 an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);
471}
472
473static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
474{
475 struct ath_node *an = (struct ath_node *)sta->drv_priv;
476
477 if (sc->sc_flags & SC_OP_TXAGGR)
478 ath_tx_node_cleanup(sc, an);
479}
480
481static void ath9k_tasklet(unsigned long data)
482{
483 struct ath_softc *sc = (struct ath_softc *)data;
484 u32 status = sc->sc_intrstatus;
485
486 if (status & ATH9K_INT_FATAL) {
487 /* need a chip reset */
488 ath_reset(sc, false);
489 return;
490 } else {
491
492 if (status &
493 (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
494 spin_lock_bh(&sc->sc_rxflushlock);
495 ath_rx_tasklet(sc, 0);
496 spin_unlock_bh(&sc->sc_rxflushlock);
497 }
498 /* XXX: optimize this */
499 if (status & ATH9K_INT_TX)
500 ath_tx_tasklet(sc);
501 }
502
503 /* re-enable hardware interrupt */
504 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
505}
506
507static irqreturn_t ath_isr(int irq, void *dev)
508{
509 struct ath_softc *sc = dev;
510 struct ath_hal *ah = sc->sc_ah;
511 enum ath9k_int status;
512 bool sched = false;
513
514 do {
515 if (sc->sc_flags & SC_OP_INVALID) {
516 /*
517 * The hardware is not ready/present, don't
518 * touch anything. Note this can happen early
519 * on if the IRQ is shared.
520 */
521 return IRQ_NONE;
522 }
523 if (!ath9k_hw_intrpend(ah)) { /* shared irq, not for us */
524 return IRQ_NONE;
525 }
526
527 /*
528 * Figure out the reason(s) for the interrupt. Note
529 * that the hal returns a pseudo-ISR that may include
530 * bits we haven't explicitly enabled so we mask the
531 * value to insure we only process bits we requested.
532 */
533 ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
534
535 status &= sc->sc_imask; /* discard unasked-for bits */
536
537 /*
538 * If there are no status bits set, then this interrupt was not
539 * for me (should have been caught above).
540 */
541 if (!status)
542 return IRQ_NONE;
543
544 sc->sc_intrstatus = status;
545
546 if (status & ATH9K_INT_FATAL) {
547 /* need a chip reset */
548 sched = true;
549 } else if (status & ATH9K_INT_RXORN) {
550 /* need a chip reset */
551 sched = true;
552 } else {
553 if (status & ATH9K_INT_SWBA) {
554 /* schedule a tasklet for beacon handling */
555 tasklet_schedule(&sc->bcon_tasklet);
556 }
557 if (status & ATH9K_INT_RXEOL) {
558 /*
559 * NB: the hardware should re-read the link when
560 * RXE bit is written, but it doesn't work
561 * at least on older hardware revs.
562 */
563 sched = true;
564 }
565
566 if (status & ATH9K_INT_TXURN)
567 /* bump tx trigger level */
568 ath9k_hw_updatetxtriglevel(ah, true);
569 /* XXX: optimize this */
570 if (status & ATH9K_INT_RX)
571 sched = true;
572 if (status & ATH9K_INT_TX)
573 sched = true;
574 if (status & ATH9K_INT_BMISS)
575 sched = true;
576 /* carrier sense timeout */
577 if (status & ATH9K_INT_CST)
578 sched = true;
579 if (status & ATH9K_INT_MIB) {
580 /*
581 * Disable interrupts until we service the MIB
582 * interrupt; otherwise it will continue to
583 * fire.
584 */
585 ath9k_hw_set_interrupts(ah, 0);
586 /*
587 * Let the hal handle the event. We assume
588 * it will clear whatever condition caused
589 * the interrupt.
590 */
591 ath9k_hw_procmibevent(ah, &sc->sc_halstats);
592 ath9k_hw_set_interrupts(ah, sc->sc_imask);
593 }
594 if (status & ATH9K_INT_TIM_TIMER) {
595 if (!(ah->ah_caps.hw_caps &
596 ATH9K_HW_CAP_AUTOSLEEP)) {
597 /* Clear RxAbort bit so that we can
598 * receive frames */
599 ath9k_hw_setrxabort(ah, 0);
600 sched = true;
601 }
602 }
603 }
604 } while (0);
605
606 if (sched) {
607 /* turn off every interrupt except SWBA */
608 ath9k_hw_set_interrupts(ah, (sc->sc_imask & ATH9K_INT_SWBA));
609 tasklet_schedule(&sc->intr_tq);
610 }
611
612 return IRQ_HANDLED;
613}
614
43static int ath_get_channel(struct ath_softc *sc, 615static int ath_get_channel(struct ath_softc *sc,
44 struct ieee80211_channel *chan) 616 struct ieee80211_channel *chan)
45{ 617{
@@ -90,6 +662,23 @@ static u32 ath_get_extchanmode(struct ath_softc *sc,
90 return chanmode; 662 return chanmode;
91} 663}
92 664
665static void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot)
666{
667 ath9k_hw_keyreset(sc->sc_ah, keyix);
668 if (freeslot)
669 clear_bit(keyix, sc->sc_keymap);
670}
671
672static int ath_keyset(struct ath_softc *sc, u16 keyix,
673 struct ath9k_keyval *hk, const u8 mac[ETH_ALEN])
674{
675 bool status;
676
677 status = ath9k_hw_set_keycache_entry(sc->sc_ah,
678 keyix, hk, mac, false);
679
680 return status != false;
681}
93 682
94static int ath_setkey_tkip(struct ath_softc *sc, 683static int ath_setkey_tkip(struct ath_softc *sc,
95 struct ieee80211_key_conf *key, 684 struct ieee80211_key_conf *key,
@@ -327,20 +916,6 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
327 } 916 }
328} 917}
329 918
330void ath_get_beaconconfig(struct ath_softc *sc,
331 int if_id,
332 struct ath_beacon_config *conf)
333{
334 struct ieee80211_hw *hw = sc->hw;
335
336 /* fill in beacon config data */
337
338 conf->beacon_interval = hw->conf.beacon_int;
339 conf->listen_interval = 100;
340 conf->dtim_count = 1;
341 conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
342}
343
344/********************************/ 919/********************************/
345/* LED functions */ 920/* LED functions */
346/********************************/ 921/********************************/
@@ -722,6 +1297,244 @@ static void ath_detach(struct ath_softc *sc)
722 ath9k_hw_detach(sc->sc_ah); 1297 ath9k_hw_detach(sc->sc_ah);
723} 1298}
724 1299
1300static int ath_init(u16 devid, struct ath_softc *sc)
1301{
1302 struct ath_hal *ah = NULL;
1303 int status;
1304 int error = 0, i;
1305 int csz = 0;
1306
1307 /* XXX: hardware will not be ready until ath_open() being called */
1308 sc->sc_flags |= SC_OP_INVALID;
1309 sc->sc_debug = DBG_DEFAULT;
1310
1311 spin_lock_init(&sc->sc_resetlock);
1312 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
1313 tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
1314 (unsigned long)sc);
1315
1316 /*
1317 * Cache line size is used to size and align various
1318 * structures used to communicate with the hardware.
1319 */
1320 bus_read_cachesize(sc, &csz);
1321 /* XXX assert csz is non-zero */
1322 sc->sc_cachelsz = csz << 2; /* convert to bytes */
1323
1324 ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
1325 if (ah == NULL) {
1326 DPRINTF(sc, ATH_DBG_FATAL,
1327 "%s: unable to attach hardware; HAL status %u\n",
1328 __func__, status);
1329 error = -ENXIO;
1330 goto bad;
1331 }
1332 sc->sc_ah = ah;
1333
1334 /* Get the hardware key cache size. */
1335 sc->sc_keymax = ah->ah_caps.keycache_size;
1336 if (sc->sc_keymax > ATH_KEYMAX) {
1337 DPRINTF(sc, ATH_DBG_KEYCACHE,
1338 "%s: Warning, using only %u entries in %u key cache\n",
1339 __func__, ATH_KEYMAX, sc->sc_keymax);
1340 sc->sc_keymax = ATH_KEYMAX;
1341 }
1342
1343 /*
1344 * Reset the key cache since some parts do not
1345 * reset the contents on initial power up.
1346 */
1347 for (i = 0; i < sc->sc_keymax; i++)
1348 ath9k_hw_keyreset(ah, (u16) i);
1349 /*
1350 * Mark key cache slots associated with global keys
1351 * as in use. If we knew TKIP was not to be used we
1352 * could leave the +32, +64, and +32+64 slots free.
1353 * XXX only for splitmic.
1354 */
1355 for (i = 0; i < IEEE80211_WEP_NKID; i++) {
1356 set_bit(i, sc->sc_keymap);
1357 set_bit(i + 32, sc->sc_keymap);
1358 set_bit(i + 64, sc->sc_keymap);
1359 set_bit(i + 32 + 64, sc->sc_keymap);
1360 }
1361
1362 /* Collect the channel list using the default country code */
1363
1364 error = ath_setup_channels(sc);
1365 if (error)
1366 goto bad;
1367
1368 /* default to MONITOR mode */
1369 sc->sc_ah->ah_opmode = ATH9K_M_MONITOR;
1370
1371 /* Setup rate tables */
1372
1373 ath_rate_attach(sc);
1374 ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
1375 ath_setup_rates(sc, IEEE80211_BAND_5GHZ);
1376
1377 /*
1378 * Allocate hardware transmit queues: one queue for
1379 * beacon frames and one data queue for each QoS
1380 * priority. Note that the hal handles reseting
1381 * these queues at the needed time.
1382 */
1383 sc->sc_bhalq = ath_beaconq_setup(ah);
1384 if (sc->sc_bhalq == -1) {
1385 DPRINTF(sc, ATH_DBG_FATAL,
1386 "%s: unable to setup a beacon xmit queue\n", __func__);
1387 error = -EIO;
1388 goto bad2;
1389 }
1390 sc->sc_cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
1391 if (sc->sc_cabq == NULL) {
1392 DPRINTF(sc, ATH_DBG_FATAL,
1393 "%s: unable to setup CAB xmit queue\n", __func__);
1394 error = -EIO;
1395 goto bad2;
1396 }
1397
1398 sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
1399 ath_cabq_update(sc);
1400
1401 for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
1402 sc->sc_haltype2q[i] = -1;
1403
1404 /* Setup data queues */
1405 /* NB: ensure BK queue is the lowest priority h/w queue */
1406 if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
1407 DPRINTF(sc, ATH_DBG_FATAL,
1408 "%s: unable to setup xmit queue for BK traffic\n",
1409 __func__);
1410 error = -EIO;
1411 goto bad2;
1412 }
1413
1414 if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
1415 DPRINTF(sc, ATH_DBG_FATAL,
1416 "%s: unable to setup xmit queue for BE traffic\n",
1417 __func__);
1418 error = -EIO;
1419 goto bad2;
1420 }
1421 if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
1422 DPRINTF(sc, ATH_DBG_FATAL,
1423 "%s: unable to setup xmit queue for VI traffic\n",
1424 __func__);
1425 error = -EIO;
1426 goto bad2;
1427 }
1428 if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
1429 DPRINTF(sc, ATH_DBG_FATAL,
1430 "%s: unable to setup xmit queue for VO traffic\n",
1431 __func__);
1432 error = -EIO;
1433 goto bad2;
1434 }
1435
1436 /* Initializes the noise floor to a reasonable default value.
1437 * Later on this will be updated during ANI processing. */
1438
1439 sc->sc_ani.sc_noise_floor = ATH_DEFAULT_NOISE_FLOOR;
1440 setup_timer(&sc->sc_ani.timer, ath_ani_calibrate, (unsigned long)sc);
1441
1442 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1443 ATH9K_CIPHER_TKIP, NULL)) {
1444 /*
1445 * Whether we should enable h/w TKIP MIC.
1446 * XXX: if we don't support WME TKIP MIC, then we wouldn't
1447 * report WMM capable, so it's always safe to turn on
1448 * TKIP MIC in this case.
1449 */
1450 ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
1451 0, 1, NULL);
1452 }
1453
1454 /*
1455 * Check whether the separate key cache entries
1456 * are required to handle both tx+rx MIC keys.
1457 * With split mic keys the number of stations is limited
1458 * to 27 otherwise 59.
1459 */
1460 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1461 ATH9K_CIPHER_TKIP, NULL)
1462 && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1463 ATH9K_CIPHER_MIC, NULL)
1464 && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
1465 0, NULL))
1466 sc->sc_splitmic = 1;
1467
1468 /* turn on mcast key search if possible */
1469 if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
1470 (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
1471 1, NULL);
1472
1473 sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
1474 sc->sc_config.txpowlimit_override = 0;
1475
1476 /* 11n Capabilities */
1477 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
1478 sc->sc_flags |= SC_OP_TXAGGR;
1479 sc->sc_flags |= SC_OP_RXAGGR;
1480 }
1481
1482 sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
1483 sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;
1484
1485 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
1486 sc->sc_defant = ath9k_hw_getdefantenna(ah);
1487
1488 ath9k_hw_getmac(ah, sc->sc_myaddr);
1489 if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
1490 ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
1491 ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
1492 ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
1493 }
1494
1495 sc->sc_slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
1496
1497 /* initialize beacon slots */
1498 for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
1499 sc->sc_bslot[i] = ATH_IF_ID_ANY;
1500
1501 /* save MISC configurations */
1502 sc->sc_config.swBeaconProcess = 1;
1503
1504#ifdef CONFIG_SLOW_ANT_DIV
1505 /* range is 40 - 255, we use something in the middle */
1506 ath_slow_ant_div_init(&sc->sc_antdiv, sc, 0x127);
1507#endif
1508
1509 /* setup channels and rates */
1510
1511 sc->sbands[IEEE80211_BAND_2GHZ].channels =
1512 sc->channels[IEEE80211_BAND_2GHZ];
1513 sc->sbands[IEEE80211_BAND_2GHZ].bitrates =
1514 sc->rates[IEEE80211_BAND_2GHZ];
1515 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
1516
1517 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->ah_caps.wireless_modes)) {
1518 sc->sbands[IEEE80211_BAND_5GHZ].channels =
1519 sc->channels[IEEE80211_BAND_5GHZ];
1520 sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
1521 sc->rates[IEEE80211_BAND_5GHZ];
1522 sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
1523 }
1524
1525 return 0;
1526bad2:
1527 /* cleanup tx queues */
1528 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1529 if (ATH_TXQ_SETUP(sc, i))
1530 ath_tx_cleanupq(sc, &sc->sc_txq[i]);
1531bad:
1532 if (ah)
1533 ath9k_hw_detach(ah);
1534
1535 return error;
1536}
1537
725static int ath_attach(u16 devid, struct ath_softc *sc) 1538static int ath_attach(u16 devid, struct ath_softc *sc)
726{ 1539{
727 struct ieee80211_hw *hw = sc->hw; 1540 struct ieee80211_hw *hw = sc->hw;
@@ -810,11 +1623,243 @@ bad:
810 return error; 1623 return error;
811} 1624}
812 1625
/*
 * Full chip reset: quiesce all tx/rx DMA, reset the hardware on the
 * current channel, then bring receive back up and restore driver state
 * that the reset may have clobbered (mode, tx power, beacons, imask).
 *
 * @retry_tx: when true, frames drained from the tx queues are requeued
 *            and the queues re-scheduled after the reset.
 *
 * Returns 0 on success, -EIO if the hal reset fails. Note the restart
 * sequence still runs after a failed reset so the chip is left in as
 * usable a state as possible.
 */
int ath_reset(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hal *ah = sc->sc_ah;
	int status;
	int error = 0;

	/* Silence the chip and drain all DMA before touching the hw */
	ath9k_hw_set_interrupts(ah, 0);
	ath_draintxq(sc, retry_tx);
	ath_stoprecv(sc);
	ath_flushrecv(sc);

	/* sc_resetlock serializes hal resets against the ISR/tasklets */
	spin_lock_bh(&sc->sc_resetlock);
	if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan,
			    sc->sc_ht_info.tx_chan_width,
			    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
			    sc->sc_ht_extprotspacing, false, &status)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to reset hardware; hal status %u\n",
			__func__, status);
		error = -EIO;
	}
	spin_unlock_bh(&sc->sc_resetlock);

	if (ath_startrecv(sc) != 0)
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to start recv logic\n", __func__);

	/*
	 * We may be doing a reset in response to a request
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_setcurmode(sc, ath_chan2mode(sc->sc_ah->ah_curchan));

	ath_update_txpow(sc);

	if (sc->sc_flags & SC_OP_BEACONS)
		ath_beacon_config(sc, ATH_IF_ID_ANY);	/* restart beacons */

	ath9k_hw_set_interrupts(ah, sc->sc_imask);

	/* Requeue anything we drained above, one queue at a time */
	if (retry_tx) {
		int i;
		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				spin_lock_bh(&sc->sc_txq[i].axq_lock);
				ath_txq_schedule(sc, &sc->sc_txq[i]);
				spin_unlock_bh(&sc->sc_txq[i].axq_lock);
			}
		}
	}

	return error;
}
1680
1681/*
1682 * This function will allocate both the DMA descriptor structure, and the
1683 * buffers it contains. These are used to contain the descriptors used
1684 * by the system.
1685*/
1686int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
1687 struct list_head *head, const char *name,
1688 int nbuf, int ndesc)
1689{
1690#define DS2PHYS(_dd, _ds) \
1691 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
1692#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
1693#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
1694
1695 struct ath_desc *ds;
1696 struct ath_buf *bf;
1697 int i, bsize, error;
1698
1699 DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA: %u buffers %u desc/buf\n",
1700 __func__, name, nbuf, ndesc);
1701
1702 /* ath_desc must be a multiple of DWORDs */
1703 if ((sizeof(struct ath_desc) % 4) != 0) {
1704 DPRINTF(sc, ATH_DBG_FATAL, "%s: ath_desc not DWORD aligned\n",
1705 __func__);
1706 ASSERT((sizeof(struct ath_desc) % 4) == 0);
1707 error = -ENOMEM;
1708 goto fail;
1709 }
1710
1711 dd->dd_name = name;
1712 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
1713
1714 /*
1715 * Need additional DMA memory because we can't use
1716 * descriptors that cross the 4K page boundary. Assume
1717 * one skipped descriptor per 4K page.
1718 */
1719 if (!(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1720 u32 ndesc_skipped =
1721 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
1722 u32 dma_len;
1723
1724 while (ndesc_skipped) {
1725 dma_len = ndesc_skipped * sizeof(struct ath_desc);
1726 dd->dd_desc_len += dma_len;
1727
1728 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
1729 };
1730 }
1731
1732 /* allocate descriptors */
1733 dd->dd_desc = pci_alloc_consistent(sc->pdev,
1734 dd->dd_desc_len,
1735 &dd->dd_desc_paddr);
1736 if (dd->dd_desc == NULL) {
1737 error = -ENOMEM;
1738 goto fail;
1739 }
1740 ds = dd->dd_desc;
1741 DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
1742 __func__, dd->dd_name, ds, (u32) dd->dd_desc_len,
1743 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
1744
1745 /* allocate buffers */
1746 bsize = sizeof(struct ath_buf) * nbuf;
1747 bf = kmalloc(bsize, GFP_KERNEL);
1748 if (bf == NULL) {
1749 error = -ENOMEM;
1750 goto fail2;
1751 }
1752 memset(bf, 0, bsize);
1753 dd->dd_bufptr = bf;
1754
1755 INIT_LIST_HEAD(head);
1756 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
1757 bf->bf_desc = ds;
1758 bf->bf_daddr = DS2PHYS(dd, ds);
1759
1760 if (!(sc->sc_ah->ah_caps.hw_caps &
1761 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1762 /*
1763 * Skip descriptor addresses which can cause 4KB
1764 * boundary crossing (addr + length) with a 32 dword
1765 * descriptor fetch.
1766 */
1767 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
1768 ASSERT((caddr_t) bf->bf_desc <
1769 ((caddr_t) dd->dd_desc +
1770 dd->dd_desc_len));
1771
1772 ds += ndesc;
1773 bf->bf_desc = ds;
1774 bf->bf_daddr = DS2PHYS(dd, ds);
1775 }
1776 }
1777 list_add_tail(&bf->list, head);
1778 }
1779 return 0;
1780fail2:
1781 pci_free_consistent(sc->pdev,
1782 dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
1783fail:
1784 memset(dd, 0, sizeof(*dd));
1785 return error;
1786#undef ATH_DESC_4KB_BOUND_CHECK
1787#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
1788#undef DS2PHYS
1789}
1790
1791void ath_descdma_cleanup(struct ath_softc *sc,
1792 struct ath_descdma *dd,
1793 struct list_head *head)
1794{
1795 pci_free_consistent(sc->pdev,
1796 dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
1797
1798 INIT_LIST_HEAD(head);
1799 kfree(dd->dd_bufptr);
1800 memset(dd, 0, sizeof(*dd));
1801}
1802
1803int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
1804{
1805 int qnum;
1806
1807 switch (queue) {
1808 case 0:
1809 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
1810 break;
1811 case 1:
1812 qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
1813 break;
1814 case 2:
1815 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1816 break;
1817 case 3:
1818 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
1819 break;
1820 default:
1821 qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
1822 break;
1823 }
1824
1825 return qnum;
1826}
1827
1828int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
1829{
1830 int qnum;
1831
1832 switch (queue) {
1833 case ATH9K_WME_AC_VO:
1834 qnum = 0;
1835 break;
1836 case ATH9K_WME_AC_VI:
1837 qnum = 1;
1838 break;
1839 case ATH9K_WME_AC_BE:
1840 qnum = 2;
1841 break;
1842 case ATH9K_WME_AC_BK:
1843 qnum = 3;
1844 break;
1845 default:
1846 qnum = -1;
1847 break;
1848 }
1849
1850 return qnum;
1851}
1852
1853/**********************/
1854/* mac80211 callbacks */
1855/**********************/
1856
813static int ath9k_start(struct ieee80211_hw *hw) 1857static int ath9k_start(struct ieee80211_hw *hw)
814{ 1858{
815 struct ath_softc *sc = hw->priv; 1859 struct ath_softc *sc = hw->priv;
816 struct ieee80211_channel *curchan = hw->conf.channel; 1860 struct ieee80211_channel *curchan = hw->conf.channel;
817 int error = 0, pos; 1861 struct ath9k_channel *init_channel;
1862 int error = 0, pos, status;
818 1863
819 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Starting driver with " 1864 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Starting driver with "
820 "initial channel: %d MHz\n", __func__, curchan->center_freq); 1865 "initial channel: %d MHz\n", __func__, curchan->center_freq);
@@ -827,24 +1872,103 @@ static int ath9k_start(struct ieee80211_hw *hw)
827 if (pos == -1) { 1872 if (pos == -1) {
828 DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid channel\n", __func__); 1873 DPRINTF(sc, ATH_DBG_FATAL, "%s: Invalid channel\n", __func__);
829 error = -EINVAL; 1874 error = -EINVAL;
830 goto exit; 1875 goto error;
831 } 1876 }
832 1877
833 sc->sc_ah->ah_channels[pos].chanmode = 1878 sc->sc_ah->ah_channels[pos].chanmode =
834 (curchan->band == IEEE80211_BAND_2GHZ) ? CHANNEL_G : CHANNEL_A; 1879 (curchan->band == IEEE80211_BAND_2GHZ) ? CHANNEL_G : CHANNEL_A;
1880 init_channel = &sc->sc_ah->ah_channels[pos];
835 1881
836 error = ath_open(sc, &sc->sc_ah->ah_channels[pos]); 1882 /* Reset SERDES registers */
837 if (error) { 1883 ath9k_hw_configpcipowersave(sc->sc_ah, 0);
1884
1885 /*
1886 * The basic interface to setting the hardware in a good
1887 * state is ``reset''. On return the hardware is known to
1888 * be powered up and with interrupts disabled. This must
1889 * be followed by initialization of the appropriate bits
1890 * and then setup of the interrupt mask.
1891 */
1892 spin_lock_bh(&sc->sc_resetlock);
1893 if (!ath9k_hw_reset(sc->sc_ah, init_channel,
1894 sc->sc_ht_info.tx_chan_width,
1895 sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1896 sc->sc_ht_extprotspacing, false, &status)) {
838 DPRINTF(sc, ATH_DBG_FATAL, 1897 DPRINTF(sc, ATH_DBG_FATAL,
839 "%s: Unable to complete ath_open\n", __func__); 1898 "%s: unable to reset hardware; hal status %u "
840 goto exit; 1899 "(freq %u flags 0x%x)\n", __func__, status,
1900 init_channel->channel, init_channel->channelFlags);
1901 error = -EIO;
1902 spin_unlock_bh(&sc->sc_resetlock);
1903 goto error;
841 } 1904 }
1905 spin_unlock_bh(&sc->sc_resetlock);
1906
1907 /*
1908 * This is needed only to setup initial state
1909 * but it's best done after a reset.
1910 */
1911 ath_update_txpow(sc);
1912
1913 /*
1914 * Setup the hardware after reset:
1915 * The receive engine is set going.
1916 * Frame transmit is handled entirely
1917 * in the frame output path; there's nothing to do
1918 * here except setup the interrupt mask.
1919 */
1920 if (ath_startrecv(sc) != 0) {
1921 DPRINTF(sc, ATH_DBG_FATAL,
1922 "%s: unable to start recv logic\n", __func__);
1923 error = -EIO;
1924 goto error;
1925 }
1926
1927 /* Setup our intr mask. */
1928 sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX
1929 | ATH9K_INT_RXEOL | ATH9K_INT_RXORN
1930 | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
1931
1932 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
1933 sc->sc_imask |= ATH9K_INT_GTT;
1934
1935 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
1936 sc->sc_imask |= ATH9K_INT_CST;
1937
1938 /*
1939 * Enable MIB interrupts when there are hardware phy counters.
1940 * Note we only do this (at the moment) for station mode.
1941 */
1942 if (ath9k_hw_phycounters(sc->sc_ah) &&
1943 ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
1944 (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
1945 sc->sc_imask |= ATH9K_INT_MIB;
1946 /*
1947 * Some hardware processes the TIM IE and fires an
1948 * interrupt when the TIM bit is set. For hardware
1949 * that does, if not overridden by configuration,
1950 * enable the TIM interrupt when operating as station.
1951 */
1952 if ((sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
1953 (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
1954 !sc->sc_config.swBeaconProcess)
1955 sc->sc_imask |= ATH9K_INT_TIM;
1956
1957 ath_setcurmode(sc, ath_chan2mode(init_channel));
1958
1959 sc->sc_flags &= ~SC_OP_INVALID;
1960
1961 /* Disable BMISS interrupt when we're not associated */
1962 sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
1963 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
1964
1965 ieee80211_wake_queues(sc->hw);
842 1966
843#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) 1967#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
844 error = ath_start_rfkill_poll(sc); 1968 error = ath_start_rfkill_poll(sc);
845#endif 1969#endif
846 1970
847exit: 1971error:
848 return error; 1972 return error;
849} 1973}
850 1974
@@ -911,7 +2035,30 @@ static void ath9k_stop(struct ieee80211_hw *hw)
911 return; 2035 return;
912 } 2036 }
913 2037
914 ath_stop(sc); 2038 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Cleaning up\n", __func__);
2039
2040 ieee80211_stop_queues(sc->hw);
2041
2042 /* make sure h/w will not generate any interrupt
2043 * before setting the invalid flag. */
2044 ath9k_hw_set_interrupts(sc->sc_ah, 0);
2045
2046 if (!(sc->sc_flags & SC_OP_INVALID)) {
2047 ath_draintxq(sc, false);
2048 ath_stoprecv(sc);
2049 ath9k_hw_phy_disable(sc->sc_ah);
2050 } else
2051 sc->sc_rxlink = NULL;
2052
2053#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
2054 if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
2055 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
2056#endif
2057 /* disable HAL and put h/w to sleep */
2058 ath9k_hw_disable(sc->sc_ah);
2059 ath9k_hw_configpcipowersave(sc->sc_ah, 1);
2060
2061 sc->sc_flags |= SC_OP_INVALID;
915 2062
916 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Driver halt\n", __func__); 2063 DPRINTF(sc, ATH_DBG_CONFIG, "%s: Driver halt\n", __func__);
917} 2064}
diff --git a/drivers/net/wireless/ath9k/rc.c b/drivers/net/wireless/ath9k/rc.c
index 7d1913d48d31..93dfea897ff2 100644
--- a/drivers/net/wireless/ath9k/rc.c
+++ b/drivers/net/wireless/ath9k/rc.c
@@ -1401,7 +1401,6 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1401 struct ath_softc *sc = priv; 1401 struct ath_softc *sc = priv;
1402 struct ath_rate_priv *ath_rc_priv = priv_sta; 1402 struct ath_rate_priv *ath_rc_priv = priv_sta;
1403 struct ath_tx_info_priv *tx_info_priv = NULL; 1403 struct ath_tx_info_priv *tx_info_priv = NULL;
1404 struct ath_node *an;
1405 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1404 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1406 struct ieee80211_hdr *hdr; 1405 struct ieee80211_hdr *hdr;
1407 int final_ts_idx, tx_status = 0, is_underrun = 0; 1406 int final_ts_idx, tx_status = 0, is_underrun = 0;
@@ -1410,21 +1409,15 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1410 hdr = (struct ieee80211_hdr *)skb->data; 1409 hdr = (struct ieee80211_hdr *)skb->data;
1411 fc = hdr->frame_control; 1410 fc = hdr->frame_control;
1412 tx_info_priv = ATH_TX_INFO_PRIV(tx_info); 1411 tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
1413 an = (struct ath_node *)sta->drv_priv;
1414 final_ts_idx = tx_info_priv->tx.ts_rateindex; 1412 final_ts_idx = tx_info_priv->tx.ts_rateindex;
1415 1413
1416 if (!an || !priv_sta || !ieee80211_is_data(fc) || 1414 if (!priv_sta || !ieee80211_is_data(fc) ||
1417 !tx_info_priv->update_rc) 1415 !tx_info_priv->update_rc)
1418 goto exit; 1416 goto exit;
1419 1417
1420 if (tx_info_priv->tx.ts_status & ATH9K_TXERR_FILT) 1418 if (tx_info_priv->tx.ts_status & ATH9K_TXERR_FILT)
1421 goto exit; 1419 goto exit;
1422 1420
1423 if (tx_info_priv->tx.ts_rssi > 0) {
1424 ATH_RSSI_LPF(an->an_chainmask_sel.tx_avgrssi,
1425 tx_info_priv->tx.ts_rssi);
1426 }
1427
1428 /* 1421 /*
1429 * If underrun error is seen assume it as an excessive retry only 1422 * If underrun error is seen assume it as an excessive retry only
1430 * if prefetch trigger level have reached the max (0x3f for 5416) 1423 * if prefetch trigger level have reached the max (0x3f for 5416)
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
index 6eae2542392a..743ad228b833 100644
--- a/drivers/net/wireless/ath9k/recv.c
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -55,6 +55,28 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
55 ath9k_hw_rxena(ah); 55 ath9k_hw_rxena(ah);
56} 56}
57 57
58static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
59{
60 /* XXX block beacon interrupts */
61 ath9k_hw_setantenna(sc->sc_ah, antenna);
62 sc->sc_defant = antenna;
63 sc->sc_rxotherant = 0;
64}
65
66/*
67 * Extend 15-bit time stamp from rx descriptor to
68 * a full 64-bit TSF using the current h/w TSF.
69*/
70static u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
71{
72 u64 tsf;
73
74 tsf = ath9k_hw_gettsf64(sc->sc_ah);
75 if ((tsf & 0x7fff) < rstamp)
76 tsf -= 0x8000;
77 return (tsf & ~0x7fff) | rstamp;
78}
79
58static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, u32 len) 80static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc, u32 len)
59{ 81{
60 struct sk_buff *skb; 82 struct sk_buff *skb;
diff --git a/drivers/net/wireless/ath9k/xmit.c b/drivers/net/wireless/ath9k/xmit.c
index dad81a9df152..3de60c51e5f2 100644
--- a/drivers/net/wireless/ath9k/xmit.c
+++ b/drivers/net/wireless/ath9k/xmit.c
@@ -147,6 +147,19 @@ static int ath_aggr_query(struct ath_softc *sc, struct ath_node *an, u8 tidno)
147 return 0; 147 return 0;
148} 148}
149 149
150static void ath_get_beaconconfig(struct ath_softc *sc, int if_id,
151 struct ath_beacon_config *conf)
152{
153 struct ieee80211_hw *hw = sc->hw;
154
155 /* fill in beacon config data */
156
157 conf->beacon_interval = hw->conf.beacon_int;
158 conf->listen_interval = 100;
159 conf->dtim_count = 1;
160 conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
161}
162
150/* Calculate Atheros packet type from IEEE80211 packet header */ 163/* Calculate Atheros packet type from IEEE80211 packet header */
151 164
152static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb) 165static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
@@ -522,7 +535,6 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
522 struct ath_desc *ds = bf->bf_desc; 535 struct ath_desc *ds = bf->bf_desc;
523 struct ath_desc *lastds = bf->bf_lastbf->bf_desc; 536 struct ath_desc *lastds = bf->bf_lastbf->bf_desc;
524 struct ath9k_11n_rate_series series[4]; 537 struct ath9k_11n_rate_series series[4];
525 struct ath_node *an = NULL;
526 struct sk_buff *skb; 538 struct sk_buff *skb;
527 struct ieee80211_tx_info *tx_info; 539 struct ieee80211_tx_info *tx_info;
528 struct ieee80211_tx_rate *rates; 540 struct ieee80211_tx_rate *rates;
@@ -540,9 +552,6 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
540 tx_info = IEEE80211_SKB_CB(skb); 552 tx_info = IEEE80211_SKB_CB(skb);
541 rates = tx_info->control.rates; 553 rates = tx_info->control.rates;
542 554
543 if (tx_info->control.sta)
544 an = (struct ath_node *)tx_info->control.sta->drv_priv;
545
546 if (ieee80211_has_morefrags(fc) || 555 if (ieee80211_has_morefrags(fc) ||
547 (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) { 556 (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
548 rates[1].count = rates[2].count = rates[3].count = 0; 557 rates[1].count = rates[2].count = rates[3].count = 0;
@@ -632,10 +641,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
632 (rates[i].flags & IEEE80211_TX_RC_SHORT_GI), 641 (rates[i].flags & IEEE80211_TX_RC_SHORT_GI),
633 bf_isshpreamble(bf)); 642 bf_isshpreamble(bf));
634 643
635 if (bf_isht(bf) && an) 644 series[i].ChSel = sc->sc_tx_chainmask;
636 series[i].ChSel = ath_chainmask_sel_logic(sc, an);
637 else
638 series[i].ChSel = sc->sc_tx_chainmask;
639 645
640 if (rtsctsena) 646 if (rtsctsena)
641 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS; 647 series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;