author     Luis R. Rodriguez <lrodriguez@atheros.com>	2009-03-30 22:30:33 -0400
committer  John W. Linville <linville@tuxdriver.com>	2009-04-22 16:54:38 -0400
commit     203c4805e91786f9a010bc7945a0fde70c9da28e (patch)
tree       00415276b2fe65713f066ffe07b11ad2d8b6bea8 /drivers/net/wireless/ath9k/main.c
parent     1878f77e13b9d720b78c4f818b94bfd4a7f596e5 (diff)
atheros: put atheros wireless drivers into ath/
Signed-off-by: Luis R. Rodriguez <lrodriguez@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/ath9k/main.c')
-rw-r--r--	drivers/net/wireless/ath9k/main.c	2890
1 file changed, 0 insertions, 2890 deletions
diff --git a/drivers/net/wireless/ath9k/main.c b/drivers/net/wireless/ath9k/main.c
deleted file mode 100644
index 8b6a7ea4e59b..000000000000
--- a/drivers/net/wireless/ath9k/main.c
+++ /dev/null
@@ -1,2890 +0,0 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/nl80211.h>
18#include "ath9k.h"
19
20#define ATH_PCI_VERSION "0.1"
21
22static char *dev_info = "ath9k";
23
24MODULE_AUTHOR("Atheros Communications");
25MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
26MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
27MODULE_LICENSE("Dual BSD/GPL");
28
29static int modparam_nohwcrypt;
30module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
31MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
32
33/* We use the hw_value as an index into our private channel structure */
34
35#define CHAN2G(_freq, _idx) { \
36 .center_freq = (_freq), \
37 .hw_value = (_idx), \
38 .max_power = 30, \
39}
40
41#define CHAN5G(_freq, _idx) { \
42 .band = IEEE80211_BAND_5GHZ, \
43 .center_freq = (_freq), \
44 .hw_value = (_idx), \
45 .max_power = 30, \
46}
47
48/* Some 2 GHz radios are actually tunable from 2312-2732 MHz
49 * in 5 MHz steps; to keep this table static we only list
50 * the channels for which we know all cards have calibration
51 * data */
52static struct ieee80211_channel ath9k_2ghz_chantable[] = {
53 CHAN2G(2412, 0), /* Channel 1 */
54 CHAN2G(2417, 1), /* Channel 2 */
55 CHAN2G(2422, 2), /* Channel 3 */
56 CHAN2G(2427, 3), /* Channel 4 */
57 CHAN2G(2432, 4), /* Channel 5 */
58 CHAN2G(2437, 5), /* Channel 6 */
59 CHAN2G(2442, 6), /* Channel 7 */
60 CHAN2G(2447, 7), /* Channel 8 */
61 CHAN2G(2452, 8), /* Channel 9 */
62 CHAN2G(2457, 9), /* Channel 10 */
63 CHAN2G(2462, 10), /* Channel 11 */
64 CHAN2G(2467, 11), /* Channel 12 */
65 CHAN2G(2472, 12), /* Channel 13 */
66 CHAN2G(2484, 13), /* Channel 14 */
67};
68
69/* Some 5 GHz radios are actually tunable from XXXX-YYYY MHz
70 * in 5 MHz steps; to keep this table static we only list
71 * the channels for which we know all cards have calibration
72 * data */
73static struct ieee80211_channel ath9k_5ghz_chantable[] = {
74 /* _We_ call this UNII 1 */
75 CHAN5G(5180, 14), /* Channel 36 */
76 CHAN5G(5200, 15), /* Channel 40 */
77 CHAN5G(5220, 16), /* Channel 44 */
78 CHAN5G(5240, 17), /* Channel 48 */
79 /* _We_ call this UNII 2 */
80 CHAN5G(5260, 18), /* Channel 52 */
81 CHAN5G(5280, 19), /* Channel 56 */
82 CHAN5G(5300, 20), /* Channel 60 */
83 CHAN5G(5320, 21), /* Channel 64 */
84 /* _We_ call this "Middle band" */
85 CHAN5G(5500, 22), /* Channel 100 */
86 CHAN5G(5520, 23), /* Channel 104 */
87 CHAN5G(5540, 24), /* Channel 108 */
88 CHAN5G(5560, 25), /* Channel 112 */
89 CHAN5G(5580, 26), /* Channel 116 */
90 CHAN5G(5600, 27), /* Channel 120 */
91 CHAN5G(5620, 28), /* Channel 124 */
92 CHAN5G(5640, 29), /* Channel 128 */
93 CHAN5G(5660, 30), /* Channel 132 */
94 CHAN5G(5680, 31), /* Channel 136 */
95 CHAN5G(5700, 32), /* Channel 140 */
96 /* _We_ call this UNII 3 */
97 CHAN5G(5745, 33), /* Channel 149 */
98 CHAN5G(5765, 34), /* Channel 153 */
99 CHAN5G(5785, 35), /* Channel 157 */
100 CHAN5G(5805, 36), /* Channel 161 */
101 CHAN5G(5825, 37), /* Channel 165 */
102};
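
As the comment above notes, hw_value is used as an index into the driver's private channel structure. A minimal sketch of that lookup, mirroring what ath9k_start() and ath9k_update_ichannel() do further down in this same file:

	struct ieee80211_channel *curchan = hw->conf.channel;
	struct ath9k_channel *init_channel;
	int pos = curchan->hw_value;	/* assigned by CHAN2G()/CHAN5G() above */

	init_channel = &sc->sc_ah->channels[pos];
	ath9k_update_ichannel(sc, hw, init_channel);
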
103
104static void ath_cache_conf_rate(struct ath_softc *sc,
105 struct ieee80211_conf *conf)
106{
107 switch (conf->channel->band) {
108 case IEEE80211_BAND_2GHZ:
109 if (conf_is_ht20(conf))
110 sc->cur_rate_table =
111 sc->hw_rate_table[ATH9K_MODE_11NG_HT20];
112 else if (conf_is_ht40_minus(conf))
113 sc->cur_rate_table =
114 sc->hw_rate_table[ATH9K_MODE_11NG_HT40MINUS];
115 else if (conf_is_ht40_plus(conf))
116 sc->cur_rate_table =
117 sc->hw_rate_table[ATH9K_MODE_11NG_HT40PLUS];
118 else
119 sc->cur_rate_table =
120 sc->hw_rate_table[ATH9K_MODE_11G];
121 break;
122 case IEEE80211_BAND_5GHZ:
123 if (conf_is_ht20(conf))
124 sc->cur_rate_table =
125 sc->hw_rate_table[ATH9K_MODE_11NA_HT20];
126 else if (conf_is_ht40_minus(conf))
127 sc->cur_rate_table =
128 sc->hw_rate_table[ATH9K_MODE_11NA_HT40MINUS];
129 else if (conf_is_ht40_plus(conf))
130 sc->cur_rate_table =
131 sc->hw_rate_table[ATH9K_MODE_11NA_HT40PLUS];
132 else
133 sc->cur_rate_table =
134 sc->hw_rate_table[ATH9K_MODE_11A];
135 break;
136 default:
137 BUG_ON(1);
138 break;
139 }
140}
141
142static void ath_update_txpow(struct ath_softc *sc)
143{
144 struct ath_hw *ah = sc->sc_ah;
145 u32 txpow;
146
147 if (sc->curtxpow != sc->config.txpowlimit) {
148 ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit);
149 /* read back in case value is clamped */
150 ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
151 sc->curtxpow = txpow;
152 }
153}
154
155static u8 parse_mpdudensity(u8 mpdudensity)
156{
157 /*
158 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
159 * 0 for no restriction
160 * 1 for 1/4 us
161 * 2 for 1/2 us
162 * 3 for 1 us
163 * 4 for 2 us
164 * 5 for 4 us
165 * 6 for 8 us
166 * 7 for 16 us
167 */
168 switch (mpdudensity) {
169 case 0:
170 return 0;
171 case 1:
172 case 2:
173 case 3:
174 /* Our lower layer calculations limit our precision to
175 1 microsecond */
176 return 1;
177 case 4:
178 return 2;
179 case 5:
180 return 4;
181 case 6:
182 return 8;
183 case 7:
184 return 16;
185 default:
186 return 0;
187 }
188}
189
190static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
191{
192 struct ath_rate_table *rate_table = NULL;
193 struct ieee80211_supported_band *sband;
194 struct ieee80211_rate *rate;
195 int i, maxrates;
196
197 switch (band) {
198 case IEEE80211_BAND_2GHZ:
199 rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
200 break;
201 case IEEE80211_BAND_5GHZ:
202 rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
203 break;
204 default:
205 break;
206 }
207
208 if (rate_table == NULL)
209 return;
210
211 sband = &sc->sbands[band];
212 rate = sc->rates[band];
213
214 if (rate_table->rate_cnt > ATH_RATE_MAX)
215 maxrates = ATH_RATE_MAX;
216 else
217 maxrates = rate_table->rate_cnt;
218
219 for (i = 0; i < maxrates; i++) {
220 rate[i].bitrate = rate_table->info[i].ratekbps / 100;
221 rate[i].hw_value = rate_table->info[i].ratecode;
222 if (rate_table->info[i].short_preamble) {
223 rate[i].hw_value_short = rate_table->info[i].ratecode |
224 rate_table->info[i].short_preamble;
225 rate[i].flags = IEEE80211_RATE_SHORT_PREAMBLE;
226 }
227 sband->n_bitrates++;
228
229 DPRINTF(sc, ATH_DBG_CONFIG, "Rate: %2dMbps, ratecode: %2d\n",
230 rate[i].bitrate / 10, rate[i].hw_value);
231 }
232}
233
234/*
235 * Set/change channels. If the channel is really being changed, it's done
236 * by resetting the chip. To accomplish this we must first clean up any pending
237 * DMA, then restart stuff.
238*/
239int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
240 struct ath9k_channel *hchan)
241{
242 struct ath_hw *ah = sc->sc_ah;
243 bool fastcc = true, stopped;
244 struct ieee80211_channel *channel = hw->conf.channel;
245 int r;
246
247 if (sc->sc_flags & SC_OP_INVALID)
248 return -EIO;
249
250 ath9k_ps_wakeup(sc);
251
252 /*
253 * This is only performed if the channel settings have
254 * actually changed.
255 *
256 * To switch channels clear any pending DMA operations;
257 * wait long enough for the RX fifo to drain, reset the
258 * hardware at the new frequency, and then re-enable
259 * the relevant bits of the h/w.
260 */
261 ath9k_hw_set_interrupts(ah, 0);
262 ath_drain_all_txq(sc, false);
263 stopped = ath_stoprecv(sc);
264
265 /* XXX: do not flush receive queue here. We don't want
266 * to flush data frames already in queue because of
267 * changing channel. */
268
269 if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
270 fastcc = false;
271
272 DPRINTF(sc, ATH_DBG_CONFIG,
273 "(%u MHz) -> (%u MHz), chanwidth: %d\n",
274 sc->sc_ah->curchan->channel,
275 channel->center_freq, sc->tx_chan_width);
276
277 spin_lock_bh(&sc->sc_resetlock);
278
279 r = ath9k_hw_reset(ah, hchan, fastcc);
280 if (r) {
281 DPRINTF(sc, ATH_DBG_FATAL,
282 "Unable to reset channel (%u Mhz) "
283 "reset status %u\n",
284 channel->center_freq, r);
285 spin_unlock_bh(&sc->sc_resetlock);
286 return r;
287 }
288 spin_unlock_bh(&sc->sc_resetlock);
289
290 sc->sc_flags &= ~SC_OP_FULL_RESET;
291
292 if (ath_startrecv(sc) != 0) {
293 DPRINTF(sc, ATH_DBG_FATAL,
294 "Unable to restart recv logic\n");
295 return -EIO;
296 }
297
298 ath_cache_conf_rate(sc, &hw->conf);
299 ath_update_txpow(sc);
300 ath9k_hw_set_interrupts(ah, sc->imask);
301 ath9k_ps_restore(sc);
302 return 0;
303}
304
305/*
306 * This routine performs the periodic noise floor calibration function
307 * that is used to adjust and optimize the chip performance. This
308 * takes environmental changes (location, temperature) into account.
309 * When the task is complete, it reschedules itself depending on the
310 * appropriate interval that was calculated.
311 */
312static void ath_ani_calibrate(unsigned long data)
313{
314 struct ath_softc *sc = (struct ath_softc *)data;
315 struct ath_hw *ah = sc->sc_ah;
316 bool longcal = false;
317 bool shortcal = false;
318 bool aniflag = false;
319 unsigned int timestamp = jiffies_to_msecs(jiffies);
320 u32 cal_interval, short_cal_interval;
321
322 short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
323 ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
324
325 /*
326 * don't calibrate when we're scanning.
327 * we are most likely not on our home channel.
328 */
329 if (sc->sc_flags & SC_OP_SCANNING)
330 goto set_timer;
331
332 /* Long calibration runs independently of short calibration. */
333 if ((timestamp - sc->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
334 longcal = true;
335 DPRINTF(sc, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
336 sc->ani.longcal_timer = timestamp;
337 }
338
339 /* Short calibration applies only while caldone is false */
340 if (!sc->ani.caldone) {
341 if ((timestamp - sc->ani.shortcal_timer) >= short_cal_interval) {
342 shortcal = true;
343 DPRINTF(sc, ATH_DBG_ANI, "shortcal @%lu\n", jiffies);
344 sc->ani.shortcal_timer = timestamp;
345 sc->ani.resetcal_timer = timestamp;
346 }
347 } else {
348 if ((timestamp - sc->ani.resetcal_timer) >=
349 ATH_RESTART_CALINTERVAL) {
350 sc->ani.caldone = ath9k_hw_reset_calvalid(ah);
351 if (sc->ani.caldone)
352 sc->ani.resetcal_timer = timestamp;
353 }
354 }
355
356 /* Verify whether we must check ANI */
357 if ((timestamp - sc->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
358 aniflag = true;
359 sc->ani.checkani_timer = timestamp;
360 }
361
362 /* Skip all processing if there's nothing to do. */
363 if (longcal || shortcal || aniflag) {
364 /* Call ANI routine if necessary */
365 if (aniflag)
366 ath9k_hw_ani_monitor(ah, &sc->nodestats, ah->curchan);
367
368 /* Perform calibration if necessary */
369 if (longcal || shortcal) {
370 bool iscaldone = false;
371
372 if (ath9k_hw_calibrate(ah, ah->curchan,
373 sc->rx_chainmask, longcal,
374 &iscaldone)) {
375 if (longcal)
376 sc->ani.noise_floor =
377 ath9k_hw_getchan_noise(ah,
378 ah->curchan);
379
380 DPRINTF(sc, ATH_DBG_ANI,
381 "calibrate chan %u/%x nf: %d\n",
382 ah->curchan->channel,
383 ah->curchan->channelFlags,
384 sc->ani.noise_floor);
385 } else {
386 DPRINTF(sc, ATH_DBG_ANY,
387 "calibrate chan %u/%x failed\n",
388 ah->curchan->channel,
389 ah->curchan->channelFlags);
390 }
391 sc->ani.caldone = iscaldone;
392 }
393 }
394
395set_timer:
396 /*
397 * Set timer interval based on previous results.
398 * The interval must be the shortest necessary to satisfy ANI,
399 * short calibration and long calibration.
400 */
401 cal_interval = ATH_LONG_CALINTERVAL;
402 if (sc->sc_ah->config.enable_ani)
403 cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
404 if (!sc->ani.caldone)
405 cal_interval = min(cal_interval, (u32)short_cal_interval);
406
407 mod_timer(&sc->ani.timer, jiffies + msecs_to_jiffies(cal_interval));
408}
409
410/*
411 * Update tx/rx chainmask. For legacy association,
412 * hard code the chainmask to 1x1; for 11n association, use
413 * the chainmask configuration; for bt coexistence, use
414 * the chainmask configuration even in legacy mode.
415 */
416void ath_update_chainmask(struct ath_softc *sc, int is_ht)
417{
418 if (is_ht ||
419 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BT_COEX)) {
420 sc->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
421 sc->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
422 } else {
423 sc->tx_chainmask = 1;
424 sc->rx_chainmask = 1;
425 }
426
427 DPRINTF(sc, ATH_DBG_CONFIG, "tx chmask: %d, rx chmask: %d\n",
428 sc->tx_chainmask, sc->rx_chainmask);
429}
430
431static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
432{
433 struct ath_node *an;
434
435 an = (struct ath_node *)sta->drv_priv;
436
437 if (sc->sc_flags & SC_OP_TXAGGR) {
438 ath_tx_node_init(sc, an);
439 an->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
440 sta->ht_cap.ampdu_factor);
441 an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);
442 }
443}
444
445static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
446{
447 struct ath_node *an = (struct ath_node *)sta->drv_priv;
448
449 if (sc->sc_flags & SC_OP_TXAGGR)
450 ath_tx_node_cleanup(sc, an);
451}
452
453static void ath9k_tasklet(unsigned long data)
454{
455 struct ath_softc *sc = (struct ath_softc *)data;
456 u32 status = sc->intrstatus;
457
458 if (status & ATH9K_INT_FATAL) {
459 ath_reset(sc, false);
460 return;
461 }
462
463 if (status & (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
464 spin_lock_bh(&sc->rx.rxflushlock);
465 ath_rx_tasklet(sc, 0);
466 spin_unlock_bh(&sc->rx.rxflushlock);
467 }
468
469 if (status & ATH9K_INT_TX)
470 ath_tx_tasklet(sc);
471
472 /* re-enable hardware interrupt */
473 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
474}
475
476irqreturn_t ath_isr(int irq, void *dev)
477{
478#define SCHED_INTR ( \
479 ATH9K_INT_FATAL | \
480 ATH9K_INT_RXORN | \
481 ATH9K_INT_RXEOL | \
482 ATH9K_INT_RX | \
483 ATH9K_INT_TX | \
484 ATH9K_INT_BMISS | \
485 ATH9K_INT_CST | \
486 ATH9K_INT_TSFOOR)
487
488 struct ath_softc *sc = dev;
489 struct ath_hw *ah = sc->sc_ah;
490 enum ath9k_int status;
491 bool sched = false;
492
493 /*
494 * The hardware is not ready/present, don't
495 * touch anything. Note this can happen early
496 * on if the IRQ is shared.
497 */
498 if (sc->sc_flags & SC_OP_INVALID)
499 return IRQ_NONE;
500
501 ath9k_ps_wakeup(sc);
502
503 /* shared irq, not for us */
504
505 if (!ath9k_hw_intrpend(ah)) {
506 ath9k_ps_restore(sc);
507 return IRQ_NONE;
508 }
509
510 /*
511 * Figure out the reason(s) for the interrupt. Note
512 * that the hal returns a pseudo-ISR that may include
513 * bits we haven't explicitly enabled so we mask the
514 * value to ensure we only process bits we requested.
515 */
516 ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
517 status &= sc->imask; /* discard unasked-for bits */
518
519 /*
520 * If there are no status bits set, then this interrupt was not
521 * for me (should have been caught above).
522 */
523 if (!status) {
524 ath9k_ps_restore(sc);
525 return IRQ_NONE;
526 }
527
528 /* Cache the status */
529 sc->intrstatus = status;
530
531 if (status & SCHED_INTR)
532 sched = true;
533
534 /*
535 * If a FATAL or RXORN interrupt is received, we have to reset the
536 * chip immediately.
537 */
538 if (status & (ATH9K_INT_FATAL | ATH9K_INT_RXORN))
539 goto chip_reset;
540
541 if (status & ATH9K_INT_SWBA)
542 tasklet_schedule(&sc->bcon_tasklet);
543
544 if (status & ATH9K_INT_TXURN)
545 ath9k_hw_updatetxtriglevel(ah, true);
546
547 if (status & ATH9K_INT_MIB) {
548 /*
549 * Disable interrupts until we service the MIB
550 * interrupt; otherwise it will continue to
551 * fire.
552 */
553 ath9k_hw_set_interrupts(ah, 0);
554 /*
555 * Let the hal handle the event. We assume
556 * it will clear whatever condition caused
557 * the interrupt.
558 */
559 ath9k_hw_procmibevent(ah, &sc->nodestats);
560 ath9k_hw_set_interrupts(ah, sc->imask);
561 }
562
563 if (status & ATH9K_INT_TIM_TIMER) {
564 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
565 /* Clear RxAbort bit so that we can
566 * receive frames */
567 ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
568 ath9k_hw_setrxabort(ah, 0);
569 sched = true;
570 sc->sc_flags |= SC_OP_WAIT_FOR_BEACON;
571 }
572 }
573
574chip_reset:
575
576 ath9k_ps_restore(sc);
577 ath_debug_stat_interrupt(sc, status);
578
579 if (sched) {
580 /* turn off every interrupt except SWBA */
581 ath9k_hw_set_interrupts(ah, (sc->imask & ATH9K_INT_SWBA));
582 tasklet_schedule(&sc->intr_tq);
583 }
584
585 return IRQ_HANDLED;
586
587#undef SCHED_INTR
588}
589
590static u32 ath_get_extchanmode(struct ath_softc *sc,
591 struct ieee80211_channel *chan,
592 enum nl80211_channel_type channel_type)
593{
594 u32 chanmode = 0;
595
596 switch (chan->band) {
597 case IEEE80211_BAND_2GHZ:
598 switch(channel_type) {
599 case NL80211_CHAN_NO_HT:
600 case NL80211_CHAN_HT20:
601 chanmode = CHANNEL_G_HT20;
602 break;
603 case NL80211_CHAN_HT40PLUS:
604 chanmode = CHANNEL_G_HT40PLUS;
605 break;
606 case NL80211_CHAN_HT40MINUS:
607 chanmode = CHANNEL_G_HT40MINUS;
608 break;
609 }
610 break;
611 case IEEE80211_BAND_5GHZ:
612 switch(channel_type) {
613 case NL80211_CHAN_NO_HT:
614 case NL80211_CHAN_HT20:
615 chanmode = CHANNEL_A_HT20;
616 break;
617 case NL80211_CHAN_HT40PLUS:
618 chanmode = CHANNEL_A_HT40PLUS;
619 break;
620 case NL80211_CHAN_HT40MINUS:
621 chanmode = CHANNEL_A_HT40MINUS;
622 break;
623 }
624 break;
625 default:
626 break;
627 }
628
629 return chanmode;
630}
631
632static int ath_setkey_tkip(struct ath_softc *sc, u16 keyix, const u8 *key,
633 struct ath9k_keyval *hk, const u8 *addr,
634 bool authenticator)
635{
636 const u8 *key_rxmic;
637 const u8 *key_txmic;
638
639 key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
640 key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
641
642 if (addr == NULL) {
643 /*
644 * Group key installation - only two key cache entries are used
645 * regardless of splitmic capability since group key is only
646 * used either for TX or RX.
647 */
648 if (authenticator) {
649 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
650 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic));
651 } else {
652 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
653 memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic));
654 }
655 return ath9k_hw_set_keycache_entry(sc->sc_ah, keyix, hk, addr);
656 }
657 if (!sc->splitmic) {
658 /* TX and RX keys share the same key cache entry. */
659 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
660 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
661 return ath9k_hw_set_keycache_entry(sc->sc_ah, keyix, hk, addr);
662 }
663
664 /* Separate key cache entries for TX and RX */
665
666 /* TX key goes at first index, RX key at +32. */
667 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
668 if (!ath9k_hw_set_keycache_entry(sc->sc_ah, keyix, hk, NULL)) {
669 /* TX MIC entry failed. No need to proceed further */
670 DPRINTF(sc, ATH_DBG_FATAL,
671 "Setting TX MIC Key Failed\n");
672 return 0;
673 }
674
675 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
676 /* XXX delete tx key on failure? */
677 return ath9k_hw_set_keycache_entry(sc->sc_ah, keyix + 32, hk, addr);
678}
679
680static int ath_reserve_key_cache_slot_tkip(struct ath_softc *sc)
681{
682 int i;
683
684 for (i = IEEE80211_WEP_NKID; i < sc->keymax / 2; i++) {
685 if (test_bit(i, sc->keymap) ||
686 test_bit(i + 64, sc->keymap))
687 continue; /* At least one part of TKIP key allocated */
688 if (sc->splitmic &&
689 (test_bit(i + 32, sc->keymap) ||
690 test_bit(i + 64 + 32, sc->keymap)))
691 continue; /* At least one part of TKIP key allocated */
692
693 /* Found a free slot for a TKIP key */
694 return i;
695 }
696 return -1;
697}
698
699static int ath_reserve_key_cache_slot(struct ath_softc *sc)
700{
701 int i;
702
703 /* First, try to find slots that would not be available for TKIP. */
704 if (sc->splitmic) {
705 for (i = IEEE80211_WEP_NKID; i < sc->keymax / 4; i++) {
706 if (!test_bit(i, sc->keymap) &&
707 (test_bit(i + 32, sc->keymap) ||
708 test_bit(i + 64, sc->keymap) ||
709 test_bit(i + 64 + 32, sc->keymap)))
710 return i;
711 if (!test_bit(i + 32, sc->keymap) &&
712 (test_bit(i, sc->keymap) ||
713 test_bit(i + 64, sc->keymap) ||
714 test_bit(i + 64 + 32, sc->keymap)))
715 return i + 32;
716 if (!test_bit(i + 64, sc->keymap) &&
717 (test_bit(i, sc->keymap) ||
718 test_bit(i + 32, sc->keymap) ||
719 test_bit(i + 64 + 32, sc->keymap)))
720 return i + 64;
721 if (!test_bit(i + 64 + 32, sc->keymap) &&
722 (test_bit(i, sc->keymap) ||
723 test_bit(i + 32, sc->keymap) ||
724 test_bit(i + 64, sc->keymap)))
725 return i + 64 + 32;
726 }
727 } else {
728 for (i = IEEE80211_WEP_NKID; i < sc->keymax / 2; i++) {
729 if (!test_bit(i, sc->keymap) &&
730 test_bit(i + 64, sc->keymap))
731 return i;
732 if (test_bit(i, sc->keymap) &&
733 !test_bit(i + 64, sc->keymap))
734 return i + 64;
735 }
736 }
737
738 /* No partially used TKIP slots, pick any available slot */
739 for (i = IEEE80211_WEP_NKID; i < sc->keymax; i++) {
740 /* Do not allow slots that could be needed for TKIP group keys
741 * to be used. This limitation could be removed if we know that
742 * TKIP will not be used. */
743 if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
744 continue;
745 if (sc->splitmic) {
746 if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
747 continue;
748 if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
749 continue;
750 }
751
752 if (!test_bit(i, sc->keymap))
753 return i; /* Found a free slot for a key */
754 }
755
756 /* No free slot found */
757 return -1;
758}
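
For reference, a minimal sketch of the keymap bits that ath_key_config() below marks once a slot idx has been reserved; these are exactly the bits the two reserve helpers above test for:

	set_bit(idx, sc->keymap);
	if (key->alg == ALG_TKIP) {
		set_bit(idx + 64, sc->keymap);		/* paired slot used by TKIP */
		if (sc->splitmic) {
			/* extra slots needed when TX/RX MIC keys are split */
			set_bit(idx + 32, sc->keymap);
			set_bit(idx + 64 + 32, sc->keymap);
		}
	}
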
759
760static int ath_key_config(struct ath_softc *sc,
761 struct ieee80211_vif *vif,
762 struct ieee80211_sta *sta,
763 struct ieee80211_key_conf *key)
764{
765 struct ath9k_keyval hk;
766 const u8 *mac = NULL;
767 int ret = 0;
768 int idx;
769
770 memset(&hk, 0, sizeof(hk));
771
772 switch (key->alg) {
773 case ALG_WEP:
774 hk.kv_type = ATH9K_CIPHER_WEP;
775 break;
776 case ALG_TKIP:
777 hk.kv_type = ATH9K_CIPHER_TKIP;
778 break;
779 case ALG_CCMP:
780 hk.kv_type = ATH9K_CIPHER_AES_CCM;
781 break;
782 default:
783 return -EOPNOTSUPP;
784 }
785
786 hk.kv_len = key->keylen;
787 memcpy(hk.kv_val, key->key, key->keylen);
788
789 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
790 /* For now, use the default keys for broadcast keys. This may
791 * need to change with virtual interfaces. */
792 idx = key->keyidx;
793 } else if (key->keyidx) {
794 if (WARN_ON(!sta))
795 return -EOPNOTSUPP;
796 mac = sta->addr;
797
798 if (vif->type != NL80211_IFTYPE_AP) {
799 /* Only keyidx 0 should be used with unicast key, but
800 * allow this for client mode for now. */
801 idx = key->keyidx;
802 } else
803 return -EIO;
804 } else {
805 if (WARN_ON(!sta))
806 return -EOPNOTSUPP;
807 mac = sta->addr;
808
809 if (key->alg == ALG_TKIP)
810 idx = ath_reserve_key_cache_slot_tkip(sc);
811 else
812 idx = ath_reserve_key_cache_slot(sc);
813 if (idx < 0)
814 return -ENOSPC; /* no free key cache entries */
815 }
816
817 if (key->alg == ALG_TKIP)
818 ret = ath_setkey_tkip(sc, idx, key->key, &hk, mac,
819 vif->type == NL80211_IFTYPE_AP);
820 else
821 ret = ath9k_hw_set_keycache_entry(sc->sc_ah, idx, &hk, mac);
822
823 if (!ret)
824 return -EIO;
825
826 set_bit(idx, sc->keymap);
827 if (key->alg == ALG_TKIP) {
828 set_bit(idx + 64, sc->keymap);
829 if (sc->splitmic) {
830 set_bit(idx + 32, sc->keymap);
831 set_bit(idx + 64 + 32, sc->keymap);
832 }
833 }
834
835 return idx;
836}
837
838static void ath_key_delete(struct ath_softc *sc, struct ieee80211_key_conf *key)
839{
840 ath9k_hw_keyreset(sc->sc_ah, key->hw_key_idx);
841 if (key->hw_key_idx < IEEE80211_WEP_NKID)
842 return;
843
844 clear_bit(key->hw_key_idx, sc->keymap);
845 if (key->alg != ALG_TKIP)
846 return;
847
848 clear_bit(key->hw_key_idx + 64, sc->keymap);
849 if (sc->splitmic) {
850 clear_bit(key->hw_key_idx + 32, sc->keymap);
851 clear_bit(key->hw_key_idx + 64 + 32, sc->keymap);
852 }
853}
854
855static void setup_ht_cap(struct ath_softc *sc,
856 struct ieee80211_sta_ht_cap *ht_info)
857{
858#define ATH9K_HT_CAP_MAXRXAMPDU_65536 0x3 /* 2 ^ 16 */
859#define ATH9K_HT_CAP_MPDUDENSITY_8 0x6 /* 8 usec */
860
861 ht_info->ht_supported = true;
862 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
863 IEEE80211_HT_CAP_SM_PS |
864 IEEE80211_HT_CAP_SGI_40 |
865 IEEE80211_HT_CAP_DSSSCCK40;
866
867 ht_info->ampdu_factor = ATH9K_HT_CAP_MAXRXAMPDU_65536;
868 ht_info->ampdu_density = ATH9K_HT_CAP_MPDUDENSITY_8;
869
870 /* set up supported mcs set */
871 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
872
873 switch(sc->rx_chainmask) {
874 case 1:
875 ht_info->mcs.rx_mask[0] = 0xff;
876 break;
877 case 3:
878 case 5:
879 case 7:
880 default:
881 ht_info->mcs.rx_mask[0] = 0xff;
882 ht_info->mcs.rx_mask[1] = 0xff;
883 break;
884 }
885
886 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
887}
888
889static void ath9k_bss_assoc_info(struct ath_softc *sc,
890 struct ieee80211_vif *vif,
891 struct ieee80211_bss_conf *bss_conf)
892{
893 struct ath_vif *avp = (void *)vif->drv_priv;
894
895 if (bss_conf->assoc) {
896 DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info ASSOC %d, bssid: %pM\n",
897 bss_conf->aid, sc->curbssid);
898
899 /* New association, store aid */
900 if (avp->av_opmode == NL80211_IFTYPE_STATION) {
901 sc->curaid = bss_conf->aid;
902 ath9k_hw_write_associd(sc);
903 }
904
905 /* Configure the beacon */
906 ath_beacon_config(sc, vif);
907
908 /* Reset rssi stats */
909 sc->nodestats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
910 sc->nodestats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
911 sc->nodestats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
912 sc->nodestats.ns_avgtxrate = ATH_RATE_DUMMY_MARKER;
913
914 /* Start ANI */
915 mod_timer(&sc->ani.timer,
916 jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
917 } else {
918 DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info DISASSOC\n");
919 sc->curaid = 0;
920 }
921}
922
923/********************************/
924/* LED functions */
925/********************************/
926
927static void ath_led_blink_work(struct work_struct *work)
928{
929 struct ath_softc *sc = container_of(work, struct ath_softc,
930 ath_led_blink_work.work);
931
932 if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
933 return;
934
935 if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
936 (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
937 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 0);
938 else
939 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN,
940 (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
941
942 queue_delayed_work(sc->hw->workqueue, &sc->ath_led_blink_work,
943 (sc->sc_flags & SC_OP_LED_ON) ?
944 msecs_to_jiffies(sc->led_off_duration) :
945 msecs_to_jiffies(sc->led_on_duration));
946
947 sc->led_on_duration = sc->led_on_cnt ?
948 max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
949 ATH_LED_ON_DURATION_IDLE;
950 sc->led_off_duration = sc->led_off_cnt ?
951 max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
952 ATH_LED_OFF_DURATION_IDLE;
953 sc->led_on_cnt = sc->led_off_cnt = 0;
954 if (sc->sc_flags & SC_OP_LED_ON)
955 sc->sc_flags &= ~SC_OP_LED_ON;
956 else
957 sc->sc_flags |= SC_OP_LED_ON;
958}
959
960static void ath_led_brightness(struct led_classdev *led_cdev,
961 enum led_brightness brightness)
962{
963 struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
964 struct ath_softc *sc = led->sc;
965
966 switch (brightness) {
967 case LED_OFF:
968 if (led->led_type == ATH_LED_ASSOC ||
969 led->led_type == ATH_LED_RADIO) {
970 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN,
971 (led->led_type == ATH_LED_RADIO));
972 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
973 if (led->led_type == ATH_LED_RADIO)
974 sc->sc_flags &= ~SC_OP_LED_ON;
975 } else {
976 sc->led_off_cnt++;
977 }
978 break;
979 case LED_FULL:
980 if (led->led_type == ATH_LED_ASSOC) {
981 sc->sc_flags |= SC_OP_LED_ASSOCIATED;
982 queue_delayed_work(sc->hw->workqueue,
983 &sc->ath_led_blink_work, 0);
984 } else if (led->led_type == ATH_LED_RADIO) {
985 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 0);
986 sc->sc_flags |= SC_OP_LED_ON;
987 } else {
988 sc->led_on_cnt++;
989 }
990 break;
991 default:
992 break;
993 }
994}
995
996static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
997 char *trigger)
998{
999 int ret;
1000
1001 led->sc = sc;
1002 led->led_cdev.name = led->name;
1003 led->led_cdev.default_trigger = trigger;
1004 led->led_cdev.brightness_set = ath_led_brightness;
1005
1006 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
1007 if (ret)
1008 DPRINTF(sc, ATH_DBG_FATAL,
1009 "Failed to register led:%s", led->name);
1010 else
1011 led->registered = 1;
1012 return ret;
1013}
1014
1015static void ath_unregister_led(struct ath_led *led)
1016{
1017 if (led->registered) {
1018 led_classdev_unregister(&led->led_cdev);
1019 led->registered = 0;
1020 }
1021}
1022
1023static void ath_deinit_leds(struct ath_softc *sc)
1024{
1025 cancel_delayed_work_sync(&sc->ath_led_blink_work);
1026 ath_unregister_led(&sc->assoc_led);
1027 sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
1028 ath_unregister_led(&sc->tx_led);
1029 ath_unregister_led(&sc->rx_led);
1030 ath_unregister_led(&sc->radio_led);
1031 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
1032}
1033
1034static void ath_init_leds(struct ath_softc *sc)
1035{
1036 char *trigger;
1037 int ret;
1038
1039 /* Configure gpio 1 for output */
1040 ath9k_hw_cfg_output(sc->sc_ah, ATH_LED_PIN,
1041 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1042 /* LED off, active low */
1043 ath9k_hw_set_gpio(sc->sc_ah, ATH_LED_PIN, 1);
1044
1045 INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
1046
1047 trigger = ieee80211_get_radio_led_name(sc->hw);
1048 snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
1049 "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
1050 ret = ath_register_led(sc, &sc->radio_led, trigger);
1051 sc->radio_led.led_type = ATH_LED_RADIO;
1052 if (ret)
1053 goto fail;
1054
1055 trigger = ieee80211_get_assoc_led_name(sc->hw);
1056 snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
1057 "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
1058 ret = ath_register_led(sc, &sc->assoc_led, trigger);
1059 sc->assoc_led.led_type = ATH_LED_ASSOC;
1060 if (ret)
1061 goto fail;
1062
1063 trigger = ieee80211_get_tx_led_name(sc->hw);
1064 snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
1065 "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
1066 ret = ath_register_led(sc, &sc->tx_led, trigger);
1067 sc->tx_led.led_type = ATH_LED_TX;
1068 if (ret)
1069 goto fail;
1070
1071 trigger = ieee80211_get_rx_led_name(sc->hw);
1072 snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
1073 "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
1074 ret = ath_register_led(sc, &sc->rx_led, trigger);
1075 sc->rx_led.led_type = ATH_LED_RX;
1076 if (ret)
1077 goto fail;
1078
1079 return;
1080
1081fail:
1082 ath_deinit_leds(sc);
1083}
1084
1085void ath_radio_enable(struct ath_softc *sc)
1086{
1087 struct ath_hw *ah = sc->sc_ah;
1088 struct ieee80211_channel *channel = sc->hw->conf.channel;
1089 int r;
1090
1091 ath9k_ps_wakeup(sc);
1092 spin_lock_bh(&sc->sc_resetlock);
1093
1094 r = ath9k_hw_reset(ah, ah->curchan, false);
1095
1096 if (r) {
1097 DPRINTF(sc, ATH_DBG_FATAL,
1098 "Unable to reset channel %u (%uMhz) ",
1099 "reset status %u\n",
1100 channel->center_freq, r);
1101 }
1102 spin_unlock_bh(&sc->sc_resetlock);
1103
1104 ath_update_txpow(sc);
1105 if (ath_startrecv(sc) != 0) {
1106 DPRINTF(sc, ATH_DBG_FATAL,
1107 "Unable to restart recv logic\n");
1108 return;
1109 }
1110
1111 if (sc->sc_flags & SC_OP_BEACONS)
1112 ath_beacon_config(sc, NULL); /* restart beacons */
1113
1114 /* Re-Enable interrupts */
1115 ath9k_hw_set_interrupts(ah, sc->imask);
1116
1117 /* Enable LED */
1118 ath9k_hw_cfg_output(ah, ATH_LED_PIN,
1119 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1120 ath9k_hw_set_gpio(ah, ATH_LED_PIN, 0);
1121
1122 ieee80211_wake_queues(sc->hw);
1123 ath9k_ps_restore(sc);
1124}
1125
1126void ath_radio_disable(struct ath_softc *sc)
1127{
1128 struct ath_hw *ah = sc->sc_ah;
1129 struct ieee80211_channel *channel = sc->hw->conf.channel;
1130 int r;
1131
1132 ath9k_ps_wakeup(sc);
1133 ieee80211_stop_queues(sc->hw);
1134
1135 /* Disable LED */
1136 ath9k_hw_set_gpio(ah, ATH_LED_PIN, 1);
1137 ath9k_hw_cfg_gpio_input(ah, ATH_LED_PIN);
1138
1139 /* Disable interrupts */
1140 ath9k_hw_set_interrupts(ah, 0);
1141
1142 ath_drain_all_txq(sc, false); /* clear pending tx frames */
1143 ath_stoprecv(sc); /* turn off frame recv */
1144 ath_flushrecv(sc); /* flush recv queue */
1145
1146 spin_lock_bh(&sc->sc_resetlock);
1147 r = ath9k_hw_reset(ah, ah->curchan, false);
1148 if (r) {
1149 DPRINTF(sc, ATH_DBG_FATAL,
1150 "Unable to reset channel %u (%uMhz) "
1151 "reset status %u\n",
1152 channel->center_freq, r);
1153 }
1154 spin_unlock_bh(&sc->sc_resetlock);
1155
1156 ath9k_hw_phy_disable(ah);
1157 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
1158 ath9k_ps_restore(sc);
1159}
1160
1161#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1162
1163/*******************/
1164/* Rfkill */
1165/*******************/
1166
1167static bool ath_is_rfkill_set(struct ath_softc *sc)
1168{
1169 struct ath_hw *ah = sc->sc_ah;
1170
1171 return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
1172 ah->rfkill_polarity;
1173}
1174
1175/* h/w rfkill poll function */
1176static void ath_rfkill_poll(struct work_struct *work)
1177{
1178 struct ath_softc *sc = container_of(work, struct ath_softc,
1179 rf_kill.rfkill_poll.work);
1180 bool radio_on;
1181
1182 if (sc->sc_flags & SC_OP_INVALID)
1183 return;
1184
1185 radio_on = !ath_is_rfkill_set(sc);
1186
1187 /*
1188 * enable/disable radio only when there is a
1189 * state change in RF switch
1190 */
1191 if (radio_on == !!(sc->sc_flags & SC_OP_RFKILL_HW_BLOCKED)) {
1192 enum rfkill_state state;
1193
1194 if (sc->sc_flags & SC_OP_RFKILL_SW_BLOCKED) {
1195 state = radio_on ? RFKILL_STATE_SOFT_BLOCKED
1196 : RFKILL_STATE_HARD_BLOCKED;
1197 } else if (radio_on) {
1198 ath_radio_enable(sc);
1199 state = RFKILL_STATE_UNBLOCKED;
1200 } else {
1201 ath_radio_disable(sc);
1202 state = RFKILL_STATE_HARD_BLOCKED;
1203 }
1204
1205 if (state == RFKILL_STATE_HARD_BLOCKED)
1206 sc->sc_flags |= SC_OP_RFKILL_HW_BLOCKED;
1207 else
1208 sc->sc_flags &= ~SC_OP_RFKILL_HW_BLOCKED;
1209
1210 rfkill_force_state(sc->rf_kill.rfkill, state);
1211 }
1212
1213 queue_delayed_work(sc->hw->workqueue, &sc->rf_kill.rfkill_poll,
1214 msecs_to_jiffies(ATH_RFKILL_POLL_INTERVAL));
1215}
1216
1217/* s/w rfkill handler */
1218static int ath_sw_toggle_radio(void *data, enum rfkill_state state)
1219{
1220 struct ath_softc *sc = data;
1221
1222 switch (state) {
1223 case RFKILL_STATE_SOFT_BLOCKED:
1224 if (!(sc->sc_flags & (SC_OP_RFKILL_HW_BLOCKED |
1225 SC_OP_RFKILL_SW_BLOCKED)))
1226 ath_radio_disable(sc);
1227 sc->sc_flags |= SC_OP_RFKILL_SW_BLOCKED;
1228 return 0;
1229 case RFKILL_STATE_UNBLOCKED:
1230 if ((sc->sc_flags & SC_OP_RFKILL_SW_BLOCKED)) {
1231 sc->sc_flags &= ~SC_OP_RFKILL_SW_BLOCKED;
1232 if (sc->sc_flags & SC_OP_RFKILL_HW_BLOCKED) {
1233 DPRINTF(sc, ATH_DBG_FATAL, "Can't turn on the "
1234 "radio as it is disabled by h/w\n");
1235 return -EPERM;
1236 }
1237 ath_radio_enable(sc);
1238 }
1239 return 0;
1240 default:
1241 return -EINVAL;
1242 }
1243}
1244
1245/* Init s/w rfkill */
1246static int ath_init_sw_rfkill(struct ath_softc *sc)
1247{
1248 sc->rf_kill.rfkill = rfkill_allocate(wiphy_dev(sc->hw->wiphy),
1249 RFKILL_TYPE_WLAN);
1250 if (!sc->rf_kill.rfkill) {
1251 DPRINTF(sc, ATH_DBG_FATAL, "Failed to allocate rfkill\n");
1252 return -ENOMEM;
1253 }
1254
1255 snprintf(sc->rf_kill.rfkill_name, sizeof(sc->rf_kill.rfkill_name),
1256 "ath9k-%s::rfkill", wiphy_name(sc->hw->wiphy));
1257 sc->rf_kill.rfkill->name = sc->rf_kill.rfkill_name;
1258 sc->rf_kill.rfkill->data = sc;
1259 sc->rf_kill.rfkill->toggle_radio = ath_sw_toggle_radio;
1260 sc->rf_kill.rfkill->state = RFKILL_STATE_UNBLOCKED;
1261
1262 return 0;
1263}
1264
1265/* Deinitialize rfkill */
1266static void ath_deinit_rfkill(struct ath_softc *sc)
1267{
1268 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1269 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
1270
1271 if (sc->sc_flags & SC_OP_RFKILL_REGISTERED) {
1272 rfkill_unregister(sc->rf_kill.rfkill);
1273 sc->sc_flags &= ~SC_OP_RFKILL_REGISTERED;
1274 sc->rf_kill.rfkill = NULL;
1275 }
1276}
1277
1278static int ath_start_rfkill_poll(struct ath_softc *sc)
1279{
1280 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1281 queue_delayed_work(sc->hw->workqueue,
1282 &sc->rf_kill.rfkill_poll, 0);
1283
1284 if (!(sc->sc_flags & SC_OP_RFKILL_REGISTERED)) {
1285 if (rfkill_register(sc->rf_kill.rfkill)) {
1286 DPRINTF(sc, ATH_DBG_FATAL,
1287 "Unable to register rfkill\n");
1288 rfkill_free(sc->rf_kill.rfkill);
1289
1290 /* Deinitialize the device */
1291 ath_cleanup(sc);
1292 return -EIO;
1293 } else {
1294 sc->sc_flags |= SC_OP_RFKILL_REGISTERED;
1295 }
1296 }
1297
1298 return 0;
1299}
1300#endif /* CONFIG_RFKILL */
1301
1302void ath_cleanup(struct ath_softc *sc)
1303{
1304 ath_detach(sc);
1305 free_irq(sc->irq, sc);
1306 ath_bus_cleanup(sc);
1307 kfree(sc->sec_wiphy);
1308 ieee80211_free_hw(sc->hw);
1309}
1310
1311void ath_detach(struct ath_softc *sc)
1312{
1313 struct ieee80211_hw *hw = sc->hw;
1314 int i = 0;
1315
1316 ath9k_ps_wakeup(sc);
1317
1318 DPRINTF(sc, ATH_DBG_CONFIG, "Detach ATH hw\n");
1319
1320#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1321 ath_deinit_rfkill(sc);
1322#endif
1323 ath_deinit_leds(sc);
1324 cancel_work_sync(&sc->chan_work);
1325 cancel_delayed_work_sync(&sc->wiphy_work);
1326
1327 for (i = 0; i < sc->num_sec_wiphy; i++) {
1328 struct ath_wiphy *aphy = sc->sec_wiphy[i];
1329 if (aphy == NULL)
1330 continue;
1331 sc->sec_wiphy[i] = NULL;
1332 ieee80211_unregister_hw(aphy->hw);
1333 ieee80211_free_hw(aphy->hw);
1334 }
1335 ieee80211_unregister_hw(hw);
1336 ath_rx_cleanup(sc);
1337 ath_tx_cleanup(sc);
1338
1339 tasklet_kill(&sc->intr_tq);
1340 tasklet_kill(&sc->bcon_tasklet);
1341
1342 if (!(sc->sc_flags & SC_OP_INVALID))
1343 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
1344
1345 /* cleanup tx queues */
1346 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1347 if (ATH_TXQ_SETUP(sc, i))
1348 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1349
1350 ath9k_hw_detach(sc->sc_ah);
1351 ath9k_exit_debug(sc);
1352 ath9k_ps_restore(sc);
1353}
1354
1355static int ath9k_reg_notifier(struct wiphy *wiphy,
1356 struct regulatory_request *request)
1357{
1358 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1359 struct ath_wiphy *aphy = hw->priv;
1360 struct ath_softc *sc = aphy->sc;
1361 struct ath_regulatory *reg = &sc->sc_ah->regulatory;
1362
1363 return ath_reg_notifier_apply(wiphy, request, reg);
1364}
1365
1366static int ath_init(u16 devid, struct ath_softc *sc)
1367{
1368 struct ath_hw *ah = NULL;
1369 int status;
1370 int error = 0, i;
1371 int csz = 0;
1372
1373 /* XXX: hardware will not be ready until ath_open() is called */
1374 sc->sc_flags |= SC_OP_INVALID;
1375
1376 if (ath9k_init_debug(sc) < 0)
1377 printk(KERN_ERR "Unable to create debugfs files\n");
1378
1379 spin_lock_init(&sc->wiphy_lock);
1380 spin_lock_init(&sc->sc_resetlock);
1381 spin_lock_init(&sc->sc_serial_rw);
1382 mutex_init(&sc->mutex);
1383 tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
1384 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
1385 (unsigned long)sc);
1386
1387 /*
1388 * Cache line size is used to size and align various
1389 * structures used to communicate with the hardware.
1390 */
1391 ath_read_cachesize(sc, &csz);
1392 /* XXX assert csz is non-zero */
1393 sc->cachelsz = csz << 2; /* convert to bytes */
1394
1395 ah = ath9k_hw_attach(devid, sc, &status);
1396 if (ah == NULL) {
1397 DPRINTF(sc, ATH_DBG_FATAL,
1398 "Unable to attach hardware; HAL status %d\n", status);
1399 error = -ENXIO;
1400 goto bad;
1401 }
1402 sc->sc_ah = ah;
1403
1404 /* Get the hardware key cache size. */
1405 sc->keymax = ah->caps.keycache_size;
1406 if (sc->keymax > ATH_KEYMAX) {
1407 DPRINTF(sc, ATH_DBG_ANY,
1408 "Warning, using only %u entries in %u key cache\n",
1409 ATH_KEYMAX, sc->keymax);
1410 sc->keymax = ATH_KEYMAX;
1411 }
1412
1413 /*
1414 * Reset the key cache since some parts do not
1415 * reset the contents on initial power up.
1416 */
1417 for (i = 0; i < sc->keymax; i++)
1418 ath9k_hw_keyreset(ah, (u16) i);
1419
1420 if (ath_regd_init(&sc->sc_ah->regulatory, sc->hw->wiphy,
1421 ath9k_reg_notifier))
1422 goto bad;
1423
1424 /* default to MONITOR mode */
1425 sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
1426
1427 /* Setup rate tables */
1428
1429 ath_rate_attach(sc);
1430 ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
1431 ath_setup_rates(sc, IEEE80211_BAND_5GHZ);
1432
1433 /*
1434 * Allocate hardware transmit queues: one queue for
1435 * beacon frames and one data queue for each QoS
1436 * priority. Note that the hal handles resetting
1437 * these queues at the needed time.
1438 */
1439 sc->beacon.beaconq = ath_beaconq_setup(ah);
1440 if (sc->beacon.beaconq == -1) {
1441 DPRINTF(sc, ATH_DBG_FATAL,
1442 "Unable to setup a beacon xmit queue\n");
1443 error = -EIO;
1444 goto bad2;
1445 }
1446 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
1447 if (sc->beacon.cabq == NULL) {
1448 DPRINTF(sc, ATH_DBG_FATAL,
1449 "Unable to setup CAB xmit queue\n");
1450 error = -EIO;
1451 goto bad2;
1452 }
1453
1454 sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
1455 ath_cabq_update(sc);
1456
1457 for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
1458 sc->tx.hwq_map[i] = -1;
1459
1460 /* Setup data queues */
1461 /* NB: ensure BK queue is the lowest priority h/w queue */
1462 if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
1463 DPRINTF(sc, ATH_DBG_FATAL,
1464 "Unable to setup xmit queue for BK traffic\n");
1465 error = -EIO;
1466 goto bad2;
1467 }
1468
1469 if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
1470 DPRINTF(sc, ATH_DBG_FATAL,
1471 "Unable to setup xmit queue for BE traffic\n");
1472 error = -EIO;
1473 goto bad2;
1474 }
1475 if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
1476 DPRINTF(sc, ATH_DBG_FATAL,
1477 "Unable to setup xmit queue for VI traffic\n");
1478 error = -EIO;
1479 goto bad2;
1480 }
1481 if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
1482 DPRINTF(sc, ATH_DBG_FATAL,
1483 "Unable to setup xmit queue for VO traffic\n");
1484 error = -EIO;
1485 goto bad2;
1486 }
1487
1488 /* Initializes the noise floor to a reasonable default value.
1489 * Later on this will be updated during ANI processing. */
1490
1491 sc->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
1492 setup_timer(&sc->ani.timer, ath_ani_calibrate, (unsigned long)sc);
1493
1494 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1495 ATH9K_CIPHER_TKIP, NULL)) {
1496 /*
1497 * Whether we should enable h/w TKIP MIC.
1498 * XXX: if we don't support WME TKIP MIC, then we wouldn't
1499 * report WMM capable, so it's always safe to turn on
1500 * TKIP MIC in this case.
1501 */
1502 ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
1503 0, 1, NULL);
1504 }
1505
1506 /*
1507 * Check whether the separate key cache entries
1508 * are required to handle both tx+rx MIC keys.
1509 * With split mic keys the number of stations is limited
1510 * to 27, otherwise 59.
1511 */
1512 if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1513 ATH9K_CIPHER_TKIP, NULL)
1514 && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
1515 ATH9K_CIPHER_MIC, NULL)
1516 && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
1517 0, NULL))
1518 sc->splitmic = 1;
1519
1520 /* turn on mcast key search if possible */
1521 if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
1522 (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
1523 1, NULL);
1524
1525 sc->config.txpowlimit = ATH_TXPOWER_MAX;
1526
1527 /* 11n Capabilities */
1528 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
1529 sc->sc_flags |= SC_OP_TXAGGR;
1530 sc->sc_flags |= SC_OP_RXAGGR;
1531 }
1532
1533 sc->tx_chainmask = ah->caps.tx_chainmask;
1534 sc->rx_chainmask = ah->caps.rx_chainmask;
1535
1536 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
1537 sc->rx.defant = ath9k_hw_getdefantenna(ah);
1538
1539 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
1540 memcpy(sc->bssidmask, ath_bcast_mac, ETH_ALEN);
1541
1542 sc->beacon.slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
1543
1544 /* initialize beacon slots */
1545 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
1546 sc->beacon.bslot[i] = NULL;
1547 sc->beacon.bslot_aphy[i] = NULL;
1548 }
1549
1550 /* setup channels and rates */
1551
1552 sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
1553 sc->sbands[IEEE80211_BAND_2GHZ].bitrates =
1554 sc->rates[IEEE80211_BAND_2GHZ];
1555 sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
1556 sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
1557 ARRAY_SIZE(ath9k_2ghz_chantable);
1558
1559 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
1560 sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
1561 sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
1562 sc->rates[IEEE80211_BAND_5GHZ];
1563 sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
1564 sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
1565 ARRAY_SIZE(ath9k_5ghz_chantable);
1566 }
1567
1568 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BT_COEX)
1569 ath9k_hw_btcoex_enable(sc->sc_ah);
1570
1571 return 0;
1572bad2:
1573 /* cleanup tx queues */
1574 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1575 if (ATH_TXQ_SETUP(sc, i))
1576 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1577bad:
1578 if (ah)
1579 ath9k_hw_detach(ah);
1580 ath9k_exit_debug(sc);
1581
1582 return error;
1583}
1584
1585void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
1586{
1587 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
1588 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1589 IEEE80211_HW_SIGNAL_DBM |
1590 IEEE80211_HW_AMPDU_AGGREGATION |
1591 IEEE80211_HW_SUPPORTS_PS |
1592 IEEE80211_HW_PS_NULLFUNC_STACK |
1593 IEEE80211_HW_SPECTRUM_MGMT;
1594
1595 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
1596 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
1597
1598 hw->wiphy->interface_modes =
1599 BIT(NL80211_IFTYPE_AP) |
1600 BIT(NL80211_IFTYPE_STATION) |
1601 BIT(NL80211_IFTYPE_ADHOC) |
1602 BIT(NL80211_IFTYPE_MESH_POINT);
1603
1604 hw->queues = 4;
1605 hw->max_rates = 4;
1606 hw->channel_change_time = 5000;
1607 hw->max_listen_interval = 10;
1608 hw->max_rate_tries = ATH_11N_TXMAXTRY;
1609 hw->sta_data_size = sizeof(struct ath_node);
1610 hw->vif_data_size = sizeof(struct ath_vif);
1611
1612 hw->rate_control_algorithm = "ath9k_rate_control";
1613
1614 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1615 &sc->sbands[IEEE80211_BAND_2GHZ];
1616 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
1617 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1618 &sc->sbands[IEEE80211_BAND_5GHZ];
1619}
1620
1621int ath_attach(u16 devid, struct ath_softc *sc)
1622{
1623 struct ieee80211_hw *hw = sc->hw;
1624 int error = 0, i;
1625 struct ath_regulatory *reg;
1626
1627 DPRINTF(sc, ATH_DBG_CONFIG, "Attach ATH hw\n");
1628
1629 error = ath_init(devid, sc);
1630 if (error != 0)
1631 return error;
1632
1633 reg = &sc->sc_ah->regulatory;
1634
1635 /* get mac address from hardware and set in mac80211 */
1636
1637 SET_IEEE80211_PERM_ADDR(hw, sc->sc_ah->macaddr);
1638
1639 ath_set_hw_capab(sc, hw);
1640
1641 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
1642 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
1643 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
1644 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
1645 }
1646
1647 /* initialize tx/rx engine */
1648 error = ath_tx_init(sc, ATH_TXBUF);
1649 if (error != 0)
1650 goto error_attach;
1651
1652 error = ath_rx_init(sc, ATH_RXBUF);
1653 if (error != 0)
1654 goto error_attach;
1655
1656#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
1657 /* Initialize h/w Rfkill */
1658 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1659 INIT_DELAYED_WORK(&sc->rf_kill.rfkill_poll, ath_rfkill_poll);
1660
1661 /* Initialize s/w rfkill */
1662 error = ath_init_sw_rfkill(sc);
1663 if (error)
1664 goto error_attach;
1665#endif
1666
1667 INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
1668 INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
1669 sc->wiphy_scheduler_int = msecs_to_jiffies(500);
1670
1671 error = ieee80211_register_hw(hw);
1672
1673 if (!ath_is_world_regd(reg)) {
1674 error = regulatory_hint(hw->wiphy, reg->alpha2);
1675 if (error)
1676 goto error_attach;
1677 }
1678
1679 /* Initialize LED control */
1680 ath_init_leds(sc);
1681
1682
1683 return 0;
1684
1685error_attach:
1686 /* cleanup tx queues */
1687 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1688 if (ATH_TXQ_SETUP(sc, i))
1689 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1690
1691 ath9k_hw_detach(sc->sc_ah);
1692 ath9k_exit_debug(sc);
1693
1694 return error;
1695}
1696
1697int ath_reset(struct ath_softc *sc, bool retry_tx)
1698{
1699 struct ath_hw *ah = sc->sc_ah;
1700 struct ieee80211_hw *hw = sc->hw;
1701 int r;
1702
1703 ath9k_hw_set_interrupts(ah, 0);
1704 ath_drain_all_txq(sc, retry_tx);
1705 ath_stoprecv(sc);
1706 ath_flushrecv(sc);
1707
1708 spin_lock_bh(&sc->sc_resetlock);
1709 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
1710 if (r)
1711 DPRINTF(sc, ATH_DBG_FATAL,
1712 "Unable to reset hardware; reset status %u\n", r);
1713 spin_unlock_bh(&sc->sc_resetlock);
1714
1715 if (ath_startrecv(sc) != 0)
1716 DPRINTF(sc, ATH_DBG_FATAL, "Unable to start recv logic\n");
1717
1718 /*
1719 * We may be doing a reset in response to a request
1720 * that changes the channel so update any state that
1721 * might change as a result.
1722 */
1723 ath_cache_conf_rate(sc, &hw->conf);
1724
1725 ath_update_txpow(sc);
1726
1727 if (sc->sc_flags & SC_OP_BEACONS)
1728 ath_beacon_config(sc, NULL); /* restart beacons */
1729
1730 ath9k_hw_set_interrupts(ah, sc->imask);
1731
1732 if (retry_tx) {
1733 int i;
1734 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1735 if (ATH_TXQ_SETUP(sc, i)) {
1736 spin_lock_bh(&sc->tx.txq[i].axq_lock);
1737 ath_txq_schedule(sc, &sc->tx.txq[i]);
1738 spin_unlock_bh(&sc->tx.txq[i].axq_lock);
1739 }
1740 }
1741 }
1742
1743 return r;
1744}
1745
1746/*
1747 * This function will allocate both the DMA descriptor structure, and the
1748 * buffers it contains. These are used to contain the descriptors used
1749 * by the system.
1750*/
1751int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
1752 struct list_head *head, const char *name,
1753 int nbuf, int ndesc)
1754{
1755#define DS2PHYS(_dd, _ds) \
1756 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
1757#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
1758#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
1759
1760 struct ath_desc *ds;
1761 struct ath_buf *bf;
1762 int i, bsize, error;
1763
1764 DPRINTF(sc, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
1765 name, nbuf, ndesc);
1766
1767 INIT_LIST_HEAD(head);
1768 /* ath_desc must be a multiple of DWORDs */
1769 if ((sizeof(struct ath_desc) % 4) != 0) {
1770 DPRINTF(sc, ATH_DBG_FATAL, "ath_desc not DWORD aligned\n");
1771 ASSERT((sizeof(struct ath_desc) % 4) == 0);
1772 error = -ENOMEM;
1773 goto fail;
1774 }
1775
1776 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
1777
1778 /*
1779 * Need additional DMA memory because we can't use
1780 * descriptors that cross the 4K page boundary. Assume
1781 * one skipped descriptor per 4K page.
1782 */
1783 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1784 u32 ndesc_skipped =
1785 ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
1786 u32 dma_len;
1787
1788 while (ndesc_skipped) {
1789 dma_len = ndesc_skipped * sizeof(struct ath_desc);
1790 dd->dd_desc_len += dma_len;
1791
1792 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
1793 };
1794 }
1795
1796 /* allocate descriptors */
1797 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
1798 &dd->dd_desc_paddr, GFP_KERNEL);
1799 if (dd->dd_desc == NULL) {
1800 error = -ENOMEM;
1801 goto fail;
1802 }
1803 ds = dd->dd_desc;
1804 DPRINTF(sc, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
1805 name, ds, (u32) dd->dd_desc_len,
1806 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
1807
1808 /* allocate buffers */
1809 bsize = sizeof(struct ath_buf) * nbuf;
1810 bf = kzalloc(bsize, GFP_KERNEL);
1811 if (bf == NULL) {
1812 error = -ENOMEM;
1813 goto fail2;
1814 }
1815 dd->dd_bufptr = bf;
1816
1817 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
1818 bf->bf_desc = ds;
1819 bf->bf_daddr = DS2PHYS(dd, ds);
1820
1821 if (!(sc->sc_ah->caps.hw_caps &
1822 ATH9K_HW_CAP_4KB_SPLITTRANS)) {
1823 /*
1824 * Skip descriptor addresses which can cause 4KB
1825 * boundary crossing (addr + length) with a 32 dword
1826 * descriptor fetch.
1827 */
1828 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
1829 ASSERT((caddr_t) bf->bf_desc <
1830 ((caddr_t) dd->dd_desc +
1831 dd->dd_desc_len));
1832
1833 ds += ndesc;
1834 bf->bf_desc = ds;
1835 bf->bf_daddr = DS2PHYS(dd, ds);
1836 }
1837 }
1838 list_add_tail(&bf->list, head);
1839 }
1840 return 0;
1841fail2:
1842 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
1843 dd->dd_desc_paddr);
1844fail:
1845 memset(dd, 0, sizeof(*dd));
1846 return error;
1847#undef ATH_DESC_4KB_BOUND_CHECK
1848#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
1849#undef DS2PHYS
1850}
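
A rough worked example of the padding loop above, assuming sizeof(struct ath_desc) == 96 bytes and a request of nbuf = 16, ndesc = 4 (both numbers are illustrative, not taken from the driver):

	/*
	 * dd_desc_len   = 96 * 16 * 4 = 6144 bytes
	 * ndesc_skipped = 6144 / 4096 = 1
	 *   dma_len       = 1 * 96    = 96 bytes
	 *   dd_desc_len  += 96        = 6240 bytes
	 *   ndesc_skipped = 96 / 4096 = 0   -> loop terminates
	 *
	 * i.e. one extra descriptor's worth of DMA memory is reserved so that
	 * a descriptor straddling the 4 KB boundary can be skipped by the
	 * placement loop later in the function.
	 */
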
1851
1852void ath_descdma_cleanup(struct ath_softc *sc,
1853 struct ath_descdma *dd,
1854 struct list_head *head)
1855{
1856 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
1857 dd->dd_desc_paddr);
1858
1859 INIT_LIST_HEAD(head);
1860 kfree(dd->dd_bufptr);
1861 memset(dd, 0, sizeof(*dd));
1862}
1863
1864int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
1865{
1866 int qnum;
1867
1868 switch (queue) {
1869 case 0:
1870 qnum = sc->tx.hwq_map[ATH9K_WME_AC_VO];
1871 break;
1872 case 1:
1873 qnum = sc->tx.hwq_map[ATH9K_WME_AC_VI];
1874 break;
1875 case 2:
1876 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
1877 break;
1878 case 3:
1879 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BK];
1880 break;
1881 default:
1882 qnum = sc->tx.hwq_map[ATH9K_WME_AC_BE];
1883 break;
1884 }
1885
1886 return qnum;
1887}
1888
1889int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
1890{
1891 int qnum;
1892
1893 switch (queue) {
1894 case ATH9K_WME_AC_VO:
1895 qnum = 0;
1896 break;
1897 case ATH9K_WME_AC_VI:
1898 qnum = 1;
1899 break;
1900 case ATH9K_WME_AC_BE:
1901 qnum = 2;
1902 break;
1903 case ATH9K_WME_AC_BK:
1904 qnum = 3;
1905 break;
1906 default:
1907 qnum = -1;
1908 break;
1909 }
1910
1911 return qnum;
1912}
1913
1914/* XXX: Remove me once we don't depend on ath9k_channel for all
1915 * this redundant data */
1916void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
1917 struct ath9k_channel *ichan)
1918{
1919 struct ieee80211_channel *chan = hw->conf.channel;
1920 struct ieee80211_conf *conf = &hw->conf;
1921
1922 ichan->channel = chan->center_freq;
1923 ichan->chan = chan;
1924
1925 if (chan->band == IEEE80211_BAND_2GHZ) {
1926 ichan->chanmode = CHANNEL_G;
1927 ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM;
1928 } else {
1929 ichan->chanmode = CHANNEL_A;
1930 ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
1931 }
1932
1933 sc->tx_chan_width = ATH9K_HT_MACMODE_20;
1934
1935 if (conf_is_ht(conf)) {
1936 if (conf_is_ht40(conf))
1937 sc->tx_chan_width = ATH9K_HT_MACMODE_2040;
1938
1939 ichan->chanmode = ath_get_extchanmode(sc, chan,
1940 conf->channel_type);
1941 }
1942}
1943
1944/**********************/
1945/* mac80211 callbacks */
1946/**********************/
1947
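/*
 * Bring-up path for a wiphy. The order in ath9k_start() below is:
 * mark the wiphy active, pick the initial channel, take the chip out
 * of PCI power-save, reset the hardware, set the TX power, start the
 * RX engine, program the interrupt mask and finally wake the mac80211
 * queues. If another registered wiphy is already running, the new one
 * only attaches to (or is paused against) the current operational
 * channel and none of the hardware setup is repeated.
 */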
1948static int ath9k_start(struct ieee80211_hw *hw)
1949{
1950 struct ath_wiphy *aphy = hw->priv;
1951 struct ath_softc *sc = aphy->sc;
1952 struct ieee80211_channel *curchan = hw->conf.channel;
1953 struct ath9k_channel *init_channel;
1954 int r, pos;
1955
1956 DPRINTF(sc, ATH_DBG_CONFIG, "Starting driver with "
1957 "initial channel: %d MHz\n", curchan->center_freq);
1958
1959 mutex_lock(&sc->mutex);
1960
1961 if (ath9k_wiphy_started(sc)) {
1962 if (sc->chan_idx == curchan->hw_value) {
1963 /*
1964 * Already on the operational channel, the new wiphy
1965 * can be marked active.
1966 */
1967 aphy->state = ATH_WIPHY_ACTIVE;
1968 ieee80211_wake_queues(hw);
1969 } else {
1970 /*
1971 * Another wiphy is on another channel, start the new
1972 * wiphy in paused state.
1973 */
1974 aphy->state = ATH_WIPHY_PAUSED;
1975 ieee80211_stop_queues(hw);
1976 }
1977 mutex_unlock(&sc->mutex);
1978 return 0;
1979 }
1980 aphy->state = ATH_WIPHY_ACTIVE;
1981
1982 /* Set up the initial channel */
1983
1984 pos = curchan->hw_value;
1985
1986 sc->chan_idx = pos;
1987 init_channel = &sc->sc_ah->channels[pos];
1988 ath9k_update_ichannel(sc, hw, init_channel);
1989
1990 /* Reset SERDES registers */
1991 ath9k_hw_configpcipowersave(sc->sc_ah, 0);
1992
1993 /*
1994 * The basic interface for putting the hardware into a good
1995 * state is ``reset''. On return the hardware is known to
1996 * be powered up and with interrupts disabled. This must
1997 * be followed by initialization of the appropriate bits
1998 * and then setup of the interrupt mask.
1999 */
2000 spin_lock_bh(&sc->sc_resetlock);
2001 r = ath9k_hw_reset(sc->sc_ah, init_channel, false);
2002 if (r) {
2003 DPRINTF(sc, ATH_DBG_FATAL,
2004 "Unable to reset hardware; reset status %u "
2005 "(freq %u MHz)\n", r,
2006 curchan->center_freq);
2007 spin_unlock_bh(&sc->sc_resetlock);
2008 goto mutex_unlock;
2009 }
2010 spin_unlock_bh(&sc->sc_resetlock);
2011
2012 /*
2013 * This is needed only to set up the initial state,
2014 * but it's best done after a reset.
2015 */
2016 ath_update_txpow(sc);
2017
2018 /*
2019 * Set up the hardware after reset:
2020 * The receive engine is set going.
2021 * Frame transmit is handled entirely
2022 * in the frame output path; there's nothing to do
2023 * here except set up the interrupt mask.
2024 */
2025 if (ath_startrecv(sc) != 0) {
2026 DPRINTF(sc, ATH_DBG_FATAL, "Unable to start recv logic\n");
2027 r = -EIO;
2028 goto mutex_unlock;
2029 }
2030
2031 /* Setup our intr mask. */
2032 sc->imask = ATH9K_INT_RX | ATH9K_INT_TX
2033 | ATH9K_INT_RXEOL | ATH9K_INT_RXORN
2034 | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
2035
2036 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_GTT)
2037 sc->imask |= ATH9K_INT_GTT;
2038
2039 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
2040 sc->imask |= ATH9K_INT_CST;
2041
2042 ath_cache_conf_rate(sc, &hw->conf);
2043
2044 sc->sc_flags &= ~SC_OP_INVALID;
2045
2046 /* Disable SWBA and BMISS interrupts while we're not associated */
2047 sc->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
2048 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
2049
2050 ieee80211_wake_queues(hw);
2051
2052#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
2053 r = ath_start_rfkill_poll(sc);
2054#endif
2055
2056mutex_unlock:
2057 mutex_unlock(&sc->mutex);
2058
2059 return r;
2060}
2061
2062static int ath9k_tx(struct ieee80211_hw *hw,
2063 struct sk_buff *skb)
2064{
2065 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2066 struct ath_wiphy *aphy = hw->priv;
2067 struct ath_softc *sc = aphy->sc;
2068 struct ath_tx_control txctl;
2069 int hdrlen, padsize;
2070
2071 if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
2072 printk(KERN_DEBUG "ath9k: %s: TX in unexpected wiphy state "
2073 "%d\n", wiphy_name(hw->wiphy), aphy->state);
2074 goto exit;
2075 }
2076
2077 memset(&txctl, 0, sizeof(struct ath_tx_control));
2078
2079 /*
2080 * As a temporary workaround, assign seq# here; this will likely need
2081 * to be cleaned up to work better with Beacon transmission and virtual
2082 * BSSes.
2083 */
2084 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
2085 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
2086 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
2087 sc->tx.seq_no += 0x10;
2088 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
2089 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
2090 }
2091
2092 /* Add the padding after the header if this is not already done */
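/*
 * Worked example of the padding math below: a QoS data header is
 * 26 bytes, so hdrlen & 3 is non-zero and padsize = 26 % 4 = 2.
 * skb_push() grows the headroom by 2 bytes and the memmove() shifts
 * the 802.11 header to the new start of the skb, leaving 2 pad bytes
 * between header and payload so the payload stays 4-byte aligned.
 */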
2093 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
2094 if (hdrlen & 3) {
2095 padsize = hdrlen % 4;
2096 if (skb_headroom(skb) < padsize)
2097 return -1;
2098 skb_push(skb, padsize);
2099 memmove(skb->data, skb->data + padsize, hdrlen);
2100 }
2101
2102 /* Check if a tx queue is available */
2103
2104 txctl.txq = ath_test_get_txq(sc, skb);
2105 if (!txctl.txq)
2106 goto exit;
2107
2108 DPRINTF(sc, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
2109
2110 if (ath_tx_start(hw, skb, &txctl) != 0) {
2111 DPRINTF(sc, ATH_DBG_XMIT, "TX failed\n");
2112 goto exit;
2113 }
2114
2115 return 0;
2116exit:
2117 dev_kfree_skb_any(skb);
2118 return 0;
2119}
2120
2121static void ath9k_stop(struct ieee80211_hw *hw)
2122{
2123 struct ath_wiphy *aphy = hw->priv;
2124 struct ath_softc *sc = aphy->sc;
2125
2126 aphy->state = ATH_WIPHY_INACTIVE;
2127
2128 if (sc->sc_flags & SC_OP_INVALID) {
2129 DPRINTF(sc, ATH_DBG_ANY, "Device not present\n");
2130 return;
2131 }
2132
2133 mutex_lock(&sc->mutex);
2134
2135 ieee80211_stop_queues(hw);
2136
2137 if (ath9k_wiphy_started(sc)) {
2138 mutex_unlock(&sc->mutex);
2139 return; /* another wiphy still in use */
2140 }
2141
2142 /* make sure h/w will not generate any interrupt
2143 * before setting the invalid flag. */
2144 ath9k_hw_set_interrupts(sc->sc_ah, 0);
2145
2146 if (!(sc->sc_flags & SC_OP_INVALID)) {
2147 ath_drain_all_txq(sc, false);
2148 ath_stoprecv(sc);
2149 ath9k_hw_phy_disable(sc->sc_ah);
2150 } else
2151 sc->rx.rxlink = NULL;
2152
2153#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
2154 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
2155 cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
2156#endif
2157 /* disable HAL and put h/w to sleep */
2158 ath9k_hw_disable(sc->sc_ah);
2159 ath9k_hw_configpcipowersave(sc->sc_ah, 1);
2160
2161 sc->sc_flags |= SC_OP_INVALID;
2162
2163 mutex_unlock(&sc->mutex);
2164
2165 DPRINTF(sc, ATH_DBG_CONFIG, "Driver halt\n");
2166}
2167
2168static int ath9k_add_interface(struct ieee80211_hw *hw,
2169 struct ieee80211_if_init_conf *conf)
2170{
2171 struct ath_wiphy *aphy = hw->priv;
2172 struct ath_softc *sc = aphy->sc;
2173 struct ath_vif *avp = (void *)conf->vif->drv_priv;
2174 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
2175 int ret = 0;
2176
2177 mutex_lock(&sc->mutex);
2178
2179 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) &&
2180 sc->nvifs > 0) {
2181 ret = -ENOBUFS;
2182 goto out;
2183 }
2184
2185 switch (conf->type) {
2186 case NL80211_IFTYPE_STATION:
2187 ic_opmode = NL80211_IFTYPE_STATION;
2188 break;
2189 case NL80211_IFTYPE_ADHOC:
2190 case NL80211_IFTYPE_AP:
2191 case NL80211_IFTYPE_MESH_POINT:
2192 if (sc->nbcnvifs >= ATH_BCBUF) {
2193 ret = -ENOBUFS;
2194 goto out;
2195 }
2196 ic_opmode = conf->type;
2197 break;
2198 default:
2199 DPRINTF(sc, ATH_DBG_FATAL,
2200 "Interface type %d not yet supported\n", conf->type);
2201 ret = -EOPNOTSUPP;
2202 goto out;
2203 }
2204
2205 DPRINTF(sc, ATH_DBG_CONFIG, "Attach a VIF of type: %d\n", ic_opmode);
2206
2207 /* Set the VIF opmode */
2208 avp->av_opmode = ic_opmode;
2209 avp->av_bslot = -1;
2210
2211 sc->nvifs++;
2212
2213 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
2214 ath9k_set_bssid_mask(hw);
2215
2216 if (sc->nvifs > 1)
2217 goto out; /* skip global settings for secondary vif */
2218
2219 if (ic_opmode == NL80211_IFTYPE_AP) {
2220 ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
2221 sc->sc_flags |= SC_OP_TSF_RESET;
2222 }
2223
2224 /* Set the device opmode */
2225 sc->sc_ah->opmode = ic_opmode;
2226
2227 /*
2228 * Enable MIB interrupts when there are hardware phy counters.
2229 * Note we only do this (at the moment) for station, adhoc and mesh point modes.
2230 */
2231 if ((conf->type == NL80211_IFTYPE_STATION) ||
2232 (conf->type == NL80211_IFTYPE_ADHOC) ||
2233 (conf->type == NL80211_IFTYPE_MESH_POINT)) {
2234 if (ath9k_hw_phycounters(sc->sc_ah))
2235 sc->imask |= ATH9K_INT_MIB;
2236 sc->imask |= ATH9K_INT_TSFOOR;
2237 }
2238
2239 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
2240
2241 if (conf->type == NL80211_IFTYPE_AP) {
2242 /* TODO: is this a suitable place to start ANI for AP mode? */
2243 /* Start ANI */
2244 mod_timer(&sc->ani.timer,
2245 jiffies + msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
2246 }
2247
2248out:
2249 mutex_unlock(&sc->mutex);
2250 return ret;
2251}
2252
2253static void ath9k_remove_interface(struct ieee80211_hw *hw,
2254 struct ieee80211_if_init_conf *conf)
2255{
2256 struct ath_wiphy *aphy = hw->priv;
2257 struct ath_softc *sc = aphy->sc;
2258 struct ath_vif *avp = (void *)conf->vif->drv_priv;
2259 int i;
2260
2261 DPRINTF(sc, ATH_DBG_CONFIG, "Detach Interface\n");
2262
2263 mutex_lock(&sc->mutex);
2264
2265 /* Stop ANI */
2266 del_timer_sync(&sc->ani.timer);
2267
2268 /* Reclaim beacon resources */
2269 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
2270 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
2271 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
2272 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2273 ath_beacon_return(sc, avp);
2274 }
2275
2276 sc->sc_flags &= ~SC_OP_BEACONS;
2277
2278 for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
2279 if (sc->beacon.bslot[i] == conf->vif) {
2280 printk(KERN_DEBUG "%s: vif had allocated beacon "
2281 "slot\n", __func__);
2282 sc->beacon.bslot[i] = NULL;
2283 sc->beacon.bslot_aphy[i] = NULL;
2284 }
2285 }
2286
2287 sc->nvifs--;
2288
2289 mutex_unlock(&sc->mutex);
2290}
2291
2292static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2293{
2294 struct ath_wiphy *aphy = hw->priv;
2295 struct ath_softc *sc = aphy->sc;
2296 struct ieee80211_conf *conf = &hw->conf;
2297 struct ath_hw *ah = sc->sc_ah;
2298
2299 mutex_lock(&sc->mutex);
2300
2301 if (changed & IEEE80211_CONF_CHANGE_PS) {
2302 if (conf->flags & IEEE80211_CONF_PS) {
2303 if (!(ah->caps.hw_caps &
2304 ATH9K_HW_CAP_AUTOSLEEP)) {
2305 if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
2306 sc->imask |= ATH9K_INT_TIM_TIMER;
2307 ath9k_hw_set_interrupts(sc->sc_ah,
2308 sc->imask);
2309 }
2310 ath9k_hw_setrxabort(sc->sc_ah, 1);
2311 }
2312 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
2313 } else {
2314 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
2315 if (!(ah->caps.hw_caps &
2316 ATH9K_HW_CAP_AUTOSLEEP)) {
2317 ath9k_hw_setrxabort(sc->sc_ah, 0);
2318 sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON;
2319 if (sc->imask & ATH9K_INT_TIM_TIMER) {
2320 sc->imask &= ~ATH9K_INT_TIM_TIMER;
2321 ath9k_hw_set_interrupts(sc->sc_ah,
2322 sc->imask);
2323 }
2324 }
2325 }
2326 }
2327
2328 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2329 struct ieee80211_channel *curchan = hw->conf.channel;
2330 int pos = curchan->hw_value;
2331
2332 aphy->chan_idx = pos;
2333 aphy->chan_is_ht = conf_is_ht(conf);
2334
2335 if (aphy->state == ATH_WIPHY_SCAN ||
2336 aphy->state == ATH_WIPHY_ACTIVE)
2337 ath9k_wiphy_pause_all_forced(sc, aphy);
2338 else {
2339 /*
2340 * Do not change the operational channel based on
2341 * changes to a paused wiphy.
2342 */
2343 goto skip_chan_change;
2344 }
2345
2346 DPRINTF(sc, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
2347 curchan->center_freq);
2348
2349 /* XXX: remove me eventually */
2350 ath9k_update_ichannel(sc, hw, &sc->sc_ah->channels[pos]);
2351
2352 ath_update_chainmask(sc, conf_is_ht(conf));
2353
2354 if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
2355 DPRINTF(sc, ATH_DBG_FATAL, "Unable to set channel\n");
2356 mutex_unlock(&sc->mutex);
2357 return -EINVAL;
2358 }
2359 }
2360
2361skip_chan_change:
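/*
 * Note (hedged): conf->power_level is in dBm, while sc->config.txpowlimit
 * appears to be kept in half-dBm units, which is why the value is
 * doubled below.
 */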
2362 if (changed & IEEE80211_CONF_CHANGE_POWER)
2363 sc->config.txpowlimit = 2 * conf->power_level;
2364
2365 /*
2366 * The HW TSF has to be reset when the beacon interval changes.
2367 * We set the flag here, and ath_beacon_config_ap() will take this
2368 * into account when it gets called through the subsequent
2369 * config_interface() call - with IFCC_BEACON in the changed field.
2370 */
2371
2372 if (changed & IEEE80211_CONF_CHANGE_BEACON_INTERVAL)
2373 sc->sc_flags |= SC_OP_TSF_RESET;
2374
2375 mutex_unlock(&sc->mutex);
2376
2377 return 0;
2378}
2379
2380static int ath9k_config_interface(struct ieee80211_hw *hw,
2381 struct ieee80211_vif *vif,
2382 struct ieee80211_if_conf *conf)
2383{
2384 struct ath_wiphy *aphy = hw->priv;
2385 struct ath_softc *sc = aphy->sc;
2386 struct ath_hw *ah = sc->sc_ah;
2387 struct ath_vif *avp = (void *)vif->drv_priv;
2388 u32 rfilt = 0;
2389 int error, i;
2390
2391 mutex_lock(&sc->mutex);
2392
2393 /* TODO: Need to decide which hw opmode to use for multi-interface
2394 * cases */
2395 if (vif->type == NL80211_IFTYPE_AP &&
2396 ah->opmode != NL80211_IFTYPE_AP) {
2397 ah->opmode = NL80211_IFTYPE_STATION;
2398 ath9k_hw_setopmode(ah);
2399 memcpy(sc->curbssid, sc->sc_ah->macaddr, ETH_ALEN);
2400 sc->curaid = 0;
2401 ath9k_hw_write_associd(sc);
2402 /* Request full reset to get hw opmode changed properly */
2403 sc->sc_flags |= SC_OP_FULL_RESET;
2404 }
2405
2406 if ((conf->changed & IEEE80211_IFCC_BSSID) &&
2407 !is_zero_ether_addr(conf->bssid)) {
2408 switch (vif->type) {
2409 case NL80211_IFTYPE_STATION:
2410 case NL80211_IFTYPE_ADHOC:
2411 case NL80211_IFTYPE_MESH_POINT:
2412 /* Set BSSID */
2413 memcpy(sc->curbssid, conf->bssid, ETH_ALEN);
2414 memcpy(avp->bssid, conf->bssid, ETH_ALEN);
2415 sc->curaid = 0;
2416 ath9k_hw_write_associd(sc);
2417
2418 /* Set aggregation protection mode parameters */
2419 sc->config.ath_aggr_prot = 0;
2420
2421 DPRINTF(sc, ATH_DBG_CONFIG,
2422 "RX filter 0x%x bssid %pM aid 0x%x\n",
2423 rfilt, sc->curbssid, sc->curaid);
2424
2425 /* need to reconfigure the beacon */
2426 sc->sc_flags &= ~SC_OP_BEACONS;
2427
2428 break;
2429 default:
2430 break;
2431 }
2432 }
2433
2434 if ((vif->type == NL80211_IFTYPE_ADHOC) ||
2435 (vif->type == NL80211_IFTYPE_AP) ||
2436 (vif->type == NL80211_IFTYPE_MESH_POINT)) {
2437 if ((conf->changed & IEEE80211_IFCC_BEACON) ||
2438 ((conf->changed & IEEE80211_IFCC_BEACON_ENABLED) &&
2439 conf->enable_beacon)) {
2440 /*
2441 * Allocate and setup the beacon frame.
2442 *
2443 * Stop any previous beacon DMA. This may be
2444 * necessary, for example, when an ibss merge
2445 * causes reconfiguration; we may be called
2446 * with beacon transmission active.
2447 */
2448 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
2449
2450 error = ath_beacon_alloc(aphy, vif);
2451 if (error != 0) {
2452 mutex_unlock(&sc->mutex);
2453 return error;
2454 }
2455
2456 ath_beacon_config(sc, vif);
2457 }
2458 }
2459
2460 /* Check for WLAN_CAPABILITY_PRIVACY ? */
2461 if (avp->av_opmode != NL80211_IFTYPE_STATION) {
2462 for (i = 0; i < IEEE80211_WEP_NKID; i++)
2463 if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i))
2464 ath9k_hw_keysetmac(sc->sc_ah,
2465 (u16)i,
2466 sc->curbssid);
2467 }
2468
2469 /* Only legacy IBSS for now */
2470 if (vif->type == NL80211_IFTYPE_ADHOC)
2471 ath_update_chainmask(sc, 0);
2472
2473 mutex_unlock(&sc->mutex);
2474
2475 return 0;
2476}
2477
2478#define SUPPORTED_FILTERS \
2479 (FIF_PROMISC_IN_BSS | \
2480 FIF_ALLMULTI | \
2481 FIF_CONTROL | \
2482 FIF_OTHER_BSS | \
2483 FIF_BCN_PRBRESP_PROMISC | \
2484 FIF_FCSFAIL)
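/*
 * ath9k_configure_filter() below masks the mac80211 FIF_* flags with
 * SUPPORTED_FILTERS, caches the result in sc->rx.rxfilter and lets
 * ath_calcrxfilter() translate it into the hardware RX filter word.
 */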
2485
2486/* FIXME: sc->sc_full_reset ? */
2487static void ath9k_configure_filter(struct ieee80211_hw *hw,
2488 unsigned int changed_flags,
2489 unsigned int *total_flags,
2490 int mc_count,
2491 struct dev_mc_list *mclist)
2492{
2493 struct ath_wiphy *aphy = hw->priv;
2494 struct ath_softc *sc = aphy->sc;
2495 u32 rfilt;
2496
2497 changed_flags &= SUPPORTED_FILTERS;
2498 *total_flags &= SUPPORTED_FILTERS;
2499
2500 sc->rx.rxfilter = *total_flags;
2501 rfilt = ath_calcrxfilter(sc);
2502 ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
2503
2504 DPRINTF(sc, ATH_DBG_CONFIG, "Set HW RX filter: 0x%x\n", sc->rx.rxfilter);
2505}
2506
2507static void ath9k_sta_notify(struct ieee80211_hw *hw,
2508 struct ieee80211_vif *vif,
2509 enum sta_notify_cmd cmd,
2510 struct ieee80211_sta *sta)
2511{
2512 struct ath_wiphy *aphy = hw->priv;
2513 struct ath_softc *sc = aphy->sc;
2514
2515 switch (cmd) {
2516 case STA_NOTIFY_ADD:
2517 ath_node_attach(sc, sta);
2518 break;
2519 case STA_NOTIFY_REMOVE:
2520 ath_node_detach(sc, sta);
2521 break;
2522 default:
2523 break;
2524 }
2525}
2526
2527static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
2528 const struct ieee80211_tx_queue_params *params)
2529{
2530 struct ath_wiphy *aphy = hw->priv;
2531 struct ath_softc *sc = aphy->sc;
2532 struct ath9k_tx_queue_info qi;
2533 int ret = 0, qnum;
2534
2535 if (queue >= WME_NUM_AC)
2536 return 0;
2537
2538 mutex_lock(&sc->mutex);
2539
2540 memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
2541
2542 qi.tqi_aifs = params->aifs;
2543 qi.tqi_cwmin = params->cw_min;
2544 qi.tqi_cwmax = params->cw_max;
2545 qi.tqi_burstTime = params->txop;
2546 qnum = ath_get_hal_qnum(queue, sc);
2547
2548 DPRINTF(sc, ATH_DBG_CONFIG,
2549 "Configure tx [queue/halq] [%d/%d], "
2550 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
2551 queue, qnum, params->aifs, params->cw_min,
2552 params->cw_max, params->txop);
2553
2554 ret = ath_txq_update(sc, qnum, &qi);
2555 if (ret)
2556 DPRINTF(sc, ATH_DBG_FATAL, "TXQ Update failed\n");
2557
2558 mutex_unlock(&sc->mutex);
2559
2560 return ret;
2561}
2562
2563static int ath9k_set_key(struct ieee80211_hw *hw,
2564 enum set_key_cmd cmd,
2565 struct ieee80211_vif *vif,
2566 struct ieee80211_sta *sta,
2567 struct ieee80211_key_conf *key)
2568{
2569 struct ath_wiphy *aphy = hw->priv;
2570 struct ath_softc *sc = aphy->sc;
2571 int ret = 0;
2572
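	/*
	 * With nohwcrypt set, returning an error from set_key makes
	 * mac80211 fall back to software encryption for this key
	 * instead of using the hardware key cache.
	 */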
2573 if (modparam_nohwcrypt)
2574 return -ENOSPC;
2575
2576 mutex_lock(&sc->mutex);
2577 ath9k_ps_wakeup(sc);
2578 DPRINTF(sc, ATH_DBG_CONFIG, "Set HW Key\n");
2579
2580 switch (cmd) {
2581 case SET_KEY:
2582 ret = ath_key_config(sc, vif, sta, key);
2583 if (ret >= 0) {
2584 key->hw_key_idx = ret;
2585 /* push IV and Michael MIC generation to stack */
2586 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
2587 if (key->alg == ALG_TKIP)
2588 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
2589 if (sc->sc_ah->sw_mgmt_crypto && key->alg == ALG_CCMP)
2590 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
2591 ret = 0;
2592 }
2593 break;
2594 case DISABLE_KEY:
2595 ath_key_delete(sc, key);
2596 break;
2597 default:
2598 ret = -EINVAL;
2599 }
2600
2601 ath9k_ps_restore(sc);
2602 mutex_unlock(&sc->mutex);
2603
2604 return ret;
2605}
2606
2607static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2608 struct ieee80211_vif *vif,
2609 struct ieee80211_bss_conf *bss_conf,
2610 u32 changed)
2611{
2612 struct ath_wiphy *aphy = hw->priv;
2613 struct ath_softc *sc = aphy->sc;
2614
2615 mutex_lock(&sc->mutex);
2616
2617 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
2618 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
2619 bss_conf->use_short_preamble);
2620 if (bss_conf->use_short_preamble)
2621 sc->sc_flags |= SC_OP_PREAMBLE_SHORT;
2622 else
2623 sc->sc_flags &= ~SC_OP_PREAMBLE_SHORT;
2624 }
2625
2626 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
2627 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
2628 bss_conf->use_cts_prot);
2629 if (bss_conf->use_cts_prot &&
2630 hw->conf.channel->band != IEEE80211_BAND_5GHZ)
2631 sc->sc_flags |= SC_OP_PROTECT_ENABLE;
2632 else
2633 sc->sc_flags &= ~SC_OP_PROTECT_ENABLE;
2634 }
2635
2636 if (changed & BSS_CHANGED_ASSOC) {
2637 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
2638 bss_conf->assoc);
2639 ath9k_bss_assoc_info(sc, vif, bss_conf);
2640 }
2641
2642 mutex_unlock(&sc->mutex);
2643}
2644
2645static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
2646{
2647 u64 tsf;
2648 struct ath_wiphy *aphy = hw->priv;
2649 struct ath_softc *sc = aphy->sc;
2650
2651 mutex_lock(&sc->mutex);
2652 tsf = ath9k_hw_gettsf64(sc->sc_ah);
2653 mutex_unlock(&sc->mutex);
2654
2655 return tsf;
2656}
2657
2658static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
2659{
2660 struct ath_wiphy *aphy = hw->priv;
2661 struct ath_softc *sc = aphy->sc;
2662
2663 mutex_lock(&sc->mutex);
2664 ath9k_hw_settsf64(sc->sc_ah, tsf);
2665 mutex_unlock(&sc->mutex);
2666}
2667
2668static void ath9k_reset_tsf(struct ieee80211_hw *hw)
2669{
2670 struct ath_wiphy *aphy = hw->priv;
2671 struct ath_softc *sc = aphy->sc;
2672
2673 mutex_lock(&sc->mutex);
2674 ath9k_hw_reset_tsf(sc->sc_ah);
2675 mutex_unlock(&sc->mutex);
2676}
2677
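/*
 * A-MPDU setup/teardown notifications from mac80211. Note that the
 * TX_STOP case below completes the teardown via
 * ieee80211_stop_tx_ba_cb_irqsafe() even if ath_tx_aggr_stop() failed,
 * so mac80211's block-ack state machine always advances, while
 * TX_START only signals its callback on successful setup.
 */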
2678static int ath9k_ampdu_action(struct ieee80211_hw *hw,
2679 enum ieee80211_ampdu_mlme_action action,
2680 struct ieee80211_sta *sta,
2681 u16 tid, u16 *ssn)
2682{
2683 struct ath_wiphy *aphy = hw->priv;
2684 struct ath_softc *sc = aphy->sc;
2685 int ret = 0;
2686
2687 switch (action) {
2688 case IEEE80211_AMPDU_RX_START:
2689 if (!(sc->sc_flags & SC_OP_RXAGGR))
2690 ret = -ENOTSUPP;
2691 break;
2692 case IEEE80211_AMPDU_RX_STOP:
2693 break;
2694 case IEEE80211_AMPDU_TX_START:
2695 ret = ath_tx_aggr_start(sc, sta, tid, ssn);
2696 if (ret < 0)
2697 DPRINTF(sc, ATH_DBG_FATAL,
2698 "Unable to start TX aggregation\n");
2699 else
2700 ieee80211_start_tx_ba_cb_irqsafe(hw, sta->addr, tid);
2701 break;
2702 case IEEE80211_AMPDU_TX_STOP:
2703 ret = ath_tx_aggr_stop(sc, sta, tid);
2704 if (ret < 0)
2705 DPRINTF(sc, ATH_DBG_FATAL,
2706 "Unable to stop TX aggregation\n");
2707
2708 ieee80211_stop_tx_ba_cb_irqsafe(hw, sta->addr, tid);
2709 break;
2710 case IEEE80211_AMPDU_TX_OPERATIONAL:
2711 ath_tx_aggr_resume(sc, sta, tid);
2712 break;
2713 default:
2714 DPRINTF(sc, ATH_DBG_FATAL, "Unknown AMPDU action\n");
2715 }
2716
2717 return ret;
2718}
2719
2720static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
2721{
2722 struct ath_wiphy *aphy = hw->priv;
2723 struct ath_softc *sc = aphy->sc;
2724
2725 if (ath9k_wiphy_scanning(sc)) {
2726 printk(KERN_DEBUG "ath9k: Two wiphys trying to scan at the "
2727 "same time\n");
2728 /*
2729 * Do not allow the concurrent scanning state for now. This
2730 * could be improved with scanning control moved into ath9k.
2731 */
2732 return;
2733 }
2734
2735 aphy->state = ATH_WIPHY_SCAN;
2736 ath9k_wiphy_pause_all_forced(sc, aphy);
2737
2738 mutex_lock(&sc->mutex);
2739 sc->sc_flags |= SC_OP_SCANNING;
2740 mutex_unlock(&sc->mutex);
2741}
2742
2743static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
2744{
2745 struct ath_wiphy *aphy = hw->priv;
2746 struct ath_softc *sc = aphy->sc;
2747
2748 mutex_lock(&sc->mutex);
2749 aphy->state = ATH_WIPHY_ACTIVE;
2750 sc->sc_flags &= ~SC_OP_SCANNING;
2751 mutex_unlock(&sc->mutex);
2752}
2753
2754struct ieee80211_ops ath9k_ops = {
2755 .tx = ath9k_tx,
2756 .start = ath9k_start,
2757 .stop = ath9k_stop,
2758 .add_interface = ath9k_add_interface,
2759 .remove_interface = ath9k_remove_interface,
2760 .config = ath9k_config,
2761 .config_interface = ath9k_config_interface,
2762 .configure_filter = ath9k_configure_filter,
2763 .sta_notify = ath9k_sta_notify,
2764 .conf_tx = ath9k_conf_tx,
2765 .bss_info_changed = ath9k_bss_info_changed,
2766 .set_key = ath9k_set_key,
2767 .get_tsf = ath9k_get_tsf,
2768 .set_tsf = ath9k_set_tsf,
2769 .reset_tsf = ath9k_reset_tsf,
2770 .ampdu_action = ath9k_ampdu_action,
2771 .sw_scan_start = ath9k_sw_scan_start,
2772 .sw_scan_complete = ath9k_sw_scan_complete,
2773};
2774
2775static struct {
2776 u32 version;
2777 const char *name;
2778} ath_mac_bb_names[] = {
2779 { AR_SREV_VERSION_5416_PCI, "5416" },
2780 { AR_SREV_VERSION_5416_PCIE, "5418" },
2781 { AR_SREV_VERSION_9100, "9100" },
2782 { AR_SREV_VERSION_9160, "9160" },
2783 { AR_SREV_VERSION_9280, "9280" },
2784 { AR_SREV_VERSION_9285, "9285" }
2785};
2786
2787static struct {
2788 u16 version;
2789 const char *name;
2790} ath_rf_names[] = {
2791 { 0, "5133" },
2792 { AR_RAD5133_SREV_MAJOR, "5133" },
2793 { AR_RAD5122_SREV_MAJOR, "5122" },
2794 { AR_RAD2133_SREV_MAJOR, "2133" },
2795 { AR_RAD2122_SREV_MAJOR, "2122" }
2796};
2797
2798/*
2799 * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
2800 */
2801const char *
2802ath_mac_bb_name(u32 mac_bb_version)
2803{
2804 int i;
2805
2806 for (i = 0; i < ARRAY_SIZE(ath_mac_bb_names); i++) {
2807 if (ath_mac_bb_names[i].version == mac_bb_version) {
2808 return ath_mac_bb_names[i].name;
2809 }
2810 }
2811
2812 return "????";
2813}
2814
2815/*
2816 * Return the RF name. "????" is returned if the RF is unknown.
2817 */
2818const char *
2819ath_rf_name(u16 rf_version)
2820{
2821 int i;
2822
2823 for (i = 0; i < ARRAY_SIZE(ath_rf_names); i++) {
2824 if (ath_rf_names[i].version == rf_version) {
2825 return ath_rf_names[i].name;
2826 }
2827 }
2828
2829 return "????";
2830}
2831
2832static int __init ath9k_init(void)
2833{
2834 int error;
2835
2836 /* Register rate control algorithm */
2837 error = ath_rate_control_register();
2838 if (error != 0) {
2839 printk(KERN_ERR
2840 "ath9k: Unable to register rate control "
2841 "algorithm: %d\n",
2842 error);
2843 goto err_out;
2844 }
2845
2846 error = ath9k_debug_create_root();
2847 if (error) {
2848 printk(KERN_ERR
2849 "ath9k: Unable to create debugfs root: %d\n",
2850 error);
2851 goto err_rate_unregister;
2852 }
2853
2854 error = ath_pci_init();
2855 if (error < 0) {
2856 printk(KERN_ERR
2857 "ath9k: No PCI devices found, driver not installed.\n");
2858 error = -ENODEV;
2859 goto err_remove_root;
2860 }
2861
2862 error = ath_ahb_init();
2863 if (error < 0) {
2864 error = -ENODEV;
2865 goto err_pci_exit;
2866 }
2867
2868 return 0;
2869
2870 err_pci_exit:
2871 ath_pci_exit();
2872
2873 err_remove_root:
2874 ath9k_debug_remove_root();
2875 err_rate_unregister:
2876 ath_rate_control_unregister();
2877 err_out:
2878 return error;
2879}
2880module_init(ath9k_init);
2881
2882static void __exit ath9k_exit(void)
2883{
2884 ath_ahb_exit();
2885 ath_pci_exit();
2886 ath9k_debug_remove_root();
2887 ath_rate_control_unregister();
2888 printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
2889}
2890module_exit(ath9k_exit);