author		Sujith <Sujith.Manoharan@atheros.com>		2010-01-08 00:06:02 -0500
committer	John W. Linville <linville@tuxdriver.com>	2010-01-12 14:02:05 -0500
commit		556242049cc3992d0ee625e9f15c4b00ea4baac8
tree		9caceb7ff16572224efd0378c7bfd7505a2fa7e6	/drivers/net/wireless/ath/ath9k/main.c
parent		0fca65c1c0569d6a143e978b6f4974c519033e63
ath9k: Add new file init.c
Move initialization/de-initialization related code to this file.

Signed-off-by: Sujith <Sujith.Manoharan@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/ath/ath9k/main.c')
 drivers/net/wireless/ath/ath9k/main.c (-rw-r--r--) | 842
 1 file changed, 3 insertions(+), 839 deletions(-)
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 31a33cf762da..48bd5d50f4d6 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -18,118 +18,6 @@
 #include "ath9k.h"
 #include "btcoex.h"
 
-static char *dev_info = "ath9k";
-
-MODULE_AUTHOR("Atheros Communications");
-MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
-MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
-MODULE_LICENSE("Dual BSD/GPL");
-
-static int modparam_nohwcrypt;
-module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
-MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
-
-static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
-module_param_named(debug, ath9k_debug, uint, 0);
-MODULE_PARM_DESC(debug, "Debugging mask");
-
-/* We use the hw_value as an index into our private channel structure */
-
-#define CHAN2G(_freq, _idx)  { \
-	.center_freq = (_freq), \
-	.hw_value = (_idx), \
-	.max_power = 20, \
-}
-
-#define CHAN5G(_freq, _idx) { \
-	.band = IEEE80211_BAND_5GHZ, \
-	.center_freq = (_freq), \
-	.hw_value = (_idx), \
-	.max_power = 20, \
-}
-
-/* Some 2 GHz radios are actually tunable on 2312-2732
- * on 5 MHz steps, we support the channels which we know
- * we have calibration data for all cards though to make
- * this static */
-static struct ieee80211_channel ath9k_2ghz_chantable[] = {
-	CHAN2G(2412, 0), /* Channel 1 */
-	CHAN2G(2417, 1), /* Channel 2 */
-	CHAN2G(2422, 2), /* Channel 3 */
-	CHAN2G(2427, 3), /* Channel 4 */
-	CHAN2G(2432, 4), /* Channel 5 */
-	CHAN2G(2437, 5), /* Channel 6 */
-	CHAN2G(2442, 6), /* Channel 7 */
-	CHAN2G(2447, 7), /* Channel 8 */
-	CHAN2G(2452, 8), /* Channel 9 */
-	CHAN2G(2457, 9), /* Channel 10 */
-	CHAN2G(2462, 10), /* Channel 11 */
-	CHAN2G(2467, 11), /* Channel 12 */
-	CHAN2G(2472, 12), /* Channel 13 */
-	CHAN2G(2484, 13), /* Channel 14 */
-};
-
-/* Some 5 GHz radios are actually tunable on XXXX-YYYY
- * on 5 MHz steps, we support the channels which we know
- * we have calibration data for all cards though to make
- * this static */
-static struct ieee80211_channel ath9k_5ghz_chantable[] = {
-	/* _We_ call this UNII 1 */
-	CHAN5G(5180, 14), /* Channel 36 */
-	CHAN5G(5200, 15), /* Channel 40 */
-	CHAN5G(5220, 16), /* Channel 44 */
-	CHAN5G(5240, 17), /* Channel 48 */
-	/* _We_ call this UNII 2 */
-	CHAN5G(5260, 18), /* Channel 52 */
-	CHAN5G(5280, 19), /* Channel 56 */
-	CHAN5G(5300, 20), /* Channel 60 */
-	CHAN5G(5320, 21), /* Channel 64 */
-	/* _We_ call this "Middle band" */
-	CHAN5G(5500, 22), /* Channel 100 */
-	CHAN5G(5520, 23), /* Channel 104 */
-	CHAN5G(5540, 24), /* Channel 108 */
-	CHAN5G(5560, 25), /* Channel 112 */
-	CHAN5G(5580, 26), /* Channel 116 */
-	CHAN5G(5600, 27), /* Channel 120 */
-	CHAN5G(5620, 28), /* Channel 124 */
-	CHAN5G(5640, 29), /* Channel 128 */
-	CHAN5G(5660, 30), /* Channel 132 */
-	CHAN5G(5680, 31), /* Channel 136 */
-	CHAN5G(5700, 32), /* Channel 140 */
-	/* _We_ call this UNII 3 */
-	CHAN5G(5745, 33), /* Channel 149 */
-	CHAN5G(5765, 34), /* Channel 153 */
-	CHAN5G(5785, 35), /* Channel 157 */
-	CHAN5G(5805, 36), /* Channel 161 */
-	CHAN5G(5825, 37), /* Channel 165 */
-};
-
-/* Atheros hardware rate code addition for short premble */
-#define SHPCHECK(__hw_rate, __flags) \
-	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
-
-#define RATE(_bitrate, _hw_rate, _flags) { \
-	.bitrate = (_bitrate), \
-	.flags = (_flags), \
-	.hw_value = (_hw_rate), \
-	.hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
-}
-
-static struct ieee80211_rate ath9k_legacy_rates[] = {
-	RATE(10, 0x1b, 0),
-	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
-	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
-	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
-	RATE(60, 0x0b, 0),
-	RATE(90, 0x0f, 0),
-	RATE(120, 0x0a, 0),
-	RATE(180, 0x0e, 0),
-	RATE(240, 0x09, 0),
-	RATE(360, 0x0d, 0),
-	RATE(480, 0x08, 0),
-	RATE(540, 0x0c, 0),
-};
-
 static void ath_cache_conf_rate(struct ath_softc *sc,
 				struct ieee80211_conf *conf)
 {
@@ -221,7 +109,7 @@ static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc,
 	return channel;
 }
 
-static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
+bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
 {
 	unsigned long flags;
 	bool ret;
@@ -349,7 +237,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
  * When the task is complete, it reschedules itself depending on the
  * appropriate interval that was calculated.
  */
-static void ath_ani_calibrate(unsigned long data)
+void ath_ani_calibrate(unsigned long data)
 {
 	struct ath_softc *sc = (struct ath_softc *)data;
 	struct ath_hw *ah = sc->sc_ah;
@@ -504,7 +392,7 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
 	ath_tx_node_cleanup(sc, an);
 }
 
-static void ath9k_tasklet(unsigned long data)
+void ath9k_tasklet(unsigned long data)
 {
 	struct ath_softc *sc = (struct ath_softc *)data;
 	struct ath_hw *ah = sc->sc_ah;
@@ -924,44 +812,6 @@ static void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf
 	}
 }
 
-static void setup_ht_cap(struct ath_softc *sc,
-			 struct ieee80211_sta_ht_cap *ht_info)
-{
-	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-	u8 tx_streams, rx_streams;
-
-	ht_info->ht_supported = true;
-	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
-		       IEEE80211_HT_CAP_SM_PS |
-		       IEEE80211_HT_CAP_SGI_40 |
-		       IEEE80211_HT_CAP_DSSSCCK40;
-
-	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
-	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
-
-	/* set up supported mcs set */
-	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
-	tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
-		     1 : 2;
-	rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
-		     1 : 2;
-
-	if (tx_streams != rx_streams) {
-		ath_print(common, ATH_DBG_CONFIG,
-			  "TX streams %d, RX streams: %d\n",
-			  tx_streams, rx_streams);
-		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
-		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
-					   IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
-	}
-
-	ht_info->mcs.rx_mask[0] = 0xff;
-	if (rx_streams >= 2)
-		ht_info->mcs.rx_mask[1] = 0xff;
-
-	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
-}
-
 static void ath9k_bss_assoc_info(struct ath_softc *sc,
 				 struct ieee80211_vif *vif,
 				 struct ieee80211_bss_conf *bss_conf)
@@ -1084,513 +934,6 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
 	ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
 }
 
-static void ath9k_uninit_hw(struct ath_softc *sc)
-{
-	struct ath_hw *ah = sc->sc_ah;
-
-	BUG_ON(!ah);
-
-	ath9k_exit_debug(ah);
-	ath9k_hw_detach(ah);
-	sc->sc_ah = NULL;
-}
-
-static void ath_clean_core(struct ath_softc *sc)
-{
-	struct ieee80211_hw *hw = sc->hw;
-	struct ath_hw *ah = sc->sc_ah;
-	int i = 0;
-
-	ath9k_ps_wakeup(sc);
-
-	dev_dbg(sc->dev, "Detach ATH hw\n");
-
-	ath_deinit_leds(sc);
-	wiphy_rfkill_stop_polling(sc->hw->wiphy);
-
-	for (i = 0; i < sc->num_sec_wiphy; i++) {
-		struct ath_wiphy *aphy = sc->sec_wiphy[i];
-		if (aphy == NULL)
-			continue;
-		sc->sec_wiphy[i] = NULL;
-		ieee80211_unregister_hw(aphy->hw);
-		ieee80211_free_hw(aphy->hw);
-	}
-	ieee80211_unregister_hw(hw);
-	ath_rx_cleanup(sc);
-	ath_tx_cleanup(sc);
-
-	tasklet_kill(&sc->intr_tq);
-	tasklet_kill(&sc->bcon_tasklet);
-
-	if (!(sc->sc_flags & SC_OP_INVALID))
-		ath9k_setpower(sc, ATH9K_PM_AWAKE);
-
-	/* cleanup tx queues */
-	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
-		if (ATH_TXQ_SETUP(sc, i))
-			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
-
-	if ((sc->btcoex.no_stomp_timer) &&
-	    ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
-		ath_gen_timer_free(ah, sc->btcoex.no_stomp_timer);
-}
-
-void ath_detach(struct ath_softc *sc)
-{
-	ath_clean_core(sc);
-	ath9k_uninit_hw(sc);
-}
-
-void ath_cleanup(struct ath_softc *sc)
-{
-	struct ath_hw *ah = sc->sc_ah;
-	struct ath_common *common = ath9k_hw_common(ah);
-
-	ath_clean_core(sc);
-	free_irq(sc->irq, sc);
-	ath_bus_cleanup(common);
-	kfree(sc->sec_wiphy);
-	ieee80211_free_hw(sc->hw);
-
-	ath9k_uninit_hw(sc);
-}
-
-static int ath9k_reg_notifier(struct wiphy *wiphy,
-			      struct regulatory_request *request)
-{
-	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
-	struct ath_wiphy *aphy = hw->priv;
-	struct ath_softc *sc = aphy->sc;
-	struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
-
-	return ath_reg_notifier_apply(wiphy, request, reg);
-}
-
-/*
- * Read and write, they both share the same lock. We do this to serialize
- * reads and writes on Atheros 802.11n PCI devices only. This is required
- * as the FIFO on these devices can only accept sanely 2 requests. After
- * that the device goes bananas. Serializing the reads/writes prevents this
- * from happening.
- */
-
-static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
-{
-	struct ath_hw *ah = (struct ath_hw *) hw_priv;
-	struct ath_common *common = ath9k_hw_common(ah);
-	struct ath_softc *sc = (struct ath_softc *) common->priv;
-
-	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
-		unsigned long flags;
-		spin_lock_irqsave(&sc->sc_serial_rw, flags);
-		iowrite32(val, sc->mem + reg_offset);
-		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
-	} else
-		iowrite32(val, sc->mem + reg_offset);
-}
-
-static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
-{
-	struct ath_hw *ah = (struct ath_hw *) hw_priv;
-	struct ath_common *common = ath9k_hw_common(ah);
-	struct ath_softc *sc = (struct ath_softc *) common->priv;
-	u32 val;
-
-	if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
-		unsigned long flags;
-		spin_lock_irqsave(&sc->sc_serial_rw, flags);
-		val = ioread32(sc->mem + reg_offset);
-		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
-	} else
-		val = ioread32(sc->mem + reg_offset);
-	return val;
-}
-
-static const struct ath_ops ath9k_common_ops = {
-	.read = ath9k_ioread32,
-	.write = ath9k_iowrite32,
-};
-
-/*
- * Initialize and fill ath_softc, ath_sofct is the
- * "Software Carrier" struct. Historically it has existed
- * to allow the separation between hardware specific
- * variables (now in ath_hw) and driver specific variables.
- */
-static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
-			  const struct ath_bus_ops *bus_ops)
-{
-	struct ath_hw *ah = NULL;
-	struct ath_common *common;
-	int r = 0, i;
-	int csz = 0;
-	int qnum;
-
-	/* XXX: hardware will not be ready until ath_open() being called */
-	sc->sc_flags |= SC_OP_INVALID;
-
-	spin_lock_init(&sc->wiphy_lock);
-	spin_lock_init(&sc->sc_resetlock);
-	spin_lock_init(&sc->sc_serial_rw);
-	spin_lock_init(&sc->sc_pm_lock);
-	mutex_init(&sc->mutex);
-	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
-	tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
-		     (unsigned long)sc);
-
-	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
-	if (!ah)
-		return -ENOMEM;
-
-	ah->hw_version.devid = devid;
-	ah->hw_version.subsysid = subsysid;
-	sc->sc_ah = ah;
-
-	common = ath9k_hw_common(ah);
-	common->ops = &ath9k_common_ops;
-	common->bus_ops = bus_ops;
-	common->ah = ah;
-	common->hw = sc->hw;
-	common->priv = sc;
-	common->debug_mask = ath9k_debug;
-
-	/*
-	 * Cache line size is used to size and align various
-	 * structures used to communicate with the hardware.
-	 */
-	ath_read_cachesize(common, &csz);
-	/* XXX assert csz is non-zero */
-	common->cachelsz = csz << 2; /* convert to bytes */
-
-	r = ath9k_hw_init(ah);
-	if (r) {
-		ath_print(common, ATH_DBG_FATAL,
-			  "Unable to initialize hardware; "
-			  "initialization status: %d\n", r);
-		goto bad_free_hw;
-	}
-
-	if (ath9k_init_debug(ah) < 0) {
-		ath_print(common, ATH_DBG_FATAL,
-			  "Unable to create debugfs files\n");
-		goto bad_free_hw;
-	}
-
-	/* Get the hardware key cache size. */
-	common->keymax = ah->caps.keycache_size;
-	if (common->keymax > ATH_KEYMAX) {
-		ath_print(common, ATH_DBG_ANY,
-			  "Warning, using only %u entries in %u key cache\n",
-			  ATH_KEYMAX, common->keymax);
-		common->keymax = ATH_KEYMAX;
-	}
-
-	/*
-	 * Reset the key cache since some parts do not
-	 * reset the contents on initial power up.
-	 */
-	for (i = 0; i < common->keymax; i++)
-		ath9k_hw_keyreset(ah, (u16) i);
-
-	/* default to MONITOR mode */
-	sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
-
-	/*
-	 * Allocate hardware transmit queues: one queue for
-	 * beacon frames and one data queue for each QoS
-	 * priority. Note that the hal handles reseting
-	 * these queues at the needed time.
-	 */
-	sc->beacon.beaconq = ath9k_hw_beaconq_setup(ah);
-	if (sc->beacon.beaconq == -1) {
-		ath_print(common, ATH_DBG_FATAL,
-			  "Unable to setup a beacon xmit queue\n");
-		r = -EIO;
-		goto bad2;
-	}
-	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
-	if (sc->beacon.cabq == NULL) {
-		ath_print(common, ATH_DBG_FATAL,
-			  "Unable to setup CAB xmit queue\n");
-		r = -EIO;
-		goto bad2;
-	}
-
-	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
-	ath_cabq_update(sc);
-
-	for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
-		sc->tx.hwq_map[i] = -1;
-
-	/* Setup data queues */
-	/* NB: ensure BK queue is the lowest priority h/w queue */
-	if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
-		ath_print(common, ATH_DBG_FATAL,
-			  "Unable to setup xmit queue for BK traffic\n");
-		r = -EIO;
-		goto bad2;
-	}
-
-	if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
-		ath_print(common, ATH_DBG_FATAL,
-			  "Unable to setup xmit queue for BE traffic\n");
-		r = -EIO;
-		goto bad2;
-	}
-	if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
-		ath_print(common, ATH_DBG_FATAL,
-			  "Unable to setup xmit queue for VI traffic\n");
-		r = -EIO;
-		goto bad2;
-	}
-	if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
-		ath_print(common, ATH_DBG_FATAL,
-			  "Unable to setup xmit queue for VO traffic\n");
-		r = -EIO;
-		goto bad2;
-	}
-
-	/* Initializes the noise floor to a reasonable default value.
-	 * Later on this will be updated during ANI processing. */
-
-	common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
-	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
-
-	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
-				   ATH9K_CIPHER_TKIP, NULL)) {
-		/*
-		 * Whether we should enable h/w TKIP MIC.
-		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
-		 * report WMM capable, so it's always safe to turn on
-		 * TKIP MIC in this case.
-		 */
-		ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
-				       0, 1, NULL);
-	}
-
-	/*
-	 * Check whether the separate key cache entries
-	 * are required to handle both tx+rx MIC keys.
-	 * With split mic keys the number of stations is limited
-	 * to 27 otherwise 59.
-	 */
-	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
-				   ATH9K_CIPHER_TKIP, NULL)
-	    && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
-				      ATH9K_CIPHER_MIC, NULL)
-	    && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
-				      0, NULL))
-		common->splitmic = 1;
-
-	/* turn on mcast key search if possible */
-	if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
-		(void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
-					     1, NULL);
-
-	sc->config.txpowlimit = ATH_TXPOWER_MAX;
-
-	/* 11n Capabilities */
-	if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
-		sc->sc_flags |= SC_OP_TXAGGR;
-		sc->sc_flags |= SC_OP_RXAGGR;
-	}
-
-	common->tx_chainmask = ah->caps.tx_chainmask;
-	common->rx_chainmask = ah->caps.rx_chainmask;
-
-	ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
-	sc->rx.defant = ath9k_hw_getdefantenna(ah);
-
-	if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
-		memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
-
-	sc->beacon.slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
-
-	/* initialize beacon slots */
-	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
-		sc->beacon.bslot[i] = NULL;
-		sc->beacon.bslot_aphy[i] = NULL;
-	}
-
-	/* setup channels and rates */
-
-	if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
-		sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
-		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
-		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
-			ARRAY_SIZE(ath9k_2ghz_chantable);
-		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
-		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
-			ARRAY_SIZE(ath9k_legacy_rates);
-	}
-
-	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
-		sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
-		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
-		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
-			ARRAY_SIZE(ath9k_5ghz_chantable);
-		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
-			ath9k_legacy_rates + 4;
-		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
-			ARRAY_SIZE(ath9k_legacy_rates) - 4;
-	}
-
-	switch (ah->btcoex_hw.scheme) {
-	case ATH_BTCOEX_CFG_NONE:
-		break;
-	case ATH_BTCOEX_CFG_2WIRE:
-		ath9k_hw_btcoex_init_2wire(ah);
-		break;
-	case ATH_BTCOEX_CFG_3WIRE:
-		ath9k_hw_btcoex_init_3wire(ah);
-		r = ath_init_btcoex_timer(sc);
-		if (r)
-			goto bad2;
-		qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
-		ath9k_hw_init_btcoex_hw(ah, qnum);
-		sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
-		break;
-	default:
-		WARN_ON(1);
-		break;
-	}
-
-	return 0;
-bad2:
-	/* cleanup tx queues */
-	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
-		if (ATH_TXQ_SETUP(sc, i))
-			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
-
-bad_free_hw:
-	ath9k_uninit_hw(sc);
-	return r;
-}
-
-void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
-{
-	struct ath_hw *ah = sc->sc_ah;
-
-	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
-		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
-		IEEE80211_HW_SIGNAL_DBM |
-		IEEE80211_HW_AMPDU_AGGREGATION |
-		IEEE80211_HW_SUPPORTS_PS |
-		IEEE80211_HW_PS_NULLFUNC_STACK |
-		IEEE80211_HW_SPECTRUM_MGMT;
-
-	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
-		hw->flags |= IEEE80211_HW_MFP_CAPABLE;
-
-	hw->wiphy->interface_modes =
-		BIT(NL80211_IFTYPE_AP) |
-		BIT(NL80211_IFTYPE_STATION) |
-		BIT(NL80211_IFTYPE_ADHOC) |
-		BIT(NL80211_IFTYPE_MESH_POINT);
-
-	if (AR_SREV_5416(ah))
-		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-
-	hw->queues = 4;
-	hw->max_rates = 4;
-	hw->channel_change_time = 5000;
-	hw->max_listen_interval = 10;
-	/* Hardware supports 10 but we use 4 */
-	hw->max_rate_tries = 4;
-	hw->sta_data_size = sizeof(struct ath_node);
-	hw->vif_data_size = sizeof(struct ath_vif);
-
-	hw->rate_control_algorithm = "ath9k_rate_control";
-
-	if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
-		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
-			&sc->sbands[IEEE80211_BAND_2GHZ];
-	if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
-		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
-			&sc->sbands[IEEE80211_BAND_5GHZ];
-}
-
-/* Device driver core initialization */
-int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
-		    const struct ath_bus_ops *bus_ops)
-{
-	struct ieee80211_hw *hw = sc->hw;
-	struct ath_common *common;
-	struct ath_hw *ah;
-	int error = 0, i;
-	struct ath_regulatory *reg;
-
-	dev_dbg(sc->dev, "Attach ATH hw\n");
-
-	error = ath_init_softc(devid, sc, subsysid, bus_ops);
-	if (error != 0)
-		return error;
-
-	ah = sc->sc_ah;
-	common = ath9k_hw_common(ah);
-
-	/* get mac address from hardware and set in mac80211 */
-
-	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
-
-	ath_set_hw_capab(sc, hw);
-
-	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
-			      ath9k_reg_notifier);
-	if (error)
-		return error;
-
-	reg = &common->regulatory;
-
-	if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
-		if (test_bit(ATH9K_MODE_11G, ah->caps.wireless_modes))
-			setup_ht_cap(sc,
-				     &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
-		if (test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes))
-			setup_ht_cap(sc,
-				     &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
-	}
-
-	/* initialize tx/rx engine */
-	error = ath_tx_init(sc, ATH_TXBUF);
-	if (error != 0)
-		goto error_attach;
-
-	error = ath_rx_init(sc, ATH_RXBUF);
-	if (error != 0)
-		goto error_attach;
-
-	INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
-	INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
-	sc->wiphy_scheduler_int = msecs_to_jiffies(500);
-
-	error = ieee80211_register_hw(hw);
-
-	if (!ath_is_world_regd(reg)) {
-		error = regulatory_hint(hw->wiphy, reg->alpha2);
-		if (error)
-			goto error_attach;
-	}
-
-	/* Initialize LED control */
-	ath_init_leds(sc);
-
-	ath_start_rfkill_poll(sc);
-
-	return 0;
-
-error_attach:
-	/* cleanup tx queues */
-	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
-		if (ATH_TXQ_SETUP(sc, i))
-			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
-
-	ath9k_uninit_hw(sc);
-
-	return error;
-}
-
 int ath_reset(struct ath_softc *sc, bool retry_tx)
 {
 	struct ath_hw *ah = sc->sc_ah;
@@ -1648,125 +991,6 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
 	return r;
 }
 
-/*
- * This function will allocate both the DMA descriptor structure, and the
- * buffers it contains.  These are used to contain the descriptors used
- * by the system.
-*/
-int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
-		      struct list_head *head, const char *name,
-		      int nbuf, int ndesc)
-{
-#define DS2PHYS(_dd, _ds) \
-	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
-#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
-#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
-	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-	struct ath_desc *ds;
-	struct ath_buf *bf;
-	int i, bsize, error;
-
-	ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
-		  name, nbuf, ndesc);
-
-	INIT_LIST_HEAD(head);
-	/* ath_desc must be a multiple of DWORDs */
-	if ((sizeof(struct ath_desc) % 4) != 0) {
-		ath_print(common, ATH_DBG_FATAL,
-			  "ath_desc not DWORD aligned\n");
-		BUG_ON((sizeof(struct ath_desc) % 4) != 0);
-		error = -ENOMEM;
-		goto fail;
-	}
-
-	dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
-
-	/*
-	 * Need additional DMA memory because we can't use
-	 * descriptors that cross the 4K page boundary. Assume
-	 * one skipped descriptor per 4K page.
-	 */
-	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
-		u32 ndesc_skipped =
-			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
-		u32 dma_len;
-
-		while (ndesc_skipped) {
-			dma_len = ndesc_skipped * sizeof(struct ath_desc);
-			dd->dd_desc_len += dma_len;
-
-			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
-		};
-	}
-
-	/* allocate descriptors */
-	dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
-					 &dd->dd_desc_paddr, GFP_KERNEL);
-	if (dd->dd_desc == NULL) {
-		error = -ENOMEM;
-		goto fail;
-	}
-	ds = dd->dd_desc;
-	ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
-		  name, ds, (u32) dd->dd_desc_len,
-		  ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
-
-	/* allocate buffers */
-	bsize = sizeof(struct ath_buf) * nbuf;
-	bf = kzalloc(bsize, GFP_KERNEL);
-	if (bf == NULL) {
-		error = -ENOMEM;
-		goto fail2;
-	}
-	dd->dd_bufptr = bf;
-
-	for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
-		bf->bf_desc = ds;
-		bf->bf_daddr = DS2PHYS(dd, ds);
-
-		if (!(sc->sc_ah->caps.hw_caps &
-		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
-			/*
-			 * Skip descriptor addresses which can cause 4KB
-			 * boundary crossing (addr + length) with a 32 dword
-			 * descriptor fetch.
-			 */
-			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
-				BUG_ON((caddr_t) bf->bf_desc >=
-				       ((caddr_t) dd->dd_desc +
-					dd->dd_desc_len));
-
-				ds += ndesc;
-				bf->bf_desc = ds;
-				bf->bf_daddr = DS2PHYS(dd, ds);
-			}
-		}
-		list_add_tail(&bf->list, head);
-	}
-	return 0;
-fail2:
-	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
-			  dd->dd_desc_paddr);
-fail:
-	memset(dd, 0, sizeof(*dd));
-	return error;
-#undef ATH_DESC_4KB_BOUND_CHECK
-#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
-#undef DS2PHYS
-}
-
-void ath_descdma_cleanup(struct ath_softc *sc,
-			 struct ath_descdma *dd,
-			 struct list_head *head)
-{
-	dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
-			  dd->dd_desc_paddr);
-
-	INIT_LIST_HEAD(head);
-	kfree(dd->dd_bufptr);
-	memset(dd, 0, sizeof(*dd));
-}
-
 int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
 {
 	int qnum;
@@ -2778,63 +2002,3 @@ struct ieee80211_ops ath9k_ops = {
 	.sw_scan_complete = ath9k_sw_scan_complete,
 	.rfkill_poll = ath9k_rfkill_poll_state,
 };
-
-static int __init ath9k_init(void)
-{
-	int error;
-
-	/* Register rate control algorithm */
-	error = ath_rate_control_register();
-	if (error != 0) {
-		printk(KERN_ERR
-			"ath9k: Unable to register rate control "
-			"algorithm: %d\n",
-			error);
-		goto err_out;
-	}
-
-	error = ath9k_debug_create_root();
-	if (error) {
-		printk(KERN_ERR
-			"ath9k: Unable to create debugfs root: %d\n",
-			error);
-		goto err_rate_unregister;
-	}
-
-	error = ath_pci_init();
-	if (error < 0) {
-		printk(KERN_ERR
-			"ath9k: No PCI devices found, driver not installed.\n");
-		error = -ENODEV;
-		goto err_remove_root;
-	}
-
-	error = ath_ahb_init();
-	if (error < 0) {
-		error = -ENODEV;
-		goto err_pci_exit;
-	}
-
-	return 0;
-
- err_pci_exit:
-	ath_pci_exit();
-
- err_remove_root:
-	ath9k_debug_remove_root();
- err_rate_unregister:
-	ath_rate_control_unregister();
- err_out:
-	return error;
-}
-module_init(ath9k_init);
-
-static void __exit ath9k_exit(void)
-{
-	ath_ahb_exit();
-	ath_pci_exit();
-	ath9k_debug_remove_root();
-	ath_rate_control_unregister();
-	printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
-}
-module_exit(ath9k_exit);
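
Note: besides the bulk removals, the patch drops the static qualifier from ath9k_setpower(), ath_ani_calibrate() and ath9k_tasklet(), presumably so the initialization code moved into init.c can keep wiring them into the power-save path, the ANI calibration timer and the interrupt tasklet. The matching declarations are not part of this file's diff; a minimal sketch, assuming they are added to ath9k.h alongside this patch, would be:

	/* Assumed prototypes in ath9k.h (not shown in this diff); signatures copied from the hunks above */
	bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode);
	void ath_ani_calibrate(unsigned long data);	/* timer callback; data carries a struct ath_softc * */
	void ath9k_tasklet(unsigned long data);		/* interrupt bottom half; data carries a struct ath_softc * */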