Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl3945-base.c')
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c  | 4334
1 files changed, 0 insertions, 4334 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
deleted file mode 100644
index adcef735180..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ /dev/null
@@ -1,4334 +0,0 @@
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved. | ||
4 | * | ||
5 | * Portions of this file are derived from the ipw3945 project, as well | ||
6 | * as portions of the ieee80211 subsystem header files. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of version 2 of the GNU General Public License as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., | ||
19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA | ||
20 | * | ||
21 | * The full GNU General Public License is included in this distribution in the | ||
22 | * file called LICENSE. | ||
23 | * | ||
24 | * Contact Information: | ||
25 | * Intel Linux Wireless <ilw@linux.intel.com> | ||
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
27 | * | ||
28 | *****************************************************************************/ | ||
29 | |||
30 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
31 | |||
32 | #include <linux/kernel.h> | ||
33 | #include <linux/module.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/pci.h> | ||
36 | #include <linux/pci-aspm.h> | ||
37 | #include <linux/slab.h> | ||
38 | #include <linux/dma-mapping.h> | ||
39 | #include <linux/delay.h> | ||
40 | #include <linux/sched.h> | ||
41 | #include <linux/skbuff.h> | ||
42 | #include <linux/netdevice.h> | ||
43 | #include <linux/wireless.h> | ||
44 | #include <linux/firmware.h> | ||
45 | #include <linux/etherdevice.h> | ||
46 | #include <linux/if_arp.h> | ||
47 | |||
48 | #include <net/ieee80211_radiotap.h> | ||
49 | #include <net/mac80211.h> | ||
50 | |||
51 | #include <asm/div64.h> | ||
52 | |||
53 | #define DRV_NAME "iwl3945" | ||
54 | |||
55 | #include "iwl-fh.h" | ||
56 | #include "iwl-3945-fh.h" | ||
57 | #include "iwl-commands.h" | ||
58 | #include "iwl-sta.h" | ||
59 | #include "iwl-3945.h" | ||
60 | #include "iwl-core.h" | ||
61 | #include "iwl-helpers.h" | ||
62 | #include "iwl-dev.h" | ||
63 | #include "iwl-spectrum.h" | ||
64 | #include "iwl-legacy.h" | ||
65 | |||
66 | /* | ||
67 | * module name, copyright, version, etc. | ||
68 | */ | ||
69 | |||
70 | #define DRV_DESCRIPTION \ | ||
71 | "Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux" | ||
72 | |||
73 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
74 | #define VD "d" | ||
75 | #else | ||
76 | #define VD | ||
77 | #endif | ||
78 | |||
79 | /* | ||
80 | * add "s" to indicate spectrum measurement included. | ||
81 | * we add it here to be consistent with previous releases in which | ||
82 | * this was configurable. | ||
83 | */ | ||
84 | #define DRV_VERSION IWLWIFI_VERSION VD "s" | ||
85 | #define DRV_COPYRIGHT "Copyright(c) 2003-2010 Intel Corporation" | ||
86 | #define DRV_AUTHOR "<ilw@linux.intel.com>" | ||
87 | |||
88 | MODULE_DESCRIPTION(DRV_DESCRIPTION); | ||
89 | MODULE_VERSION(DRV_VERSION); | ||
90 | MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); | ||
91 | MODULE_LICENSE("GPL"); | ||
92 | |||
93 | /* module parameters */ | ||
94 | struct iwl_mod_params iwl3945_mod_params = { | ||
95 | .sw_crypto = 1, | ||
96 | .restart_fw = 1, | ||
97 | /* the rest are 0 by default */ | ||
98 | }; | ||
99 | |||
100 | /** | ||
101 | * iwl3945_get_antenna_flags - Get antenna flags for RXON command | ||
102 | * @priv: eeprom and antenna fields are used to determine antenna flags | ||
103 | * | ||
104 | * priv->eeprom39 is used to determine if antenna AUX/MAIN are reversed | ||
105 | * iwl3945_mod_params.antenna specifies the antenna diversity mode: | ||
106 | * | ||
107 | * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself | ||
108 | * IWL_ANTENNA_MAIN - Force MAIN antenna | ||
109 | * IWL_ANTENNA_AUX - Force AUX antenna | ||
110 | */ | ||
111 | __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv) | ||
112 | { | ||
113 | struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; | ||
114 | |||
115 | switch (iwl3945_mod_params.antenna) { | ||
116 | case IWL_ANTENNA_DIVERSITY: | ||
117 | return 0; | ||
118 | |||
119 | case IWL_ANTENNA_MAIN: | ||
120 | if (eeprom->antenna_switch_type) | ||
121 | return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; | ||
122 | return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; | ||
123 | |||
124 | case IWL_ANTENNA_AUX: | ||
125 | if (eeprom->antenna_switch_type) | ||
126 | return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; | ||
127 | return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; | ||
128 | } | ||
129 | |||
130 | /* bad antenna selector value */ | ||
131 | IWL_ERR(priv, "Bad antenna selector value (0x%x)\n", | ||
132 | iwl3945_mod_params.antenna); | ||
133 | |||
134 | return 0; /* "diversity" is default if error */ | ||
135 | } | ||
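/*
 * Summary of the mapping implemented by the switch above: the EEPROM
 * antenna_switch_type simply swaps which physical connector "MAIN" and
 * "AUX" refer to.
 *
 *   setting                 switch_type == 0       switch_type != 0
 *   IWL_ANTENNA_DIVERSITY   0 (NIC selects)        0 (NIC selects)
 *   IWL_ANTENNA_MAIN        DIS_DIV | ANT_A        DIS_DIV | ANT_B
 *   IWL_ANTENNA_AUX         DIS_DIV | ANT_B        DIS_DIV | ANT_A
 */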
136 | |||
137 | static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv, | ||
138 | struct ieee80211_key_conf *keyconf, | ||
139 | u8 sta_id) | ||
140 | { | ||
141 | unsigned long flags; | ||
142 | __le16 key_flags = 0; | ||
143 | int ret; | ||
144 | |||
145 | key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); | ||
146 | key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); | ||
147 | |||
148 | if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id) | ||
149 | key_flags |= STA_KEY_MULTICAST_MSK; | ||
150 | |||
151 | keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; | ||
152 | keyconf->hw_key_idx = keyconf->keyidx; | ||
153 | key_flags &= ~STA_KEY_FLG_INVALID; | ||
154 | |||
155 | spin_lock_irqsave(&priv->sta_lock, flags); | ||
156 | priv->stations[sta_id].keyinfo.cipher = keyconf->cipher; | ||
157 | priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; | ||
158 | memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, | ||
159 | keyconf->keylen); | ||
160 | |||
161 | memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, | ||
162 | keyconf->keylen); | ||
163 | |||
164 | if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) | ||
165 | == STA_KEY_FLG_NO_ENC) | ||
166 | priv->stations[sta_id].sta.key.key_offset = | ||
167 | iwl_get_free_ucode_key_index(priv); | ||
168 | /* else, we are overriding an existing key => no need to allocate room | ||
169 | * in uCode. */ | ||
170 | |||
171 | WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, | ||
172 | "no space for a new key"); | ||
173 | |||
174 | priv->stations[sta_id].sta.key.key_flags = key_flags; | ||
175 | priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; | ||
176 | priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; | ||
177 | |||
178 | IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n"); | ||
179 | |||
180 | ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); | ||
181 | |||
182 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
183 | |||
184 | return ret; | ||
185 | } | ||
186 | |||
187 | static int iwl3945_set_tkip_dynamic_key_info(struct iwl_priv *priv, | ||
188 | struct ieee80211_key_conf *keyconf, | ||
189 | u8 sta_id) | ||
190 | { | ||
191 | return -EOPNOTSUPP; | ||
192 | } | ||
193 | |||
194 | static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv, | ||
195 | struct ieee80211_key_conf *keyconf, | ||
196 | u8 sta_id) | ||
197 | { | ||
198 | return -EOPNOTSUPP; | ||
199 | } | ||
200 | |||
201 | static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id) | ||
202 | { | ||
203 | unsigned long flags; | ||
204 | struct iwl_addsta_cmd sta_cmd; | ||
205 | |||
206 | spin_lock_irqsave(&priv->sta_lock, flags); | ||
207 | memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key)); | ||
208 | memset(&priv->stations[sta_id].sta.key, 0, | ||
209 | sizeof(struct iwl4965_keyinfo)); | ||
210 | priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; | ||
211 | priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; | ||
212 | priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; | ||
213 | memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_addsta_cmd)); | ||
214 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
215 | |||
216 | IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n"); | ||
217 | return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); | ||
218 | } | ||
219 | |||
220 | static int iwl3945_set_dynamic_key(struct iwl_priv *priv, | ||
221 | struct ieee80211_key_conf *keyconf, u8 sta_id) | ||
222 | { | ||
223 | int ret = 0; | ||
224 | |||
225 | keyconf->hw_key_idx = HW_KEY_DYNAMIC; | ||
226 | |||
227 | switch (keyconf->cipher) { | ||
228 | case WLAN_CIPHER_SUITE_CCMP: | ||
229 | ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id); | ||
230 | break; | ||
231 | case WLAN_CIPHER_SUITE_TKIP: | ||
232 | ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id); | ||
233 | break; | ||
234 | case WLAN_CIPHER_SUITE_WEP40: | ||
235 | case WLAN_CIPHER_SUITE_WEP104: | ||
236 | ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id); | ||
237 | break; | ||
238 | default: | ||
239 | IWL_ERR(priv, "Unknown alg: %s alg=%x\n", __func__, | ||
240 | keyconf->cipher); | ||
241 | ret = -EINVAL; | ||
242 | } | ||
243 | |||
244 | IWL_DEBUG_WEP(priv, "Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n", | ||
245 | keyconf->cipher, keyconf->keylen, keyconf->keyidx, | ||
246 | sta_id, ret); | ||
247 | |||
248 | return ret; | ||
249 | } | ||
250 | |||
251 | static int iwl3945_remove_static_key(struct iwl_priv *priv) | ||
252 | { | ||
253 | int ret = -EOPNOTSUPP; | ||
254 | |||
255 | return ret; | ||
256 | } | ||
257 | |||
258 | static int iwl3945_set_static_key(struct iwl_priv *priv, | ||
259 | struct ieee80211_key_conf *key) | ||
260 | { | ||
261 | if (key->cipher == WLAN_CIPHER_SUITE_WEP40 || | ||
262 | key->cipher == WLAN_CIPHER_SUITE_WEP104) | ||
263 | return -EOPNOTSUPP; | ||
264 | |||
265 | IWL_ERR(priv, "Static key invalid: cipher %x\n", key->cipher); | ||
266 | return -EINVAL; | ||
267 | } | ||
268 | |||
269 | static void iwl3945_clear_free_frames(struct iwl_priv *priv) | ||
270 | { | ||
271 | struct list_head *element; | ||
272 | |||
273 | IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n", | ||
274 | priv->frames_count); | ||
275 | |||
276 | while (!list_empty(&priv->free_frames)) { | ||
277 | element = priv->free_frames.next; | ||
278 | list_del(element); | ||
279 | kfree(list_entry(element, struct iwl3945_frame, list)); | ||
280 | priv->frames_count--; | ||
281 | } | ||
282 | |||
283 | if (priv->frames_count) { | ||
284 | IWL_WARN(priv, "%d frames still in use. Did we lose one?\n", | ||
285 | priv->frames_count); | ||
286 | priv->frames_count = 0; | ||
287 | } | ||
288 | } | ||
289 | |||
290 | static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv) | ||
291 | { | ||
292 | struct iwl3945_frame *frame; | ||
293 | struct list_head *element; | ||
294 | if (list_empty(&priv->free_frames)) { | ||
295 | frame = kzalloc(sizeof(*frame), GFP_KERNEL); | ||
296 | if (!frame) { | ||
297 | IWL_ERR(priv, "Could not allocate frame!\n"); | ||
298 | return NULL; | ||
299 | } | ||
300 | |||
301 | priv->frames_count++; | ||
302 | return frame; | ||
303 | } | ||
304 | |||
305 | element = priv->free_frames.next; | ||
306 | list_del(element); | ||
307 | return list_entry(element, struct iwl3945_frame, list); | ||
308 | } | ||
309 | |||
310 | static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame) | ||
311 | { | ||
312 | memset(frame, 0, sizeof(*frame)); | ||
313 | list_add(&frame->list, &priv->free_frames); | ||
314 | } | ||
315 | |||
316 | unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, | ||
317 | struct ieee80211_hdr *hdr, | ||
318 | int left) | ||
319 | { | ||
320 | |||
321 | if (!iwl_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb) | ||
322 | return 0; | ||
323 | |||
324 | if (priv->beacon_skb->len > left) | ||
325 | return 0; | ||
326 | |||
327 | memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len); | ||
328 | |||
329 | return priv->beacon_skb->len; | ||
330 | } | ||
331 | |||
332 | static int iwl3945_send_beacon_cmd(struct iwl_priv *priv) | ||
333 | { | ||
334 | struct iwl3945_frame *frame; | ||
335 | unsigned int frame_size; | ||
336 | int rc; | ||
337 | u8 rate; | ||
338 | |||
339 | frame = iwl3945_get_free_frame(priv); | ||
340 | |||
341 | if (!frame) { | ||
342 | IWL_ERR(priv, "Could not obtain free frame buffer for beacon " | ||
343 | "command.\n"); | ||
344 | return -ENOMEM; | ||
345 | } | ||
346 | |||
347 | rate = iwl_rate_get_lowest_plcp(priv, | ||
348 | &priv->contexts[IWL_RXON_CTX_BSS]); | ||
349 | |||
350 | frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate); | ||
351 | |||
352 | rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, | ||
353 | &frame->u.cmd[0]); | ||
354 | |||
355 | iwl3945_free_frame(priv, frame); | ||
356 | |||
357 | return rc; | ||
358 | } | ||
359 | |||
360 | static void iwl3945_unset_hw_params(struct iwl_priv *priv) | ||
361 | { | ||
362 | if (priv->_3945.shared_virt) | ||
363 | dma_free_coherent(&priv->pci_dev->dev, | ||
364 | sizeof(struct iwl3945_shared), | ||
365 | priv->_3945.shared_virt, | ||
366 | priv->_3945.shared_phys); | ||
367 | } | ||
368 | |||
369 | static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv, | ||
370 | struct ieee80211_tx_info *info, | ||
371 | struct iwl_device_cmd *cmd, | ||
372 | struct sk_buff *skb_frag, | ||
373 | int sta_id) | ||
374 | { | ||
375 | struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload; | ||
376 | struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo; | ||
377 | |||
378 | tx_cmd->sec_ctl = 0; | ||
379 | |||
380 | switch (keyinfo->cipher) { | ||
381 | case WLAN_CIPHER_SUITE_CCMP: | ||
382 | tx_cmd->sec_ctl = TX_CMD_SEC_CCM; | ||
383 | memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen); | ||
384 | IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); | ||
385 | break; | ||
386 | |||
387 | case WLAN_CIPHER_SUITE_TKIP: | ||
388 | break; | ||
389 | |||
390 | case WLAN_CIPHER_SUITE_WEP104: | ||
391 | tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; | ||
392 | /* fall through */ | ||
393 | case WLAN_CIPHER_SUITE_WEP40: | ||
394 | tx_cmd->sec_ctl |= TX_CMD_SEC_WEP | | ||
395 | (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; | ||
396 | |||
397 | memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen); | ||
398 | |||
399 | IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " | ||
400 | "with key %d\n", info->control.hw_key->hw_key_idx); | ||
401 | break; | ||
402 | |||
403 | default: | ||
404 | IWL_ERR(priv, "Unknown encode cipher %x\n", keyinfo->cipher); | ||
405 | break; | ||
406 | } | ||
407 | } | ||
408 | |||
409 | /* | ||
410 | * handle build REPLY_TX command notification. | ||
411 | */ | ||
412 | static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv, | ||
413 | struct iwl_device_cmd *cmd, | ||
414 | struct ieee80211_tx_info *info, | ||
415 | struct ieee80211_hdr *hdr, u8 std_id) | ||
416 | { | ||
417 | struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload; | ||
418 | __le32 tx_flags = tx_cmd->tx_flags; | ||
419 | __le16 fc = hdr->frame_control; | ||
420 | |||
421 | tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; | ||
422 | if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { | ||
423 | tx_flags |= TX_CMD_FLG_ACK_MSK; | ||
424 | if (ieee80211_is_mgmt(fc)) | ||
425 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
426 | if (ieee80211_is_probe_resp(fc) && | ||
427 | !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) | ||
428 | tx_flags |= TX_CMD_FLG_TSF_MSK; | ||
429 | } else { | ||
430 | tx_flags &= (~TX_CMD_FLG_ACK_MSK); | ||
431 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
432 | } | ||
433 | |||
434 | tx_cmd->sta_id = std_id; | ||
435 | if (ieee80211_has_morefrags(fc)) | ||
436 | tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; | ||
437 | |||
438 | if (ieee80211_is_data_qos(fc)) { | ||
439 | u8 *qc = ieee80211_get_qos_ctl(hdr); | ||
440 | tx_cmd->tid_tspec = qc[0] & 0xf; | ||
441 | tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; | ||
442 | } else { | ||
443 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
444 | } | ||
445 | |||
446 | priv->cfg->ops->utils->tx_cmd_protection(priv, info, fc, &tx_flags); | ||
447 | |||
448 | tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); | ||
449 | if (ieee80211_is_mgmt(fc)) { | ||
450 | if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) | ||
451 | tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); | ||
452 | else | ||
453 | tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); | ||
454 | } else { | ||
455 | tx_cmd->timeout.pm_frame_timeout = 0; | ||
456 | } | ||
457 | |||
458 | tx_cmd->driver_txop = 0; | ||
459 | tx_cmd->tx_flags = tx_flags; | ||
460 | tx_cmd->next_frame_len = 0; | ||
461 | } | ||
462 | |||
463 | /* | ||
464 | * start REPLY_TX command process | ||
465 | */ | ||
466 | static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) | ||
467 | { | ||
468 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
469 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
470 | struct iwl3945_tx_cmd *tx_cmd; | ||
471 | struct iwl_tx_queue *txq = NULL; | ||
472 | struct iwl_queue *q = NULL; | ||
473 | struct iwl_device_cmd *out_cmd; | ||
474 | struct iwl_cmd_meta *out_meta; | ||
475 | dma_addr_t phys_addr; | ||
476 | dma_addr_t txcmd_phys; | ||
477 | int txq_id = skb_get_queue_mapping(skb); | ||
478 | u16 len, idx, hdr_len; | ||
479 | u8 id; | ||
480 | u8 unicast; | ||
481 | u8 sta_id; | ||
482 | u8 tid = 0; | ||
483 | __le16 fc; | ||
484 | u8 wait_write_ptr = 0; | ||
485 | unsigned long flags; | ||
486 | |||
487 | spin_lock_irqsave(&priv->lock, flags); | ||
488 | if (iwl_is_rfkill(priv)) { | ||
489 | IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); | ||
490 | goto drop_unlock; | ||
491 | } | ||
492 | |||
493 | if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) { | ||
494 | IWL_ERR(priv, "ERROR: No TX rate available.\n"); | ||
495 | goto drop_unlock; | ||
496 | } | ||
497 | |||
498 | unicast = !is_multicast_ether_addr(hdr->addr1); | ||
499 | id = 0; | ||
500 | |||
501 | fc = hdr->frame_control; | ||
502 | |||
503 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
504 | if (ieee80211_is_auth(fc)) | ||
505 | IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); | ||
506 | else if (ieee80211_is_assoc_req(fc)) | ||
507 | IWL_DEBUG_TX(priv, "Sending ASSOC frame\n"); | ||
508 | else if (ieee80211_is_reassoc_req(fc)) | ||
509 | IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); | ||
510 | #endif | ||
511 | |||
512 | spin_unlock_irqrestore(&priv->lock, flags); | ||
513 | |||
514 | hdr_len = ieee80211_hdrlen(fc); | ||
515 | |||
516 | /* Find index into station table for destination station */ | ||
517 | sta_id = iwl_sta_id_or_broadcast( | ||
518 | priv, &priv->contexts[IWL_RXON_CTX_BSS], | ||
519 | info->control.sta); | ||
520 | if (sta_id == IWL_INVALID_STATION) { | ||
521 | IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", | ||
522 | hdr->addr1); | ||
523 | goto drop; | ||
524 | } | ||
525 | |||
526 | IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id); | ||
527 | |||
528 | if (ieee80211_is_data_qos(fc)) { | ||
529 | u8 *qc = ieee80211_get_qos_ctl(hdr); | ||
530 | tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; | ||
531 | if (unlikely(tid >= MAX_TID_COUNT)) | ||
532 | goto drop; | ||
533 | } | ||
534 | |||
535 | /* Descriptor for chosen Tx queue */ | ||
536 | txq = &priv->txq[txq_id]; | ||
537 | q = &txq->q; | ||
538 | |||
539 | if ((iwl_queue_space(q) < q->high_mark)) | ||
540 | goto drop; | ||
541 | |||
542 | spin_lock_irqsave(&priv->lock, flags); | ||
543 | |||
544 | idx = get_cmd_index(q, q->write_ptr, 0); | ||
545 | |||
546 | /* Set up driver data for this TFD */ | ||
547 | memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); | ||
548 | txq->txb[q->write_ptr].skb = skb; | ||
549 | txq->txb[q->write_ptr].ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
550 | |||
551 | /* Init first empty entry in queue's array of Tx/cmd buffers */ | ||
552 | out_cmd = txq->cmd[idx]; | ||
553 | out_meta = &txq->meta[idx]; | ||
554 | tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload; | ||
555 | memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); | ||
556 | memset(tx_cmd, 0, sizeof(*tx_cmd)); | ||
557 | |||
558 | /* | ||
559 | * Set up the Tx-command (not MAC!) header. | ||
560 | * Store the chosen Tx queue and TFD index within the sequence field; | ||
561 | * after Tx, uCode's Tx response will return this value so driver can | ||
562 | * locate the frame within the tx queue and do post-tx processing. | ||
563 | */ | ||
564 | out_cmd->hdr.cmd = REPLY_TX; | ||
565 | out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | | ||
566 | INDEX_TO_SEQ(q->write_ptr))); | ||
567 | |||
568 | /* Copy MAC header from skb into command buffer */ | ||
569 | memcpy(tx_cmd->hdr, hdr, hdr_len); | ||
570 | |||
571 | |||
572 | if (info->control.hw_key) | ||
573 | iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id); | ||
574 | |||
575 | /* TODO need this for burst mode later on */ | ||
576 | iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id); | ||
577 | |||
578 | /* set is_hcca to 0; it probably will never be implemented */ | ||
579 | iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0); | ||
580 | |||
581 | /* Total # bytes to be transmitted */ | ||
582 | len = (u16)skb->len; | ||
583 | tx_cmd->len = cpu_to_le16(len); | ||
584 | |||
585 | iwl_dbg_log_tx_data_frame(priv, len, hdr); | ||
586 | iwl_update_stats(priv, true, fc, len); | ||
587 | tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; | ||
588 | tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; | ||
589 | |||
590 | if (!ieee80211_has_morefrags(hdr->frame_control)) { | ||
591 | txq->need_update = 1; | ||
592 | } else { | ||
593 | wait_write_ptr = 1; | ||
594 | txq->need_update = 0; | ||
595 | } | ||
596 | |||
597 | IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n", | ||
598 | le16_to_cpu(out_cmd->hdr.sequence)); | ||
599 | IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); | ||
600 | iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd)); | ||
601 | iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, | ||
602 | ieee80211_hdrlen(fc)); | ||
603 | |||
604 | /* | ||
605 | * Use the first empty entry in this queue's command buffer array | ||
606 | * to contain the Tx command and MAC header concatenated together | ||
607 | * (payload data will be in another buffer). | ||
608 | * Size of this varies, due to varying MAC header length. | ||
609 | * If end is not dword aligned, we'll have 2 extra bytes at the end | ||
610 | * of the MAC header (device reads on dword boundaries). | ||
611 | * We'll tell device about this padding later. | ||
612 | */ | ||
613 | len = sizeof(struct iwl3945_tx_cmd) + | ||
614 | sizeof(struct iwl_cmd_header) + hdr_len; | ||
615 | len = (len + 3) & ~3; | ||
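/* Worked example of the rounding above: with a QoS header the unpadded
 * total might be, say, 58 bytes; (58 + 3) & ~3 = 60, i.e. the 2 extra
 * pad bytes mentioned in the comment above. */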
616 | |||
617 | /* Physical address of this Tx command's header (not MAC header!), | ||
618 | * within command buffer array. */ | ||
619 | txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr, | ||
620 | len, PCI_DMA_TODEVICE); | ||
621 | /* we do not map meta data ... so we can safely access address to | ||
622 | * provide to unmap command*/ | ||
623 | dma_unmap_addr_set(out_meta, mapping, txcmd_phys); | ||
624 | dma_unmap_len_set(out_meta, len, len); | ||
625 | |||
626 | /* Add buffer containing Tx command and MAC(!) header to TFD's | ||
627 | * first entry */ | ||
628 | priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, | ||
629 | txcmd_phys, len, 1, 0); | ||
630 | |||
631 | |||
632 | /* Set up TFD's 2nd entry to point directly to remainder of skb, | ||
633 | * if any (802.11 null frames have no payload). */ | ||
634 | len = skb->len - hdr_len; | ||
635 | if (len) { | ||
636 | phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, | ||
637 | len, PCI_DMA_TODEVICE); | ||
638 | priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, | ||
639 | phys_addr, len, | ||
640 | 0, U32_PAD(len)); | ||
641 | } | ||
642 | |||
643 | |||
644 | /* Tell device the write index *just past* this latest filled TFD */ | ||
645 | q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); | ||
646 | iwl_txq_update_write_ptr(priv, txq); | ||
647 | spin_unlock_irqrestore(&priv->lock, flags); | ||
648 | |||
649 | if ((iwl_queue_space(q) < q->high_mark) | ||
650 | && priv->mac80211_registered) { | ||
651 | if (wait_write_ptr) { | ||
652 | spin_lock_irqsave(&priv->lock, flags); | ||
653 | txq->need_update = 1; | ||
654 | iwl_txq_update_write_ptr(priv, txq); | ||
655 | spin_unlock_irqrestore(&priv->lock, flags); | ||
656 | } | ||
657 | |||
658 | iwl_stop_queue(priv, txq); | ||
659 | } | ||
660 | |||
661 | return 0; | ||
662 | |||
663 | drop_unlock: | ||
664 | spin_unlock_irqrestore(&priv->lock, flags); | ||
665 | drop: | ||
666 | return -1; | ||
667 | } | ||
668 | |||
669 | static int iwl3945_get_measurement(struct iwl_priv *priv, | ||
670 | struct ieee80211_measurement_params *params, | ||
671 | u8 type) | ||
672 | { | ||
673 | struct iwl_spectrum_cmd spectrum; | ||
674 | struct iwl_rx_packet *pkt; | ||
675 | struct iwl_host_cmd cmd = { | ||
676 | .id = REPLY_SPECTRUM_MEASUREMENT_CMD, | ||
677 | .data = (void *)&spectrum, | ||
678 | .flags = CMD_WANT_SKB, | ||
679 | }; | ||
680 | u32 add_time = le64_to_cpu(params->start_time); | ||
681 | int rc; | ||
682 | int spectrum_resp_status; | ||
683 | int duration = le16_to_cpu(params->duration); | ||
684 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
685 | |||
686 | if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) | ||
687 | add_time = iwl_usecs_to_beacons(priv, | ||
688 | le64_to_cpu(params->start_time) - priv->_3945.last_tsf, | ||
689 | le16_to_cpu(ctx->timing.beacon_interval)); | ||
690 | |||
691 | memset(&spectrum, 0, sizeof(spectrum)); | ||
692 | |||
693 | spectrum.channel_count = cpu_to_le16(1); | ||
694 | spectrum.flags = | ||
695 | RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK; | ||
696 | spectrum.filter_flags = MEASUREMENT_FILTER_FLAG; | ||
697 | cmd.len = sizeof(spectrum); | ||
698 | spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); | ||
699 | |||
700 | if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) | ||
701 | spectrum.start_time = | ||
702 | iwl_add_beacon_time(priv, | ||
703 | priv->_3945.last_beacon_time, add_time, | ||
704 | le16_to_cpu(ctx->timing.beacon_interval)); | ||
705 | else | ||
706 | spectrum.start_time = 0; | ||
707 | |||
708 | spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT); | ||
709 | spectrum.channels[0].channel = params->channel; | ||
710 | spectrum.channels[0].type = type; | ||
711 | if (ctx->active.flags & RXON_FLG_BAND_24G_MSK) | ||
712 | spectrum.flags |= RXON_FLG_BAND_24G_MSK | | ||
713 | RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK; | ||
714 | |||
715 | rc = iwl_send_cmd_sync(priv, &cmd); | ||
716 | if (rc) | ||
717 | return rc; | ||
718 | |||
719 | pkt = (struct iwl_rx_packet *)cmd.reply_page; | ||
720 | if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { | ||
721 | IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n"); | ||
722 | rc = -EIO; | ||
723 | } | ||
724 | |||
725 | spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status); | ||
726 | switch (spectrum_resp_status) { | ||
727 | case 0: /* Command will be handled */ | ||
728 | if (pkt->u.spectrum.id != 0xff) { | ||
729 | IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n", | ||
730 | pkt->u.spectrum.id); | ||
731 | priv->measurement_status &= ~MEASUREMENT_READY; | ||
732 | } | ||
733 | priv->measurement_status |= MEASUREMENT_ACTIVE; | ||
734 | rc = 0; | ||
735 | break; | ||
736 | |||
737 | case 1: /* Command will not be handled */ | ||
738 | rc = -EAGAIN; | ||
739 | break; | ||
740 | } | ||
741 | |||
742 | iwl_free_pages(priv, cmd.reply_page); | ||
743 | |||
744 | return rc; | ||
745 | } | ||
746 | |||
747 | static void iwl3945_rx_reply_alive(struct iwl_priv *priv, | ||
748 | struct iwl_rx_mem_buffer *rxb) | ||
749 | { | ||
750 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
751 | struct iwl_alive_resp *palive; | ||
752 | struct delayed_work *pwork; | ||
753 | |||
754 | palive = &pkt->u.alive_frame; | ||
755 | |||
756 | IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision " | ||
757 | "0x%01X 0x%01X\n", | ||
758 | palive->is_valid, palive->ver_type, | ||
759 | palive->ver_subtype); | ||
760 | |||
761 | if (palive->ver_subtype == INITIALIZE_SUBTYPE) { | ||
762 | IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); | ||
763 | memcpy(&priv->card_alive_init, &pkt->u.alive_frame, | ||
764 | sizeof(struct iwl_alive_resp)); | ||
765 | pwork = &priv->init_alive_start; | ||
766 | } else { | ||
767 | IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); | ||
768 | memcpy(&priv->card_alive, &pkt->u.alive_frame, | ||
769 | sizeof(struct iwl_alive_resp)); | ||
770 | pwork = &priv->alive_start; | ||
771 | iwl3945_disable_events(priv); | ||
772 | } | ||
773 | |||
774 | /* We delay the ALIVE response by 5ms to | ||
775 | * give the HW RF Kill time to activate... */ | ||
776 | if (palive->is_valid == UCODE_VALID_OK) | ||
777 | queue_delayed_work(priv->workqueue, pwork, | ||
778 | msecs_to_jiffies(5)); | ||
779 | else | ||
780 | IWL_WARN(priv, "uCode did not respond OK.\n"); | ||
781 | } | ||
782 | |||
783 | static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv, | ||
784 | struct iwl_rx_mem_buffer *rxb) | ||
785 | { | ||
786 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
787 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
788 | #endif | ||
789 | |||
790 | IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); | ||
791 | } | ||
792 | |||
793 | static void iwl3945_bg_beacon_update(struct work_struct *work) | ||
794 | { | ||
795 | struct iwl_priv *priv = | ||
796 | container_of(work, struct iwl_priv, beacon_update); | ||
797 | struct sk_buff *beacon; | ||
798 | |||
799 | /* Pull updated AP beacon from mac80211. will fail if not in AP mode */ | ||
800 | beacon = ieee80211_beacon_get(priv->hw, | ||
801 | priv->contexts[IWL_RXON_CTX_BSS].vif); | ||
802 | |||
803 | if (!beacon) { | ||
804 | IWL_ERR(priv, "update beacon failed\n"); | ||
805 | return; | ||
806 | } | ||
807 | |||
808 | mutex_lock(&priv->mutex); | ||
809 | /* new beacon skb is allocated every time; dispose previous.*/ | ||
810 | if (priv->beacon_skb) | ||
811 | dev_kfree_skb(priv->beacon_skb); | ||
812 | |||
813 | priv->beacon_skb = beacon; | ||
814 | mutex_unlock(&priv->mutex); | ||
815 | |||
816 | iwl3945_send_beacon_cmd(priv); | ||
817 | } | ||
818 | |||
819 | static void iwl3945_rx_beacon_notif(struct iwl_priv *priv, | ||
820 | struct iwl_rx_mem_buffer *rxb) | ||
821 | { | ||
822 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
823 | struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status); | ||
824 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
825 | u8 rate = beacon->beacon_notify_hdr.rate; | ||
826 | |||
827 | IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " | ||
828 | "tsf %d %d rate %d\n", | ||
829 | le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK, | ||
830 | beacon->beacon_notify_hdr.failure_frame, | ||
831 | le32_to_cpu(beacon->ibss_mgr_status), | ||
832 | le32_to_cpu(beacon->high_tsf), | ||
833 | le32_to_cpu(beacon->low_tsf), rate); | ||
834 | #endif | ||
835 | |||
836 | priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); | ||
837 | |||
838 | if ((priv->iw_mode == NL80211_IFTYPE_AP) && | ||
839 | (!test_bit(STATUS_EXIT_PENDING, &priv->status))) | ||
840 | queue_work(priv->workqueue, &priv->beacon_update); | ||
841 | } | ||
842 | |||
843 | /* Handle notification from uCode that card's power state is changing | ||
844 | * due to software, hardware, or critical temperature RFKILL */ | ||
845 | static void iwl3945_rx_card_state_notif(struct iwl_priv *priv, | ||
846 | struct iwl_rx_mem_buffer *rxb) | ||
847 | { | ||
848 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
849 | u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); | ||
850 | unsigned long status = priv->status; | ||
851 | |||
852 | IWL_WARN(priv, "Card state received: HW:%s SW:%s\n", | ||
853 | (flags & HW_CARD_DISABLED) ? "Kill" : "On", | ||
854 | (flags & SW_CARD_DISABLED) ? "Kill" : "On"); | ||
855 | |||
856 | iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, | ||
857 | CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); | ||
858 | |||
859 | if (flags & HW_CARD_DISABLED) | ||
860 | set_bit(STATUS_RF_KILL_HW, &priv->status); | ||
861 | else | ||
862 | clear_bit(STATUS_RF_KILL_HW, &priv->status); | ||
863 | |||
864 | |||
865 | iwl_scan_cancel(priv); | ||
866 | |||
867 | if ((test_bit(STATUS_RF_KILL_HW, &status) != | ||
868 | test_bit(STATUS_RF_KILL_HW, &priv->status))) | ||
869 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, | ||
870 | test_bit(STATUS_RF_KILL_HW, &priv->status)); | ||
871 | else | ||
872 | wake_up_interruptible(&priv->wait_command_queue); | ||
873 | } | ||
874 | |||
875 | /** | ||
876 | * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks | ||
877 | * | ||
878 | * Setup the RX handlers for each of the reply types sent from the uCode | ||
879 | * to the host. | ||
880 | * | ||
881 | * This function chains into the hardware specific files for them to setup | ||
882 | * any hardware specific handlers as well. | ||
883 | */ | ||
884 | static void iwl3945_setup_rx_handlers(struct iwl_priv *priv) | ||
885 | { | ||
886 | priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive; | ||
887 | priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta; | ||
888 | priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error; | ||
889 | priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; | ||
890 | priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = | ||
891 | iwl_rx_spectrum_measure_notif; | ||
892 | priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; | ||
893 | priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = | ||
894 | iwl_rx_pm_debug_statistics_notif; | ||
895 | priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif; | ||
896 | |||
897 | /* | ||
898 | * The same handler is used for both the REPLY to a discrete | ||
899 | * statistics request from the host as well as for the periodic | ||
900 | * statistics notifications (after received beacons) from the uCode. | ||
901 | */ | ||
902 | priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics; | ||
903 | priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics; | ||
904 | |||
905 | iwl_setup_rx_scan_handlers(priv); | ||
906 | priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif; | ||
907 | |||
908 | /* Set up hardware specific Rx handlers */ | ||
909 | iwl3945_hw_rx_handler_setup(priv); | ||
910 | } | ||
911 | |||
912 | /************************** RX-FUNCTIONS ****************************/ | ||
913 | /* | ||
914 | * Rx theory of operation | ||
915 | * | ||
916 | * The host allocates 32 DMA target addresses and passes the host address | ||
917 | * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is | ||
918 | * 0 to 31 | ||
919 | * | ||
920 | * Rx Queue Indexes | ||
921 | * The host/firmware share two index registers for managing the Rx buffers. | ||
922 | * | ||
923 | * The READ index maps to the first position that the firmware may be writing | ||
924 | * to -- the driver can read up to (but not including) this position and get | ||
925 | * good data. | ||
926 | * The READ index is managed by the firmware once the card is enabled. | ||
927 | * | ||
928 | * The WRITE index maps to the last position the driver has read from -- the | ||
929 | * position preceding WRITE is the last slot the firmware can place a packet. | ||
930 | * | ||
931 | * The queue is empty (no good data) if WRITE = READ - 1, and is full if | ||
932 | * WRITE = READ. | ||
933 | * | ||
934 | * During initialization, the host sets up the READ queue position to the first | ||
935 | * INDEX position, and WRITE to the last (READ - 1 wrapped) | ||
936 | * | ||
937 | * When the firmware places a packet in a buffer, it will advance the READ index | ||
938 | * and fire the RX interrupt. The driver can then query the READ index and | ||
939 | * process as many packets as possible, moving the WRITE index forward as it | ||
940 | * resets the Rx queue buffers with new memory. | ||
941 | * | ||
942 | * The management in the driver is as follows: | ||
943 | * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When | ||
944 | * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled | ||
945 | * to replenish the iwl->rxq->rx_free. | ||
946 | * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the | ||
947 | * iwl->rxq is replenished and the READ INDEX is updated (updating the | ||
948 | * 'processed' and 'read' driver indexes as well) | ||
949 | * + A received packet is processed and handed to the kernel network stack, | ||
950 | * detached from the iwl->rxq. The driver 'processed' index is updated. | ||
951 | * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free | ||
952 | * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ | ||
953 | * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there | ||
954 | * were enough free buffers and RX_STALLED is set it is cleared. | ||
955 | * | ||
956 | * | ||
957 | * Driver sequence: | ||
958 | * | ||
959 | * iwl3945_rx_replenish() Replenishes rx_free list from rx_used, and calls | ||
960 | * iwl3945_rx_queue_restock | ||
961 | * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx | ||
962 | * queue, updates firmware pointers, and updates | ||
963 | * the WRITE index. If insufficient rx_free buffers | ||
964 | * are available, schedules iwl3945_rx_replenish | ||
965 | * | ||
966 | * -- enable interrupts -- | ||
967 | * ISR - iwl3945_rx() Detach iwl_rx_mem_buffers from pool up to the | ||
968 | * READ INDEX, detaching the SKB from the pool. | ||
969 | * Moves the packet buffer from queue to rx_used. | ||
970 | * Calls iwl3945_rx_queue_restock to refill any empty | ||
971 | * slots. | ||
972 | * ... | ||
973 | * | ||
974 | */ | ||
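The index rules described above are easy to get wrong, so here is a minimal standalone sketch (not driver code; QUEUE_SIZE, ring_is_empty() and ring_is_full() are illustrative names standing in for the driver's RX_QUEUE_SIZE/RX_QUEUE_MASK machinery) of the empty/full conditions and the wrap-around arithmetic for a 256-entry ring:

#include <stdio.h>

#define QUEUE_SIZE 256                 /* stands in for RX_QUEUE_SIZE */
#define QUEUE_MASK (QUEUE_SIZE - 1)

/* "The queue is empty (no good data) if WRITE = READ - 1" */
static int ring_is_empty(unsigned int read, unsigned int write)
{
	return write == ((read - 1) & QUEUE_MASK);
}

/* "... and is full if WRITE = READ" */
static int ring_is_full(unsigned int read, unsigned int write)
{
	return write == read;
}

int main(void)
{
	/* after init: READ at the first slot, WRITE at READ - 1 (wrapped) */
	unsigned int read = 0;
	unsigned int write = (read - 1) & QUEUE_MASK;

	printf("fresh ring:        empty=%d full=%d\n",
	       ring_is_empty(read, write), ring_is_full(read, write));

	/* firmware fills 8 buffers and advances READ; the driver has not
	 * restocked yet, so 8 packets sit between WRITE and READ */
	read = (read + 8) & QUEUE_MASK;
	printf("8 packets pending: empty=%d full=%d\n",
	       ring_is_empty(read, write), ring_is_full(read, write));

	/* the driver processes them, refills the slots and advances WRITE
	 * (the real code only tells the device in multiples of 8) */
	write = (write + 8) & QUEUE_MASK;
	printf("after restock:     empty=%d full=%d\n",
	       ring_is_empty(read, write), ring_is_full(read, write));
	return 0;
}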
975 | |||
976 | /** | ||
977 | * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr | ||
978 | */ | ||
979 | static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv, | ||
980 | dma_addr_t dma_addr) | ||
981 | { | ||
982 | return cpu_to_le32((u32)dma_addr); | ||
983 | } | ||
984 | |||
985 | /** | ||
986 | * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool | ||
987 | * | ||
988 | * If there are slots in the RX queue that need to be restocked, | ||
989 | * and we have free pre-allocated buffers, fill the ranks as much | ||
990 | * as we can, pulling from rx_free. | ||
991 | * | ||
992 | * This moves the 'write' index forward to catch up with 'processed', and | ||
993 | * also updates the memory address in the firmware to reference the new | ||
994 | * target buffer. | ||
995 | */ | ||
996 | static void iwl3945_rx_queue_restock(struct iwl_priv *priv) | ||
997 | { | ||
998 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
999 | struct list_head *element; | ||
1000 | struct iwl_rx_mem_buffer *rxb; | ||
1001 | unsigned long flags; | ||
1002 | int write; | ||
1003 | |||
1004 | spin_lock_irqsave(&rxq->lock, flags); | ||
1005 | write = rxq->write & ~0x7; | ||
1006 | while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { | ||
1007 | /* Get next free Rx buffer, remove from free list */ | ||
1008 | element = rxq->rx_free.next; | ||
1009 | rxb = list_entry(element, struct iwl_rx_mem_buffer, list); | ||
1010 | list_del(element); | ||
1011 | |||
1012 | /* Point to Rx buffer via next RBD in circular buffer */ | ||
1013 | rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma); | ||
1014 | rxq->queue[rxq->write] = rxb; | ||
1015 | rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; | ||
1016 | rxq->free_count--; | ||
1017 | } | ||
1018 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
1019 | /* If the pre-allocated buffer pool is dropping low, schedule to | ||
1020 | * refill it */ | ||
1021 | if (rxq->free_count <= RX_LOW_WATERMARK) | ||
1022 | queue_work(priv->workqueue, &priv->rx_replenish); | ||
1023 | |||
1024 | |||
1025 | /* If we've added more space for the firmware to place data, tell it. | ||
1026 | * Increment device's write pointer in multiples of 8. */ | ||
1027 | if ((rxq->write_actual != (rxq->write & ~0x7)) | ||
1028 | || (abs(rxq->write - rxq->read) > 7)) { | ||
1029 | spin_lock_irqsave(&rxq->lock, flags); | ||
1030 | rxq->need_update = 1; | ||
1031 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
1032 | iwl_rx_queue_update_write_ptr(priv, rxq); | ||
1033 | } | ||
1034 | } | ||
1035 | |||
1036 | /** | ||
1037 | * iwl3945_rx_replenish - Move all used packets from rx_used to rx_free | ||
1038 | * | ||
1039 | * When moving to rx_free an SKB is allocated for the slot. | ||
1040 | * | ||
1041 | * Also restock the Rx queue via iwl3945_rx_queue_restock. | ||
1042 | * This is called as a scheduled work item (except for during initialization) | ||
1043 | */ | ||
1044 | static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority) | ||
1045 | { | ||
1046 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
1047 | struct list_head *element; | ||
1048 | struct iwl_rx_mem_buffer *rxb; | ||
1049 | struct page *page; | ||
1050 | unsigned long flags; | ||
1051 | gfp_t gfp_mask = priority; | ||
1052 | |||
1053 | while (1) { | ||
1054 | spin_lock_irqsave(&rxq->lock, flags); | ||
1055 | |||
1056 | if (list_empty(&rxq->rx_used)) { | ||
1057 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
1058 | return; | ||
1059 | } | ||
1060 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
1061 | |||
1062 | if (rxq->free_count > RX_LOW_WATERMARK) | ||
1063 | gfp_mask |= __GFP_NOWARN; | ||
1064 | |||
1065 | if (priv->hw_params.rx_page_order > 0) | ||
1066 | gfp_mask |= __GFP_COMP; | ||
1067 | |||
1068 | /* Alloc a new receive buffer */ | ||
1069 | page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order); | ||
1070 | if (!page) { | ||
1071 | if (net_ratelimit()) | ||
1072 | IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n"); | ||
1073 | if ((rxq->free_count <= RX_LOW_WATERMARK) && | ||
1074 | net_ratelimit()) | ||
1075 | IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n", | ||
1076 | priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL", | ||
1077 | rxq->free_count); | ||
1078 | /* We don't reschedule replenish work here -- we will | ||
1079 | * call the restock method and if it still needs | ||
1080 | * more buffers it will schedule replenish */ | ||
1081 | break; | ||
1082 | } | ||
1083 | |||
1084 | spin_lock_irqsave(&rxq->lock, flags); | ||
1085 | if (list_empty(&rxq->rx_used)) { | ||
1086 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
1087 | __free_pages(page, priv->hw_params.rx_page_order); | ||
1088 | return; | ||
1089 | } | ||
1090 | element = rxq->rx_used.next; | ||
1091 | rxb = list_entry(element, struct iwl_rx_mem_buffer, list); | ||
1092 | list_del(element); | ||
1093 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
1094 | |||
1095 | rxb->page = page; | ||
1096 | /* Get physical address of RB/SKB */ | ||
1097 | rxb->page_dma = pci_map_page(priv->pci_dev, page, 0, | ||
1098 | PAGE_SIZE << priv->hw_params.rx_page_order, | ||
1099 | PCI_DMA_FROMDEVICE); | ||
1100 | |||
1101 | spin_lock_irqsave(&rxq->lock, flags); | ||
1102 | |||
1103 | list_add_tail(&rxb->list, &rxq->rx_free); | ||
1104 | rxq->free_count++; | ||
1105 | priv->alloc_rxb_page++; | ||
1106 | |||
1107 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
1108 | } | ||
1109 | } | ||
1110 | |||
1111 | void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | ||
1112 | { | ||
1113 | unsigned long flags; | ||
1114 | int i; | ||
1115 | spin_lock_irqsave(&rxq->lock, flags); | ||
1116 | INIT_LIST_HEAD(&rxq->rx_free); | ||
1117 | INIT_LIST_HEAD(&rxq->rx_used); | ||
1118 | /* Fill the rx_used queue with _all_ of the Rx buffers */ | ||
1119 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { | ||
1120 | /* In the reset function, these buffers may have been allocated | ||
1121 | * to an SKB, so we need to unmap and free potential storage */ | ||
1122 | if (rxq->pool[i].page != NULL) { | ||
1123 | pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, | ||
1124 | PAGE_SIZE << priv->hw_params.rx_page_order, | ||
1125 | PCI_DMA_FROMDEVICE); | ||
1126 | __iwl_free_pages(priv, rxq->pool[i].page); | ||
1127 | rxq->pool[i].page = NULL; | ||
1128 | } | ||
1129 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | ||
1130 | } | ||
1131 | |||
1132 | /* Set us so that we have processed and used all buffers, but have | ||
1133 | * not restocked the Rx queue with fresh buffers */ | ||
1134 | rxq->read = rxq->write = 0; | ||
1135 | rxq->write_actual = 0; | ||
1136 | rxq->free_count = 0; | ||
1137 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
1138 | } | ||
1139 | |||
1140 | void iwl3945_rx_replenish(void *data) | ||
1141 | { | ||
1142 | struct iwl_priv *priv = data; | ||
1143 | unsigned long flags; | ||
1144 | |||
1145 | iwl3945_rx_allocate(priv, GFP_KERNEL); | ||
1146 | |||
1147 | spin_lock_irqsave(&priv->lock, flags); | ||
1148 | iwl3945_rx_queue_restock(priv); | ||
1149 | spin_unlock_irqrestore(&priv->lock, flags); | ||
1150 | } | ||
1151 | |||
1152 | static void iwl3945_rx_replenish_now(struct iwl_priv *priv) | ||
1153 | { | ||
1154 | iwl3945_rx_allocate(priv, GFP_ATOMIC); | ||
1155 | |||
1156 | iwl3945_rx_queue_restock(priv); | ||
1157 | } | ||
1158 | |||
1159 | |||
1160 | /* Assumes that the skb field of the buffers in 'pool' is kept accurate. | ||
1161 | * If an SKB has been detached, the POOL needs to have its SKB set to NULL | ||
1162 | * This free routine walks the list of POOL entries and if SKB is set to | ||
1163 | * non NULL it is unmapped and freed | ||
1164 | */ | ||
1165 | static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | ||
1166 | { | ||
1167 | int i; | ||
1168 | for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { | ||
1169 | if (rxq->pool[i].page != NULL) { | ||
1170 | pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, | ||
1171 | PAGE_SIZE << priv->hw_params.rx_page_order, | ||
1172 | PCI_DMA_FROMDEVICE); | ||
1173 | __iwl_free_pages(priv, rxq->pool[i].page); | ||
1174 | rxq->pool[i].page = NULL; | ||
1175 | } | ||
1176 | } | ||
1177 | |||
1178 | dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, | ||
1179 | rxq->bd_dma); | ||
1180 | dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status), | ||
1181 | rxq->rb_stts, rxq->rb_stts_dma); | ||
1182 | rxq->bd = NULL; | ||
1183 | rxq->rb_stts = NULL; | ||
1184 | } | ||
1185 | |||
1186 | |||
1187 | /* Convert linear signal-to-noise ratio into dB */ | ||
1188 | static u8 ratio2dB[100] = { | ||
1189 | /* 0 1 2 3 4 5 6 7 8 9 */ | ||
1190 | 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */ | ||
1191 | 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */ | ||
1192 | 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */ | ||
1193 | 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */ | ||
1194 | 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */ | ||
1195 | 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */ | ||
1196 | 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */ | ||
1197 | 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */ | ||
1198 | 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */ | ||
1199 | 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */ | ||
1200 | }; | ||
1201 | |||
1202 | /* Calculates a relative dB value from a ratio of linear | ||
1203 | * (i.e. not dB) signal levels. | ||
1204 | * Conversion assumes that levels are voltages (20*log), not powers (10*log). */ | ||
1205 | int iwl3945_calc_db_from_ratio(int sig_ratio) | ||
1206 | { | ||
1207 | /* 1000:1 or higher just report as 60 dB */ | ||
1208 | if (sig_ratio >= 1000) | ||
1209 | return 60; | ||
1210 | |||
1211 | /* 100:1 or higher, divide by 10 and use table, | ||
1212 | * add 20 dB to make up for divide by 10 */ | ||
1213 | if (sig_ratio >= 100) | ||
1214 | return 20 + (int)ratio2dB[sig_ratio/10]; | ||
1215 | |||
1216 | /* We shouldn't see this */ | ||
1217 | if (sig_ratio < 1) | ||
1218 | return 0; | ||
1219 | |||
1220 | /* Use table for ratios 1:1 - 99:1 */ | ||
1221 | return (int)ratio2dB[sig_ratio]; | ||
1222 | } | ||
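A few concrete values make the three branches above easier to check; the sketch below is a standalone comparison against the exact 20*log10 figure (illustrative only, the driver always uses the table/capped result). Table entries agree with the exact value to within about 1 dB, while ratios of 1000:1 or more are simply capped at 60 dB.

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* sig_ratio = 1200 -> capped at 60 dB
	 * sig_ratio =  250 -> 20 + ratio2dB[25] = 20 + 28 = 48 dB
	 * sig_ratio =    8 -> ratio2dB[8] = 18 dB */
	const int ratios[] = { 8, 250, 1200 };

	for (unsigned int i = 0; i < sizeof(ratios) / sizeof(ratios[0]); i++)
		printf("%4d:1 -> %.1f dB exact (20 * log10)\n",
		       ratios[i], 20.0 * log10(ratios[i]));
	return 0;
}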
1223 | |||
1224 | /** | ||
1225 | * iwl3945_rx_handle - Main entry function for receiving responses from uCode | ||
1226 | * | ||
1227 | * Uses the priv->rx_handlers callback function array to invoke | ||
1228 | * the appropriate handlers, including command responses, | ||
1229 | * frame-received notifications, and other notifications. | ||
1230 | */ | ||
1231 | static void iwl3945_rx_handle(struct iwl_priv *priv) | ||
1232 | { | ||
1233 | struct iwl_rx_mem_buffer *rxb; | ||
1234 | struct iwl_rx_packet *pkt; | ||
1235 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
1236 | u32 r, i; | ||
1237 | int reclaim; | ||
1238 | unsigned long flags; | ||
1239 | u8 fill_rx = 0; | ||
1240 | u32 count = 8; | ||
1241 | int total_empty = 0; | ||
1242 | |||
1243 | /* uCode's read index (stored in shared DRAM) indicates the last Rx | ||
1244 | * buffer that the driver may process (last buffer filled by ucode). */ | ||
1245 | r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; | ||
1246 | i = rxq->read; | ||
1247 | |||
1248 | /* calculate total frames that need to be restocked after handling RX */ | ||
1249 | total_empty = r - rxq->write_actual; | ||
1250 | if (total_empty < 0) | ||
1251 | total_empty += RX_QUEUE_SIZE; | ||
1252 | |||
1253 | if (total_empty > (RX_QUEUE_SIZE / 2)) | ||
1254 | fill_rx = 1; | ||
1255 | /* Rx interrupt, but nothing sent from uCode */ | ||
1256 | if (i == r) | ||
1257 | IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i); | ||
1258 | |||
1259 | while (i != r) { | ||
1260 | int len; | ||
1261 | |||
1262 | rxb = rxq->queue[i]; | ||
1263 | |||
1264 | /* If an RXB doesn't have a Rx queue slot associated with it, | ||
1265 | * then a bug has been introduced in the queue refilling | ||
1266 | * routines -- catch it here */ | ||
1267 | BUG_ON(rxb == NULL); | ||
1268 | |||
1269 | rxq->queue[i] = NULL; | ||
1270 | |||
1271 | pci_unmap_page(priv->pci_dev, rxb->page_dma, | ||
1272 | PAGE_SIZE << priv->hw_params.rx_page_order, | ||
1273 | PCI_DMA_FROMDEVICE); | ||
1274 | pkt = rxb_addr(rxb); | ||
1275 | |||
1276 | len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; | ||
1277 | len += sizeof(u32); /* account for status word */ | ||
1278 | trace_iwlwifi_dev_rx(priv, pkt, len); | ||
1279 | |||
1280 | /* Reclaim a command buffer only if this packet is a response | ||
1281 | * to a (driver-originated) command. | ||
1282 | * If the packet (e.g. Rx frame) originated from uCode, | ||
1283 | * there is no command buffer to reclaim. | ||
1284 | * Ucode should set SEQ_RX_FRAME bit if ucode-originated, | ||
1285 | * but apparently a few don't get set; catch them here. */ | ||
1286 | reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && | ||
1287 | (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && | ||
1288 | (pkt->hdr.cmd != REPLY_TX); | ||
1289 | |||
1290 | /* Based on type of command response or notification, | ||
1291 | * handle those that need handling via function in | ||
1292 | * rx_handlers table. See iwl3945_setup_rx_handlers() */ | ||
1293 | if (priv->rx_handlers[pkt->hdr.cmd]) { | ||
1294 | IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i, | ||
1295 | get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); | ||
1296 | priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; | ||
1297 | priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); | ||
1298 | } else { | ||
1299 | /* No handling needed */ | ||
1300 | IWL_DEBUG_RX(priv, | ||
1301 | "r %d i %d No handler needed for %s, 0x%02x\n", | ||
1302 | r, i, get_cmd_string(pkt->hdr.cmd), | ||
1303 | pkt->hdr.cmd); | ||
1304 | } | ||
1305 | |||
1306 | /* | ||
1307 | * XXX: After here, we should always check rxb->page | ||
1308 | * against NULL before touching it or its virtual | ||
1309 | * memory (pkt). Because some rx_handler might have | ||
1310 | * already taken or freed the pages. | ||
1311 | */ | ||
1312 | |||
1313 | if (reclaim) { | ||
1314 | /* Invoke any callbacks, transfer the buffer to caller, | ||
1315 | * and fire off the (possibly) blocking iwl_send_cmd() | ||
1316 | * as we reclaim the driver command queue */ | ||
1317 | if (rxb->page) | ||
1318 | iwl_tx_cmd_complete(priv, rxb); | ||
1319 | else | ||
1320 | IWL_WARN(priv, "Claim null rxb?\n"); | ||
1321 | } | ||
1322 | |||
1323 | /* Reuse the page if possible. For notification packets and | ||
1324 | * SKBs that fail to Rx correctly, add them back into the | ||
1325 | * rx_free list for reuse later. */ | ||
1326 | spin_lock_irqsave(&rxq->lock, flags); | ||
1327 | if (rxb->page != NULL) { | ||
1328 | rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page, | ||
1329 | 0, PAGE_SIZE << priv->hw_params.rx_page_order, | ||
1330 | PCI_DMA_FROMDEVICE); | ||
1331 | list_add_tail(&rxb->list, &rxq->rx_free); | ||
1332 | rxq->free_count++; | ||
1333 | } else | ||
1334 | list_add_tail(&rxb->list, &rxq->rx_used); | ||
1335 | |||
1336 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
1337 | |||
1338 | i = (i + 1) & RX_QUEUE_MASK; | ||
1339 | /* If there are a lot of unused frames, | ||
1340 | * restock the Rx queue so ucode won't assert. */ | ||
1341 | if (fill_rx) { | ||
1342 | count++; | ||
1343 | if (count >= 8) { | ||
1344 | rxq->read = i; | ||
1345 | iwl3945_rx_replenish_now(priv); | ||
1346 | count = 0; | ||
1347 | } | ||
1348 | } | ||
1349 | } | ||
1350 | |||
1351 | /* Backtrack one entry */ | ||
1352 | rxq->read = i; | ||
1353 | if (fill_rx) | ||
1354 | iwl3945_rx_replenish_now(priv); | ||
1355 | else | ||
1356 | iwl3945_rx_queue_restock(priv); | ||
1357 | } | ||
1358 | |||
1359 | /* call this function to flush any scheduled tasklet */ | ||
1360 | static inline void iwl_synchronize_irq(struct iwl_priv *priv) | ||
1361 | { | ||
1362 | /* wait to make sure we flush pending tasklet*/ | ||
1363 | synchronize_irq(priv->pci_dev->irq); | ||
1364 | tasklet_kill(&priv->irq_tasklet); | ||
1365 | } | ||
1366 | |||
1367 | static const char *desc_lookup(int i) | ||
1368 | { | ||
1369 | switch (i) { | ||
1370 | case 1: | ||
1371 | return "FAIL"; | ||
1372 | case 2: | ||
1373 | return "BAD_PARAM"; | ||
1374 | case 3: | ||
1375 | return "BAD_CHECKSUM"; | ||
1376 | case 4: | ||
1377 | return "NMI_INTERRUPT"; | ||
1378 | case 5: | ||
1379 | return "SYSASSERT"; | ||
1380 | case 6: | ||
1381 | return "FATAL_ERROR"; | ||
1382 | } | ||
1383 | |||
1384 | return "UNKNOWN"; | ||
1385 | } | ||
1386 | |||
1387 | #define ERROR_START_OFFSET (1 * sizeof(u32)) | ||
1388 | #define ERROR_ELEM_SIZE (7 * sizeof(u32)) | ||
1389 | |||
1390 | void iwl3945_dump_nic_error_log(struct iwl_priv *priv) | ||
1391 | { | ||
1392 | u32 i; | ||
1393 | u32 desc, time, count, base, data1; | ||
1394 | u32 blink1, blink2, ilink1, ilink2; | ||
1395 | |||
1396 | base = le32_to_cpu(priv->card_alive.error_event_table_ptr); | ||
1397 | |||
1398 | if (!iwl3945_hw_valid_rtc_data_addr(base)) { | ||
1399 | IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base); | ||
1400 | return; | ||
1401 | } | ||
1402 | |||
1403 | |||
1404 | count = iwl_read_targ_mem(priv, base); | ||
1405 | |||
1406 | if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { | ||
1407 | IWL_ERR(priv, "Start IWL Error Log Dump:\n"); | ||
1408 | IWL_ERR(priv, "Status: 0x%08lX, count: %d\n", | ||
1409 | priv->status, count); | ||
1410 | } | ||
1411 | |||
1412 | IWL_ERR(priv, "Desc Time asrtPC blink2 " | ||
1413 | "ilink1 nmiPC Line\n"); | ||
1414 | for (i = ERROR_START_OFFSET; | ||
1415 | i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET; | ||
1416 | i += ERROR_ELEM_SIZE) { | ||
1417 | desc = iwl_read_targ_mem(priv, base + i); | ||
1418 | time = | ||
1419 | iwl_read_targ_mem(priv, base + i + 1 * sizeof(u32)); | ||
1420 | blink1 = | ||
1421 | iwl_read_targ_mem(priv, base + i + 2 * sizeof(u32)); | ||
1422 | blink2 = | ||
1423 | iwl_read_targ_mem(priv, base + i + 3 * sizeof(u32)); | ||
1424 | ilink1 = | ||
1425 | iwl_read_targ_mem(priv, base + i + 4 * sizeof(u32)); | ||
1426 | ilink2 = | ||
1427 | iwl_read_targ_mem(priv, base + i + 5 * sizeof(u32)); | ||
1428 | data1 = | ||
1429 | iwl_read_targ_mem(priv, base + i + 6 * sizeof(u32)); | ||
1430 | |||
1431 | IWL_ERR(priv, | ||
1432 | "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", | ||
1433 | desc_lookup(desc), desc, time, blink1, blink2, | ||
1434 | ilink1, ilink2, data1); | ||
1435 | trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, 0, | ||
1436 | 0, blink1, blink2, ilink1, ilink2); | ||
1437 | } | ||
1438 | } | ||
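/*
 * Illustrative sketch (not from the original driver) of the SRAM
 * error-log layout implied by the reads above; the struct name and the
 * grouping are assumptions, the field order follows the word offsets
 * actually read:
 *
 *   base + 0x00               : number of valid entries ("count")
 *   base + ERROR_START_OFFSET : first entry, ERROR_ELEM_SIZE (7 u32s) each
 */
struct iwl3945_error_elem_sketch {
	u32 desc;	/* error type, decoded by desc_lookup() */
	u32 time;	/* uCode timestamp */
	u32 blink1;	/* branch-link program counters */
	u32 blink2;
	u32 ilink1;	/* interrupt-link program counters */
	u32 ilink2;
	u32 data1;	/* error-specific data (e.g. source line) */
};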
1439 | |||
1440 | #define EVENT_START_OFFSET (6 * sizeof(u32)) | ||
1441 | |||
1442 | /** | ||
1443 | * iwl3945_print_event_log - Dump error event log to syslog | ||
1444 | * | ||
1445 | */ | ||
1446 | static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx, | ||
1447 | u32 num_events, u32 mode, | ||
1448 | int pos, char **buf, size_t bufsz) | ||
1449 | { | ||
1450 | u32 i; | ||
1451 | u32 base; /* SRAM byte address of event log header */ | ||
1452 | u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ | ||
1453 | u32 ptr; /* SRAM byte address of log data */ | ||
1454 | u32 ev, time, data; /* event log data */ | ||
1455 | unsigned long reg_flags; | ||
1456 | |||
1457 | if (num_events == 0) | ||
1458 | return pos; | ||
1459 | |||
1460 | base = le32_to_cpu(priv->card_alive.log_event_table_ptr); | ||
1461 | |||
1462 | if (mode == 0) | ||
1463 | event_size = 2 * sizeof(u32); | ||
1464 | else | ||
1465 | event_size = 3 * sizeof(u32); | ||
1466 | |||
1467 | ptr = base + EVENT_START_OFFSET + (start_idx * event_size); | ||
1468 | |||
1469 | /* Make sure device is powered up for SRAM reads */ | ||
1470 | spin_lock_irqsave(&priv->reg_lock, reg_flags); | ||
1471 | iwl_grab_nic_access(priv); | ||
1472 | |||
1473 | /* Set starting address; reads will auto-increment */ | ||
1474 | _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr); | ||
1475 | rmb(); | ||
1476 | |||
1477 | /* "time" is actually "data" for mode 0 (no timestamp). | ||
1478 | * place event id # at far right for easier visual parsing. */ | ||
1479 | for (i = 0; i < num_events; i++) { | ||
1480 | ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); | ||
1481 | time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); | ||
1482 | if (mode == 0) { | ||
1483 | /* data, ev */ | ||
1484 | if (bufsz) { | ||
1485 | pos += scnprintf(*buf + pos, bufsz - pos, | ||
1486 | "0x%08x:%04u\n", | ||
1487 | time, ev); | ||
1488 | } else { | ||
1489 | IWL_ERR(priv, "0x%08x\t%04u\n", time, ev); | ||
1490 | trace_iwlwifi_dev_ucode_event(priv, 0, | ||
1491 | time, ev); | ||
1492 | } | ||
1493 | } else { | ||
1494 | data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); | ||
1495 | if (bufsz) { | ||
1496 | pos += scnprintf(*buf + pos, bufsz - pos, | ||
1497 | "%010u:0x%08x:%04u\n", | ||
1498 | time, data, ev); | ||
1499 | } else { | ||
1500 | IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", | ||
1501 | time, data, ev); | ||
1502 | trace_iwlwifi_dev_ucode_event(priv, time, | ||
1503 | data, ev); | ||
1504 | } | ||
1505 | } | ||
1506 | } | ||
1507 | |||
1508 | /* Allow device to power down */ | ||
1509 | iwl_release_nic_access(priv); | ||
1510 | spin_unlock_irqrestore(&priv->reg_lock, reg_flags); | ||
1511 | return pos; | ||
1512 | } | ||
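/*
 * Illustrative sketch (not from the original driver) of the event-log
 * layout in SRAM, as implied by the per-entry reads above and the header
 * reads in iwl3945_dump_nic_event_log() below; the struct itself is an
 * assumption, the offsets mirror what the driver reads.
 */
struct iwl3945_event_log_hdr_sketch {
	u32 capacity;	/* max # of entries the log can hold */
	u32 mode;	/* 0: no timestamp, 1: timestamp recorded */
	u32 num_wraps;	/* # of times uCode wrapped to top of log */
	u32 next_entry;	/* index of next entry uCode will write */
	u32 unused[2];	/* not read by this driver */
	/*
	 * Entries start at EVENT_START_OFFSET (6 * sizeof(u32)):
	 *   mode 0:  { ev, data }        (2 u32s per entry)
	 *   else:    { ev, time, data }  (3 u32s per entry)
	 */
};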
1513 | |||
1514 | /** | ||
1515 | * iwl3945_print_last_event_logs - Dump the newest # of event log entries to syslog | ||
1516 | */ | ||
1517 | static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity, | ||
1518 | u32 num_wraps, u32 next_entry, | ||
1519 | u32 size, u32 mode, | ||
1520 | int pos, char **buf, size_t bufsz) | ||
1521 | { | ||
1522 | /* | ||
1523 | * display the newest DEFAULT_LOG_ENTRIES entries | ||
1524 | * i.e. the entries just before the next one that uCode would fill. | ||
1525 | */ | ||
1526 | if (num_wraps) { | ||
1527 | if (next_entry < size) { | ||
1528 | pos = iwl3945_print_event_log(priv, | ||
1529 | capacity - (size - next_entry), | ||
1530 | size - next_entry, mode, | ||
1531 | pos, buf, bufsz); | ||
1532 | pos = iwl3945_print_event_log(priv, 0, | ||
1533 | next_entry, mode, | ||
1534 | pos, buf, bufsz); | ||
1535 | } else | ||
1536 | pos = iwl3945_print_event_log(priv, next_entry - size, | ||
1537 | size, mode, | ||
1538 | pos, buf, bufsz); | ||
1539 | } else { | ||
1540 | if (next_entry < size) | ||
1541 | pos = iwl3945_print_event_log(priv, 0, | ||
1542 | next_entry, mode, | ||
1543 | pos, buf, bufsz); | ||
1544 | else | ||
1545 | pos = iwl3945_print_event_log(priv, next_entry - size, | ||
1546 | size, mode, | ||
1547 | pos, buf, bufsz); | ||
1548 | } | ||
1549 | return pos; | ||
1550 | } | ||
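/*
 * Worked example of the wrap handling above, with numbers chosen only
 * for illustration: with capacity = 512, num_wraps = 3, next_entry = 10
 * and size = 20, the first branch prints entries 502..511 (the tail of
 * the previous pass through the log) and then entries 0..9, i.e. the 20
 * entries written most recently.
 */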
1551 | |||
1552 | #define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20) | ||
1553 | |||
1554 | int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log, | ||
1555 | char **buf, bool display) | ||
1556 | { | ||
1557 | u32 base; /* SRAM byte address of event log header */ | ||
1558 | u32 capacity; /* event log capacity in # entries */ | ||
1559 | u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ | ||
1560 | u32 num_wraps; /* # times uCode wrapped to top of log */ | ||
1561 | u32 next_entry; /* index of next entry to be written by uCode */ | ||
1562 | u32 size; /* # entries that we'll print */ | ||
1563 | int pos = 0; | ||
1564 | size_t bufsz = 0; | ||
1565 | |||
1566 | base = le32_to_cpu(priv->card_alive.log_event_table_ptr); | ||
1567 | if (!iwl3945_hw_valid_rtc_data_addr(base)) { | ||
1568 | IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base); | ||
1569 | return -EINVAL; | ||
1570 | } | ||
1571 | |||
1572 | /* event log header */ | ||
1573 | capacity = iwl_read_targ_mem(priv, base); | ||
1574 | mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32))); | ||
1575 | num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); | ||
1576 | next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); | ||
1577 | |||
1578 | if (capacity > priv->cfg->base_params->max_event_log_size) { | ||
1579 | IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n", | ||
1580 | capacity, priv->cfg->base_params->max_event_log_size); | ||
1581 | capacity = priv->cfg->base_params->max_event_log_size; | ||
1582 | } | ||
1583 | |||
1584 | if (next_entry > priv->cfg->base_params->max_event_log_size) { | ||
1585 | IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n", | ||
1586 | next_entry, priv->cfg->base_params->max_event_log_size); | ||
1587 | next_entry = priv->cfg->base_params->max_event_log_size; | ||
1588 | } | ||
1589 | |||
1590 | size = num_wraps ? capacity : next_entry; | ||
1591 | |||
1592 | /* bail out if nothing in log */ | ||
1593 | if (size == 0) { | ||
1594 | IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n"); | ||
1595 | return pos; | ||
1596 | } | ||
1597 | |||
1598 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1599 | if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log) | ||
1600 | size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES) | ||
1601 | ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size; | ||
1602 | #else | ||
1603 | size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES) | ||
1604 | ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size; | ||
1605 | #endif | ||
1606 | |||
1607 | IWL_ERR(priv, "Start IWL Event Log Dump: display last %d count\n", | ||
1608 | size); | ||
1609 | |||
1610 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1611 | if (display) { | ||
1612 | if (full_log) | ||
1613 | bufsz = capacity * 48; | ||
1614 | else | ||
1615 | bufsz = size * 48; | ||
1616 | *buf = kmalloc(bufsz, GFP_KERNEL); | ||
1617 | if (!*buf) | ||
1618 | return -ENOMEM; | ||
1619 | } | ||
1620 | if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) { | ||
1621 | /* if uCode has wrapped back to top of log, | ||
1622 | * start at the oldest entry, | ||
1623 | * i.e the next one that uCode would fill. | ||
1624 | */ | ||
1625 | if (num_wraps) | ||
1626 | pos = iwl3945_print_event_log(priv, next_entry, | ||
1627 | capacity - next_entry, mode, | ||
1628 | pos, buf, bufsz); | ||
1629 | |||
1630 | /* (then/else) start at top of log */ | ||
1631 | pos = iwl3945_print_event_log(priv, 0, next_entry, mode, | ||
1632 | pos, buf, bufsz); | ||
1633 | } else | ||
1634 | pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps, | ||
1635 | next_entry, size, mode, | ||
1636 | pos, buf, bufsz); | ||
1637 | #else | ||
1638 | pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps, | ||
1639 | next_entry, size, mode, | ||
1640 | pos, buf, bufsz); | ||
1641 | #endif | ||
1642 | return pos; | ||
1643 | } | ||
1644 | |||
1645 | static void iwl3945_irq_tasklet(struct iwl_priv *priv) | ||
1646 | { | ||
1647 | u32 inta, handled = 0; | ||
1648 | u32 inta_fh; | ||
1649 | unsigned long flags; | ||
1650 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1651 | u32 inta_mask; | ||
1652 | #endif | ||
1653 | |||
1654 | spin_lock_irqsave(&priv->lock, flags); | ||
1655 | |||
1656 | /* Ack/clear/reset pending uCode interrupts. | ||
1657 | * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, | ||
1658 | * and will clear only when CSR_FH_INT_STATUS gets cleared. */ | ||
1659 | inta = iwl_read32(priv, CSR_INT); | ||
1660 | iwl_write32(priv, CSR_INT, inta); | ||
1661 | |||
1662 | /* Ack/clear/reset pending flow-handler (DMA) interrupts. | ||
1663 | * Any new interrupts that happen after this, either while we're | ||
1664 | * in this tasklet, or later, will show up in next ISR/tasklet. */ | ||
1665 | inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); | ||
1666 | iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); | ||
1667 | |||
1668 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1669 | if (iwl_get_debug_level(priv) & IWL_DL_ISR) { | ||
1670 | /* just for debug */ | ||
1671 | inta_mask = iwl_read32(priv, CSR_INT_MASK); | ||
1672 | IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", | ||
1673 | inta, inta_mask, inta_fh); | ||
1674 | } | ||
1675 | #endif | ||
1676 | |||
1677 | spin_unlock_irqrestore(&priv->lock, flags); | ||
1678 | |||
1679 | /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not | ||
1680 | * atomic, make sure that inta covers all the interrupts that | ||
1681 | * we've discovered, even if FH interrupt came in just after | ||
1682 | * reading CSR_INT. */ | ||
1683 | if (inta_fh & CSR39_FH_INT_RX_MASK) | ||
1684 | inta |= CSR_INT_BIT_FH_RX; | ||
1685 | if (inta_fh & CSR39_FH_INT_TX_MASK) | ||
1686 | inta |= CSR_INT_BIT_FH_TX; | ||
1687 | |||
1688 | /* Now service all interrupt bits discovered above. */ | ||
1689 | if (inta & CSR_INT_BIT_HW_ERR) { | ||
1690 | IWL_ERR(priv, "Hardware error detected. Restarting.\n"); | ||
1691 | |||
1692 | /* Tell the device to stop sending interrupts */ | ||
1693 | iwl_disable_interrupts(priv); | ||
1694 | |||
1695 | priv->isr_stats.hw++; | ||
1696 | iwl_irq_handle_error(priv); | ||
1697 | |||
1698 | handled |= CSR_INT_BIT_HW_ERR; | ||
1699 | |||
1700 | return; | ||
1701 | } | ||
1702 | |||
1703 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1704 | if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { | ||
1705 | /* NIC fires this, but we don't use it, redundant with WAKEUP */ | ||
1706 | if (inta & CSR_INT_BIT_SCD) { | ||
1707 | IWL_DEBUG_ISR(priv, "Scheduler finished to transmit " | ||
1708 | "the frame/frames.\n"); | ||
1709 | priv->isr_stats.sch++; | ||
1710 | } | ||
1711 | |||
1712 | /* Alive notification via Rx interrupt will do the real work */ | ||
1713 | if (inta & CSR_INT_BIT_ALIVE) { | ||
1714 | IWL_DEBUG_ISR(priv, "Alive interrupt\n"); | ||
1715 | priv->isr_stats.alive++; | ||
1716 | } | ||
1717 | } | ||
1718 | #endif | ||
1719 | /* Safely ignore these bits for debug checks below */ | ||
1720 | inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); | ||
1721 | |||
1722 | /* Error detected by uCode */ | ||
1723 | if (inta & CSR_INT_BIT_SW_ERR) { | ||
1724 | IWL_ERR(priv, "Microcode SW error detected. " | ||
1725 | "Restarting 0x%X.\n", inta); | ||
1726 | priv->isr_stats.sw++; | ||
1727 | iwl_irq_handle_error(priv); | ||
1728 | handled |= CSR_INT_BIT_SW_ERR; | ||
1729 | } | ||
1730 | |||
1731 | /* uCode wakes up after power-down sleep */ | ||
1732 | if (inta & CSR_INT_BIT_WAKEUP) { | ||
1733 | IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); | ||
1734 | iwl_rx_queue_update_write_ptr(priv, &priv->rxq); | ||
1735 | iwl_txq_update_write_ptr(priv, &priv->txq[0]); | ||
1736 | iwl_txq_update_write_ptr(priv, &priv->txq[1]); | ||
1737 | iwl_txq_update_write_ptr(priv, &priv->txq[2]); | ||
1738 | iwl_txq_update_write_ptr(priv, &priv->txq[3]); | ||
1739 | iwl_txq_update_write_ptr(priv, &priv->txq[4]); | ||
1740 | iwl_txq_update_write_ptr(priv, &priv->txq[5]); | ||
1741 | |||
1742 | priv->isr_stats.wakeup++; | ||
1743 | handled |= CSR_INT_BIT_WAKEUP; | ||
1744 | } | ||
1745 | |||
1746 | /* All uCode command responses, including Tx command responses, | ||
1747 | * Rx "responses" (frame-received notification), and other | ||
1748 | * notifications from uCode come through here */ | ||
1749 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { | ||
1750 | iwl3945_rx_handle(priv); | ||
1751 | priv->isr_stats.rx++; | ||
1752 | handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); | ||
1753 | } | ||
1754 | |||
1755 | if (inta & CSR_INT_BIT_FH_TX) { | ||
1756 | IWL_DEBUG_ISR(priv, "Tx interrupt\n"); | ||
1757 | priv->isr_stats.tx++; | ||
1758 | |||
1759 | iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6)); | ||
1760 | iwl_write_direct32(priv, FH39_TCSR_CREDIT | ||
1761 | (FH39_SRVC_CHNL), 0x0); | ||
1762 | handled |= CSR_INT_BIT_FH_TX; | ||
1763 | } | ||
1764 | |||
1765 | if (inta & ~handled) { | ||
1766 | IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled); | ||
1767 | priv->isr_stats.unhandled++; | ||
1768 | } | ||
1769 | |||
1770 | if (inta & ~priv->inta_mask) { | ||
1771 | IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n", | ||
1772 | inta & ~priv->inta_mask); | ||
1773 | IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh); | ||
1774 | } | ||
1775 | |||
1776 | /* Re-enable all interrupts */ | ||
1777 | /* only Re-enable if disabled by irq */ | ||
1778 | if (test_bit(STATUS_INT_ENABLED, &priv->status)) | ||
1779 | iwl_enable_interrupts(priv); | ||
1780 | |||
1781 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1782 | if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) { | ||
1783 | inta = iwl_read32(priv, CSR_INT); | ||
1784 | inta_mask = iwl_read32(priv, CSR_INT_MASK); | ||
1785 | inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); | ||
1786 | IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " | ||
1787 | "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); | ||
1788 | } | ||
1789 | #endif | ||
1790 | } | ||
1791 | |||
1792 | static int iwl3945_get_single_channel_for_scan(struct iwl_priv *priv, | ||
1793 | struct ieee80211_vif *vif, | ||
1794 | enum ieee80211_band band, | ||
1795 | struct iwl3945_scan_channel *scan_ch) | ||
1796 | { | ||
1797 | const struct ieee80211_supported_band *sband; | ||
1798 | u16 passive_dwell = 0; | ||
1799 | u16 active_dwell = 0; | ||
1800 | int added = 0; | ||
1801 | u8 channel = 0; | ||
1802 | |||
1803 | sband = iwl_get_hw_mode(priv, band); | ||
1804 | if (!sband) { | ||
1805 | IWL_ERR(priv, "invalid band\n"); | ||
1806 | return added; | ||
1807 | } | ||
1808 | |||
1809 | active_dwell = iwl_get_active_dwell_time(priv, band, 0); | ||
1810 | passive_dwell = iwl_get_passive_dwell_time(priv, band, vif); | ||
1811 | |||
1812 | if (passive_dwell <= active_dwell) | ||
1813 | passive_dwell = active_dwell + 1; | ||
1814 | |||
1815 | |||
1816 | channel = iwl_get_single_channel_number(priv, band); | ||
1817 | |||
1818 | if (channel) { | ||
1819 | scan_ch->channel = channel; | ||
1820 | scan_ch->type = 0; /* passive */ | ||
1821 | scan_ch->active_dwell = cpu_to_le16(active_dwell); | ||
1822 | scan_ch->passive_dwell = cpu_to_le16(passive_dwell); | ||
1823 | /* Set txpower levels to defaults */ | ||
1824 | scan_ch->tpc.dsp_atten = 110; | ||
1825 | if (band == IEEE80211_BAND_5GHZ) | ||
1826 | scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; | ||
1827 | else | ||
1828 | scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); | ||
1829 | added++; | ||
1830 | } else | ||
1831 | IWL_ERR(priv, "no valid channel found\n"); | ||
1832 | return added; | ||
1833 | } | ||
1834 | |||
1835 | static int iwl3945_get_channels_for_scan(struct iwl_priv *priv, | ||
1836 | enum ieee80211_band band, | ||
1837 | u8 is_active, u8 n_probes, | ||
1838 | struct iwl3945_scan_channel *scan_ch, | ||
1839 | struct ieee80211_vif *vif) | ||
1840 | { | ||
1841 | struct ieee80211_channel *chan; | ||
1842 | const struct ieee80211_supported_band *sband; | ||
1843 | const struct iwl_channel_info *ch_info; | ||
1844 | u16 passive_dwell = 0; | ||
1845 | u16 active_dwell = 0; | ||
1846 | int added, i; | ||
1847 | |||
1848 | sband = iwl_get_hw_mode(priv, band); | ||
1849 | if (!sband) | ||
1850 | return 0; | ||
1851 | |||
1852 | active_dwell = iwl_get_active_dwell_time(priv, band, n_probes); | ||
1853 | passive_dwell = iwl_get_passive_dwell_time(priv, band, vif); | ||
1854 | |||
1855 | if (passive_dwell <= active_dwell) | ||
1856 | passive_dwell = active_dwell + 1; | ||
1857 | |||
1858 | for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) { | ||
1859 | chan = priv->scan_request->channels[i]; | ||
1860 | |||
1861 | if (chan->band != band) | ||
1862 | continue; | ||
1863 | |||
1864 | scan_ch->channel = chan->hw_value; | ||
1865 | |||
1866 | ch_info = iwl_get_channel_info(priv, band, scan_ch->channel); | ||
1867 | if (!is_channel_valid(ch_info)) { | ||
1868 | IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n", | ||
1869 | scan_ch->channel); | ||
1870 | continue; | ||
1871 | } | ||
1872 | |||
1873 | scan_ch->active_dwell = cpu_to_le16(active_dwell); | ||
1874 | scan_ch->passive_dwell = cpu_to_le16(passive_dwell); | ||
1875 | /* If passive, set up for auto-switch | ||
1876 | * and use long active_dwell time. | ||
1877 | */ | ||
1878 | if (!is_active || is_channel_passive(ch_info) || | ||
1879 | (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) { | ||
1880 | scan_ch->type = 0; /* passive */ | ||
1881 | if (IWL_UCODE_API(priv->ucode_ver) == 1) | ||
1882 | scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1); | ||
1883 | } else { | ||
1884 | scan_ch->type = 1; /* active */ | ||
1885 | } | ||
1886 | |||
1887 | /* Set direct probe bits. These may be used both for active | ||
1888 | * scan channels (probes get sent right away), | ||
1889 | * and for passive channels (probes get sent only after | ||
1890 | * hearing a clear Rx packet). */ | ||
1891 | if (IWL_UCODE_API(priv->ucode_ver) >= 2) { | ||
1892 | if (n_probes) | ||
1893 | scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes); | ||
1894 | } else { | ||
1895 | /* uCode v1 does not allow setting direct probe bits on | ||
1896 | * passive channel. */ | ||
1897 | if ((scan_ch->type & 1) && n_probes) | ||
1898 | scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes); | ||
1899 | } | ||
1900 | |||
1901 | /* Set txpower levels to defaults */ | ||
1902 | scan_ch->tpc.dsp_atten = 110; | ||
1903 | /* scan_pwr_info->tpc.dsp_atten; */ | ||
1904 | |||
1905 | /*scan_pwr_info->tpc.tx_gain; */ | ||
1906 | if (band == IEEE80211_BAND_5GHZ) | ||
1907 | scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; | ||
1908 | else { | ||
1909 | scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); | ||
1910 | /* NOTE: if we were doing 6Mb OFDM for scans we'd use | ||
1911 | * power level: | ||
1912 | * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3; | ||
1913 | */ | ||
1914 | } | ||
1915 | |||
1916 | IWL_DEBUG_SCAN(priv, "Scanning %d [%s %d]\n", | ||
1917 | scan_ch->channel, | ||
1918 | (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE", | ||
1919 | (scan_ch->type & 1) ? | ||
1920 | active_dwell : passive_dwell); | ||
1921 | |||
1922 | scan_ch++; | ||
1923 | added++; | ||
1924 | } | ||
1925 | |||
1926 | IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added); | ||
1927 | return added; | ||
1928 | } | ||
1929 | |||
1930 | static void iwl3945_init_hw_rates(struct iwl_priv *priv, | ||
1931 | struct ieee80211_rate *rates) | ||
1932 | { | ||
1933 | int i; | ||
1934 | |||
1935 | for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) { | ||
1936 | rates[i].bitrate = iwl3945_rates[i].ieee * 5; | ||
1937 | rates[i].hw_value = i; /* Rate scaling will work on indexes */ | ||
1938 | rates[i].hw_value_short = i; | ||
1939 | rates[i].flags = 0; | ||
1940 | if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) { | ||
1941 | /* | ||
1942 | * If CCK != 1M then set short preamble rate flag. | ||
1943 | */ | ||
1944 | rates[i].flags |= (iwl3945_rates[i].plcp == 10) ? | ||
1945 | 0 : IEEE80211_RATE_SHORT_PREAMBLE; | ||
1946 | } | ||
1947 | } | ||
1948 | } | ||
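/*
 * Illustration of the unit conversion above, assuming the .ieee field
 * holds the IEEE rate code in 500 kbps units, which the *5 scaling
 * suggests: CCK 5.5 Mbps has .ieee == 11, so bitrate = 11 * 5 = 55,
 * i.e. 5.5 Mbps in the 100 kbps units mac80211 expects.  The
 * "plcp == 10" test above matches the 1 Mbps CCK rate, the only CCK
 * rate without a short-preamble variant.
 */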
1949 | |||
1950 | /****************************************************************************** | ||
1951 | * | ||
1952 | * uCode download functions | ||
1953 | * | ||
1954 | ******************************************************************************/ | ||
1955 | |||
1956 | static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv) | ||
1957 | { | ||
1958 | iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code); | ||
1959 | iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data); | ||
1960 | iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup); | ||
1961 | iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init); | ||
1962 | iwl_free_fw_desc(priv->pci_dev, &priv->ucode_init_data); | ||
1963 | iwl_free_fw_desc(priv->pci_dev, &priv->ucode_boot); | ||
1964 | } | ||
1965 | |||
1966 | /** | ||
1967 | * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host, | ||
1968 | * looking at all data. | ||
1969 | */ | ||
1970 | static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len) | ||
1971 | { | ||
1972 | u32 val; | ||
1973 | u32 save_len = len; | ||
1974 | int rc = 0; | ||
1975 | u32 errcnt; | ||
1976 | |||
1977 | IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); | ||
1978 | |||
1979 | iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, | ||
1980 | IWL39_RTC_INST_LOWER_BOUND); | ||
1981 | |||
1982 | errcnt = 0; | ||
1983 | for (; len > 0; len -= sizeof(u32), image++) { | ||
1984 | /* read data comes through single port, auto-incr addr */ | ||
1985 | /* NOTE: Use the debugless read so we don't flood kernel log | ||
1986 | * if IWL_DL_IO is set */ | ||
1987 | val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); | ||
1988 | if (val != le32_to_cpu(*image)) { | ||
1989 | IWL_ERR(priv, "uCode INST section is invalid at " | ||
1990 | "offset 0x%x, is 0x%x, s/b 0x%x\n", | ||
1991 | save_len - len, val, le32_to_cpu(*image)); | ||
1992 | rc = -EIO; | ||
1993 | errcnt++; | ||
1994 | if (errcnt >= 20) | ||
1995 | break; | ||
1996 | } | ||
1997 | } | ||
1998 | |||
1999 | |||
2000 | if (!errcnt) | ||
2001 | IWL_DEBUG_INFO(priv, | ||
2002 | "ucode image in INSTRUCTION memory is good\n"); | ||
2003 | |||
2004 | return rc; | ||
2005 | } | ||
2006 | |||
2007 | |||
2008 | /** | ||
2009 | * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host, | ||
2010 | * using sample data 100 bytes apart. If these sample points are good, | ||
2011 | * it's a pretty good bet that everything between them is good, too. | ||
2012 | */ | ||
2013 | static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len) | ||
2014 | { | ||
2015 | u32 val; | ||
2016 | int rc = 0; | ||
2017 | u32 errcnt = 0; | ||
2018 | u32 i; | ||
2019 | |||
2020 | IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); | ||
2021 | |||
2022 | for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { | ||
2023 | /* read data comes through single port, auto-incr addr */ | ||
2024 | /* NOTE: Use the debugless read so we don't flood kernel log | ||
2025 | * if IWL_DL_IO is set */ | ||
2026 | iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, | ||
2027 | i + IWL39_RTC_INST_LOWER_BOUND); | ||
2028 | val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT); | ||
2029 | if (val != le32_to_cpu(*image)) { | ||
2030 | #if 0 /* Enable this if you want to see details */ | ||
2031 | IWL_ERR(priv, "uCode INST section is invalid at " | ||
2032 | "offset 0x%x, is 0x%x, s/b 0x%x\n", | ||
2033 | i, val, *image); | ||
2034 | #endif | ||
2035 | rc = -EIO; | ||
2036 | errcnt++; | ||
2037 | if (errcnt >= 3) | ||
2038 | break; | ||
2039 | } | ||
2040 | } | ||
2041 | |||
2042 | return rc; | ||
2043 | } | ||
2044 | |||
2045 | |||
2046 | /** | ||
2047 | * iwl3945_verify_ucode - determine which instruction image is in SRAM, | ||
2048 | * and verify its contents | ||
2049 | */ | ||
2050 | static int iwl3945_verify_ucode(struct iwl_priv *priv) | ||
2051 | { | ||
2052 | __le32 *image; | ||
2053 | u32 len; | ||
2054 | int rc = 0; | ||
2055 | |||
2056 | /* Try bootstrap */ | ||
2057 | image = (__le32 *)priv->ucode_boot.v_addr; | ||
2058 | len = priv->ucode_boot.len; | ||
2059 | rc = iwl3945_verify_inst_sparse(priv, image, len); | ||
2060 | if (rc == 0) { | ||
2061 | IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n"); | ||
2062 | return 0; | ||
2063 | } | ||
2064 | |||
2065 | /* Try initialize */ | ||
2066 | image = (__le32 *)priv->ucode_init.v_addr; | ||
2067 | len = priv->ucode_init.len; | ||
2068 | rc = iwl3945_verify_inst_sparse(priv, image, len); | ||
2069 | if (rc == 0) { | ||
2070 | IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n"); | ||
2071 | return 0; | ||
2072 | } | ||
2073 | |||
2074 | /* Try runtime/protocol */ | ||
2075 | image = (__le32 *)priv->ucode_code.v_addr; | ||
2076 | len = priv->ucode_code.len; | ||
2077 | rc = iwl3945_verify_inst_sparse(priv, image, len); | ||
2078 | if (rc == 0) { | ||
2079 | IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n"); | ||
2080 | return 0; | ||
2081 | } | ||
2082 | |||
2083 | IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); | ||
2084 | |||
2085 | /* Since nothing seems to match, show first several data entries in | ||
2086 | * instruction SRAM, so maybe visual inspection will give a clue. | ||
2087 | * Selection of bootstrap image (vs. other images) is arbitrary. */ | ||
2088 | image = (__le32 *)priv->ucode_boot.v_addr; | ||
2089 | len = priv->ucode_boot.len; | ||
2090 | rc = iwl3945_verify_inst_full(priv, image, len); | ||
2091 | |||
2092 | return rc; | ||
2093 | } | ||
2094 | |||
2095 | static void iwl3945_nic_start(struct iwl_priv *priv) | ||
2096 | { | ||
2097 | /* Remove all resets to allow NIC to operate */ | ||
2098 | iwl_write32(priv, CSR_RESET, 0); | ||
2099 | } | ||
2100 | |||
2101 | #define IWL3945_UCODE_GET(item) \ | ||
2102 | static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\ | ||
2103 | { \ | ||
2104 | return le32_to_cpu(ucode->u.v1.item); \ | ||
2105 | } | ||
2106 | |||
2107 | static u32 iwl3945_ucode_get_header_size(u32 api_ver) | ||
2108 | { | ||
2109 | return 24; | ||
2110 | } | ||
2111 | |||
2112 | static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode) | ||
2113 | { | ||
2114 | return (u8 *) ucode->u.v1.data; | ||
2115 | } | ||
2116 | |||
2117 | IWL3945_UCODE_GET(inst_size); | ||
2118 | IWL3945_UCODE_GET(data_size); | ||
2119 | IWL3945_UCODE_GET(init_size); | ||
2120 | IWL3945_UCODE_GET(init_data_size); | ||
2121 | IWL3945_UCODE_GET(boot_size); | ||
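/*
 * For reference, the first invocation above, IWL3945_UCODE_GET(inst_size),
 * expands to (shown as a comment to avoid duplicating the definition):
 *
 *   static u32 iwl3945_ucode_get_inst_size(const struct iwl_ucode_header *ucode)
 *   {
 *           return le32_to_cpu(ucode->u.v1.inst_size);
 *   }
 *
 * i.e. one little-endian accessor per v1 ucode header field.
 */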
2122 | |||
2123 | /** | ||
2124 | * iwl3945_read_ucode - Read uCode images from disk file. | ||
2125 | * | ||
2126 | * Copy into buffers for card to fetch via bus-mastering | ||
2127 | */ | ||
2128 | static int iwl3945_read_ucode(struct iwl_priv *priv) | ||
2129 | { | ||
2130 | const struct iwl_ucode_header *ucode; | ||
2131 | int ret = -EINVAL, index; | ||
2132 | const struct firmware *ucode_raw; | ||
2133 | /* firmware file name contains uCode/driver compatibility version */ | ||
2134 | const char *name_pre = priv->cfg->fw_name_pre; | ||
2135 | const unsigned int api_max = priv->cfg->ucode_api_max; | ||
2136 | const unsigned int api_min = priv->cfg->ucode_api_min; | ||
2137 | char buf[25]; | ||
2138 | u8 *src; | ||
2139 | size_t len; | ||
2140 | u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size; | ||
2141 | |||
2142 | /* Ask kernel firmware_class module to get the boot firmware off disk. | ||
2143 | * request_firmware() is synchronous, file is in memory on return. */ | ||
2144 | for (index = api_max; index >= api_min; index--) { | ||
2145 | sprintf(buf, "%s%u%s", name_pre, index, ".ucode"); | ||
2146 | ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev); | ||
2147 | if (ret < 0) { | ||
2148 | IWL_ERR(priv, "%s firmware file req failed: %d\n", | ||
2149 | buf, ret); | ||
2150 | if (ret == -ENOENT) | ||
2151 | continue; | ||
2152 | else | ||
2153 | goto error; | ||
2154 | } else { | ||
2155 | if (index < api_max) | ||
2156 | IWL_ERR(priv, "Loaded firmware %s, " | ||
2157 | "which is deprecated. " | ||
2158 | " Please use API v%u instead.\n", | ||
2159 | buf, api_max); | ||
2160 | IWL_DEBUG_INFO(priv, "Got firmware '%s' file " | ||
2161 | "(%zd bytes) from disk\n", | ||
2162 | buf, ucode_raw->size); | ||
2163 | break; | ||
2164 | } | ||
2165 | } | ||
2166 | |||
2167 | if (ret < 0) | ||
2168 | goto error; | ||
2169 | |||
2170 | /* Make sure that we got at least our header! */ | ||
2171 | if (ucode_raw->size < iwl3945_ucode_get_header_size(1)) { | ||
2172 | IWL_ERR(priv, "File size way too small!\n"); | ||
2173 | ret = -EINVAL; | ||
2174 | goto err_release; | ||
2175 | } | ||
2176 | |||
2177 | /* Data from ucode file: header followed by uCode images */ | ||
2178 | ucode = (struct iwl_ucode_header *)ucode_raw->data; | ||
2179 | |||
2180 | priv->ucode_ver = le32_to_cpu(ucode->ver); | ||
2181 | api_ver = IWL_UCODE_API(priv->ucode_ver); | ||
2182 | inst_size = iwl3945_ucode_get_inst_size(ucode); | ||
2183 | data_size = iwl3945_ucode_get_data_size(ucode); | ||
2184 | init_size = iwl3945_ucode_get_init_size(ucode); | ||
2185 | init_data_size = iwl3945_ucode_get_init_data_size(ucode); | ||
2186 | boot_size = iwl3945_ucode_get_boot_size(ucode); | ||
2187 | src = iwl3945_ucode_get_data(ucode); | ||
2188 | |||
2189 | /* api_ver should match the api version forming part of the | ||
2190 | * firmware filename ... but we don't check for that and only rely | ||
2191 | * on the API version read from firmware header from here on forward */ | ||
2192 | |||
2193 | if (api_ver < api_min || api_ver > api_max) { | ||
2194 | IWL_ERR(priv, "Driver unable to support your firmware API. " | ||
2195 | "Driver supports v%u, firmware is v%u.\n", | ||
2196 | api_max, api_ver); | ||
2197 | priv->ucode_ver = 0; | ||
2198 | ret = -EINVAL; | ||
2199 | goto err_release; | ||
2200 | } | ||
2201 | if (api_ver != api_max) | ||
2202 | IWL_ERR(priv, "Firmware has old API version. Expected %u, " | ||
2203 | "got %u. New firmware can be obtained " | ||
2204 | "from http://www.intellinuxwireless.org.\n", | ||
2205 | api_max, api_ver); | ||
2206 | |||
2207 | IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n", | ||
2208 | IWL_UCODE_MAJOR(priv->ucode_ver), | ||
2209 | IWL_UCODE_MINOR(priv->ucode_ver), | ||
2210 | IWL_UCODE_API(priv->ucode_ver), | ||
2211 | IWL_UCODE_SERIAL(priv->ucode_ver)); | ||
2212 | |||
2213 | snprintf(priv->hw->wiphy->fw_version, | ||
2214 | sizeof(priv->hw->wiphy->fw_version), | ||
2215 | "%u.%u.%u.%u", | ||
2216 | IWL_UCODE_MAJOR(priv->ucode_ver), | ||
2217 | IWL_UCODE_MINOR(priv->ucode_ver), | ||
2218 | IWL_UCODE_API(priv->ucode_ver), | ||
2219 | IWL_UCODE_SERIAL(priv->ucode_ver)); | ||
2220 | |||
2221 | IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n", | ||
2222 | priv->ucode_ver); | ||
2223 | IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n", | ||
2224 | inst_size); | ||
2225 | IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n", | ||
2226 | data_size); | ||
2227 | IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n", | ||
2228 | init_size); | ||
2229 | IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n", | ||
2230 | init_data_size); | ||
2231 | IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n", | ||
2232 | boot_size); | ||
2233 | |||
2234 | |||
2235 | /* Verify size of file vs. image size info in file's header */ | ||
2236 | if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) + | ||
2237 | inst_size + data_size + init_size + | ||
2238 | init_data_size + boot_size) { | ||
2239 | |||
2240 | IWL_DEBUG_INFO(priv, | ||
2241 | "uCode file size %zd does not match expected size\n", | ||
2242 | ucode_raw->size); | ||
2243 | ret = -EINVAL; | ||
2244 | goto err_release; | ||
2245 | } | ||
2246 | |||
2247 | /* Verify that uCode images will fit in card's SRAM */ | ||
2248 | if (inst_size > IWL39_MAX_INST_SIZE) { | ||
2249 | IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in\n", | ||
2250 | inst_size); | ||
2251 | ret = -EINVAL; | ||
2252 | goto err_release; | ||
2253 | } | ||
2254 | |||
2255 | if (data_size > IWL39_MAX_DATA_SIZE) { | ||
2256 | IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in\n", | ||
2257 | data_size); | ||
2258 | ret = -EINVAL; | ||
2259 | goto err_release; | ||
2260 | } | ||
2261 | if (init_size > IWL39_MAX_INST_SIZE) { | ||
2262 | IWL_DEBUG_INFO(priv, | ||
2263 | "uCode init instr len %d too large to fit in\n", | ||
2264 | init_size); | ||
2265 | ret = -EINVAL; | ||
2266 | goto err_release; | ||
2267 | } | ||
2268 | if (init_data_size > IWL39_MAX_DATA_SIZE) { | ||
2269 | IWL_DEBUG_INFO(priv, | ||
2270 | "uCode init data len %d too large to fit in\n", | ||
2271 | init_data_size); | ||
2272 | ret = -EINVAL; | ||
2273 | goto err_release; | ||
2274 | } | ||
2275 | if (boot_size > IWL39_MAX_BSM_SIZE) { | ||
2276 | IWL_DEBUG_INFO(priv, | ||
2277 | "uCode boot instr len %d too large to fit in\n", | ||
2278 | boot_size); | ||
2279 | ret = -EINVAL; | ||
2280 | goto err_release; | ||
2281 | } | ||
2282 | |||
2283 | /* Allocate ucode buffers for card's bus-master loading ... */ | ||
2284 | |||
2285 | /* Runtime instructions and 2 copies of data: | ||
2286 | * 1) unmodified from disk | ||
2287 | * 2) backup cache for save/restore during power-downs */ | ||
2288 | priv->ucode_code.len = inst_size; | ||
2289 | iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code); | ||
2290 | |||
2291 | priv->ucode_data.len = data_size; | ||
2292 | iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data); | ||
2293 | |||
2294 | priv->ucode_data_backup.len = data_size; | ||
2295 | iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); | ||
2296 | |||
2297 | if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || | ||
2298 | !priv->ucode_data_backup.v_addr) | ||
2299 | goto err_pci_alloc; | ||
2300 | |||
2301 | /* Initialization instructions and data */ | ||
2302 | if (init_size && init_data_size) { | ||
2303 | priv->ucode_init.len = init_size; | ||
2304 | iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init); | ||
2305 | |||
2306 | priv->ucode_init_data.len = init_data_size; | ||
2307 | iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data); | ||
2308 | |||
2309 | if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr) | ||
2310 | goto err_pci_alloc; | ||
2311 | } | ||
2312 | |||
2313 | /* Bootstrap (instructions only, no data) */ | ||
2314 | if (boot_size) { | ||
2315 | priv->ucode_boot.len = boot_size; | ||
2316 | iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot); | ||
2317 | |||
2318 | if (!priv->ucode_boot.v_addr) | ||
2319 | goto err_pci_alloc; | ||
2320 | } | ||
2321 | |||
2322 | /* Copy images into buffers for card's bus-master reads ... */ | ||
2323 | |||
2324 | /* Runtime instructions (first block of data in file) */ | ||
2325 | len = inst_size; | ||
2326 | IWL_DEBUG_INFO(priv, | ||
2327 | "Copying (but not loading) uCode instr len %zd\n", len); | ||
2328 | memcpy(priv->ucode_code.v_addr, src, len); | ||
2329 | src += len; | ||
2330 | |||
2331 | IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", | ||
2332 | priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); | ||
2333 | |||
2334 | /* Runtime data (2nd block) | ||
2335 | * NOTE: Copy into backup buffer will be done in iwl3945_up() */ | ||
2336 | len = data_size; | ||
2337 | IWL_DEBUG_INFO(priv, | ||
2338 | "Copying (but not loading) uCode data len %zd\n", len); | ||
2339 | memcpy(priv->ucode_data.v_addr, src, len); | ||
2340 | memcpy(priv->ucode_data_backup.v_addr, src, len); | ||
2341 | src += len; | ||
2342 | |||
2343 | /* Initialization instructions (3rd block) */ | ||
2344 | if (init_size) { | ||
2345 | len = init_size; | ||
2346 | IWL_DEBUG_INFO(priv, | ||
2347 | "Copying (but not loading) init instr len %zd\n", len); | ||
2348 | memcpy(priv->ucode_init.v_addr, src, len); | ||
2349 | src += len; | ||
2350 | } | ||
2351 | |||
2352 | /* Initialization data (4th block) */ | ||
2353 | if (init_data_size) { | ||
2354 | len = init_data_size; | ||
2355 | IWL_DEBUG_INFO(priv, | ||
2356 | "Copying (but not loading) init data len %zd\n", len); | ||
2357 | memcpy(priv->ucode_init_data.v_addr, src, len); | ||
2358 | src += len; | ||
2359 | } | ||
2360 | |||
2361 | /* Bootstrap instructions (5th block) */ | ||
2362 | len = boot_size; | ||
2363 | IWL_DEBUG_INFO(priv, | ||
2364 | "Copying (but not loading) boot instr len %zd\n", len); | ||
2365 | memcpy(priv->ucode_boot.v_addr, src, len); | ||
2366 | |||
2367 | /* We have our copies now, allow the OS to release its copy */ | ||
2368 | release_firmware(ucode_raw); | ||
2369 | return 0; | ||
2370 | |||
2371 | err_pci_alloc: | ||
2372 | IWL_ERR(priv, "failed to allocate pci memory\n"); | ||
2373 | ret = -ENOMEM; | ||
2374 | iwl3945_dealloc_ucode_pci(priv); | ||
2375 | |||
2376 | err_release: | ||
2377 | release_firmware(ucode_raw); | ||
2378 | |||
2379 | error: | ||
2380 | return ret; | ||
2381 | } | ||
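/*
 * Sketch of the on-disk .ucode layout parsed above, as implied by the
 * size check and the copy sequence; offsets assume the 24-byte v1
 * header reported by iwl3945_ucode_get_header_size():
 *
 *   0                                     : header (ver + image sizes)
 *   hdr                                   : runtime instructions (inst_size)
 *   hdr + inst                            : runtime data         (data_size)
 *   hdr + inst + data                     : init instructions    (init_size)
 *   hdr + inst + data + init              : init data       (init_data_size)
 *   hdr + inst + data + init + init_data  : bootstrap insts      (boot_size)
 *
 * ucode_raw->size must therefore equal the sum of the header and all
 * five images, which is exactly the test performed above.
 */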
2382 | |||
2383 | |||
2384 | /** | ||
2385 | * iwl3945_set_ucode_ptrs - Set uCode address location | ||
2386 | * | ||
2387 | * Tell initialization uCode where to find runtime uCode. | ||
2388 | * | ||
2389 | * BSM registers initially contain pointers to initialization uCode. | ||
2390 | * We need to replace them to load runtime uCode inst and data, | ||
2391 | * and to save runtime data when powering down. | ||
2392 | */ | ||
2393 | static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv) | ||
2394 | { | ||
2395 | dma_addr_t pinst; | ||
2396 | dma_addr_t pdata; | ||
2397 | |||
2398 | /* bits 31:0 for 3945 */ | ||
2399 | pinst = priv->ucode_code.p_addr; | ||
2400 | pdata = priv->ucode_data_backup.p_addr; | ||
2401 | |||
2402 | /* Tell bootstrap uCode where to find image to load */ | ||
2403 | iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); | ||
2404 | iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); | ||
2405 | iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, | ||
2406 | priv->ucode_data.len); | ||
2407 | |||
2408 | /* Inst byte count must be last to set up, bit 31 signals uCode | ||
2409 | * that all new ptr/size info is in place */ | ||
2410 | iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, | ||
2411 | priv->ucode_code.len | BSM_DRAM_INST_LOAD); | ||
2412 | |||
2413 | IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n"); | ||
2414 | |||
2415 | return 0; | ||
2416 | } | ||
2417 | |||
2418 | /** | ||
2419 | * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received | ||
2420 | * | ||
2421 | * Called after REPLY_ALIVE notification received from "initialize" uCode. | ||
2422 | * | ||
2423 | * Tell "initialize" uCode to go ahead and load the runtime uCode. | ||
2424 | */ | ||
2425 | static void iwl3945_init_alive_start(struct iwl_priv *priv) | ||
2426 | { | ||
2427 | /* Check alive response for "valid" sign from uCode */ | ||
2428 | if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { | ||
2429 | /* We had an error bringing up the hardware, so take it | ||
2430 | * all the way back down so we can try again */ | ||
2431 | IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n"); | ||
2432 | goto restart; | ||
2433 | } | ||
2434 | |||
2435 | /* Bootstrap uCode has loaded initialize uCode ... verify inst image. | ||
2436 | * This is a paranoid check, because we would not have gotten the | ||
2437 | * "initialize" alive if code weren't properly loaded. */ | ||
2438 | if (iwl3945_verify_ucode(priv)) { | ||
2439 | /* Runtime instruction load was bad; | ||
2440 | * take it all the way back down so we can try again */ | ||
2441 | IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n"); | ||
2442 | goto restart; | ||
2443 | } | ||
2444 | |||
2445 | /* Send pointers to protocol/runtime uCode image ... init code will | ||
2446 | * load and launch runtime uCode, which will send us another "Alive" | ||
2447 | * notification. */ | ||
2448 | IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); | ||
2449 | if (iwl3945_set_ucode_ptrs(priv)) { | ||
2450 | /* Runtime instruction load won't happen; | ||
2451 | * take it all the way back down so we can try again */ | ||
2452 | IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n"); | ||
2453 | goto restart; | ||
2454 | } | ||
2455 | return; | ||
2456 | |||
2457 | restart: | ||
2458 | queue_work(priv->workqueue, &priv->restart); | ||
2459 | } | ||
2460 | |||
2461 | /** | ||
2462 | * iwl3945_alive_start - called after REPLY_ALIVE notification received | ||
2463 | * from protocol/runtime uCode (initialization uCode's | ||
2464 | * Alive gets handled by iwl3945_init_alive_start()). | ||
2465 | */ | ||
2466 | static void iwl3945_alive_start(struct iwl_priv *priv) | ||
2467 | { | ||
2468 | int thermal_spin = 0; | ||
2469 | u32 rfkill; | ||
2470 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
2471 | |||
2472 | IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); | ||
2473 | |||
2474 | if (priv->card_alive.is_valid != UCODE_VALID_OK) { | ||
2475 | /* We had an error bringing up the hardware, so take it | ||
2476 | * all the way back down so we can try again */ | ||
2477 | IWL_DEBUG_INFO(priv, "Alive failed.\n"); | ||
2478 | goto restart; | ||
2479 | } | ||
2480 | |||
2481 | /* Initialize uCode has loaded Runtime uCode ... verify inst image. | ||
2482 | * This is a paranoid check, because we would not have gotten the | ||
2483 | * "runtime" alive if code weren't properly loaded. */ | ||
2484 | if (iwl3945_verify_ucode(priv)) { | ||
2485 | /* Runtime instruction load was bad; | ||
2486 | * take it all the way back down so we can try again */ | ||
2487 | IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n"); | ||
2488 | goto restart; | ||
2489 | } | ||
2490 | |||
2491 | rfkill = iwl_read_prph(priv, APMG_RFKILL_REG); | ||
2492 | IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill); | ||
2493 | |||
2494 | if (rfkill & 0x1) { | ||
2495 | clear_bit(STATUS_RF_KILL_HW, &priv->status); | ||
2496 | /* if RFKILL is not on, then wait for thermal | ||
2497 | * sensor in adapter to kick in */ | ||
2498 | while (iwl3945_hw_get_temperature(priv) == 0) { | ||
2499 | thermal_spin++; | ||
2500 | udelay(10); | ||
2501 | } | ||
2502 | |||
2503 | if (thermal_spin) | ||
2504 | IWL_DEBUG_INFO(priv, "Thermal calibration took %dus\n", | ||
2505 | thermal_spin * 10); | ||
2506 | } else | ||
2507 | set_bit(STATUS_RF_KILL_HW, &priv->status); | ||
2508 | |||
2509 | /* After the ALIVE response, we can send commands to 3945 uCode */ | ||
2510 | set_bit(STATUS_ALIVE, &priv->status); | ||
2511 | |||
2512 | /* Enable watchdog to monitor the driver tx queues */ | ||
2513 | iwl_setup_watchdog(priv); | ||
2514 | |||
2515 | if (iwl_is_rfkill(priv)) | ||
2516 | return; | ||
2517 | |||
2518 | ieee80211_wake_queues(priv->hw); | ||
2519 | |||
2520 | priv->active_rate = IWL_RATES_MASK_3945; | ||
2521 | |||
2522 | iwl_power_update_mode(priv, true); | ||
2523 | |||
2524 | if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) { | ||
2525 | struct iwl3945_rxon_cmd *active_rxon = | ||
2526 | (struct iwl3945_rxon_cmd *)(&ctx->active); | ||
2527 | |||
2528 | ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; | ||
2529 | active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
2530 | } else { | ||
2531 | /* Initialize our rx_config data */ | ||
2532 | iwl_connection_init_rx_config(priv, ctx); | ||
2533 | } | ||
2534 | |||
2535 | /* Configure Bluetooth device coexistence support */ | ||
2536 | priv->cfg->ops->hcmd->send_bt_config(priv); | ||
2537 | |||
2538 | set_bit(STATUS_READY, &priv->status); | ||
2539 | |||
2540 | /* Configure the adapter for unassociated operation */ | ||
2541 | iwl3945_commit_rxon(priv, ctx); | ||
2542 | |||
2543 | iwl3945_reg_txpower_periodic(priv); | ||
2544 | |||
2545 | IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); | ||
2546 | wake_up_interruptible(&priv->wait_command_queue); | ||
2547 | |||
2548 | return; | ||
2549 | |||
2550 | restart: | ||
2551 | queue_work(priv->workqueue, &priv->restart); | ||
2552 | } | ||
2553 | |||
2554 | static void iwl3945_cancel_deferred_work(struct iwl_priv *priv); | ||
2555 | |||
2556 | static void __iwl3945_down(struct iwl_priv *priv) | ||
2557 | { | ||
2558 | unsigned long flags; | ||
2559 | int exit_pending; | ||
2560 | |||
2561 | IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); | ||
2562 | |||
2563 | iwl_scan_cancel_timeout(priv, 200); | ||
2564 | |||
2565 | exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); | ||
2566 | |||
2567 | /* Stop the TX queue watchdog. The STATUS_EXIT_PENDING bit must be set | ||
2568 | * to prevent the timer from being re-armed */ | ||
2569 | del_timer_sync(&priv->watchdog); | ||
2570 | |||
2571 | /* Station information will now be cleared in device */ | ||
2572 | iwl_clear_ucode_stations(priv, NULL); | ||
2573 | iwl_dealloc_bcast_stations(priv); | ||
2574 | iwl_clear_driver_stations(priv); | ||
2575 | |||
2576 | /* Unblock any waiting calls */ | ||
2577 | wake_up_interruptible_all(&priv->wait_command_queue); | ||
2578 | |||
2579 | /* Wipe out the EXIT_PENDING status bit if we are not actually | ||
2580 | * exiting the module */ | ||
2581 | if (!exit_pending) | ||
2582 | clear_bit(STATUS_EXIT_PENDING, &priv->status); | ||
2583 | |||
2584 | /* stop and reset the on-board processor */ | ||
2585 | iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); | ||
2586 | |||
2587 | /* tell the device to stop sending interrupts */ | ||
2588 | spin_lock_irqsave(&priv->lock, flags); | ||
2589 | iwl_disable_interrupts(priv); | ||
2590 | spin_unlock_irqrestore(&priv->lock, flags); | ||
2591 | iwl_synchronize_irq(priv); | ||
2592 | |||
2593 | if (priv->mac80211_registered) | ||
2594 | ieee80211_stop_queues(priv->hw); | ||
2595 | |||
2596 | /* If we have not previously called iwl3945_init() then | ||
2597 | * clear all bits but the RF Kill bits and return */ | ||
2598 | if (!iwl_is_init(priv)) { | ||
2599 | priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << | ||
2600 | STATUS_RF_KILL_HW | | ||
2601 | test_bit(STATUS_GEO_CONFIGURED, &priv->status) << | ||
2602 | STATUS_GEO_CONFIGURED | | ||
2603 | test_bit(STATUS_EXIT_PENDING, &priv->status) << | ||
2604 | STATUS_EXIT_PENDING; | ||
2605 | goto exit; | ||
2606 | } | ||
2607 | |||
2608 | /* ...otherwise clear out all the status bits but the RF Kill | ||
2609 | * bit and continue taking the NIC down. */ | ||
2610 | priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << | ||
2611 | STATUS_RF_KILL_HW | | ||
2612 | test_bit(STATUS_GEO_CONFIGURED, &priv->status) << | ||
2613 | STATUS_GEO_CONFIGURED | | ||
2614 | test_bit(STATUS_FW_ERROR, &priv->status) << | ||
2615 | STATUS_FW_ERROR | | ||
2616 | test_bit(STATUS_EXIT_PENDING, &priv->status) << | ||
2617 | STATUS_EXIT_PENDING; | ||
2618 | |||
2619 | iwl3945_hw_txq_ctx_stop(priv); | ||
2620 | iwl3945_hw_rxq_stop(priv); | ||
2621 | |||
2622 | /* Power-down device's busmaster DMA clocks */ | ||
2623 | iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); | ||
2624 | udelay(5); | ||
2625 | |||
2626 | /* Stop the device, and put it in low power state */ | ||
2627 | iwl_apm_stop(priv); | ||
2628 | |||
2629 | exit: | ||
2630 | memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); | ||
2631 | |||
2632 | if (priv->beacon_skb) | ||
2633 | dev_kfree_skb(priv->beacon_skb); | ||
2634 | priv->beacon_skb = NULL; | ||
2635 | |||
2636 | /* clear out any free frames */ | ||
2637 | iwl3945_clear_free_frames(priv); | ||
2638 | } | ||
2639 | |||
2640 | static void iwl3945_down(struct iwl_priv *priv) | ||
2641 | { | ||
2642 | mutex_lock(&priv->mutex); | ||
2643 | __iwl3945_down(priv); | ||
2644 | mutex_unlock(&priv->mutex); | ||
2645 | |||
2646 | iwl3945_cancel_deferred_work(priv); | ||
2647 | } | ||
2648 | |||
2649 | #define MAX_HW_RESTARTS 5 | ||
2650 | |||
2651 | static int iwl3945_alloc_bcast_station(struct iwl_priv *priv) | ||
2652 | { | ||
2653 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
2654 | unsigned long flags; | ||
2655 | u8 sta_id; | ||
2656 | |||
2657 | spin_lock_irqsave(&priv->sta_lock, flags); | ||
2658 | sta_id = iwl_prep_station(priv, ctx, iwl_bcast_addr, false, NULL); | ||
2659 | if (sta_id == IWL_INVALID_STATION) { | ||
2660 | IWL_ERR(priv, "Unable to prepare broadcast station\n"); | ||
2661 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
2662 | |||
2663 | return -EINVAL; | ||
2664 | } | ||
2665 | |||
2666 | priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE; | ||
2667 | priv->stations[sta_id].used |= IWL_STA_BCAST; | ||
2668 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
2669 | |||
2670 | return 0; | ||
2671 | } | ||
2672 | |||
2673 | static int __iwl3945_up(struct iwl_priv *priv) | ||
2674 | { | ||
2675 | int rc, i; | ||
2676 | |||
2677 | rc = iwl3945_alloc_bcast_station(priv); | ||
2678 | if (rc) | ||
2679 | return rc; | ||
2680 | |||
2681 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { | ||
2682 | IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); | ||
2683 | return -EIO; | ||
2684 | } | ||
2685 | |||
2686 | if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { | ||
2687 | IWL_ERR(priv, "ucode not available for device bring up\n"); | ||
2688 | return -EIO; | ||
2689 | } | ||
2690 | |||
2691 | /* If platform's RF_KILL switch is NOT set to KILL */ | ||
2692 | if (iwl_read32(priv, CSR_GP_CNTRL) & | ||
2693 | CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) | ||
2694 | clear_bit(STATUS_RF_KILL_HW, &priv->status); | ||
2695 | else { | ||
2696 | set_bit(STATUS_RF_KILL_HW, &priv->status); | ||
2697 | IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n"); | ||
2698 | return -ENODEV; | ||
2699 | } | ||
2700 | |||
2701 | iwl_write32(priv, CSR_INT, 0xFFFFFFFF); | ||
2702 | |||
2703 | rc = iwl3945_hw_nic_init(priv); | ||
2704 | if (rc) { | ||
2705 | IWL_ERR(priv, "Unable to int nic\n"); | ||
2706 | return rc; | ||
2707 | } | ||
2708 | |||
2709 | /* make sure rfkill handshake bits are cleared */ | ||
2710 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
2711 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, | ||
2712 | CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); | ||
2713 | |||
2714 | /* clear (again), then enable host interrupts */ | ||
2715 | iwl_write32(priv, CSR_INT, 0xFFFFFFFF); | ||
2716 | iwl_enable_interrupts(priv); | ||
2717 | |||
2718 | /* really make sure rfkill handshake bits are cleared */ | ||
2719 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
2720 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
2721 | |||
2722 | /* Copy original ucode data image from disk into backup cache. | ||
2723 | * This will be used to initialize the on-board processor's | ||
2724 | * data SRAM for a clean start when the runtime program first loads. */ | ||
2725 | memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, | ||
2726 | priv->ucode_data.len); | ||
2727 | |||
2728 | /* We return success when we resume from suspend and rf_kill is on. */ | ||
2729 | if (test_bit(STATUS_RF_KILL_HW, &priv->status)) | ||
2730 | return 0; | ||
2731 | |||
2732 | for (i = 0; i < MAX_HW_RESTARTS; i++) { | ||
2733 | |||
2734 | /* load bootstrap state machine, | ||
2735 | * load bootstrap program into processor's memory, | ||
2736 | * prepare to load the "initialize" uCode */ | ||
2737 | rc = priv->cfg->ops->lib->load_ucode(priv); | ||
2738 | |||
2739 | if (rc) { | ||
2740 | IWL_ERR(priv, | ||
2741 | "Unable to set up bootstrap uCode: %d\n", rc); | ||
2742 | continue; | ||
2743 | } | ||
2744 | |||
2745 | /* start card; "initialize" will load runtime ucode */ | ||
2746 | iwl3945_nic_start(priv); | ||
2747 | |||
2748 | IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n"); | ||
2749 | |||
2750 | return 0; | ||
2751 | } | ||
2752 | |||
2753 | set_bit(STATUS_EXIT_PENDING, &priv->status); | ||
2754 | __iwl3945_down(priv); | ||
2755 | clear_bit(STATUS_EXIT_PENDING, &priv->status); | ||
2756 | |||
2757 | /* tried to restart and configure the device for as long as our | ||
2758 | * patience could withstand */ | ||
2759 | IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i); | ||
2760 | return -EIO; | ||
2761 | } | ||
2762 | |||
2763 | |||
2764 | /***************************************************************************** | ||
2765 | * | ||
2766 | * Workqueue callbacks | ||
2767 | * | ||
2768 | *****************************************************************************/ | ||
2769 | |||
2770 | static void iwl3945_bg_init_alive_start(struct work_struct *data) | ||
2771 | { | ||
2772 | struct iwl_priv *priv = | ||
2773 | container_of(data, struct iwl_priv, init_alive_start.work); | ||
2774 | |||
2775 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
2776 | return; | ||
2777 | |||
2778 | mutex_lock(&priv->mutex); | ||
2779 | iwl3945_init_alive_start(priv); | ||
2780 | mutex_unlock(&priv->mutex); | ||
2781 | } | ||
2782 | |||
2783 | static void iwl3945_bg_alive_start(struct work_struct *data) | ||
2784 | { | ||
2785 | struct iwl_priv *priv = | ||
2786 | container_of(data, struct iwl_priv, alive_start.work); | ||
2787 | |||
2788 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
2789 | return; | ||
2790 | |||
2791 | mutex_lock(&priv->mutex); | ||
2792 | iwl3945_alive_start(priv); | ||
2793 | mutex_unlock(&priv->mutex); | ||
2794 | } | ||
2795 | |||
2796 | /* | ||
2797 | * 3945 cannot interrupt driver when hardware rf kill switch toggles; | ||
2798 | * driver must poll CSR_GP_CNTRL_REG register for change. This register | ||
2799 | * *is* readable even when device has been SW_RESET into low power mode | ||
2800 | * (e.g. during RF KILL). | ||
2801 | */ | ||
2802 | static void iwl3945_rfkill_poll(struct work_struct *data) | ||
2803 | { | ||
2804 | struct iwl_priv *priv = | ||
2805 | container_of(data, struct iwl_priv, _3945.rfkill_poll.work); | ||
2806 | bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status); | ||
2807 | bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL) | ||
2808 | & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); | ||
2809 | |||
2810 | if (new_rfkill != old_rfkill) { | ||
2811 | if (new_rfkill) | ||
2812 | set_bit(STATUS_RF_KILL_HW, &priv->status); | ||
2813 | else | ||
2814 | clear_bit(STATUS_RF_KILL_HW, &priv->status); | ||
2815 | |||
2816 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill); | ||
2817 | |||
2818 | IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n", | ||
2819 | new_rfkill ? "disable radio" : "enable radio"); | ||
2820 | } | ||
2821 | |||
2822 | /* Keep this running, even if the radio is now enabled. This will be | ||
2823 | * cancelled in mac_start() if the system decides to start again */ | ||
2824 | queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll, | ||
2825 | round_jiffies_relative(2 * HZ)); | ||
2826 | |||
2827 | } | ||
2828 | |||
2829 | int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) | ||
2830 | { | ||
2831 | struct iwl_host_cmd cmd = { | ||
2832 | .id = REPLY_SCAN_CMD, | ||
2833 | .len = sizeof(struct iwl3945_scan_cmd), | ||
2834 | .flags = CMD_SIZE_HUGE, | ||
2835 | }; | ||
2836 | struct iwl3945_scan_cmd *scan; | ||
2837 | u8 n_probes = 0; | ||
2838 | enum ieee80211_band band; | ||
2839 | bool is_active = false; | ||
2840 | int ret; | ||
2841 | |||
2842 | lockdep_assert_held(&priv->mutex); | ||
2843 | |||
2844 | if (!priv->scan_cmd) { | ||
2845 | priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) + | ||
2846 | IWL_MAX_SCAN_SIZE, GFP_KERNEL); | ||
2847 | if (!priv->scan_cmd) { | ||
2848 | IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n"); | ||
2849 | return -ENOMEM; | ||
2850 | } | ||
2851 | } | ||
2852 | scan = priv->scan_cmd; | ||
2853 | memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE); | ||
2854 | |||
2855 | scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; | ||
2856 | scan->quiet_time = IWL_ACTIVE_QUIET_TIME; | ||
2857 | |||
2858 | if (iwl_is_associated(priv, IWL_RXON_CTX_BSS)) { | ||
2859 | u16 interval = 0; | ||
2860 | u32 extra; | ||
2861 | u32 suspend_time = 100; | ||
2862 | u32 scan_suspend_time = 100; | ||
2863 | |||
2864 | IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); | ||
2865 | |||
2866 | if (priv->is_internal_short_scan) | ||
2867 | interval = 0; | ||
2868 | else | ||
2869 | interval = vif->bss_conf.beacon_int; | ||
2870 | |||
2871 | scan->suspend_time = 0; | ||
2872 | scan->max_out_time = cpu_to_le32(200 * 1024); | ||
2873 | if (!interval) | ||
2874 | interval = suspend_time; | ||
2875 | /* | ||
2876 | * suspend time format: | ||
2877 | * 0-19: beacon interval in usec (time before exec.) | ||
2878 | * 20-23: 0 | ||
2879 | * 24-31: number of beacons (suspend between channels) | ||
2880 | */ | ||
2881 | |||
2882 | extra = (suspend_time / interval) << 24; | ||
2883 | scan_suspend_time = 0xFF0FFFFF & | ||
2884 | (extra | ((suspend_time % interval) * 1024)); | ||
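| /* | ||
| * Worked example (illustrative numbers only): with suspend_time = 100 | ||
| * and a beacon interval of 300, extra = (100 / 300) << 24 = 0 and the | ||
| * low field is (100 % 300) * 1024 = 102400 usec, giving 0x00019000; | ||
| * with an interval of 100 one whole beacon fits, so extra = 1 << 24 | ||
| * and scan_suspend_time becomes 0x01000000. | ||
| */ | ||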
2885 | |||
2886 | scan->suspend_time = cpu_to_le32(scan_suspend_time); | ||
2887 | IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n", | ||
2888 | scan_suspend_time, interval); | ||
2889 | } | ||
2890 | |||
2891 | if (priv->is_internal_short_scan) { | ||
2892 | IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n"); | ||
2893 | } else if (priv->scan_request->n_ssids) { | ||
2894 | int i, p = 0; | ||
2895 | IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); | ||
2896 | for (i = 0; i < priv->scan_request->n_ssids; i++) { | ||
2897 | /* always does wildcard anyway */ | ||
2898 | if (!priv->scan_request->ssids[i].ssid_len) | ||
2899 | continue; | ||
2900 | scan->direct_scan[p].id = WLAN_EID_SSID; | ||
2901 | scan->direct_scan[p].len = | ||
2902 | priv->scan_request->ssids[i].ssid_len; | ||
2903 | memcpy(scan->direct_scan[p].ssid, | ||
2904 | priv->scan_request->ssids[i].ssid, | ||
2905 | priv->scan_request->ssids[i].ssid_len); | ||
2906 | n_probes++; | ||
2907 | p++; | ||
2908 | } | ||
2909 | is_active = true; | ||
2910 | } else | ||
2911 | IWL_DEBUG_SCAN(priv, "Kicking off passive scan.\n"); | ||
2912 | |||
2913 | /* We don't build a direct scan probe request; the uCode will do | ||
2914 | * that based on the direct_mask added to each channel entry */ | ||
2915 | scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; | ||
2916 | scan->tx_cmd.sta_id = priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id; | ||
2917 | scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; | ||
2918 | |||
2919 | /* flags + rate selection */ | ||
2920 | |||
2921 | switch (priv->scan_band) { | ||
2922 | case IEEE80211_BAND_2GHZ: | ||
2923 | scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; | ||
2924 | scan->tx_cmd.rate = IWL_RATE_1M_PLCP; | ||
2925 | band = IEEE80211_BAND_2GHZ; | ||
2926 | break; | ||
2927 | case IEEE80211_BAND_5GHZ: | ||
2928 | scan->tx_cmd.rate = IWL_RATE_6M_PLCP; | ||
2929 | band = IEEE80211_BAND_5GHZ; | ||
2930 | break; | ||
2931 | default: | ||
2932 | IWL_WARN(priv, "Invalid scan band\n"); | ||
2933 | return -EIO; | ||
2934 | } | ||
2935 | |||
2936 | /* | ||
2937 | * If active scanning is requested but a certain channel | ||
2938 | * is marked passive, we can do active scanning if we | ||
2939 | * detect transmissions. | ||
2940 | */ | ||
2941 | scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT : | ||
2942 | IWL_GOOD_CRC_TH_DISABLED; | ||
2943 | |||
2944 | if (!priv->is_internal_short_scan) { | ||
2945 | scan->tx_cmd.len = cpu_to_le16( | ||
2946 | iwl_fill_probe_req(priv, | ||
2947 | (struct ieee80211_mgmt *)scan->data, | ||
2948 | vif->addr, | ||
2949 | priv->scan_request->ie, | ||
2950 | priv->scan_request->ie_len, | ||
2951 | IWL_MAX_SCAN_SIZE - sizeof(*scan))); | ||
2952 | } else { | ||
2953 | /* use bcast addr, will not be transmitted but must be valid */ | ||
2954 | scan->tx_cmd.len = cpu_to_le16( | ||
2955 | iwl_fill_probe_req(priv, | ||
2956 | (struct ieee80211_mgmt *)scan->data, | ||
2957 | iwl_bcast_addr, NULL, 0, | ||
2958 | IWL_MAX_SCAN_SIZE - sizeof(*scan))); | ||
2959 | } | ||
2960 | /* select Rx antennas */ | ||
2961 | scan->flags |= iwl3945_get_antenna_flags(priv); | ||
2962 | |||
2963 | if (priv->is_internal_short_scan) { | ||
2964 | scan->channel_count = | ||
2965 | iwl3945_get_single_channel_for_scan(priv, vif, band, | ||
2966 | (void *)&scan->data[le16_to_cpu( | ||
2967 | scan->tx_cmd.len)]); | ||
2968 | } else { | ||
2969 | scan->channel_count = | ||
2970 | iwl3945_get_channels_for_scan(priv, band, is_active, n_probes, | ||
2971 | (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)], vif); | ||
2972 | } | ||
2973 | |||
2974 | if (scan->channel_count == 0) { | ||
2975 | IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); | ||
2976 | return -EIO; | ||
2977 | } | ||
2978 | |||
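| /* | ||
| * The scan command is variable length: the fixed iwl3945_scan_cmd | ||
| * header is followed by the probe request frame (tx_cmd.len bytes) | ||
| * and then one iwl3945_scan_channel entry per channel to be scanned. | ||
| */ | ||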
2979 | cmd.len += le16_to_cpu(scan->tx_cmd.len) + | ||
2980 | scan->channel_count * sizeof(struct iwl3945_scan_channel); | ||
2981 | cmd.data = scan; | ||
2982 | scan->len = cpu_to_le16(cmd.len); | ||
2983 | |||
2984 | set_bit(STATUS_SCAN_HW, &priv->status); | ||
2985 | ret = iwl_send_cmd_sync(priv, &cmd); | ||
2986 | if (ret) | ||
2987 | clear_bit(STATUS_SCAN_HW, &priv->status); | ||
2988 | return ret; | ||
2989 | } | ||
2990 | |||
2991 | void iwl3945_post_scan(struct iwl_priv *priv) | ||
2992 | { | ||
2993 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
2994 | |||
2995 | /* | ||
2996 | * Since setting the RXON may have been deferred while | ||
2997 | * performing the scan, fire one off if needed | ||
2998 | */ | ||
2999 | if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) | ||
3000 | iwl3945_commit_rxon(priv, ctx); | ||
3001 | } | ||
3002 | |||
3003 | static void iwl3945_bg_restart(struct work_struct *data) | ||
3004 | { | ||
3005 | struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); | ||
3006 | |||
3007 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
3008 | return; | ||
3009 | |||
3010 | if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { | ||
3011 | struct iwl_rxon_context *ctx; | ||
3012 | mutex_lock(&priv->mutex); | ||
3013 | for_each_context(priv, ctx) | ||
3014 | ctx->vif = NULL; | ||
3015 | priv->is_open = 0; | ||
3016 | mutex_unlock(&priv->mutex); | ||
3017 | iwl3945_down(priv); | ||
3018 | ieee80211_restart_hw(priv->hw); | ||
3019 | } else { | ||
3020 | iwl3945_down(priv); | ||
3021 | |||
3022 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
3023 | return; | ||
3024 | |||
3025 | mutex_lock(&priv->mutex); | ||
3026 | __iwl3945_up(priv); | ||
3027 | mutex_unlock(&priv->mutex); | ||
3028 | } | ||
3029 | } | ||
3030 | |||
3031 | static void iwl3945_bg_rx_replenish(struct work_struct *data) | ||
3032 | { | ||
3033 | struct iwl_priv *priv = | ||
3034 | container_of(data, struct iwl_priv, rx_replenish); | ||
3035 | |||
3036 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
3037 | return; | ||
3038 | |||
3039 | mutex_lock(&priv->mutex); | ||
3040 | iwl3945_rx_replenish(priv); | ||
3041 | mutex_unlock(&priv->mutex); | ||
3042 | } | ||
3043 | |||
3044 | void iwl3945_post_associate(struct iwl_priv *priv) | ||
3045 | { | ||
3046 | int rc = 0; | ||
3047 | struct ieee80211_conf *conf = NULL; | ||
3048 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
3049 | |||
3050 | if (!ctx->vif || !priv->is_open) | ||
3051 | return; | ||
3052 | |||
3053 | if (ctx->vif->type == NL80211_IFTYPE_AP) { | ||
3054 | IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__); | ||
3055 | return; | ||
3056 | } | ||
3057 | |||
3058 | IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", | ||
3059 | ctx->vif->bss_conf.aid, ctx->active.bssid_addr); | ||
3060 | |||
3061 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
3062 | return; | ||
3063 | |||
3064 | iwl_scan_cancel_timeout(priv, 200); | ||
3065 | |||
3066 | conf = ieee80211_get_hw_conf(priv->hw); | ||
3067 | |||
3068 | ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
3069 | iwl3945_commit_rxon(priv, ctx); | ||
3070 | |||
3071 | rc = iwl_send_rxon_timing(priv, ctx); | ||
3072 | if (rc) | ||
3073 | IWL_WARN(priv, "REPLY_RXON_TIMING failed - " | ||
3074 | "Attempting to continue.\n"); | ||
3075 | |||
3076 | ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; | ||
3077 | |||
3078 | ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid); | ||
3079 | |||
3080 | IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", | ||
3081 | ctx->vif->bss_conf.aid, ctx->vif->bss_conf.beacon_int); | ||
3082 | |||
3083 | if (ctx->vif->bss_conf.use_short_preamble) | ||
3084 | ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; | ||
3085 | else | ||
3086 | ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; | ||
3087 | |||
3088 | if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { | ||
3089 | if (ctx->vif->bss_conf.use_short_slot) | ||
3090 | ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; | ||
3091 | else | ||
3092 | ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; | ||
3093 | } | ||
3094 | |||
3095 | iwl3945_commit_rxon(priv, ctx); | ||
3096 | |||
3097 | switch (ctx->vif->type) { | ||
3098 | case NL80211_IFTYPE_STATION: | ||
3099 | iwl3945_rate_scale_init(priv->hw, IWL_AP_ID); | ||
3100 | break; | ||
3101 | case NL80211_IFTYPE_ADHOC: | ||
3102 | iwl3945_send_beacon_cmd(priv); | ||
3103 | break; | ||
3104 | default: | ||
3105 | IWL_ERR(priv, "%s Should not be called in %d mode\n", | ||
3106 | __func__, ctx->vif->type); | ||
3107 | break; | ||
3108 | } | ||
3109 | } | ||
3110 | |||
3111 | /***************************************************************************** | ||
3112 | * | ||
3113 | * mac80211 entry point functions | ||
3114 | * | ||
3115 | *****************************************************************************/ | ||
3116 | |||
3117 | #define UCODE_READY_TIMEOUT (2 * HZ) | ||
3118 | |||
3119 | static int iwl3945_mac_start(struct ieee80211_hw *hw) | ||
3120 | { | ||
3121 | struct iwl_priv *priv = hw->priv; | ||
3122 | int ret; | ||
3123 | |||
3124 | IWL_DEBUG_MAC80211(priv, "enter\n"); | ||
3125 | |||
3126 | /* we should be verifying the device is ready to be opened */ | ||
3127 | mutex_lock(&priv->mutex); | ||
3128 | |||
3129 | /* fetch ucode file from disk, alloc and copy to bus-master buffers ... | ||
3130 | * ucode filename and max sizes are card-specific. */ | ||
3131 | |||
3132 | if (!priv->ucode_code.len) { | ||
3133 | ret = iwl3945_read_ucode(priv); | ||
3134 | if (ret) { | ||
3135 | IWL_ERR(priv, "Could not read microcode: %d\n", ret); | ||
3136 | mutex_unlock(&priv->mutex); | ||
3137 | goto out_release_irq; | ||
3138 | } | ||
3139 | } | ||
3140 | |||
3141 | ret = __iwl3945_up(priv); | ||
3142 | |||
3143 | mutex_unlock(&priv->mutex); | ||
3144 | |||
3145 | if (ret) | ||
3146 | goto out_release_irq; | ||
3147 | |||
3148 | IWL_DEBUG_INFO(priv, "Start UP work.\n"); | ||
3149 | |||
3150 | /* Wait for START_ALIVE from ucode. Otherwise callbacks from | ||
3151 | * mac80211 will not be run successfully. */ | ||
3152 | ret = wait_event_interruptible_timeout(priv->wait_command_queue, | ||
3153 | test_bit(STATUS_READY, &priv->status), | ||
3154 | UCODE_READY_TIMEOUT); | ||
3155 | if (!ret) { | ||
3156 | if (!test_bit(STATUS_READY, &priv->status)) { | ||
3157 | IWL_ERR(priv, | ||
3158 | "Wait for START_ALIVE timeout after %dms.\n", | ||
3159 | jiffies_to_msecs(UCODE_READY_TIMEOUT)); | ||
3160 | ret = -ETIMEDOUT; | ||
3161 | goto out_release_irq; | ||
3162 | } | ||
3163 | } | ||
3164 | |||
3165 | /* ucode is running and will send rfkill notifications, | ||
3166 | * no need to poll the killswitch state anymore */ | ||
3167 | cancel_delayed_work(&priv->_3945.rfkill_poll); | ||
3168 | |||
3169 | priv->is_open = 1; | ||
3170 | IWL_DEBUG_MAC80211(priv, "leave\n"); | ||
3171 | return 0; | ||
3172 | |||
3173 | out_release_irq: | ||
3174 | priv->is_open = 0; | ||
3175 | IWL_DEBUG_MAC80211(priv, "leave - failed\n"); | ||
3176 | return ret; | ||
3177 | } | ||
3178 | |||
3179 | static void iwl3945_mac_stop(struct ieee80211_hw *hw) | ||
3180 | { | ||
3181 | struct iwl_priv *priv = hw->priv; | ||
3182 | |||
3183 | IWL_DEBUG_MAC80211(priv, "enter\n"); | ||
3184 | |||
3185 | if (!priv->is_open) { | ||
3186 | IWL_DEBUG_MAC80211(priv, "leave - skip\n"); | ||
3187 | return; | ||
3188 | } | ||
3189 | |||
3190 | priv->is_open = 0; | ||
3191 | |||
3192 | iwl3945_down(priv); | ||
3193 | |||
3194 | flush_workqueue(priv->workqueue); | ||
3195 | |||
3196 | /* start polling the killswitch state again */ | ||
3197 | queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll, | ||
3198 | round_jiffies_relative(2 * HZ)); | ||
3199 | |||
3200 | IWL_DEBUG_MAC80211(priv, "leave\n"); | ||
3201 | } | ||
3202 | |||
3203 | static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) | ||
3204 | { | ||
3205 | struct iwl_priv *priv = hw->priv; | ||
3206 | |||
3207 | IWL_DEBUG_MAC80211(priv, "enter\n"); | ||
3208 | |||
3209 | IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, | ||
3210 | ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); | ||
3211 | |||
3212 | if (iwl3945_tx_skb(priv, skb)) | ||
3213 | dev_kfree_skb_any(skb); | ||
3214 | |||
3215 | IWL_DEBUG_MAC80211(priv, "leave\n"); | ||
3216 | return NETDEV_TX_OK; | ||
3217 | } | ||
3218 | |||
3219 | void iwl3945_config_ap(struct iwl_priv *priv) | ||
3220 | { | ||
3221 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
3222 | struct ieee80211_vif *vif = ctx->vif; | ||
3223 | int rc = 0; | ||
3224 | |||
3225 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
3226 | return; | ||
3227 | |||
3228 | /* The following should be done only at AP bring up */ | ||
3229 | if (!(iwl_is_associated(priv, IWL_RXON_CTX_BSS))) { | ||
3230 | |||
3231 | /* RXON - unassoc (to set timing command) */ | ||
3232 | ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
3233 | iwl3945_commit_rxon(priv, ctx); | ||
3234 | |||
3235 | /* RXON Timing */ | ||
3236 | rc = iwl_send_rxon_timing(priv, ctx); | ||
3237 | if (rc) | ||
3238 | IWL_WARN(priv, "REPLY_RXON_TIMING failed - " | ||
3239 | "Attempting to continue.\n"); | ||
3240 | |||
3241 | ctx->staging.assoc_id = 0; | ||
3242 | |||
3243 | if (vif->bss_conf.use_short_preamble) | ||
3244 | ctx->staging.flags |= | ||
3245 | RXON_FLG_SHORT_PREAMBLE_MSK; | ||
3246 | else | ||
3247 | ctx->staging.flags &= | ||
3248 | ~RXON_FLG_SHORT_PREAMBLE_MSK; | ||
3249 | |||
3250 | if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { | ||
3251 | if (vif->bss_conf.use_short_slot) | ||
3252 | ctx->staging.flags |= | ||
3253 | RXON_FLG_SHORT_SLOT_MSK; | ||
3254 | else | ||
3255 | ctx->staging.flags &= | ||
3256 | ~RXON_FLG_SHORT_SLOT_MSK; | ||
3257 | } | ||
3258 | /* restore RXON assoc */ | ||
3259 | ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; | ||
3260 | iwl3945_commit_rxon(priv, ctx); | ||
3261 | } | ||
3262 | iwl3945_send_beacon_cmd(priv); | ||
3263 | |||
3264 | /* FIXME - we need to add code here to detect a totally new | ||
3265 | * configuration, reset the AP, unassoc, rxon timing, assoc, | ||
3266 | * clear sta table, add BCAST sta... */ | ||
3267 | } | ||
3268 | |||
3269 | static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, | ||
3270 | struct ieee80211_vif *vif, | ||
3271 | struct ieee80211_sta *sta, | ||
3272 | struct ieee80211_key_conf *key) | ||
3273 | { | ||
3274 | struct iwl_priv *priv = hw->priv; | ||
3275 | int ret = 0; | ||
3276 | u8 sta_id = IWL_INVALID_STATION; | ||
3277 | u8 static_key; | ||
3278 | |||
3279 | IWL_DEBUG_MAC80211(priv, "enter\n"); | ||
3280 | |||
3281 | if (iwl3945_mod_params.sw_crypto) { | ||
3282 | IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); | ||
3283 | return -EOPNOTSUPP; | ||
3284 | } | ||
3285 | |||
3286 | /* | ||
3287 | * To support IBSS RSN, don't program group keys in IBSS; the | ||
3288 | * hardware will then not attempt to decrypt the frames. | ||
3289 | */ | ||
3290 | if (vif->type == NL80211_IFTYPE_ADHOC && | ||
3291 | !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) | ||
3292 | return -EOPNOTSUPP; | ||
3293 | |||
3294 | static_key = !iwl_is_associated(priv, IWL_RXON_CTX_BSS); | ||
3295 | |||
3296 | if (!static_key) { | ||
3297 | sta_id = iwl_sta_id_or_broadcast( | ||
3298 | priv, &priv->contexts[IWL_RXON_CTX_BSS], sta); | ||
3299 | if (sta_id == IWL_INVALID_STATION) | ||
3300 | return -EINVAL; | ||
3301 | } | ||
3302 | |||
3303 | mutex_lock(&priv->mutex); | ||
3304 | iwl_scan_cancel_timeout(priv, 100); | ||
3305 | |||
3306 | switch (cmd) { | ||
3307 | case SET_KEY: | ||
3308 | if (static_key) | ||
3309 | ret = iwl3945_set_static_key(priv, key); | ||
3310 | else | ||
3311 | ret = iwl3945_set_dynamic_key(priv, key, sta_id); | ||
3312 | IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); | ||
3313 | break; | ||
3314 | case DISABLE_KEY: | ||
3315 | if (static_key) | ||
3316 | ret = iwl3945_remove_static_key(priv); | ||
3317 | else | ||
3318 | ret = iwl3945_clear_sta_key_info(priv, sta_id); | ||
3319 | IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); | ||
3320 | break; | ||
3321 | default: | ||
3322 | ret = -EINVAL; | ||
3323 | } | ||
3324 | |||
3325 | mutex_unlock(&priv->mutex); | ||
3326 | IWL_DEBUG_MAC80211(priv, "leave\n"); | ||
3327 | |||
3328 | return ret; | ||
3329 | } | ||
3330 | |||
3331 | static int iwl3945_mac_sta_add(struct ieee80211_hw *hw, | ||
3332 | struct ieee80211_vif *vif, | ||
3333 | struct ieee80211_sta *sta) | ||
3334 | { | ||
3335 | struct iwl_priv *priv = hw->priv; | ||
3336 | struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv; | ||
3337 | int ret; | ||
3338 | bool is_ap = vif->type == NL80211_IFTYPE_STATION; | ||
3339 | u8 sta_id; | ||
3340 | |||
3341 | IWL_DEBUG_INFO(priv, "received request to add station %pM\n", | ||
3342 | sta->addr); | ||
3343 | mutex_lock(&priv->mutex); | ||
3344 | IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n", | ||
3345 | sta->addr); | ||
3346 | sta_priv->common.sta_id = IWL_INVALID_STATION; | ||
3347 | |||
3348 | |||
3349 | ret = iwl_add_station_common(priv, &priv->contexts[IWL_RXON_CTX_BSS], | ||
3350 | sta->addr, is_ap, sta, &sta_id); | ||
3351 | if (ret) { | ||
3352 | IWL_ERR(priv, "Unable to add station %pM (%d)\n", | ||
3353 | sta->addr, ret); | ||
3354 | /* Should we return success if return code is EEXIST ? */ | ||
3355 | mutex_unlock(&priv->mutex); | ||
3356 | return ret; | ||
3357 | } | ||
3358 | |||
3359 | sta_priv->common.sta_id = sta_id; | ||
3360 | |||
3361 | /* Initialize rate scaling */ | ||
3362 | IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n", | ||
3363 | sta->addr); | ||
3364 | iwl3945_rs_rate_init(priv, sta, sta_id); | ||
3365 | mutex_unlock(&priv->mutex); | ||
3366 | |||
3367 | return 0; | ||
3368 | } | ||
3369 | |||
3370 | static void iwl3945_configure_filter(struct ieee80211_hw *hw, | ||
3371 | unsigned int changed_flags, | ||
3372 | unsigned int *total_flags, | ||
3373 | u64 multicast) | ||
3374 | { | ||
3375 | struct iwl_priv *priv = hw->priv; | ||
3376 | __le32 filter_or = 0, filter_nand = 0; | ||
3377 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
3378 | |||
3379 | #define CHK(test, flag) do { \ | ||
3380 | if (*total_flags & (test)) \ | ||
3381 | filter_or |= (flag); \ | ||
3382 | else \ | ||
3383 | filter_nand |= (flag); \ | ||
3384 | } while (0) | ||
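| /* | ||
| * Example (illustrative): if mac80211 requests FIF_CONTROL, CHK() | ||
| * collects RXON_FILTER_CTL2HOST_MSK in filter_or; if not, the same | ||
| * bit is collected in filter_nand and cleared from the staging RXON | ||
| * filter flags below. | ||
| */ | ||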
3385 | |||
3386 | IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", | ||
3387 | changed_flags, *total_flags); | ||
3388 | |||
3389 | CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); | ||
3390 | CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK); | ||
3391 | CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); | ||
3392 | |||
3393 | #undef CHK | ||
3394 | |||
3395 | mutex_lock(&priv->mutex); | ||
3396 | |||
3397 | ctx->staging.filter_flags &= ~filter_nand; | ||
3398 | ctx->staging.filter_flags |= filter_or; | ||
3399 | |||
3400 | /* | ||
3401 | * Not committing directly because the hardware may be scanning; | ||
3402 | * even when the hw is ready, committing here breaks for some reason. | ||
3403 | * The filter flags change will eventually be committed anyway. | ||
3404 | */ | ||
3405 | |||
3406 | mutex_unlock(&priv->mutex); | ||
3407 | |||
3408 | /* | ||
3409 | * Receiving all multicast frames is always enabled by the | ||
3410 | * default flags setup in iwl_connection_init_rx_config() | ||
3411 | * since we currently do not support programming multicast | ||
3412 | * filters into the device. | ||
3413 | */ | ||
3414 | *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | | ||
3415 | FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; | ||
3416 | } | ||
3417 | |||
3418 | |||
3419 | /***************************************************************************** | ||
3420 | * | ||
3421 | * sysfs attributes | ||
3422 | * | ||
3423 | *****************************************************************************/ | ||
3424 | |||
3425 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
3426 | |||
3427 | /* | ||
3428 | * The following adds a new attribute to the sysfs representation | ||
3429 | * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/) | ||
3430 | * used for controlling the debug level. | ||
3431 | * | ||
3432 | * See the level definitions in iwl for details. | ||
3433 | * | ||
3434 | * The debug_level managed through sysfs below is a per-device debug | ||
3435 | * level that is used instead of the global debug level whenever the | ||
3436 | * per-device level is set. | ||
3437 | */ | ||
3438 | static ssize_t show_debug_level(struct device *d, | ||
3439 | struct device_attribute *attr, char *buf) | ||
3440 | { | ||
3441 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3442 | return sprintf(buf, "0x%08X\n", iwl_get_debug_level(priv)); | ||
3443 | } | ||
3444 | static ssize_t store_debug_level(struct device *d, | ||
3445 | struct device_attribute *attr, | ||
3446 | const char *buf, size_t count) | ||
3447 | { | ||
3448 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3449 | unsigned long val; | ||
3450 | int ret; | ||
3451 | |||
3452 | ret = strict_strtoul(buf, 0, &val); | ||
3453 | if (ret) | ||
3454 | IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf); | ||
3455 | else { | ||
3456 | priv->debug_level = val; | ||
3457 | if (iwl_alloc_traffic_mem(priv)) | ||
3458 | IWL_ERR(priv, | ||
3459 | "Not enough memory to generate traffic log\n"); | ||
3460 | } | ||
3461 | return strnlen(buf, count); | ||
3462 | } | ||
3463 | |||
3464 | static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, | ||
3465 | show_debug_level, store_debug_level); | ||
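| /* | ||
| * Usage sketch (the device path and mask value are illustrative): | ||
| *   echo 0x47 > /sys/bus/pci/devices/0000:03:00.0/debug_level | ||
| *   cat /sys/bus/pci/devices/0000:03:00.0/debug_level | ||
| */ | ||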
3466 | |||
3467 | #endif /* CONFIG_IWLWIFI_DEBUG */ | ||
3468 | |||
3469 | static ssize_t show_temperature(struct device *d, | ||
3470 | struct device_attribute *attr, char *buf) | ||
3471 | { | ||
3472 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3473 | |||
3474 | if (!iwl_is_alive(priv)) | ||
3475 | return -EAGAIN; | ||
3476 | |||
3477 | return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv)); | ||
3478 | } | ||
3479 | |||
3480 | static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL); | ||
3481 | |||
3482 | static ssize_t show_tx_power(struct device *d, | ||
3483 | struct device_attribute *attr, char *buf) | ||
3484 | { | ||
3485 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3486 | return sprintf(buf, "%d\n", priv->tx_power_user_lmt); | ||
3487 | } | ||
3488 | |||
3489 | static ssize_t store_tx_power(struct device *d, | ||
3490 | struct device_attribute *attr, | ||
3491 | const char *buf, size_t count) | ||
3492 | { | ||
3493 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3494 | char *p = (char *)buf; | ||
3495 | u32 val; | ||
3496 | |||
3497 | val = simple_strtoul(p, &p, 10); | ||
3498 | if (p == buf) | ||
3499 | IWL_INFO(priv, "%s is not in decimal form.\n", buf); | ||
3500 | else | ||
3501 | iwl3945_hw_reg_set_txpower(priv, val); | ||
3502 | |||
3503 | return count; | ||
3504 | } | ||
3505 | |||
3506 | static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power); | ||
3507 | |||
3508 | static ssize_t show_flags(struct device *d, | ||
3509 | struct device_attribute *attr, char *buf) | ||
3510 | { | ||
3511 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3512 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
3513 | |||
3514 | return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.flags)); | ||
3515 | } | ||
3516 | |||
3517 | static ssize_t store_flags(struct device *d, | ||
3518 | struct device_attribute *attr, | ||
3519 | const char *buf, size_t count) | ||
3520 | { | ||
3521 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3522 | u32 flags = simple_strtoul(buf, NULL, 0); | ||
3523 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
3524 | |||
3525 | mutex_lock(&priv->mutex); | ||
3526 | if (le32_to_cpu(ctx->staging.flags) != flags) { | ||
3527 | /* Cancel any currently running scans... */ | ||
3528 | if (iwl_scan_cancel_timeout(priv, 100)) | ||
3529 | IWL_WARN(priv, "Could not cancel scan.\n"); | ||
3530 | else { | ||
3531 | IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n", | ||
3532 | flags); | ||
3533 | ctx->staging.flags = cpu_to_le32(flags); | ||
3534 | iwl3945_commit_rxon(priv, ctx); | ||
3535 | } | ||
3536 | } | ||
3537 | mutex_unlock(&priv->mutex); | ||
3538 | |||
3539 | return count; | ||
3540 | } | ||
3541 | |||
3542 | static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags); | ||
3543 | |||
3544 | static ssize_t show_filter_flags(struct device *d, | ||
3545 | struct device_attribute *attr, char *buf) | ||
3546 | { | ||
3547 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3548 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
3549 | |||
3550 | return sprintf(buf, "0x%04X\n", | ||
3551 | le32_to_cpu(ctx->active.filter_flags)); | ||
3552 | } | ||
3553 | |||
3554 | static ssize_t store_filter_flags(struct device *d, | ||
3555 | struct device_attribute *attr, | ||
3556 | const char *buf, size_t count) | ||
3557 | { | ||
3558 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3559 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
3560 | u32 filter_flags = simple_strtoul(buf, NULL, 0); | ||
3561 | |||
3562 | mutex_lock(&priv->mutex); | ||
3563 | if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) { | ||
3564 | /* Cancel any currently running scans... */ | ||
3565 | if (iwl_scan_cancel_timeout(priv, 100)) | ||
3566 | IWL_WARN(priv, "Could not cancel scan.\n"); | ||
3567 | else { | ||
3568 | IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = " | ||
3569 | "0x%04X\n", filter_flags); | ||
3570 | ctx->staging.filter_flags = | ||
3571 | cpu_to_le32(filter_flags); | ||
3572 | iwl3945_commit_rxon(priv, ctx); | ||
3573 | } | ||
3574 | } | ||
3575 | mutex_unlock(&priv->mutex); | ||
3576 | |||
3577 | return count; | ||
3578 | } | ||
3579 | |||
3580 | static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, | ||
3581 | store_filter_flags); | ||
3582 | |||
3583 | static ssize_t show_measurement(struct device *d, | ||
3584 | struct device_attribute *attr, char *buf) | ||
3585 | { | ||
3586 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3587 | struct iwl_spectrum_notification measure_report; | ||
3588 | u32 size = sizeof(measure_report), len = 0, ofs = 0; | ||
3589 | u8 *data = (u8 *)&measure_report; | ||
3590 | unsigned long flags; | ||
3591 | |||
3592 | spin_lock_irqsave(&priv->lock, flags); | ||
3593 | if (!(priv->measurement_status & MEASUREMENT_READY)) { | ||
3594 | spin_unlock_irqrestore(&priv->lock, flags); | ||
3595 | return 0; | ||
3596 | } | ||
3597 | memcpy(&measure_report, &priv->measure_report, size); | ||
3598 | priv->measurement_status = 0; | ||
3599 | spin_unlock_irqrestore(&priv->lock, flags); | ||
3600 | |||
3601 | while (size && (PAGE_SIZE - len)) { | ||
3602 | hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, | ||
3603 | PAGE_SIZE - len, 1); | ||
3604 | len = strlen(buf); | ||
3605 | if (PAGE_SIZE - len) | ||
3606 | buf[len++] = '\n'; | ||
3607 | |||
3608 | ofs += 16; | ||
3609 | size -= min(size, 16U); | ||
3610 | } | ||
3611 | |||
3612 | return len; | ||
3613 | } | ||
3614 | |||
3615 | static ssize_t store_measurement(struct device *d, | ||
3616 | struct device_attribute *attr, | ||
3617 | const char *buf, size_t count) | ||
3618 | { | ||
3619 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3620 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
3621 | struct ieee80211_measurement_params params = { | ||
3622 | .channel = le16_to_cpu(ctx->active.channel), | ||
3623 | .start_time = cpu_to_le64(priv->_3945.last_tsf), | ||
3624 | .duration = cpu_to_le16(1), | ||
3625 | }; | ||
3626 | u8 type = IWL_MEASURE_BASIC; | ||
3627 | u8 buffer[32]; | ||
3628 | u8 channel; | ||
3629 | |||
3630 | if (count) { | ||
3631 | char *p = buffer; | ||
3632 | strncpy(buffer, buf, min(sizeof(buffer), count)); | ||
3633 | channel = simple_strtoul(p, NULL, 0); | ||
3634 | if (channel) | ||
3635 | params.channel = channel; | ||
3636 | |||
3637 | p = buffer; | ||
3638 | while (*p && *p != ' ') | ||
3639 | p++; | ||
3640 | if (*p) | ||
3641 | type = simple_strtoul(p + 1, NULL, 0); | ||
3642 | } | ||
3643 | |||
3644 | IWL_DEBUG_INFO(priv, "Invoking measurement of type %d on " | ||
3645 | "channel %d (for '%s')\n", type, params.channel, buf); | ||
3646 | iwl3945_get_measurement(priv, ¶ms, type); | ||
3647 | |||
3648 | return count; | ||
3649 | } | ||
3650 | |||
3651 | static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, | ||
3652 | show_measurement, store_measurement); | ||
3653 | |||
3654 | static ssize_t store_retry_rate(struct device *d, | ||
3655 | struct device_attribute *attr, | ||
3656 | const char *buf, size_t count) | ||
3657 | { | ||
3658 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3659 | |||
3660 | priv->retry_rate = simple_strtoul(buf, NULL, 0); | ||
3661 | if (priv->retry_rate <= 0) | ||
3662 | priv->retry_rate = 1; | ||
3663 | |||
3664 | return count; | ||
3665 | } | ||
3666 | |||
3667 | static ssize_t show_retry_rate(struct device *d, | ||
3668 | struct device_attribute *attr, char *buf) | ||
3669 | { | ||
3670 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3671 | return sprintf(buf, "%d", priv->retry_rate); | ||
3672 | } | ||
3673 | |||
3674 | static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate, | ||
3675 | store_retry_rate); | ||
3676 | |||
3677 | |||
3678 | static ssize_t show_channels(struct device *d, | ||
3679 | struct device_attribute *attr, char *buf) | ||
3680 | { | ||
3681 | /* all this shit doesn't belong in sysfs anyway */ | ||
3682 | return 0; | ||
3683 | } | ||
3684 | |||
3685 | static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL); | ||
3686 | |||
3687 | static ssize_t show_antenna(struct device *d, | ||
3688 | struct device_attribute *attr, char *buf) | ||
3689 | { | ||
3690 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3691 | |||
3692 | if (!iwl_is_alive(priv)) | ||
3693 | return -EAGAIN; | ||
3694 | |||
3695 | return sprintf(buf, "%d\n", iwl3945_mod_params.antenna); | ||
3696 | } | ||
3697 | |||
3698 | static ssize_t store_antenna(struct device *d, | ||
3699 | struct device_attribute *attr, | ||
3700 | const char *buf, size_t count) | ||
3701 | { | ||
3702 | struct iwl_priv *priv __maybe_unused = dev_get_drvdata(d); | ||
3703 | int ant; | ||
3704 | |||
3705 | if (count == 0) | ||
3706 | return 0; | ||
3707 | |||
3708 | if (sscanf(buf, "%1i", &ant) != 1) { | ||
3709 | IWL_DEBUG_INFO(priv, "not in hex or decimal form.\n"); | ||
3710 | return count; | ||
3711 | } | ||
3712 | |||
3713 | if ((ant >= 0) && (ant <= 2)) { | ||
3714 | IWL_DEBUG_INFO(priv, "Setting antenna select to %d.\n", ant); | ||
3715 | iwl3945_mod_params.antenna = (enum iwl3945_antenna)ant; | ||
3716 | } else | ||
3717 | IWL_DEBUG_INFO(priv, "Bad antenna select value %d.\n", ant); | ||
3718 | |||
3719 | |||
3720 | return count; | ||
3721 | } | ||
3722 | |||
3723 | static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna); | ||
3724 | |||
3725 | static ssize_t show_status(struct device *d, | ||
3726 | struct device_attribute *attr, char *buf) | ||
3727 | { | ||
3728 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3729 | if (!iwl_is_alive(priv)) | ||
3730 | return -EAGAIN; | ||
3731 | return sprintf(buf, "0x%08x\n", (int)priv->status); | ||
3732 | } | ||
3733 | |||
3734 | static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); | ||
3735 | |||
3736 | static ssize_t dump_error_log(struct device *d, | ||
3737 | struct device_attribute *attr, | ||
3738 | const char *buf, size_t count) | ||
3739 | { | ||
3740 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3741 | char *p = (char *)buf; | ||
3742 | |||
3743 | if (p[0] == '1') | ||
3744 | iwl3945_dump_nic_error_log(priv); | ||
3745 | |||
3746 | return strnlen(buf, count); | ||
3747 | } | ||
3748 | |||
3749 | static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log); | ||
3750 | |||
3751 | /***************************************************************************** | ||
3752 | * | ||
3753 | * driver setup and tear down | ||
3754 | * | ||
3755 | *****************************************************************************/ | ||
3756 | |||
3757 | static void iwl3945_setup_deferred_work(struct iwl_priv *priv) | ||
3758 | { | ||
3759 | priv->workqueue = create_singlethread_workqueue(DRV_NAME); | ||
3760 | |||
3761 | init_waitqueue_head(&priv->wait_command_queue); | ||
3762 | |||
3763 | INIT_WORK(&priv->restart, iwl3945_bg_restart); | ||
3764 | INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish); | ||
3765 | INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update); | ||
3766 | INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start); | ||
3767 | INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); | ||
3768 | INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll); | ||
3769 | |||
3770 | iwl_setup_scan_deferred_work(priv); | ||
3771 | |||
3772 | iwl3945_hw_setup_deferred_work(priv); | ||
3773 | |||
3774 | init_timer(&priv->watchdog); | ||
3775 | priv->watchdog.data = (unsigned long)priv; | ||
3776 | priv->watchdog.function = iwl_bg_watchdog; | ||
3777 | |||
3778 | tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) | ||
3779 | iwl3945_irq_tasklet, (unsigned long)priv); | ||
3780 | } | ||
3781 | |||
3782 | static void iwl3945_cancel_deferred_work(struct iwl_priv *priv) | ||
3783 | { | ||
3784 | iwl3945_hw_cancel_deferred_work(priv); | ||
3785 | |||
3786 | cancel_delayed_work_sync(&priv->init_alive_start); | ||
3787 | cancel_delayed_work(&priv->alive_start); | ||
3788 | cancel_work_sync(&priv->beacon_update); | ||
3789 | |||
3790 | iwl_cancel_scan_deferred_work(priv); | ||
3791 | } | ||
3792 | |||
3793 | static struct attribute *iwl3945_sysfs_entries[] = { | ||
3794 | &dev_attr_antenna.attr, | ||
3795 | &dev_attr_channels.attr, | ||
3796 | &dev_attr_dump_errors.attr, | ||
3797 | &dev_attr_flags.attr, | ||
3798 | &dev_attr_filter_flags.attr, | ||
3799 | &dev_attr_measurement.attr, | ||
3800 | &dev_attr_retry_rate.attr, | ||
3801 | &dev_attr_status.attr, | ||
3802 | &dev_attr_temperature.attr, | ||
3803 | &dev_attr_tx_power.attr, | ||
3804 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
3805 | &dev_attr_debug_level.attr, | ||
3806 | #endif | ||
3807 | NULL | ||
3808 | }; | ||
3809 | |||
3810 | static struct attribute_group iwl3945_attribute_group = { | ||
3811 | .name = NULL, /* put in device directory */ | ||
3812 | .attrs = iwl3945_sysfs_entries, | ||
3813 | }; | ||
3814 | |||
3815 | struct ieee80211_ops iwl3945_hw_ops = { | ||
3816 | .tx = iwl3945_mac_tx, | ||
3817 | .start = iwl3945_mac_start, | ||
3818 | .stop = iwl3945_mac_stop, | ||
3819 | .add_interface = iwl_mac_add_interface, | ||
3820 | .remove_interface = iwl_mac_remove_interface, | ||
3821 | .change_interface = iwl_mac_change_interface, | ||
3822 | .config = iwl_legacy_mac_config, | ||
3823 | .configure_filter = iwl3945_configure_filter, | ||
3824 | .set_key = iwl3945_mac_set_key, | ||
3825 | .conf_tx = iwl_mac_conf_tx, | ||
3826 | .reset_tsf = iwl_legacy_mac_reset_tsf, | ||
3827 | .bss_info_changed = iwl_legacy_mac_bss_info_changed, | ||
3828 | .hw_scan = iwl_mac_hw_scan, | ||
3829 | .sta_add = iwl3945_mac_sta_add, | ||
3830 | .sta_remove = iwl_mac_sta_remove, | ||
3831 | .tx_last_beacon = iwl_mac_tx_last_beacon, | ||
3832 | }; | ||
3833 | |||
3834 | static int iwl3945_init_drv(struct iwl_priv *priv) | ||
3835 | { | ||
3836 | int ret; | ||
3837 | struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; | ||
3838 | |||
3839 | priv->retry_rate = 1; | ||
3840 | priv->beacon_skb = NULL; | ||
3841 | |||
3842 | spin_lock_init(&priv->sta_lock); | ||
3843 | spin_lock_init(&priv->hcmd_lock); | ||
3844 | |||
3845 | INIT_LIST_HEAD(&priv->free_frames); | ||
3846 | |||
3847 | mutex_init(&priv->mutex); | ||
3848 | mutex_init(&priv->sync_cmd_mutex); | ||
3849 | |||
3850 | priv->ieee_channels = NULL; | ||
3851 | priv->ieee_rates = NULL; | ||
3852 | priv->band = IEEE80211_BAND_2GHZ; | ||
3853 | |||
3854 | priv->iw_mode = NL80211_IFTYPE_STATION; | ||
3855 | priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; | ||
3856 | |||
3857 | /* initialize force reset */ | ||
3858 | priv->force_reset[IWL_RF_RESET].reset_duration = | ||
3859 | IWL_DELAY_NEXT_FORCE_RF_RESET; | ||
3860 | priv->force_reset[IWL_FW_RESET].reset_duration = | ||
3861 | IWL_DELAY_NEXT_FORCE_FW_RELOAD; | ||
3862 | |||
3863 | |||
3864 | priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER; | ||
3865 | priv->tx_power_next = IWL_DEFAULT_TX_POWER; | ||
3866 | |||
3867 | if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { | ||
3868 | IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n", | ||
3869 | eeprom->version); | ||
3870 | ret = -EINVAL; | ||
3871 | goto err; | ||
3872 | } | ||
3873 | ret = iwl_init_channel_map(priv); | ||
3874 | if (ret) { | ||
3875 | IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); | ||
3876 | goto err; | ||
3877 | } | ||
3878 | |||
3879 | /* Set up txpower settings in driver for all channels */ | ||
3880 | if (iwl3945_txpower_set_from_eeprom(priv)) { | ||
3881 | ret = -EIO; | ||
3882 | goto err_free_channel_map; | ||
3883 | } | ||
3884 | |||
3885 | ret = iwlcore_init_geos(priv); | ||
3886 | if (ret) { | ||
3887 | IWL_ERR(priv, "initializing geos failed: %d\n", ret); | ||
3888 | goto err_free_channel_map; | ||
3889 | } | ||
3890 | iwl3945_init_hw_rates(priv, priv->ieee_rates); | ||
3891 | |||
3892 | return 0; | ||
3893 | |||
3894 | err_free_channel_map: | ||
3895 | iwl_free_channel_map(priv); | ||
3896 | err: | ||
3897 | return ret; | ||
3898 | } | ||
3899 | |||
3900 | #define IWL3945_MAX_PROBE_REQUEST 200 | ||
3901 | |||
3902 | static int iwl3945_setup_mac(struct iwl_priv *priv) | ||
3903 | { | ||
3904 | int ret; | ||
3905 | struct ieee80211_hw *hw = priv->hw; | ||
3906 | |||
3907 | hw->rate_control_algorithm = "iwl-3945-rs"; | ||
3908 | hw->sta_data_size = sizeof(struct iwl3945_sta_priv); | ||
3909 | hw->vif_data_size = sizeof(struct iwl_vif_priv); | ||
3910 | |||
3911 | /* Tell mac80211 our characteristics */ | ||
3912 | hw->flags = IEEE80211_HW_SIGNAL_DBM | | ||
3913 | IEEE80211_HW_SPECTRUM_MGMT; | ||
3914 | |||
3915 | if (!priv->cfg->base_params->broken_powersave) | ||
3916 | hw->flags |= IEEE80211_HW_SUPPORTS_PS | | ||
3917 | IEEE80211_HW_SUPPORTS_DYNAMIC_PS; | ||
3918 | |||
3919 | hw->wiphy->interface_modes = | ||
3920 | priv->contexts[IWL_RXON_CTX_BSS].interface_modes; | ||
3921 | |||
3922 | hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | | ||
3923 | WIPHY_FLAG_DISABLE_BEACON_HINTS | | ||
3924 | WIPHY_FLAG_IBSS_RSN; | ||
3925 | |||
3926 | hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; | ||
3927 | /* we create the 802.11 header and a zero-length SSID element */ | ||
3928 | hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2; | ||
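| /* i.e. 200 - 24 (802.11 mgmt header) - 2 (empty SSID element) | ||
| * = 174 bytes left for user-supplied probe request IEs */ | ||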
3929 | |||
3930 | /* Default value; 4 EDCA QOS priorities */ | ||
3931 | hw->queues = 4; | ||
3932 | |||
3933 | if (priv->bands[IEEE80211_BAND_2GHZ].n_channels) | ||
3934 | priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = | ||
3935 | &priv->bands[IEEE80211_BAND_2GHZ]; | ||
3936 | |||
3937 | if (priv->bands[IEEE80211_BAND_5GHZ].n_channels) | ||
3938 | priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = | ||
3939 | &priv->bands[IEEE80211_BAND_5GHZ]; | ||
3940 | |||
3941 | iwl_leds_init(priv); | ||
3942 | |||
3943 | ret = ieee80211_register_hw(priv->hw); | ||
3944 | if (ret) { | ||
3945 | IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); | ||
3946 | return ret; | ||
3947 | } | ||
3948 | priv->mac80211_registered = 1; | ||
3949 | |||
3950 | return 0; | ||
3951 | } | ||
3952 | |||
3953 | static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
3954 | { | ||
3955 | int err = 0, i; | ||
3956 | struct iwl_priv *priv; | ||
3957 | struct ieee80211_hw *hw; | ||
3958 | struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); | ||
3959 | struct iwl3945_eeprom *eeprom; | ||
3960 | unsigned long flags; | ||
3961 | |||
3962 | /*********************** | ||
3963 | * 1. Allocating HW data | ||
3964 | * ********************/ | ||
3965 | |||
3966 | /* mac80211 allocates memory for this device instance, including | ||
3967 | * space for this driver's private structure */ | ||
3968 | hw = iwl_alloc_all(cfg); | ||
3969 | if (hw == NULL) { | ||
3970 | pr_err("Can not allocate network device\n"); | ||
3971 | err = -ENOMEM; | ||
3972 | goto out; | ||
3973 | } | ||
3974 | priv = hw->priv; | ||
3975 | SET_IEEE80211_DEV(hw, &pdev->dev); | ||
3976 | |||
3977 | priv->cmd_queue = IWL39_CMD_QUEUE_NUM; | ||
3978 | |||
3979 | /* 3945 has only one valid context */ | ||
3980 | priv->valid_contexts = BIT(IWL_RXON_CTX_BSS); | ||
3981 | |||
3982 | for (i = 0; i < NUM_IWL_RXON_CTX; i++) | ||
3983 | priv->contexts[i].ctxid = i; | ||
3984 | |||
3985 | priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON; | ||
3986 | priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING; | ||
3987 | priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC; | ||
3988 | priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM; | ||
3989 | priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID; | ||
3990 | priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY; | ||
3991 | priv->contexts[IWL_RXON_CTX_BSS].interface_modes = | ||
3992 | BIT(NL80211_IFTYPE_STATION) | | ||
3993 | BIT(NL80211_IFTYPE_ADHOC); | ||
3994 | priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS; | ||
3995 | priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS; | ||
3996 | priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS; | ||
3997 | |||
3998 | /* | ||
3999 | * Disabling hardware scan means that mac80211 will perform scans | ||
4000 | * "the hard way", rather than using the device's scan. | ||
4001 | */ | ||
4002 | if (iwl3945_mod_params.disable_hw_scan) { | ||
4003 | dev_printk(KERN_DEBUG, &(pdev->dev), | ||
4004 | "sw scan support is deprecated\n"); | ||
4005 | iwl3945_hw_ops.hw_scan = NULL; | ||
4006 | } | ||
4007 | |||
4008 | |||
4009 | IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); | ||
4010 | priv->cfg = cfg; | ||
4011 | priv->pci_dev = pdev; | ||
4012 | priv->inta_mask = CSR_INI_SET_MASK; | ||
4013 | |||
4014 | if (iwl_alloc_traffic_mem(priv)) | ||
4015 | IWL_ERR(priv, "Not enough memory to generate traffic log\n"); | ||
4016 | |||
4017 | /*************************** | ||
4018 | * 2. Initializing PCI bus | ||
4019 | * *************************/ | ||
4020 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | | ||
4021 | PCIE_LINK_STATE_CLKPM); | ||
4022 | |||
4023 | if (pci_enable_device(pdev)) { | ||
4024 | err = -ENODEV; | ||
4025 | goto out_ieee80211_free_hw; | ||
4026 | } | ||
4027 | |||
4028 | pci_set_master(pdev); | ||
4029 | |||
4030 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
4031 | if (!err) | ||
4032 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
4033 | if (err) { | ||
4034 | IWL_WARN(priv, "No suitable DMA available.\n"); | ||
4035 | goto out_pci_disable_device; | ||
4036 | } | ||
4037 | |||
4038 | pci_set_drvdata(pdev, priv); | ||
4039 | err = pci_request_regions(pdev, DRV_NAME); | ||
4040 | if (err) | ||
4041 | goto out_pci_disable_device; | ||
4042 | |||
4043 | /*********************** | ||
4044 | * 3. Read REV Register | ||
4045 | * ********************/ | ||
4046 | priv->hw_base = pci_iomap(pdev, 0, 0); | ||
4047 | if (!priv->hw_base) { | ||
4048 | err = -ENODEV; | ||
4049 | goto out_pci_release_regions; | ||
4050 | } | ||
4051 | |||
4052 | IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n", | ||
4053 | (unsigned long long) pci_resource_len(pdev, 0)); | ||
4054 | IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base); | ||
4055 | |||
4056 | /* We disable the RETRY_TIMEOUT register (0x41) to keep | ||
4057 | * PCI Tx retries from interfering with C3 CPU state */ | ||
4058 | pci_write_config_byte(pdev, 0x41, 0x00); | ||
4059 | |||
4060 | /* these spin locks will be used in apm_ops.init and EEPROM access, | ||
4061 | * so we should init them now | ||
4062 | */ | ||
4063 | spin_lock_init(&priv->reg_lock); | ||
4064 | spin_lock_init(&priv->lock); | ||
4065 | |||
4066 | /* | ||
4067 | * stop and reset the on-board processor just in case it is in a | ||
4068 | * strange state ... like being left stranded by a primary kernel | ||
4069 | * and this is now the kdump kernel trying to start up | ||
4070 | */ | ||
4071 | iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); | ||
4072 | |||
4073 | /*********************** | ||
4074 | * 4. Read EEPROM | ||
4075 | * ********************/ | ||
4076 | |||
4077 | /* Read the EEPROM */ | ||
4078 | err = iwl_eeprom_init(priv); | ||
4079 | if (err) { | ||
4080 | IWL_ERR(priv, "Unable to init EEPROM\n"); | ||
4081 | goto out_iounmap; | ||
4082 | } | ||
4083 | /* MAC Address location in EEPROM same for 3945/4965 */ | ||
4084 | eeprom = (struct iwl3945_eeprom *)priv->eeprom; | ||
4085 | IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address); | ||
4086 | SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address); | ||
4087 | |||
4088 | /*********************** | ||
4089 | * 5. Setup HW Constants | ||
4090 | * ********************/ | ||
4091 | /* Device-specific setup */ | ||
4092 | if (iwl3945_hw_set_hw_params(priv)) { | ||
4093 | IWL_ERR(priv, "failed to set hw settings\n"); | ||
4094 | goto out_eeprom_free; | ||
4095 | } | ||
4096 | |||
4097 | /*********************** | ||
4098 | * 6. Setup priv | ||
4099 | * ********************/ | ||
4100 | |||
4101 | err = iwl3945_init_drv(priv); | ||
4102 | if (err) { | ||
4103 | IWL_ERR(priv, "initializing driver failed\n"); | ||
4104 | goto out_unset_hw_params; | ||
4105 | } | ||
4106 | |||
4107 | IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n", | ||
4108 | priv->cfg->name); | ||
4109 | |||
4110 | /*********************** | ||
4111 | * 7. Setup Services | ||
4112 | * ********************/ | ||
4113 | |||
4114 | spin_lock_irqsave(&priv->lock, flags); | ||
4115 | iwl_disable_interrupts(priv); | ||
4116 | spin_unlock_irqrestore(&priv->lock, flags); | ||
4117 | |||
4118 | pci_enable_msi(priv->pci_dev); | ||
4119 | |||
4120 | err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr_ops.isr, | ||
4121 | IRQF_SHARED, DRV_NAME, priv); | ||
4122 | if (err) { | ||
4123 | IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); | ||
4124 | goto out_disable_msi; | ||
4125 | } | ||
4126 | |||
4127 | err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group); | ||
4128 | if (err) { | ||
4129 | IWL_ERR(priv, "failed to create sysfs device attributes\n"); | ||
4130 | goto out_release_irq; | ||
4131 | } | ||
4132 | |||
4133 | iwl_set_rxon_channel(priv, | ||
4134 | &priv->bands[IEEE80211_BAND_2GHZ].channels[5], | ||
4135 | &priv->contexts[IWL_RXON_CTX_BSS]); | ||
4136 | iwl3945_setup_deferred_work(priv); | ||
4137 | iwl3945_setup_rx_handlers(priv); | ||
4138 | iwl_power_initialize(priv); | ||
4139 | |||
4140 | /********************************* | ||
4141 | * 8. Setup and Register mac80211 | ||
4142 | * *******************************/ | ||
4143 | |||
4144 | iwl_enable_interrupts(priv); | ||
4145 | |||
4146 | err = iwl3945_setup_mac(priv); | ||
4147 | if (err) | ||
4148 | goto out_remove_sysfs; | ||
4149 | |||
4150 | err = iwl_dbgfs_register(priv, DRV_NAME); | ||
4151 | if (err) | ||
4152 | IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err); | ||
4153 | |||
4154 | /* Start monitoring the killswitch */ | ||
4155 | queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll, | ||
4156 | 2 * HZ); | ||
4157 | |||
4158 | return 0; | ||
4159 | |||
4160 | out_remove_sysfs: | ||
4161 | destroy_workqueue(priv->workqueue); | ||
4162 | priv->workqueue = NULL; | ||
4163 | sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); | ||
4164 | out_release_irq: | ||
4165 | free_irq(priv->pci_dev->irq, priv); | ||
4166 | out_disable_msi: | ||
4167 | pci_disable_msi(priv->pci_dev); | ||
4168 | iwlcore_free_geos(priv); | ||
4169 | iwl_free_channel_map(priv); | ||
4170 | out_unset_hw_params: | ||
4171 | iwl3945_unset_hw_params(priv); | ||
4172 | out_eeprom_free: | ||
4173 | iwl_eeprom_free(priv); | ||
4174 | out_iounmap: | ||
4175 | pci_iounmap(pdev, priv->hw_base); | ||
4176 | out_pci_release_regions: | ||
4177 | pci_release_regions(pdev); | ||
4178 | out_pci_disable_device: | ||
4179 | pci_set_drvdata(pdev, NULL); | ||
4180 | pci_disable_device(pdev); | ||
4181 | out_ieee80211_free_hw: | ||
4182 | iwl_free_traffic_mem(priv); | ||
4183 | ieee80211_free_hw(priv->hw); | ||
4184 | out: | ||
4185 | return err; | ||
4186 | } | ||
4187 | |||
4188 | static void __devexit iwl3945_pci_remove(struct pci_dev *pdev) | ||
4189 | { | ||
4190 | struct iwl_priv *priv = pci_get_drvdata(pdev); | ||
4191 | unsigned long flags; | ||
4192 | |||
4193 | if (!priv) | ||
4194 | return; | ||
4195 | |||
4196 | IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); | ||
4197 | |||
4198 | iwl_dbgfs_unregister(priv); | ||
4199 | |||
4200 | set_bit(STATUS_EXIT_PENDING, &priv->status); | ||
4201 | |||
4202 | iwl_leds_exit(priv); | ||
4203 | |||
4204 | if (priv->mac80211_registered) { | ||
4205 | ieee80211_unregister_hw(priv->hw); | ||
4206 | priv->mac80211_registered = 0; | ||
4207 | } else { | ||
4208 | iwl3945_down(priv); | ||
4209 | } | ||
4210 | |||
4211 | /* | ||
4212 | * Make sure device is reset to low power before unloading driver. | ||
4213 | * This may be redundant with iwl_down(), but there are paths to | ||
4214 | * run iwl_down() without calling apm_ops.stop(), and there are | ||
4215 | * paths to avoid running iwl_down() at all before leaving driver. | ||
4216 | * This (inexpensive) call *makes sure* device is reset. | ||
4217 | */ | ||
4218 | iwl_apm_stop(priv); | ||
4219 | |||
4220 | /* make sure we flush any pending irq or | ||
4221 | * tasklet for the driver | ||
4222 | */ | ||
4223 | spin_lock_irqsave(&priv->lock, flags); | ||
4224 | iwl_disable_interrupts(priv); | ||
4225 | spin_unlock_irqrestore(&priv->lock, flags); | ||
4226 | |||
4227 | iwl_synchronize_irq(priv); | ||
4228 | |||
4229 | sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); | ||
4230 | |||
4231 | cancel_delayed_work_sync(&priv->_3945.rfkill_poll); | ||
4232 | |||
4233 | iwl3945_dealloc_ucode_pci(priv); | ||
4234 | |||
4235 | if (priv->rxq.bd) | ||
4236 | iwl3945_rx_queue_free(priv, &priv->rxq); | ||
4237 | iwl3945_hw_txq_ctx_free(priv); | ||
4238 | |||
4239 | iwl3945_unset_hw_params(priv); | ||
4240 | |||
4241 | /*netif_stop_queue(dev); */ | ||
4242 | flush_workqueue(priv->workqueue); | ||
4243 | |||
4244 | /* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes | ||
4245 | * priv->workqueue... so we can't take down the workqueue | ||
4246 | * until now... */ | ||
4247 | destroy_workqueue(priv->workqueue); | ||
4248 | priv->workqueue = NULL; | ||
4249 | iwl_free_traffic_mem(priv); | ||
4250 | |||
4251 | free_irq(pdev->irq, priv); | ||
4252 | pci_disable_msi(pdev); | ||
4253 | |||
4254 | pci_iounmap(pdev, priv->hw_base); | ||
4255 | pci_release_regions(pdev); | ||
4256 | pci_disable_device(pdev); | ||
4257 | pci_set_drvdata(pdev, NULL); | ||
4258 | |||
4259 | iwl_free_channel_map(priv); | ||
4260 | iwlcore_free_geos(priv); | ||
4261 | kfree(priv->scan_cmd); | ||
4262 | if (priv->beacon_skb) | ||
4263 | dev_kfree_skb(priv->beacon_skb); | ||
4264 | |||
4265 | ieee80211_free_hw(priv->hw); | ||
4266 | } | ||
4267 | |||
4268 | |||
4269 | /***************************************************************************** | ||
4270 | * | ||
4271 | * driver and module entry point | ||
4272 | * | ||
4273 | *****************************************************************************/ | ||
4274 | |||
4275 | static struct pci_driver iwl3945_driver = { | ||
4276 | .name = DRV_NAME, | ||
4277 | .id_table = iwl3945_hw_card_ids, | ||
4278 | .probe = iwl3945_pci_probe, | ||
4279 | .remove = __devexit_p(iwl3945_pci_remove), | ||
4280 | .driver.pm = IWL_PM_OPS, | ||
4281 | }; | ||
4282 | |||
4283 | static int __init iwl3945_init(void) | ||
4284 | { | ||
4285 | |||
4286 | int ret; | ||
4287 | pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); | ||
4288 | pr_info(DRV_COPYRIGHT "\n"); | ||
4289 | |||
4290 | ret = iwl3945_rate_control_register(); | ||
4291 | if (ret) { | ||
4292 | pr_err("Unable to register rate control algorithm: %d\n", ret); | ||
4293 | return ret; | ||
4294 | } | ||
4295 | |||
4296 | ret = pci_register_driver(&iwl3945_driver); | ||
4297 | if (ret) { | ||
4298 | pr_err("Unable to initialize PCI module\n"); | ||
4299 | goto error_register; | ||
4300 | } | ||
4301 | |||
4302 | return ret; | ||
4303 | |||
4304 | error_register: | ||
4305 | iwl3945_rate_control_unregister(); | ||
4306 | return ret; | ||
4307 | } | ||
4308 | |||
4309 | static void __exit iwl3945_exit(void) | ||
4310 | { | ||
4311 | pci_unregister_driver(&iwl3945_driver); | ||
4312 | iwl3945_rate_control_unregister(); | ||
4313 | } | ||
4314 | |||
4315 | MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX)); | ||
4316 | |||
4317 | module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO); | ||
4318 | MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); | ||
4319 | module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO); | ||
4320 | MODULE_PARM_DESC(swcrypto, | ||
4321 | "use software crypto (default 1 [software])"); | ||
4322 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
4323 | module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR); | ||
4324 | MODULE_PARM_DESC(debug, "debug output mask"); | ||
4325 | #endif | ||
4326 | module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, | ||
4327 | int, S_IRUGO); | ||
4328 | MODULE_PARM_DESC(disable_hw_scan, | ||
4329 | "disable hardware scanning (default 0) (deprecated)"); | ||
4330 | module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, S_IRUGO); | ||
4331 | MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error"); | ||
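| /* | ||
| * Example module load (values are illustrative only): | ||
| *   modprobe iwl3945 antenna=1 swcrypto=0 fw_restart3945=1 | ||
| */ | ||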
4332 | |||
4333 | module_exit(iwl3945_exit); | ||
4334 | module_init(iwl3945_init); | ||