Diffstat (limited to 'drivers/net/wireless/iwlegacy/iwl3945-base.c')
-rw-r--r--   drivers/net/wireless/iwlegacy/iwl3945-base.c   4293
1 file changed, 4293 insertions, 0 deletions
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
new file mode 100644
index 000000000000..ab87e1b73529
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c
@@ -0,0 +1,4293 @@
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved. | ||
4 | * | ||
5 | * Portions of this file are derived from the ipw3945 project, as well | ||
6 | * as portions of the ieee80211 subsystem header files. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of version 2 of the GNU General Public License as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., | ||
19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA | ||
20 | * | ||
21 | * The full GNU General Public License is included in this distribution in the | ||
22 | * file called LICENSE. | ||
23 | * | ||
24 | * Contact Information: | ||
25 | * Intel Linux Wireless <ilw@linux.intel.com> | ||
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
27 | * | ||
28 | *****************************************************************************/ | ||
29 | |||
30 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
31 | |||
32 | #include <linux/kernel.h> | ||
33 | #include <linux/module.h> | ||
34 | #include <linux/init.h> | ||
35 | #include <linux/pci.h> | ||
36 | #include <linux/pci-aspm.h> | ||
37 | #include <linux/slab.h> | ||
38 | #include <linux/dma-mapping.h> | ||
39 | #include <linux/delay.h> | ||
40 | #include <linux/sched.h> | ||
41 | #include <linux/skbuff.h> | ||
42 | #include <linux/netdevice.h> | ||
43 | #include <linux/wireless.h> | ||
44 | #include <linux/firmware.h> | ||
45 | #include <linux/etherdevice.h> | ||
46 | #include <linux/if_arp.h> | ||
47 | |||
48 | #include <net/ieee80211_radiotap.h> | ||
49 | #include <net/mac80211.h> | ||
50 | |||
51 | #include <asm/div64.h> | ||
52 | |||
53 | #define DRV_NAME "iwl3945" | ||
54 | |||
55 | #include "iwl-fh.h" | ||
56 | #include "iwl-3945-fh.h" | ||
57 | #include "iwl-commands.h" | ||
58 | #include "iwl-sta.h" | ||
59 | #include "iwl-3945.h" | ||
60 | #include "iwl-core.h" | ||
61 | #include "iwl-helpers.h" | ||
62 | #include "iwl-dev.h" | ||
63 | #include "iwl-spectrum.h" | ||
64 | |||
65 | /* | ||
66 | * module name, copyright, version, etc. | ||
67 | */ | ||
68 | |||
69 | #define DRV_DESCRIPTION \ | ||
70 | "Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux" | ||
71 | |||
72 | #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG | ||
73 | #define VD "d" | ||
74 | #else | ||
75 | #define VD | ||
76 | #endif | ||
77 | |||
78 | /* | ||
79 | * add "s" to indicate spectrum measurement included. | ||
80 | * we add it here to be consistent with previous releases in which | ||
81 | * this was configurable. | ||
82 | */ | ||
83 | #define DRV_VERSION IWLWIFI_VERSION VD "s" | ||
84 | #define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation" | ||
85 | #define DRV_AUTHOR "<ilw@linux.intel.com>" | ||
86 | |||
87 | MODULE_DESCRIPTION(DRV_DESCRIPTION); | ||
88 | MODULE_VERSION(DRV_VERSION); | ||
89 | MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); | ||
90 | MODULE_LICENSE("GPL"); | ||
91 | |||
92 | /* module parameters */ | ||
93 | struct iwl_mod_params iwl3945_mod_params = { | ||
94 | .sw_crypto = 1, | ||
95 | .restart_fw = 1, | ||
96 | /* the rest are 0 by default */ | ||
97 | }; | ||
98 | |||
99 | /** | ||
100 | * iwl3945_get_antenna_flags - Get antenna flags for RXON command | ||
101 | * @priv: eeprom and antenna fields are used to determine antenna flags | ||
102 | * | ||
103 | * priv->eeprom is used to determine if antenna AUX/MAIN are reversed | ||
104 | * iwl3945_mod_params.antenna specifies the antenna diversity mode: | ||
105 | * | ||
106 | * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself | ||
107 | * IWL_ANTENNA_MAIN - Force MAIN antenna | ||
108 | * IWL_ANTENNA_AUX - Force AUX antenna | ||
109 | */ | ||
110 | __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv) | ||
111 | { | ||
112 | struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; | ||
113 | |||
114 | switch (iwl3945_mod_params.antenna) { | ||
115 | case IWL_ANTENNA_DIVERSITY: | ||
116 | return 0; | ||
117 | |||
118 | case IWL_ANTENNA_MAIN: | ||
119 | if (eeprom->antenna_switch_type) | ||
120 | return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; | ||
121 | return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; | ||
122 | |||
123 | case IWL_ANTENNA_AUX: | ||
124 | if (eeprom->antenna_switch_type) | ||
125 | return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK; | ||
126 | return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK; | ||
127 | } | ||
128 | |||
129 | /* bad antenna selector value */ | ||
130 | IWL_ERR(priv, "Bad antenna selector value (0x%x)\n", | ||
131 | iwl3945_mod_params.antenna); | ||
132 | |||
133 | return 0; /* "diversity" is default if error */ | ||
134 | } | ||
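/*
 * Example of the selection above: with antenna_switch_type clear in the
 * EEPROM, IWL_ANTENNA_MAIN forces antenna A and IWL_ANTENNA_AUX forces
 * antenna B (both with diversity disabled); a set antenna_switch_type
 * swaps the two, and IWL_ANTENNA_DIVERSITY returns 0 so the NIC keeps
 * choosing the antenna itself.
 */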
135 | |||
136 | static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv, | ||
137 | struct ieee80211_key_conf *keyconf, | ||
138 | u8 sta_id) | ||
139 | { | ||
140 | unsigned long flags; | ||
141 | __le16 key_flags = 0; | ||
142 | int ret; | ||
143 | |||
144 | key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK); | ||
145 | key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS); | ||
146 | |||
147 | if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id) | ||
148 | key_flags |= STA_KEY_MULTICAST_MSK; | ||
149 | |||
150 | keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; | ||
151 | keyconf->hw_key_idx = keyconf->keyidx; | ||
152 | key_flags &= ~STA_KEY_FLG_INVALID; | ||
153 | |||
154 | spin_lock_irqsave(&priv->sta_lock, flags); | ||
155 | priv->stations[sta_id].keyinfo.cipher = keyconf->cipher; | ||
156 | priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; | ||
157 | memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, | ||
158 | keyconf->keylen); | ||
159 | |||
160 | memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, | ||
161 | keyconf->keylen); | ||
162 | |||
163 | if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK) | ||
164 | == STA_KEY_FLG_NO_ENC) | ||
165 | priv->stations[sta_id].sta.key.key_offset = | ||
166 | iwl_legacy_get_free_ucode_key_index(priv); | ||
167 | /* else, we are overriding an existing key => no need to allocate room | ||
168 | * in uCode. */ | ||
169 | |||
170 | WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET, | ||
171 | "no space for a new key"); | ||
172 | |||
173 | priv->stations[sta_id].sta.key.key_flags = key_flags; | ||
174 | priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; | ||
175 | priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; | ||
176 | |||
177 | IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n"); | ||
178 | |||
179 | ret = iwl_legacy_send_add_sta(priv, | ||
180 | &priv->stations[sta_id].sta, CMD_ASYNC); | ||
181 | |||
182 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
183 | |||
184 | return ret; | ||
185 | } | ||
186 | |||
187 | static int iwl3945_set_tkip_dynamic_key_info(struct iwl_priv *priv, | ||
188 | struct ieee80211_key_conf *keyconf, | ||
189 | u8 sta_id) | ||
190 | { | ||
191 | return -EOPNOTSUPP; | ||
192 | } | ||
193 | |||
194 | static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv, | ||
195 | struct ieee80211_key_conf *keyconf, | ||
196 | u8 sta_id) | ||
197 | { | ||
198 | return -EOPNOTSUPP; | ||
199 | } | ||
200 | |||
201 | static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id) | ||
202 | { | ||
203 | unsigned long flags; | ||
204 | struct iwl_legacy_addsta_cmd sta_cmd; | ||
205 | |||
206 | spin_lock_irqsave(&priv->sta_lock, flags); | ||
207 | memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key)); | ||
208 | memset(&priv->stations[sta_id].sta.key, 0, | ||
209 | sizeof(struct iwl4965_keyinfo)); | ||
210 | priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; | ||
211 | priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; | ||
212 | priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; | ||
213 | memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd)); | ||
214 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
215 | |||
216 | IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n"); | ||
217 | return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC); | ||
218 | } | ||
219 | |||
220 | static int iwl3945_set_dynamic_key(struct iwl_priv *priv, | ||
221 | struct ieee80211_key_conf *keyconf, u8 sta_id) | ||
222 | { | ||
223 | int ret = 0; | ||
224 | |||
225 | keyconf->hw_key_idx = HW_KEY_DYNAMIC; | ||
226 | |||
227 | switch (keyconf->cipher) { | ||
228 | case WLAN_CIPHER_SUITE_CCMP: | ||
229 | ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id); | ||
230 | break; | ||
231 | case WLAN_CIPHER_SUITE_TKIP: | ||
232 | ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id); | ||
233 | break; | ||
234 | case WLAN_CIPHER_SUITE_WEP40: | ||
235 | case WLAN_CIPHER_SUITE_WEP104: | ||
236 | ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id); | ||
237 | break; | ||
238 | default: | ||
239 | IWL_ERR(priv, "Unknown alg: %s alg=%x\n", __func__, | ||
240 | keyconf->cipher); | ||
241 | ret = -EINVAL; | ||
242 | } | ||
243 | |||
244 | IWL_DEBUG_WEP(priv, "Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n", | ||
245 | keyconf->cipher, keyconf->keylen, keyconf->keyidx, | ||
246 | sta_id, ret); | ||
247 | |||
248 | return ret; | ||
249 | } | ||
250 | |||
251 | static int iwl3945_remove_static_key(struct iwl_priv *priv) | ||
252 | { | ||
253 | int ret = -EOPNOTSUPP; | ||
254 | |||
255 | return ret; | ||
256 | } | ||
257 | |||
258 | static int iwl3945_set_static_key(struct iwl_priv *priv, | ||
259 | struct ieee80211_key_conf *key) | ||
260 | { | ||
261 | if (key->cipher == WLAN_CIPHER_SUITE_WEP40 || | ||
262 | key->cipher == WLAN_CIPHER_SUITE_WEP104) | ||
263 | return -EOPNOTSUPP; | ||
264 | |||
265 | IWL_ERR(priv, "Static key invalid: cipher %x\n", key->cipher); | ||
266 | return -EINVAL; | ||
267 | } | ||
268 | |||
269 | static void iwl3945_clear_free_frames(struct iwl_priv *priv) | ||
270 | { | ||
271 | struct list_head *element; | ||
272 | |||
273 | IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n", | ||
274 | priv->frames_count); | ||
275 | |||
276 | while (!list_empty(&priv->free_frames)) { | ||
277 | element = priv->free_frames.next; | ||
278 | list_del(element); | ||
279 | kfree(list_entry(element, struct iwl3945_frame, list)); | ||
280 | priv->frames_count--; | ||
281 | } | ||
282 | |||
283 | if (priv->frames_count) { | ||
284 | IWL_WARN(priv, "%d frames still in use. Did we lose one?\n", | ||
285 | priv->frames_count); | ||
286 | priv->frames_count = 0; | ||
287 | } | ||
288 | } | ||
289 | |||
290 | static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv) | ||
291 | { | ||
292 | struct iwl3945_frame *frame; | ||
293 | struct list_head *element; | ||
294 | if (list_empty(&priv->free_frames)) { | ||
295 | frame = kzalloc(sizeof(*frame), GFP_KERNEL); | ||
296 | if (!frame) { | ||
297 | IWL_ERR(priv, "Could not allocate frame!\n"); | ||
298 | return NULL; | ||
299 | } | ||
300 | |||
301 | priv->frames_count++; | ||
302 | return frame; | ||
303 | } | ||
304 | |||
305 | element = priv->free_frames.next; | ||
306 | list_del(element); | ||
307 | return list_entry(element, struct iwl3945_frame, list); | ||
308 | } | ||
309 | |||
310 | static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame) | ||
311 | { | ||
312 | memset(frame, 0, sizeof(*frame)); | ||
313 | list_add(&frame->list, &priv->free_frames); | ||
314 | } | ||
315 | |||
316 | unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv, | ||
317 | struct ieee80211_hdr *hdr, | ||
318 | int left) | ||
319 | { | ||
320 | |||
321 | if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb) | ||
322 | return 0; | ||
323 | |||
324 | if (priv->beacon_skb->len > left) | ||
325 | return 0; | ||
326 | |||
327 | memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len); | ||
328 | |||
329 | return priv->beacon_skb->len; | ||
330 | } | ||
331 | |||
332 | static int iwl3945_send_beacon_cmd(struct iwl_priv *priv) | ||
333 | { | ||
334 | struct iwl3945_frame *frame; | ||
335 | unsigned int frame_size; | ||
336 | int rc; | ||
337 | u8 rate; | ||
338 | |||
339 | frame = iwl3945_get_free_frame(priv); | ||
340 | |||
341 | if (!frame) { | ||
342 | IWL_ERR(priv, "Could not obtain free frame buffer for beacon " | ||
343 | "command.\n"); | ||
344 | return -ENOMEM; | ||
345 | } | ||
346 | |||
347 | rate = iwl_legacy_get_lowest_plcp(priv, | ||
348 | &priv->contexts[IWL_RXON_CTX_BSS]); | ||
349 | |||
350 | frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate); | ||
351 | |||
352 | rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, | ||
353 | &frame->u.cmd[0]); | ||
354 | |||
355 | iwl3945_free_frame(priv, frame); | ||
356 | |||
357 | return rc; | ||
358 | } | ||
359 | |||
360 | static void iwl3945_unset_hw_params(struct iwl_priv *priv) | ||
361 | { | ||
362 | if (priv->_3945.shared_virt) | ||
363 | dma_free_coherent(&priv->pci_dev->dev, | ||
364 | sizeof(struct iwl3945_shared), | ||
365 | priv->_3945.shared_virt, | ||
366 | priv->_3945.shared_phys); | ||
367 | } | ||
368 | |||
369 | static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv, | ||
370 | struct ieee80211_tx_info *info, | ||
371 | struct iwl_device_cmd *cmd, | ||
372 | struct sk_buff *skb_frag, | ||
373 | int sta_id) | ||
374 | { | ||
375 | struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload; | ||
376 | struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo; | ||
377 | |||
378 | tx_cmd->sec_ctl = 0; | ||
379 | |||
380 | switch (keyinfo->cipher) { | ||
381 | case WLAN_CIPHER_SUITE_CCMP: | ||
382 | tx_cmd->sec_ctl = TX_CMD_SEC_CCM; | ||
383 | memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen); | ||
384 | IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); | ||
385 | break; | ||
386 | |||
387 | case WLAN_CIPHER_SUITE_TKIP: | ||
388 | break; | ||
389 | |||
390 | case WLAN_CIPHER_SUITE_WEP104: | ||
391 | tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128; | ||
392 | /* fall through */ | ||
393 | case WLAN_CIPHER_SUITE_WEP40: | ||
394 | tx_cmd->sec_ctl |= TX_CMD_SEC_WEP | | ||
395 | (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; | ||
396 | |||
397 | memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen); | ||
398 | |||
399 | IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " | ||
400 | "with key %d\n", info->control.hw_key->hw_key_idx); | ||
401 | break; | ||
402 | |||
403 | default: | ||
404 | IWL_ERR(priv, "Unknown encode cipher %x\n", keyinfo->cipher); | ||
405 | break; | ||
406 | } | ||
407 | } | ||
408 | |||
409 | /* | ||
410 | * build the basic part of the REPLY_TX command. | ||
411 | */ | ||
412 | static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv, | ||
413 | struct iwl_device_cmd *cmd, | ||
414 | struct ieee80211_tx_info *info, | ||
415 | struct ieee80211_hdr *hdr, u8 std_id) | ||
416 | { | ||
417 | struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload; | ||
418 | __le32 tx_flags = tx_cmd->tx_flags; | ||
419 | __le16 fc = hdr->frame_control; | ||
420 | |||
421 | tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; | ||
422 | if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { | ||
423 | tx_flags |= TX_CMD_FLG_ACK_MSK; | ||
424 | if (ieee80211_is_mgmt(fc)) | ||
425 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
426 | if (ieee80211_is_probe_resp(fc) && | ||
427 | !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) | ||
428 | tx_flags |= TX_CMD_FLG_TSF_MSK; | ||
429 | } else { | ||
430 | tx_flags &= (~TX_CMD_FLG_ACK_MSK); | ||
431 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
432 | } | ||
433 | |||
434 | tx_cmd->sta_id = std_id; | ||
435 | if (ieee80211_has_morefrags(fc)) | ||
436 | tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; | ||
437 | |||
438 | if (ieee80211_is_data_qos(fc)) { | ||
439 | u8 *qc = ieee80211_get_qos_ctl(hdr); | ||
440 | tx_cmd->tid_tspec = qc[0] & 0xf; | ||
441 | tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; | ||
442 | } else { | ||
443 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
444 | } | ||
445 | |||
446 | iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags); | ||
447 | |||
448 | tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); | ||
449 | if (ieee80211_is_mgmt(fc)) { | ||
450 | if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) | ||
451 | tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3); | ||
452 | else | ||
453 | tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2); | ||
454 | } else { | ||
455 | tx_cmd->timeout.pm_frame_timeout = 0; | ||
456 | } | ||
457 | |||
458 | tx_cmd->driver_txop = 0; | ||
459 | tx_cmd->tx_flags = tx_flags; | ||
460 | tx_cmd->next_frame_len = 0; | ||
461 | } | ||
462 | |||
463 | /* | ||
464 | * start REPLY_TX command process | ||
465 | */ | ||
466 | static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) | ||
467 | { | ||
468 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
469 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
470 | struct iwl3945_tx_cmd *tx_cmd; | ||
471 | struct iwl_tx_queue *txq = NULL; | ||
472 | struct iwl_queue *q = NULL; | ||
473 | struct iwl_device_cmd *out_cmd; | ||
474 | struct iwl_cmd_meta *out_meta; | ||
475 | dma_addr_t phys_addr; | ||
476 | dma_addr_t txcmd_phys; | ||
477 | int txq_id = skb_get_queue_mapping(skb); | ||
478 | u16 len, idx, hdr_len; | ||
479 | u8 id; | ||
480 | u8 unicast; | ||
481 | u8 sta_id; | ||
482 | u8 tid = 0; | ||
483 | __le16 fc; | ||
484 | u8 wait_write_ptr = 0; | ||
485 | unsigned long flags; | ||
486 | |||
487 | spin_lock_irqsave(&priv->lock, flags); | ||
488 | if (iwl_legacy_is_rfkill(priv)) { | ||
489 | IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n"); | ||
490 | goto drop_unlock; | ||
491 | } | ||
492 | |||
493 | if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) { | ||
494 | IWL_ERR(priv, "ERROR: No TX rate available.\n"); | ||
495 | goto drop_unlock; | ||
496 | } | ||
497 | |||
498 | unicast = !is_multicast_ether_addr(hdr->addr1); | ||
499 | id = 0; | ||
500 | |||
501 | fc = hdr->frame_control; | ||
502 | |||
503 | #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG | ||
504 | if (ieee80211_is_auth(fc)) | ||
505 | IWL_DEBUG_TX(priv, "Sending AUTH frame\n"); | ||
506 | else if (ieee80211_is_assoc_req(fc)) | ||
507 | IWL_DEBUG_TX(priv, "Sending ASSOC frame\n"); | ||
508 | else if (ieee80211_is_reassoc_req(fc)) | ||
509 | IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); | ||
510 | #endif | ||
511 | |||
512 | spin_unlock_irqrestore(&priv->lock, flags); | ||
513 | |||
514 | hdr_len = ieee80211_hdrlen(fc); | ||
515 | |||
516 | /* Find index into station table for destination station */ | ||
517 | sta_id = iwl_legacy_sta_id_or_broadcast( | ||
518 | priv, &priv->contexts[IWL_RXON_CTX_BSS], | ||
519 | info->control.sta); | ||
520 | if (sta_id == IWL_INVALID_STATION) { | ||
521 | IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", | ||
522 | hdr->addr1); | ||
523 | goto drop; | ||
524 | } | ||
525 | |||
526 | IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id); | ||
527 | |||
528 | if (ieee80211_is_data_qos(fc)) { | ||
529 | u8 *qc = ieee80211_get_qos_ctl(hdr); | ||
530 | tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK; | ||
531 | if (unlikely(tid >= MAX_TID_COUNT)) | ||
532 | goto drop; | ||
533 | } | ||
534 | |||
535 | /* Descriptor for chosen Tx queue */ | ||
536 | txq = &priv->txq[txq_id]; | ||
537 | q = &txq->q; | ||
538 | |||
539 | if ((iwl_legacy_queue_space(q) < q->high_mark)) | ||
540 | goto drop; | ||
541 | |||
542 | spin_lock_irqsave(&priv->lock, flags); | ||
543 | |||
544 | idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0); | ||
545 | |||
546 | /* Set up driver data for this TFD */ | ||
547 | memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info)); | ||
548 | txq->txb[q->write_ptr].skb = skb; | ||
549 | txq->txb[q->write_ptr].ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
550 | |||
551 | /* Init first empty entry in queue's array of Tx/cmd buffers */ | ||
552 | out_cmd = txq->cmd[idx]; | ||
553 | out_meta = &txq->meta[idx]; | ||
554 | tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload; | ||
555 | memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); | ||
556 | memset(tx_cmd, 0, sizeof(*tx_cmd)); | ||
557 | |||
558 | /* | ||
559 | * Set up the Tx-command (not MAC!) header. | ||
560 | * Store the chosen Tx queue and TFD index within the sequence field; | ||
561 | * after Tx, uCode's Tx response will return this value so driver can | ||
562 | * locate the frame within the tx queue and do post-tx processing. | ||
563 | */ | ||
564 | out_cmd->hdr.cmd = REPLY_TX; | ||
565 | out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | | ||
566 | INDEX_TO_SEQ(q->write_ptr))); | ||
567 | |||
568 | /* Copy MAC header from skb into command buffer */ | ||
569 | memcpy(tx_cmd->hdr, hdr, hdr_len); | ||
570 | |||
571 | |||
572 | if (info->control.hw_key) | ||
573 | iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id); | ||
574 | |||
575 | /* TODO need this for burst mode later on */ | ||
576 | iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id); | ||
577 | |||
578 | /* set is_hcca to 0; it probably will never be implemented */ | ||
579 | iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0); | ||
580 | |||
581 | /* Total # bytes to be transmitted */ | ||
582 | len = (u16)skb->len; | ||
583 | tx_cmd->len = cpu_to_le16(len); | ||
584 | |||
585 | iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr); | ||
586 | iwl_legacy_update_stats(priv, true, fc, len); | ||
587 | tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; | ||
588 | tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; | ||
589 | |||
590 | if (!ieee80211_has_morefrags(hdr->frame_control)) { | ||
591 | txq->need_update = 1; | ||
592 | } else { | ||
593 | wait_write_ptr = 1; | ||
594 | txq->need_update = 0; | ||
595 | } | ||
596 | |||
597 | IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n", | ||
598 | le16_to_cpu(out_cmd->hdr.sequence)); | ||
599 | IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags)); | ||
600 | iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd)); | ||
601 | iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, | ||
602 | ieee80211_hdrlen(fc)); | ||
603 | |||
604 | /* | ||
605 | * Use the first empty entry in this queue's command buffer array | ||
606 | * to contain the Tx command and MAC header concatenated together | ||
607 | * (payload data will be in another buffer). | ||
608 | * Size of this varies, due to varying MAC header length. | ||
609 | * If end is not dword aligned, we'll have 2 extra bytes at the end | ||
610 | * of the MAC header (device reads on dword boundaries). | ||
611 | * We'll tell device about this padding later. | ||
612 | */ | ||
613 | len = sizeof(struct iwl3945_tx_cmd) + | ||
614 | sizeof(struct iwl_cmd_header) + hdr_len; | ||
615 | len = (len + 3) & ~3; | ||
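/*
 * (len + 3) & ~3 rounds len up to the next multiple of 4: e.g. a combined
 * Tx-command + MAC-header length of 62 is padded to 64, while 64 is left
 * unchanged; the difference is the padding mentioned in the comment above.
 */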
616 | |||
617 | /* Physical address of this Tx command's header (not MAC header!), | ||
618 | * within command buffer array. */ | ||
619 | txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr, | ||
620 | len, PCI_DMA_TODEVICE); | ||
621 | /* the meta data is not DMA-mapped, so it is safe to store here the | ||
622 | * address and length used later to unmap the command */ | ||
623 | dma_unmap_addr_set(out_meta, mapping, txcmd_phys); | ||
624 | dma_unmap_len_set(out_meta, len, len); | ||
625 | |||
626 | /* Add buffer containing Tx command and MAC(!) header to TFD's | ||
627 | * first entry */ | ||
628 | priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, | ||
629 | txcmd_phys, len, 1, 0); | ||
630 | |||
631 | |||
632 | /* Set up TFD's 2nd entry to point directly to remainder of skb, | ||
633 | * if any (802.11 null frames have no payload). */ | ||
634 | len = skb->len - hdr_len; | ||
635 | if (len) { | ||
636 | phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, | ||
637 | len, PCI_DMA_TODEVICE); | ||
638 | priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, | ||
639 | phys_addr, len, | ||
640 | 0, U32_PAD(len)); | ||
641 | } | ||
642 | |||
643 | |||
644 | /* Tell device the write index *just past* this latest filled TFD */ | ||
645 | q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd); | ||
646 | iwl_legacy_txq_update_write_ptr(priv, txq); | ||
647 | spin_unlock_irqrestore(&priv->lock, flags); | ||
648 | |||
649 | if ((iwl_legacy_queue_space(q) < q->high_mark) | ||
650 | && priv->mac80211_registered) { | ||
651 | if (wait_write_ptr) { | ||
652 | spin_lock_irqsave(&priv->lock, flags); | ||
653 | txq->need_update = 1; | ||
654 | iwl_legacy_txq_update_write_ptr(priv, txq); | ||
655 | spin_unlock_irqrestore(&priv->lock, flags); | ||
656 | } | ||
657 | |||
658 | iwl_legacy_stop_queue(priv, txq); | ||
659 | } | ||
660 | |||
661 | return 0; | ||
662 | |||
663 | drop_unlock: | ||
664 | spin_unlock_irqrestore(&priv->lock, flags); | ||
665 | drop: | ||
666 | return -1; | ||
667 | } | ||
668 | |||
669 | static int iwl3945_get_measurement(struct iwl_priv *priv, | ||
670 | struct ieee80211_measurement_params *params, | ||
671 | u8 type) | ||
672 | { | ||
673 | struct iwl_spectrum_cmd spectrum; | ||
674 | struct iwl_rx_packet *pkt; | ||
675 | struct iwl_host_cmd cmd = { | ||
676 | .id = REPLY_SPECTRUM_MEASUREMENT_CMD, | ||
677 | .data = (void *)&spectrum, | ||
678 | .flags = CMD_WANT_SKB, | ||
679 | }; | ||
680 | u32 add_time = le64_to_cpu(params->start_time); | ||
681 | int rc; | ||
682 | int spectrum_resp_status; | ||
683 | int duration = le16_to_cpu(params->duration); | ||
684 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
685 | |||
686 | if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) | ||
687 | add_time = iwl_legacy_usecs_to_beacons(priv, | ||
688 | le64_to_cpu(params->start_time) - priv->_3945.last_tsf, | ||
689 | le16_to_cpu(ctx->timing.beacon_interval)); | ||
690 | |||
691 | memset(&spectrum, 0, sizeof(spectrum)); | ||
692 | |||
693 | spectrum.channel_count = cpu_to_le16(1); | ||
694 | spectrum.flags = | ||
695 | RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK; | ||
696 | spectrum.filter_flags = MEASUREMENT_FILTER_FLAG; | ||
697 | cmd.len = sizeof(spectrum); | ||
698 | spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); | ||
699 | |||
700 | if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) | ||
701 | spectrum.start_time = | ||
702 | iwl_legacy_add_beacon_time(priv, | ||
703 | priv->_3945.last_beacon_time, add_time, | ||
704 | le16_to_cpu(ctx->timing.beacon_interval)); | ||
705 | else | ||
706 | spectrum.start_time = 0; | ||
707 | |||
708 | spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT); | ||
709 | spectrum.channels[0].channel = params->channel; | ||
710 | spectrum.channels[0].type = type; | ||
711 | if (ctx->active.flags & RXON_FLG_BAND_24G_MSK) | ||
712 | spectrum.flags |= RXON_FLG_BAND_24G_MSK | | ||
713 | RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK; | ||
714 | |||
715 | rc = iwl_legacy_send_cmd_sync(priv, &cmd); | ||
716 | if (rc) | ||
717 | return rc; | ||
718 | |||
719 | pkt = (struct iwl_rx_packet *)cmd.reply_page; | ||
720 | if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { | ||
721 | IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n"); | ||
722 | rc = -EIO; | ||
723 | } | ||
724 | |||
725 | spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status); | ||
726 | switch (spectrum_resp_status) { | ||
727 | case 0: /* Command will be handled */ | ||
728 | if (pkt->u.spectrum.id != 0xff) { | ||
729 | IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n", | ||
730 | pkt->u.spectrum.id); | ||
731 | priv->measurement_status &= ~MEASUREMENT_READY; | ||
732 | } | ||
733 | priv->measurement_status |= MEASUREMENT_ACTIVE; | ||
734 | rc = 0; | ||
735 | break; | ||
736 | |||
737 | case 1: /* Command will not be handled */ | ||
738 | rc = -EAGAIN; | ||
739 | break; | ||
740 | } | ||
741 | |||
742 | iwl_legacy_free_pages(priv, cmd.reply_page); | ||
743 | |||
744 | return rc; | ||
745 | } | ||
746 | |||
747 | static void iwl3945_rx_reply_alive(struct iwl_priv *priv, | ||
748 | struct iwl_rx_mem_buffer *rxb) | ||
749 | { | ||
750 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
751 | struct iwl_alive_resp *palive; | ||
752 | struct delayed_work *pwork; | ||
753 | |||
754 | palive = &pkt->u.alive_frame; | ||
755 | |||
756 | IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision " | ||
757 | "0x%01X 0x%01X\n", | ||
758 | palive->is_valid, palive->ver_type, | ||
759 | palive->ver_subtype); | ||
760 | |||
761 | if (palive->ver_subtype == INITIALIZE_SUBTYPE) { | ||
762 | IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); | ||
763 | memcpy(&priv->card_alive_init, &pkt->u.alive_frame, | ||
764 | sizeof(struct iwl_alive_resp)); | ||
765 | pwork = &priv->init_alive_start; | ||
766 | } else { | ||
767 | IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); | ||
768 | memcpy(&priv->card_alive, &pkt->u.alive_frame, | ||
769 | sizeof(struct iwl_alive_resp)); | ||
770 | pwork = &priv->alive_start; | ||
771 | iwl3945_disable_events(priv); | ||
772 | } | ||
773 | |||
774 | /* We delay the ALIVE response by 5ms to | ||
775 | * give the HW RF Kill time to activate... */ | ||
776 | if (palive->is_valid == UCODE_VALID_OK) | ||
777 | queue_delayed_work(priv->workqueue, pwork, | ||
778 | msecs_to_jiffies(5)); | ||
779 | else | ||
780 | IWL_WARN(priv, "uCode did not respond OK.\n"); | ||
781 | } | ||
782 | |||
783 | static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv, | ||
784 | struct iwl_rx_mem_buffer *rxb) | ||
785 | { | ||
786 | #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG | ||
787 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
788 | #endif | ||
789 | |||
790 | IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); | ||
791 | } | ||
792 | |||
793 | static void iwl3945_rx_beacon_notif(struct iwl_priv *priv, | ||
794 | struct iwl_rx_mem_buffer *rxb) | ||
795 | { | ||
796 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
797 | struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status); | ||
798 | #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG | ||
799 | u8 rate = beacon->beacon_notify_hdr.rate; | ||
800 | |||
801 | IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d " | ||
802 | "tsf %d %d rate %d\n", | ||
803 | le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK, | ||
804 | beacon->beacon_notify_hdr.failure_frame, | ||
805 | le32_to_cpu(beacon->ibss_mgr_status), | ||
806 | le32_to_cpu(beacon->high_tsf), | ||
807 | le32_to_cpu(beacon->low_tsf), rate); | ||
808 | #endif | ||
809 | |||
810 | priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status); | ||
811 | |||
812 | } | ||
813 | |||
814 | /* Handle notification from uCode that card's power state is changing | ||
815 | * due to software, hardware, or critical temperature RFKILL */ | ||
816 | static void iwl3945_rx_card_state_notif(struct iwl_priv *priv, | ||
817 | struct iwl_rx_mem_buffer *rxb) | ||
818 | { | ||
819 | struct iwl_rx_packet *pkt = rxb_addr(rxb); | ||
820 | u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); | ||
821 | unsigned long status = priv->status; | ||
822 | |||
823 | IWL_WARN(priv, "Card state received: HW:%s SW:%s\n", | ||
824 | (flags & HW_CARD_DISABLED) ? "Kill" : "On", | ||
825 | (flags & SW_CARD_DISABLED) ? "Kill" : "On"); | ||
826 | |||
827 | iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, | ||
828 | CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); | ||
829 | |||
830 | if (flags & HW_CARD_DISABLED) | ||
831 | set_bit(STATUS_RF_KILL_HW, &priv->status); | ||
832 | else | ||
833 | clear_bit(STATUS_RF_KILL_HW, &priv->status); | ||
834 | |||
835 | |||
836 | iwl_legacy_scan_cancel(priv); | ||
837 | |||
838 | if ((test_bit(STATUS_RF_KILL_HW, &status) != | ||
839 | test_bit(STATUS_RF_KILL_HW, &priv->status))) | ||
840 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, | ||
841 | test_bit(STATUS_RF_KILL_HW, &priv->status)); | ||
842 | else | ||
843 | wake_up_interruptible(&priv->wait_command_queue); | ||
844 | } | ||
845 | |||
846 | /** | ||
847 | * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks | ||
848 | * | ||
849 | * Setup the RX handlers for each of the reply types sent from the uCode | ||
850 | * to the host. | ||
851 | * | ||
852 | * This function chains into the hardware specific files for them to setup | ||
853 | * any hardware specific handlers as well. | ||
854 | */ | ||
855 | static void iwl3945_setup_rx_handlers(struct iwl_priv *priv) | ||
856 | { | ||
857 | priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive; | ||
858 | priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta; | ||
859 | priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error; | ||
860 | priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa; | ||
861 | priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = | ||
862 | iwl_legacy_rx_spectrum_measure_notif; | ||
863 | priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif; | ||
864 | priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = | ||
865 | iwl_legacy_rx_pm_debug_statistics_notif; | ||
866 | priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif; | ||
867 | |||
868 | /* | ||
869 | * The same handler is used for both the REPLY to a discrete | ||
870 | * statistics request from the host as well as for the periodic | ||
871 | * statistics notifications (after received beacons) from the uCode. | ||
872 | */ | ||
873 | priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics; | ||
874 | priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics; | ||
875 | |||
876 | iwl_legacy_setup_rx_scan_handlers(priv); | ||
877 | priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif; | ||
878 | |||
879 | /* Set up hardware specific Rx handlers */ | ||
880 | iwl3945_hw_rx_handler_setup(priv); | ||
881 | } | ||
882 | |||
883 | /************************** RX-FUNCTIONS ****************************/ | ||
884 | /* | ||
885 | * Rx theory of operation | ||
886 | * | ||
887 | * The host allocates 32 DMA target addresses and passes the host address | ||
888 | * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is | ||
889 | * 0 to 31 | ||
890 | * | ||
891 | * Rx Queue Indexes | ||
892 | * The host/firmware share two index registers for managing the Rx buffers. | ||
893 | * | ||
894 | * The READ index maps to the first position that the firmware may be writing | ||
895 | * to -- the driver can read up to (but not including) this position and get | ||
896 | * good data. | ||
897 | * The READ index is managed by the firmware once the card is enabled. | ||
898 | * | ||
899 | * The WRITE index maps to the last position the driver has read from -- the | ||
900 | * position preceding WRITE is the last slot the firmware can place a packet. | ||
901 | * | ||
902 | * The queue is empty (no good data) if WRITE = READ - 1, and is full if | ||
903 | * WRITE = READ. | ||
904 | * | ||
905 | * During initialization, the host sets up the READ queue position to the first | ||
906 | * INDEX position, and WRITE to the last (READ - 1 wrapped) | ||
907 | * | ||
908 | * When the firmware places a packet in a buffer, it will advance the READ index | ||
909 | * and fire the RX interrupt. The driver can then query the READ index and | ||
910 | * process as many packets as possible, moving the WRITE index forward as it | ||
911 | * resets the Rx queue buffers with new memory. | ||
912 | * | ||
913 | * The management in the driver is as follows: | ||
914 | * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When | ||
915 | * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled | ||
916 | * to replenish the iwl->rxq->rx_free. | ||
917 | * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the | ||
918 | * iwl->rxq is replenished and the READ INDEX is updated (updating the | ||
919 | * 'processed' and 'read' driver indexes as well) | ||
920 | * + A received packet is processed and handed to the kernel network stack, | ||
921 | * detached from the iwl->rxq. The driver 'processed' index is updated. | ||
922 | * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free | ||
923 | * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ | ||
924 | * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there | ||
925 | * were enough free buffers and RX_STALLED is set it is cleared. | ||
926 | * | ||
927 | * | ||
928 | * Driver sequence: | ||
929 | * | ||
930 | * iwl3945_rx_replenish() Replenishes rx_free list from rx_used, and calls | ||
931 | * iwl3945_rx_queue_restock | ||
932 | * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx | ||
933 | * queue, updates firmware pointers, and updates | ||
934 | * the WRITE index. If insufficient rx_free buffers | ||
935 | * are available, schedules iwl3945_rx_replenish | ||
936 | * | ||
937 | * -- enable interrupts -- | ||
938 | * ISR - iwl3945_rx() Detach iwl_rx_mem_buffers from pool up to the | ||
939 | * READ INDEX, detaching the SKB from the pool. | ||
940 | * Moves the packet buffer from queue to rx_used. | ||
941 | * Calls iwl3945_rx_queue_restock to refill any empty | ||
942 | * slots. | ||
943 | * ... | ||
944 | * | ||
945 | */ | ||
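/*
 * A concrete reading of the index rules above: with READ = 5, a WRITE of 4
 * (READ - 1) means the ring holds no data for the driver to process, while
 * WRITE = READ = 5 means the ring is completely full; the driver restocks
 * buffers and advances WRITE to give the firmware room again.
 */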
946 | |||
947 | /** | ||
948 | * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr | ||
949 | */ | ||
950 | static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv, | ||
951 | dma_addr_t dma_addr) | ||
952 | { | ||
953 | return cpu_to_le32((u32)dma_addr); | ||
954 | } | ||
955 | |||
956 | /** | ||
957 | * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool | ||
958 | * | ||
959 | * If there are slots in the RX queue that need to be restocked, | ||
960 | * and we have free pre-allocated buffers, fill the ranks as much | ||
961 | * as we can, pulling from rx_free. | ||
962 | * | ||
963 | * This moves the 'write' index forward to catch up with 'processed', and | ||
964 | * also updates the memory address in the firmware to reference the new | ||
965 | * target buffer. | ||
966 | */ | ||
967 | static void iwl3945_rx_queue_restock(struct iwl_priv *priv) | ||
968 | { | ||
969 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
970 | struct list_head *element; | ||
971 | struct iwl_rx_mem_buffer *rxb; | ||
972 | unsigned long flags; | ||
973 | int write; | ||
974 | |||
975 | spin_lock_irqsave(&rxq->lock, flags); | ||
976 | write = rxq->write & ~0x7; | ||
977 | while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) { | ||
978 | /* Get next free Rx buffer, remove from free list */ | ||
979 | element = rxq->rx_free.next; | ||
980 | rxb = list_entry(element, struct iwl_rx_mem_buffer, list); | ||
981 | list_del(element); | ||
982 | |||
983 | /* Point to Rx buffer via next RBD in circular buffer */ | ||
984 | rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma); | ||
985 | rxq->queue[rxq->write] = rxb; | ||
986 | rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; | ||
987 | rxq->free_count--; | ||
988 | } | ||
989 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
990 | /* If the pre-allocated buffer pool is dropping low, schedule to | ||
991 | * refill it */ | ||
992 | if (rxq->free_count <= RX_LOW_WATERMARK) | ||
993 | queue_work(priv->workqueue, &priv->rx_replenish); | ||
994 | |||
995 | |||
996 | /* If we've added more space for the firmware to place data, tell it. | ||
997 | * Increment device's write pointer in multiples of 8. */ | ||
998 | if ((rxq->write_actual != (rxq->write & ~0x7)) | ||
999 | || (abs(rxq->write - rxq->read) > 7)) { | ||
1000 | spin_lock_irqsave(&rxq->lock, flags); | ||
1001 | rxq->need_update = 1; | ||
1002 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
1003 | iwl_legacy_rx_queue_update_write_ptr(priv, rxq); | ||
1004 | } | ||
1005 | } | ||
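/*
 * Note on the restock path above: the device write pointer is only moved
 * in multiples of 8, so e.g. with write_actual = 8 the driver waits until
 * rxq->write reaches 16 (or the write/read gap exceeds 7) before flagging
 * need_update and calling iwl_legacy_rx_queue_update_write_ptr().
 */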
1006 | |||
1007 | /** | ||
1008 | * iwl3945_rx_replenish - Move all used packets from rx_used to rx_free | ||
1009 | * | ||
1010 | * When moving to rx_free an SKB is allocated for the slot. | ||
1011 | * | ||
1012 | * Also restock the Rx queue via iwl3945_rx_queue_restock. | ||
1013 | * This is called as a scheduled work item (except for during initialization) | ||
1014 | */ | ||
1015 | static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority) | ||
1016 | { | ||
1017 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
1018 | struct list_head *element; | ||
1019 | struct iwl_rx_mem_buffer *rxb; | ||
1020 | struct page *page; | ||
1021 | unsigned long flags; | ||
1022 | gfp_t gfp_mask = priority; | ||
1023 | |||
1024 | while (1) { | ||
1025 | spin_lock_irqsave(&rxq->lock, flags); | ||
1026 | |||
1027 | if (list_empty(&rxq->rx_used)) { | ||
1028 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
1029 | return; | ||
1030 | } | ||
1031 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
1032 | |||
1033 | if (rxq->free_count > RX_LOW_WATERMARK) | ||
1034 | gfp_mask |= __GFP_NOWARN; | ||
1035 | |||
1036 | if (priv->hw_params.rx_page_order > 0) | ||
1037 | gfp_mask |= __GFP_COMP; | ||
1038 | |||
1039 | /* Alloc a new receive buffer */ | ||
1040 | page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order); | ||
1041 | if (!page) { | ||
1042 | if (net_ratelimit()) | ||
1043 | IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n"); | ||
1044 | if ((rxq->free_count <= RX_LOW_WATERMARK) && | ||
1045 | net_ratelimit()) | ||
1046 | IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n", | ||
1047 | priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL", | ||
1048 | rxq->free_count); | ||
1049 | /* We don't reschedule replenish work here -- we will | ||
1050 | * call the restock method and if it still needs | ||
1051 | * more buffers it will schedule replenish */ | ||
1052 | break; | ||
1053 | } | ||
1054 | |||
1055 | spin_lock_irqsave(&rxq->lock, flags); | ||
1056 | if (list_empty(&rxq->rx_used)) { | ||
1057 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
1058 | __free_pages(page, priv->hw_params.rx_page_order); | ||
1059 | return; | ||
1060 | } | ||
1061 | element = rxq->rx_used.next; | ||
1062 | rxb = list_entry(element, struct iwl_rx_mem_buffer, list); | ||
1063 | list_del(element); | ||
1064 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
1065 | |||
1066 | rxb->page = page; | ||
1067 | /* Get physical address of RB/SKB */ | ||
1068 | rxb->page_dma = pci_map_page(priv->pci_dev, page, 0, | ||
1069 | PAGE_SIZE << priv->hw_params.rx_page_order, | ||
1070 | PCI_DMA_FROMDEVICE); | ||
1071 | |||
1072 | spin_lock_irqsave(&rxq->lock, flags); | ||
1073 | |||
1074 | list_add_tail(&rxb->list, &rxq->rx_free); | ||
1075 | rxq->free_count++; | ||
1076 | priv->alloc_rxb_page++; | ||
1077 | |||
1078 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
1079 | } | ||
1080 | } | ||
1081 | |||
1082 | void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | ||
1083 | { | ||
1084 | unsigned long flags; | ||
1085 | int i; | ||
1086 | spin_lock_irqsave(&rxq->lock, flags); | ||
1087 | INIT_LIST_HEAD(&rxq->rx_free); | ||
1088 | INIT_LIST_HEAD(&rxq->rx_used); | ||
1089 | /* Fill the rx_used queue with _all_ of the Rx buffers */ | ||
1090 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { | ||
1091 | /* In the reset function, these buffers may have been allocated | ||
1092 | * to an SKB, so we need to unmap and free potential storage */ | ||
1093 | if (rxq->pool[i].page != NULL) { | ||
1094 | pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, | ||
1095 | PAGE_SIZE << priv->hw_params.rx_page_order, | ||
1096 | PCI_DMA_FROMDEVICE); | ||
1097 | __iwl_legacy_free_pages(priv, rxq->pool[i].page); | ||
1098 | rxq->pool[i].page = NULL; | ||
1099 | } | ||
1100 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | ||
1101 | } | ||
1102 | |||
1103 | /* Set us so that we have processed and used all buffers, but have | ||
1104 | * not restocked the Rx queue with fresh buffers */ | ||
1105 | rxq->read = rxq->write = 0; | ||
1106 | rxq->write_actual = 0; | ||
1107 | rxq->free_count = 0; | ||
1108 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
1109 | } | ||
1110 | |||
1111 | void iwl3945_rx_replenish(void *data) | ||
1112 | { | ||
1113 | struct iwl_priv *priv = data; | ||
1114 | unsigned long flags; | ||
1115 | |||
1116 | iwl3945_rx_allocate(priv, GFP_KERNEL); | ||
1117 | |||
1118 | spin_lock_irqsave(&priv->lock, flags); | ||
1119 | iwl3945_rx_queue_restock(priv); | ||
1120 | spin_unlock_irqrestore(&priv->lock, flags); | ||
1121 | } | ||
1122 | |||
1123 | static void iwl3945_rx_replenish_now(struct iwl_priv *priv) | ||
1124 | { | ||
1125 | iwl3945_rx_allocate(priv, GFP_ATOMIC); | ||
1126 | |||
1127 | iwl3945_rx_queue_restock(priv); | ||
1128 | } | ||
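/*
 * Two replenish flavours: iwl3945_rx_replenish() runs from the work queue
 * and may sleep, so it allocates with GFP_KERNEL, while
 * iwl3945_rx_replenish_now() is called from the Rx handling path and
 * therefore allocates with GFP_ATOMIC.
 */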
1129 | |||
1130 | |||
1131 | /* Assumes that the page field of the buffers in 'pool' is kept accurate. | ||
1132 | * If a page has been detached, the pool entry needs to have its page set to NULL. | ||
1133 | * This free routine walks the list of pool entries and, if the page is | ||
1134 | * non-NULL, unmaps and frees it. | ||
1135 | */ | ||
1136 | static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | ||
1137 | { | ||
1138 | int i; | ||
1139 | for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { | ||
1140 | if (rxq->pool[i].page != NULL) { | ||
1141 | pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma, | ||
1142 | PAGE_SIZE << priv->hw_params.rx_page_order, | ||
1143 | PCI_DMA_FROMDEVICE); | ||
1144 | __iwl_legacy_free_pages(priv, rxq->pool[i].page); | ||
1145 | rxq->pool[i].page = NULL; | ||
1146 | } | ||
1147 | } | ||
1148 | |||
1149 | dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd, | ||
1150 | rxq->bd_dma); | ||
1151 | dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status), | ||
1152 | rxq->rb_stts, rxq->rb_stts_dma); | ||
1153 | rxq->bd = NULL; | ||
1154 | rxq->rb_stts = NULL; | ||
1155 | } | ||
1156 | |||
1157 | |||
1158 | /* Convert linear signal-to-noise ratio into dB */ | ||
1159 | static u8 ratio2dB[100] = { | ||
1160 | /* 0 1 2 3 4 5 6 7 8 9 */ | ||
1161 | 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */ | ||
1162 | 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */ | ||
1163 | 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */ | ||
1164 | 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */ | ||
1165 | 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */ | ||
1166 | 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */ | ||
1167 | 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */ | ||
1168 | 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */ | ||
1169 | 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */ | ||
1170 | 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */ | ||
1171 | }; | ||
1172 | |||
1173 | /* Calculates a relative dB value from a ratio of linear | ||
1174 | * (i.e. not dB) signal levels. | ||
1175 | * Conversion assumes that levels are voltages (20*log), not powers (10*log). */ | ||
1176 | int iwl3945_calc_db_from_ratio(int sig_ratio) | ||
1177 | { | ||
1178 | /* 1000:1 or higher just report as 60 dB */ | ||
1179 | if (sig_ratio >= 1000) | ||
1180 | return 60; | ||
1181 | |||
1182 | /* 100:1 or higher, divide by 10 and use table, | ||
1183 | * add 20 dB to make up for divide by 10 */ | ||
1184 | if (sig_ratio >= 100) | ||
1185 | return 20 + (int)ratio2dB[sig_ratio/10]; | ||
1186 | |||
1187 | /* We shouldn't see this */ | ||
1188 | if (sig_ratio < 1) | ||
1189 | return 0; | ||
1190 | |||
1191 | /* Use table for ratios 1:1 - 99:1 */ | ||
1192 | return (int)ratio2dB[sig_ratio]; | ||
1193 | } | ||
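/*
 * Worked example for the conversion above: a signal ratio of 250:1 is
 * >= 100, so it is divided by 10 and looked up -- ratio2dB[25] = 28 --
 * and 20 dB is added to make up for the divide, giving 48 dB.  A ratio
 * of 1000:1 or more is simply capped at 60 dB.
 */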
1194 | |||
1195 | /** | ||
1196 | * iwl3945_rx_handle - Main entry function for receiving responses from uCode | ||
1197 | * | ||
1198 | * Uses the priv->rx_handlers callback function array to invoke | ||
1199 | * the appropriate handlers, including command responses, | ||
1200 | * frame-received notifications, and other notifications. | ||
1201 | */ | ||
1202 | static void iwl3945_rx_handle(struct iwl_priv *priv) | ||
1203 | { | ||
1204 | struct iwl_rx_mem_buffer *rxb; | ||
1205 | struct iwl_rx_packet *pkt; | ||
1206 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
1207 | u32 r, i; | ||
1208 | int reclaim; | ||
1209 | unsigned long flags; | ||
1210 | u8 fill_rx = 0; | ||
1211 | u32 count = 8; | ||
1212 | int total_empty = 0; | ||
1213 | |||
1214 | /* uCode's read index (stored in shared DRAM) indicates the last Rx | ||
1215 | * buffer that the driver may process (last buffer filled by ucode). */ | ||
1216 | r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF; | ||
1217 | i = rxq->read; | ||
1218 | |||
1219 | /* calculate how many frames need to be restocked after handling RX */ | ||
1220 | total_empty = r - rxq->write_actual; | ||
1221 | if (total_empty < 0) | ||
1222 | total_empty += RX_QUEUE_SIZE; | ||
1223 | |||
1224 | if (total_empty > (RX_QUEUE_SIZE / 2)) | ||
1225 | fill_rx = 1; | ||
1226 | /* Rx interrupt, but nothing sent from uCode */ | ||
1227 | if (i == r) | ||
1228 | IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i); | ||
1229 | |||
1230 | while (i != r) { | ||
1231 | int len; | ||
1232 | |||
1233 | rxb = rxq->queue[i]; | ||
1234 | |||
1235 | /* If an RXB doesn't have a Rx queue slot associated with it, | ||
1236 | * then a bug has been introduced in the queue refilling | ||
1237 | * routines -- catch it here */ | ||
1238 | BUG_ON(rxb == NULL); | ||
1239 | |||
1240 | rxq->queue[i] = NULL; | ||
1241 | |||
1242 | pci_unmap_page(priv->pci_dev, rxb->page_dma, | ||
1243 | PAGE_SIZE << priv->hw_params.rx_page_order, | ||
1244 | PCI_DMA_FROMDEVICE); | ||
1245 | pkt = rxb_addr(rxb); | ||
1246 | |||
1247 | len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; | ||
1248 | len += sizeof(u32); /* account for status word */ | ||
1249 | trace_iwlwifi_legacy_dev_rx(priv, pkt, len); | ||
1250 | |||
1251 | /* Reclaim a command buffer only if this packet is a response | ||
1252 | * to a (driver-originated) command. | ||
1253 | * If the packet (e.g. Rx frame) originated from uCode, | ||
1254 | * there is no command buffer to reclaim. | ||
1255 | * Ucode should set SEQ_RX_FRAME bit if ucode-originated, | ||
1256 | * but apparently a few don't get set; catch them here. */ | ||
1257 | reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && | ||
1258 | (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && | ||
1259 | (pkt->hdr.cmd != REPLY_TX); | ||
1260 | |||
1261 | /* Based on type of command response or notification, | ||
1262 | * handle those that need handling via function in | ||
1263 | * rx_handlers table. See iwl3945_setup_rx_handlers() */ | ||
1264 | if (priv->rx_handlers[pkt->hdr.cmd]) { | ||
1265 | IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i, | ||
1266 | iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); | ||
1267 | priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; | ||
1268 | priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); | ||
1269 | } else { | ||
1270 | /* No handling needed */ | ||
1271 | IWL_DEBUG_RX(priv, | ||
1272 | "r %d i %d No handler needed for %s, 0x%02x\n", | ||
1273 | r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd), | ||
1274 | pkt->hdr.cmd); | ||
1275 | } | ||
1276 | |||
1277 | /* | ||
1278 | * XXX: After here, we should always check rxb->page | ||
1279 | * against NULL before touching it or its virtual | ||
1280 | * memory (pkt). Because some rx_handler might have | ||
1281 | * already taken or freed the pages. | ||
1282 | */ | ||
1283 | |||
1284 | if (reclaim) { | ||
1285 | /* Invoke any callbacks, transfer the buffer to caller, | ||
1286 | * and fire off the (possibly) blocking iwl_legacy_send_cmd() | ||
1287 | * as we reclaim the driver command queue */ | ||
1288 | if (rxb->page) | ||
1289 | iwl_legacy_tx_cmd_complete(priv, rxb); | ||
1290 | else | ||
1291 | IWL_WARN(priv, "Claim null rxb?\n"); | ||
1292 | } | ||
1293 | |||
1294 | /* Reuse the page if possible. For notification packets and | ||
1295 | * SKBs that fail to Rx correctly, add them back into the | ||
1296 | * rx_free list for reuse later. */ | ||
1297 | spin_lock_irqsave(&rxq->lock, flags); | ||
1298 | if (rxb->page != NULL) { | ||
1299 | rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page, | ||
1300 | 0, PAGE_SIZE << priv->hw_params.rx_page_order, | ||
1301 | PCI_DMA_FROMDEVICE); | ||
1302 | list_add_tail(&rxb->list, &rxq->rx_free); | ||
1303 | rxq->free_count++; | ||
1304 | } else | ||
1305 | list_add_tail(&rxb->list, &rxq->rx_used); | ||
1306 | |||
1307 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
1308 | |||
1309 | i = (i + 1) & RX_QUEUE_MASK; | ||
1310 | /* If there are a lot of unused frames, | ||
1311 | * restock the Rx queue so ucode won't assert. */ | ||
1312 | if (fill_rx) { | ||
1313 | count++; | ||
1314 | if (count >= 8) { | ||
1315 | rxq->read = i; | ||
1316 | iwl3945_rx_replenish_now(priv); | ||
1317 | count = 0; | ||
1318 | } | ||
1319 | } | ||
1320 | } | ||
1321 | |||
1322 | /* Backtrack one entry */ | ||
1323 | rxq->read = i; | ||
1324 | if (fill_rx) | ||
1325 | iwl3945_rx_replenish_now(priv); | ||
1326 | else | ||
1327 | iwl3945_rx_queue_restock(priv); | ||
1328 | } | ||
1329 | |||
1330 | /* call this function to flush any scheduled tasklet */ | ||
1331 | static inline void iwl3945_synchronize_irq(struct iwl_priv *priv) | ||
1332 | { | ||
1333 | /* wait to make sure we flush the pending tasklet */ | ||
1334 | synchronize_irq(priv->pci_dev->irq); | ||
1335 | tasklet_kill(&priv->irq_tasklet); | ||
1336 | } | ||
1337 | |||
1338 | static const char *iwl3945_desc_lookup(int i) | ||
1339 | { | ||
1340 | switch (i) { | ||
1341 | case 1: | ||
1342 | return "FAIL"; | ||
1343 | case 2: | ||
1344 | return "BAD_PARAM"; | ||
1345 | case 3: | ||
1346 | return "BAD_CHECKSUM"; | ||
1347 | case 4: | ||
1348 | return "NMI_INTERRUPT"; | ||
1349 | case 5: | ||
1350 | return "SYSASSERT"; | ||
1351 | case 6: | ||
1352 | return "FATAL_ERROR"; | ||
1353 | } | ||
1354 | |||
1355 | return "UNKNOWN"; | ||
1356 | } | ||
1357 | |||
1358 | #define ERROR_START_OFFSET (1 * sizeof(u32)) | ||
1359 | #define ERROR_ELEM_SIZE (7 * sizeof(u32)) | ||
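/*
 * Error-log layout assumed by the dump below: the first u32 at 'base' is
 * the record count, and each record is ERROR_ELEM_SIZE (7 u32s) holding
 * desc, time, blink1, blink2, ilink1, ilink2 and data1; record i therefore
 * starts at base + ERROR_START_OFFSET + i * ERROR_ELEM_SIZE.
 */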
1360 | |||
1361 | void iwl3945_dump_nic_error_log(struct iwl_priv *priv) | ||
1362 | { | ||
1363 | u32 i; | ||
1364 | u32 desc, time, count, base, data1; | ||
1365 | u32 blink1, blink2, ilink1, ilink2; | ||
1366 | |||
1367 | base = le32_to_cpu(priv->card_alive.error_event_table_ptr); | ||
1368 | |||
1369 | if (!iwl3945_hw_valid_rtc_data_addr(base)) { | ||
1370 | IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base); | ||
1371 | return; | ||
1372 | } | ||
1373 | |||
1374 | |||
1375 | count = iwl_legacy_read_targ_mem(priv, base); | ||
1376 | |||
1377 | if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { | ||
1378 | IWL_ERR(priv, "Start IWL Error Log Dump:\n"); | ||
1379 | IWL_ERR(priv, "Status: 0x%08lX, count: %d\n", | ||
1380 | priv->status, count); | ||
1381 | } | ||
1382 | |||
1383 | IWL_ERR(priv, "Desc Time asrtPC blink2 " | ||
1384 | "ilink1 nmiPC Line\n"); | ||
1385 | for (i = ERROR_START_OFFSET; | ||
1386 | i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET; | ||
1387 | i += ERROR_ELEM_SIZE) { | ||
1388 | desc = iwl_legacy_read_targ_mem(priv, base + i); | ||
1389 | time = | ||
1390 | iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32)); | ||
1391 | blink1 = | ||
1392 | iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32)); | ||
1393 | blink2 = | ||
1394 | iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32)); | ||
1395 | ilink1 = | ||
1396 | iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32)); | ||
1397 | ilink2 = | ||
1398 | iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32)); | ||
1399 | data1 = | ||
1400 | iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32)); | ||
1401 | |||
1402 | IWL_ERR(priv, | ||
1403 | "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", | ||
1404 | iwl3945_desc_lookup(desc), desc, time, blink1, blink2, | ||
1405 | ilink1, ilink2, data1); | ||
1406 | trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0, | ||
1407 | 0, blink1, blink2, ilink1, ilink2); | ||
1408 | } | ||
1409 | } | ||
1410 | |||
1411 | #define EVENT_START_OFFSET (6 * sizeof(u32)) | ||
1412 | |||
1413 | /** | ||
1414 | * iwl3945_print_event_log - Dump error event log to syslog | ||
1415 | * | ||
1416 | */ | ||
1417 | static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx, | ||
1418 | u32 num_events, u32 mode, | ||
1419 | int pos, char **buf, size_t bufsz) | ||
1420 | { | ||
1421 | u32 i; | ||
1422 | u32 base; /* SRAM byte address of event log header */ | ||
1423 | u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ | ||
1424 | u32 ptr; /* SRAM byte address of log data */ | ||
1425 | u32 ev, time, data; /* event log data */ | ||
1426 | unsigned long reg_flags; | ||
1427 | |||
1428 | if (num_events == 0) | ||
1429 | return pos; | ||
1430 | |||
1431 | base = le32_to_cpu(priv->card_alive.log_event_table_ptr); | ||
1432 | |||
1433 | if (mode == 0) | ||
1434 | event_size = 2 * sizeof(u32); | ||
1435 | else | ||
1436 | event_size = 3 * sizeof(u32); | ||
1437 | |||
1438 | ptr = base + EVENT_START_OFFSET + (start_idx * event_size); | ||
1439 | |||
1440 | /* Make sure device is powered up for SRAM reads */ | ||
1441 | spin_lock_irqsave(&priv->reg_lock, reg_flags); | ||
1442 | iwl_grab_nic_access(priv); | ||
1443 | |||
1444 | /* Set starting address; reads will auto-increment */ | ||
1445 | _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr); | ||
1446 | rmb(); | ||
1447 | |||
1448 | /* "time" is actually "data" for mode 0 (no timestamp). | ||
1449 | * place event id # at far right for easier visual parsing. */ | ||
1450 | for (i = 0; i < num_events; i++) { | ||
1451 | ev = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); | ||
1452 | time = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); | ||
1453 | if (mode == 0) { | ||
1454 | /* data, ev */ | ||
1455 | if (bufsz) { | ||
1456 | pos += scnprintf(*buf + pos, bufsz - pos, | ||
1457 | "0x%08x:%04u\n", | ||
1458 | time, ev); | ||
1459 | } else { | ||
1460 | IWL_ERR(priv, "0x%08x\t%04u\n", time, ev); | ||
1461 | trace_iwlwifi_legacy_dev_ucode_event(priv, 0, | ||
1462 | time, ev); | ||
1463 | } | ||
1464 | } else { | ||
1465 | data = _iwl_legacy_read_direct32(priv, | ||
1466 | HBUS_TARG_MEM_RDAT); | ||
1467 | if (bufsz) { | ||
1468 | pos += scnprintf(*buf + pos, bufsz - pos, | ||
1469 | "%010u:0x%08x:%04u\n", | ||
1470 | time, data, ev); | ||
1471 | } else { | ||
1472 | IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", | ||
1473 | time, data, ev); | ||
1474 | trace_iwlwifi_legacy_dev_ucode_event(priv, time, | ||
1475 | data, ev); | ||
1476 | } | ||
1477 | } | ||
1478 | } | ||
1479 | |||
1480 | /* Allow device to power down */ | ||
1481 | iwl_release_nic_access(priv); | ||
1482 | spin_unlock_irqrestore(&priv->reg_lock, reg_flags); | ||
1483 | return pos; | ||
1484 | } | ||
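/*
 * Editor's note (worked example, not part of the original driver):
 * each event record read above is either 2 u32s (mode 0: event id,
 * data) or 3 u32s (mode 1: event id, timestamp, data), stored after a
 * 6-u32 log header.  With a hypothetical base of 0x00840000, mode 1
 * and start_idx 5, the starting SRAM address works out to:
 *
 *	ptr = 0x00840000 + EVENT_START_OFFSET + 5 * (3 * sizeof(u32))
 *	    = 0x00840000 + 24 + 60 = 0x00840054
 *
 * and every HBUS_TARG_MEM_RDAT read after that auto-increments the
 * address by one word, as set up via HBUS_TARG_MEM_RADDR.
 */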
1485 | |||
1486 | /** | ||
1487 | * iwl3945_print_last_event_logs - Dump the newest # of event log entries to syslog | ||
1488 | */ | ||
1489 | static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity, | ||
1490 | u32 num_wraps, u32 next_entry, | ||
1491 | u32 size, u32 mode, | ||
1492 | int pos, char **buf, size_t bufsz) | ||
1493 | { | ||
1494 | /* | ||
1495 | * display the newest DEFAULT_LOG_ENTRIES entries | ||
1496 | * i.e. the entries just before the next one that uCode would fill. | ||
1497 | */ | ||
1498 | if (num_wraps) { | ||
1499 | if (next_entry < size) { | ||
1500 | pos = iwl3945_print_event_log(priv, | ||
1501 | capacity - (size - next_entry), | ||
1502 | size - next_entry, mode, | ||
1503 | pos, buf, bufsz); | ||
1504 | pos = iwl3945_print_event_log(priv, 0, | ||
1505 | next_entry, mode, | ||
1506 | pos, buf, bufsz); | ||
1507 | } else | ||
1508 | pos = iwl3945_print_event_log(priv, next_entry - size, | ||
1509 | size, mode, | ||
1510 | pos, buf, bufsz); | ||
1511 | } else { | ||
1512 | if (next_entry < size) | ||
1513 | pos = iwl3945_print_event_log(priv, 0, | ||
1514 | next_entry, mode, | ||
1515 | pos, buf, bufsz); | ||
1516 | else | ||
1517 | pos = iwl3945_print_event_log(priv, next_entry - size, | ||
1518 | size, mode, | ||
1519 | pos, buf, bufsz); | ||
1520 | } | ||
1521 | return pos; | ||
1522 | } | ||
1523 | |||
1524 | #define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20) | ||
1525 | |||
1526 | int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log, | ||
1527 | char **buf, bool display) | ||
1528 | { | ||
1529 | u32 base; /* SRAM byte address of event log header */ | ||
1530 | u32 capacity; /* event log capacity in # entries */ | ||
1531 | u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ | ||
1532 | u32 num_wraps; /* # times uCode wrapped to top of log */ | ||
1533 | u32 next_entry; /* index of next entry to be written by uCode */ | ||
1534 | u32 size; /* # entries that we'll print */ | ||
1535 | int pos = 0; | ||
1536 | size_t bufsz = 0; | ||
1537 | |||
1538 | base = le32_to_cpu(priv->card_alive.log_event_table_ptr); | ||
1539 | if (!iwl3945_hw_valid_rtc_data_addr(base)) { | ||
1540 | IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base); | ||
1541 | return -EINVAL; | ||
1542 | } | ||
1543 | |||
1544 | /* event log header */ | ||
1545 | capacity = iwl_legacy_read_targ_mem(priv, base); | ||
1546 | mode = iwl_legacy_read_targ_mem(priv, base + (1 * sizeof(u32))); | ||
1547 | num_wraps = iwl_legacy_read_targ_mem(priv, base + (2 * sizeof(u32))); | ||
1548 | next_entry = iwl_legacy_read_targ_mem(priv, base + (3 * sizeof(u32))); | ||
1549 | |||
1550 | if (capacity > priv->cfg->base_params->max_event_log_size) { | ||
1551 | IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n", | ||
1552 | capacity, priv->cfg->base_params->max_event_log_size); | ||
1553 | capacity = priv->cfg->base_params->max_event_log_size; | ||
1554 | } | ||
1555 | |||
1556 | if (next_entry > priv->cfg->base_params->max_event_log_size) { | ||
1557 | IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n", | ||
1558 | next_entry, priv->cfg->base_params->max_event_log_size); | ||
1559 | next_entry = priv->cfg->base_params->max_event_log_size; | ||
1560 | } | ||
1561 | |||
1562 | size = num_wraps ? capacity : next_entry; | ||
1563 | |||
1564 | /* bail out if nothing in log */ | ||
1565 | if (size == 0) { | ||
1566 | IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n"); | ||
1567 | return pos; | ||
1568 | } | ||
1569 | |||
1570 | #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG | ||
1571 | if (!(iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log) | ||
1572 | size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES) | ||
1573 | ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size; | ||
1574 | #else | ||
1575 | size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES) | ||
1576 | ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size; | ||
1577 | #endif | ||
1578 | |||
1579 | IWL_ERR(priv, "Start IWL Event Log Dump: display last %d entries\n", | ||
1580 | size); | ||
1581 | |||
1582 | #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG | ||
1583 | if (display) { | ||
1584 | if (full_log) | ||
1585 | bufsz = capacity * 48; | ||
1586 | else | ||
1587 | bufsz = size * 48; | ||
1588 | *buf = kmalloc(bufsz, GFP_KERNEL); | ||
1589 | if (!*buf) | ||
1590 | return -ENOMEM; | ||
1591 | } | ||
1592 | if ((iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) { | ||
1593 | /* if uCode has wrapped back to top of log, | ||
1594 | * start at the oldest entry, | ||
1595 | * i.e. the next one that uCode would fill. | ||
1596 | */ | ||
1597 | if (num_wraps) | ||
1598 | pos = iwl3945_print_event_log(priv, next_entry, | ||
1599 | capacity - next_entry, mode, | ||
1600 | pos, buf, bufsz); | ||
1601 | |||
1602 | /* (then/else) start at top of log */ | ||
1603 | pos = iwl3945_print_event_log(priv, 0, next_entry, mode, | ||
1604 | pos, buf, bufsz); | ||
1605 | } else | ||
1606 | pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps, | ||
1607 | next_entry, size, mode, | ||
1608 | pos, buf, bufsz); | ||
1609 | #else | ||
1610 | pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps, | ||
1611 | next_entry, size, mode, | ||
1612 | pos, buf, bufsz); | ||
1613 | #endif | ||
1614 | return pos; | ||
1615 | } | ||
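/*
 * Editor's note (worked example, not part of the original driver):
 * suppose a hypothetical log with capacity = 512, num_wraps = 3 and
 * next_entry = 10.  Without IWL_DL_FW_ERRORS/full_log the dump is
 * capped at DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES = 20 entries, so
 * iwl3945_print_last_event_logs() prints the 10 newest pre-wrap
 * entries (indexes 502..511) and then the 10 entries written since
 * the wrap (indexes 0..9) -- i.e. the 20 most recent events in order.
 */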
1616 | |||
1617 | static void iwl3945_irq_tasklet(struct iwl_priv *priv) | ||
1618 | { | ||
1619 | u32 inta, handled = 0; | ||
1620 | u32 inta_fh; | ||
1621 | unsigned long flags; | ||
1622 | #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG | ||
1623 | u32 inta_mask; | ||
1624 | #endif | ||
1625 | |||
1626 | spin_lock_irqsave(&priv->lock, flags); | ||
1627 | |||
1628 | /* Ack/clear/reset pending uCode interrupts. | ||
1629 | * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, | ||
1630 | * and will clear only when CSR_FH_INT_STATUS gets cleared. */ | ||
1631 | inta = iwl_read32(priv, CSR_INT); | ||
1632 | iwl_write32(priv, CSR_INT, inta); | ||
1633 | |||
1634 | /* Ack/clear/reset pending flow-handler (DMA) interrupts. | ||
1635 | * Any new interrupts that happen after this, either while we're | ||
1636 | * in this tasklet, or later, will show up in next ISR/tasklet. */ | ||
1637 | inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); | ||
1638 | iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); | ||
1639 | |||
1640 | #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG | ||
1641 | if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) { | ||
1642 | /* just for debug */ | ||
1643 | inta_mask = iwl_read32(priv, CSR_INT_MASK); | ||
1644 | IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", | ||
1645 | inta, inta_mask, inta_fh); | ||
1646 | } | ||
1647 | #endif | ||
1648 | |||
1649 | spin_unlock_irqrestore(&priv->lock, flags); | ||
1650 | |||
1651 | /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not | ||
1652 | * atomic, make sure that inta covers all the interrupts that | ||
1653 | * we've discovered, even if FH interrupt came in just after | ||
1654 | * reading CSR_INT. */ | ||
1655 | if (inta_fh & CSR39_FH_INT_RX_MASK) | ||
1656 | inta |= CSR_INT_BIT_FH_RX; | ||
1657 | if (inta_fh & CSR39_FH_INT_TX_MASK) | ||
1658 | inta |= CSR_INT_BIT_FH_TX; | ||
1659 | |||
1660 | /* Now service all interrupt bits discovered above. */ | ||
1661 | if (inta & CSR_INT_BIT_HW_ERR) { | ||
1662 | IWL_ERR(priv, "Hardware error detected. Restarting.\n"); | ||
1663 | |||
1664 | /* Tell the device to stop sending interrupts */ | ||
1665 | iwl_legacy_disable_interrupts(priv); | ||
1666 | |||
1667 | priv->isr_stats.hw++; | ||
1668 | iwl_legacy_irq_handle_error(priv); | ||
1669 | |||
1670 | handled |= CSR_INT_BIT_HW_ERR; | ||
1671 | |||
1672 | return; | ||
1673 | } | ||
1674 | |||
1675 | #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG | ||
1676 | if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) { | ||
1677 | /* NIC fires this, but we don't use it, redundant with WAKEUP */ | ||
1678 | if (inta & CSR_INT_BIT_SCD) { | ||
1679 | IWL_DEBUG_ISR(priv, "Scheduler finished transmitting " | ||
1680 | "the frame(s).\n"); | ||
1681 | priv->isr_stats.sch++; | ||
1682 | } | ||
1683 | |||
1684 | /* Alive notification via Rx interrupt will do the real work */ | ||
1685 | if (inta & CSR_INT_BIT_ALIVE) { | ||
1686 | IWL_DEBUG_ISR(priv, "Alive interrupt\n"); | ||
1687 | priv->isr_stats.alive++; | ||
1688 | } | ||
1689 | } | ||
1690 | #endif | ||
1691 | /* Safely ignore these bits for debug checks below */ | ||
1692 | inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE); | ||
1693 | |||
1694 | /* Error detected by uCode */ | ||
1695 | if (inta & CSR_INT_BIT_SW_ERR) { | ||
1696 | IWL_ERR(priv, "Microcode SW error detected. " | ||
1697 | "Restarting 0x%X.\n", inta); | ||
1698 | priv->isr_stats.sw++; | ||
1699 | iwl_legacy_irq_handle_error(priv); | ||
1700 | handled |= CSR_INT_BIT_SW_ERR; | ||
1701 | } | ||
1702 | |||
1703 | /* uCode wakes up after power-down sleep */ | ||
1704 | if (inta & CSR_INT_BIT_WAKEUP) { | ||
1705 | IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); | ||
1706 | iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq); | ||
1707 | iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]); | ||
1708 | iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]); | ||
1709 | iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]); | ||
1710 | iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]); | ||
1711 | iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]); | ||
1712 | iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]); | ||
1713 | |||
1714 | priv->isr_stats.wakeup++; | ||
1715 | handled |= CSR_INT_BIT_WAKEUP; | ||
1716 | } | ||
1717 | |||
1718 | /* All uCode command responses, including Tx command responses, | ||
1719 | * Rx "responses" (frame-received notification), and other | ||
1720 | * notifications from uCode come through here*/ | ||
1721 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { | ||
1722 | iwl3945_rx_handle(priv); | ||
1723 | priv->isr_stats.rx++; | ||
1724 | handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); | ||
1725 | } | ||
1726 | |||
1727 | if (inta & CSR_INT_BIT_FH_TX) { | ||
1728 | IWL_DEBUG_ISR(priv, "Tx interrupt\n"); | ||
1729 | priv->isr_stats.tx++; | ||
1730 | |||
1731 | iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6)); | ||
1732 | iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT | ||
1733 | (FH39_SRVC_CHNL), 0x0); | ||
1734 | handled |= CSR_INT_BIT_FH_TX; | ||
1735 | } | ||
1736 | |||
1737 | if (inta & ~handled) { | ||
1738 | IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled); | ||
1739 | priv->isr_stats.unhandled++; | ||
1740 | } | ||
1741 | |||
1742 | if (inta & ~priv->inta_mask) { | ||
1743 | IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n", | ||
1744 | inta & ~priv->inta_mask); | ||
1745 | IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh); | ||
1746 | } | ||
1747 | |||
1748 | /* Re-enable all interrupts */ | ||
1749 | /* only Re-enable if disabled by irq */ | ||
1750 | if (test_bit(STATUS_INT_ENABLED, &priv->status)) | ||
1751 | iwl_legacy_enable_interrupts(priv); | ||
1752 | |||
1753 | #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG | ||
1754 | if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) { | ||
1755 | inta = iwl_read32(priv, CSR_INT); | ||
1756 | inta_mask = iwl_read32(priv, CSR_INT_MASK); | ||
1757 | inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); | ||
1758 | IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " | ||
1759 | "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); | ||
1760 | } | ||
1761 | #endif | ||
1762 | } | ||
1763 | |||
1764 | static int iwl3945_get_single_channel_for_scan(struct iwl_priv *priv, | ||
1765 | struct ieee80211_vif *vif, | ||
1766 | enum ieee80211_band band, | ||
1767 | struct iwl3945_scan_channel *scan_ch) | ||
1768 | { | ||
1769 | const struct ieee80211_supported_band *sband; | ||
1770 | u16 passive_dwell = 0; | ||
1771 | u16 active_dwell = 0; | ||
1772 | int added = 0; | ||
1773 | u8 channel = 0; | ||
1774 | |||
1775 | sband = iwl_get_hw_mode(priv, band); | ||
1776 | if (!sband) { | ||
1777 | IWL_ERR(priv, "invalid band\n"); | ||
1778 | return added; | ||
1779 | } | ||
1780 | |||
1781 | active_dwell = iwl_legacy_get_active_dwell_time(priv, band, 0); | ||
1782 | passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif); | ||
1783 | |||
1784 | if (passive_dwell <= active_dwell) | ||
1785 | passive_dwell = active_dwell + 1; | ||
1786 | |||
1787 | |||
1788 | channel = iwl_legacy_get_single_channel_number(priv, band); | ||
1789 | |||
1790 | if (channel) { | ||
1791 | scan_ch->channel = channel; | ||
1792 | scan_ch->type = 0; /* passive */ | ||
1793 | scan_ch->active_dwell = cpu_to_le16(active_dwell); | ||
1794 | scan_ch->passive_dwell = cpu_to_le16(passive_dwell); | ||
1795 | /* Set txpower levels to defaults */ | ||
1796 | scan_ch->tpc.dsp_atten = 110; | ||
1797 | if (band == IEEE80211_BAND_5GHZ) | ||
1798 | scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; | ||
1799 | else | ||
1800 | scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); | ||
1801 | added++; | ||
1802 | } else | ||
1803 | IWL_ERR(priv, "no valid channel found\n"); | ||
1804 | return added; | ||
1805 | } | ||
1806 | |||
1807 | static int iwl3945_get_channels_for_scan(struct iwl_priv *priv, | ||
1808 | enum ieee80211_band band, | ||
1809 | u8 is_active, u8 n_probes, | ||
1810 | struct iwl3945_scan_channel *scan_ch, | ||
1811 | struct ieee80211_vif *vif) | ||
1812 | { | ||
1813 | struct ieee80211_channel *chan; | ||
1814 | const struct ieee80211_supported_band *sband; | ||
1815 | const struct iwl_channel_info *ch_info; | ||
1816 | u16 passive_dwell = 0; | ||
1817 | u16 active_dwell = 0; | ||
1818 | int added, i; | ||
1819 | |||
1820 | sband = iwl_get_hw_mode(priv, band); | ||
1821 | if (!sband) | ||
1822 | return 0; | ||
1823 | |||
1824 | active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes); | ||
1825 | passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif); | ||
1826 | |||
1827 | if (passive_dwell <= active_dwell) | ||
1828 | passive_dwell = active_dwell + 1; | ||
1829 | |||
1830 | for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) { | ||
1831 | chan = priv->scan_request->channels[i]; | ||
1832 | |||
1833 | if (chan->band != band) | ||
1834 | continue; | ||
1835 | |||
1836 | scan_ch->channel = chan->hw_value; | ||
1837 | |||
1838 | ch_info = iwl_legacy_get_channel_info(priv, band, | ||
1839 | scan_ch->channel); | ||
1840 | if (!iwl_legacy_is_channel_valid(ch_info)) { | ||
1841 | IWL_DEBUG_SCAN(priv, | ||
1842 | "Channel %d is INVALID for this band.\n", | ||
1843 | scan_ch->channel); | ||
1844 | continue; | ||
1845 | } | ||
1846 | |||
1847 | scan_ch->active_dwell = cpu_to_le16(active_dwell); | ||
1848 | scan_ch->passive_dwell = cpu_to_le16(passive_dwell); | ||
1849 | /* If passive, set up for auto-switch | ||
1850 | * and use long active_dwell time. | ||
1851 | */ | ||
1852 | if (!is_active || iwl_legacy_is_channel_passive(ch_info) || | ||
1853 | (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) { | ||
1854 | scan_ch->type = 0; /* passive */ | ||
1855 | if (IWL_UCODE_API(priv->ucode_ver) == 1) | ||
1856 | scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1); | ||
1857 | } else { | ||
1858 | scan_ch->type = 1; /* active */ | ||
1859 | } | ||
1860 | |||
1861 | /* Set direct probe bits. These may be used both for active | ||
1862 | * scan channels (probes get sent right away), | ||
1863 | * and for passive channels (probes get sent only after | ||
1864 | * hearing a clear Rx packet). */ | ||
1865 | if (IWL_UCODE_API(priv->ucode_ver) >= 2) { | ||
1866 | if (n_probes) | ||
1867 | scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes); | ||
1868 | } else { | ||
1869 | /* uCode v1 does not allow setting direct probe bits on | ||
1870 | * passive channel. */ | ||
1871 | if ((scan_ch->type & 1) && n_probes) | ||
1872 | scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes); | ||
1873 | } | ||
1874 | |||
1875 | /* Set txpower levels to defaults */ | ||
1876 | scan_ch->tpc.dsp_atten = 110; | ||
1877 | /* scan_pwr_info->tpc.dsp_atten; */ | ||
1878 | |||
1879 | /*scan_pwr_info->tpc.tx_gain; */ | ||
1880 | if (band == IEEE80211_BAND_5GHZ) | ||
1881 | scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; | ||
1882 | else { | ||
1883 | scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); | ||
1884 | /* NOTE: if we were doing 6Mb OFDM for scans we'd use | ||
1885 | * power level: | ||
1886 | * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3; | ||
1887 | */ | ||
1888 | } | ||
1889 | |||
1890 | IWL_DEBUG_SCAN(priv, "Scanning %d [%s %d]\n", | ||
1891 | scan_ch->channel, | ||
1892 | (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE", | ||
1893 | (scan_ch->type & 1) ? | ||
1894 | active_dwell : passive_dwell); | ||
1895 | |||
1896 | scan_ch++; | ||
1897 | added++; | ||
1898 | } | ||
1899 | |||
1900 | IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added); | ||
1901 | return added; | ||
1902 | } | ||
1903 | |||
1904 | static void iwl3945_init_hw_rates(struct iwl_priv *priv, | ||
1905 | struct ieee80211_rate *rates) | ||
1906 | { | ||
1907 | int i; | ||
1908 | |||
1909 | for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) { | ||
1910 | rates[i].bitrate = iwl3945_rates[i].ieee * 5; | ||
1911 | rates[i].hw_value = i; /* Rate scaling will work on indexes */ | ||
1912 | rates[i].hw_value_short = i; | ||
1913 | rates[i].flags = 0; | ||
1914 | if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) { | ||
1915 | /* | ||
1916 | * If CCK != 1M then set short preamble rate flag. | ||
1917 | */ | ||
1918 | rates[i].flags |= (iwl3945_rates[i].plcp == 10) ? | ||
1919 | 0 : IEEE80211_RATE_SHORT_PREAMBLE; | ||
1920 | } | ||
1921 | } | ||
1922 | } | ||
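/*
 * Editor's note (illustrative example, not part of the original driver):
 * mac80211's ieee80211_rate.bitrate is in units of 100 kbps, while the
 * .ieee field of iwl3945_rates[] holds the IEEE rate code in units of
 * 500 kbps -- hence the "* 5" above.  Assuming the usual table values,
 * 1 Mbps has .ieee = 2 -> bitrate = 10 and 54 Mbps has .ieee = 108 ->
 * bitrate = 540.  Only CCK rates other than 1M (PLCP code 10) get the
 * IEEE80211_RATE_SHORT_PREAMBLE flag.
 */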
1923 | |||
1924 | /****************************************************************************** | ||
1925 | * | ||
1926 | * uCode download functions | ||
1927 | * | ||
1928 | ******************************************************************************/ | ||
1929 | |||
1930 | static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv) | ||
1931 | { | ||
1932 | iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code); | ||
1933 | iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data); | ||
1934 | iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup); | ||
1935 | iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init); | ||
1936 | iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data); | ||
1937 | iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot); | ||
1938 | } | ||
1939 | |||
1940 | /** | ||
1941 | * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host, | ||
1942 | * looking at all data. | ||
1943 | */ | ||
1944 | static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len) | ||
1945 | { | ||
1946 | u32 val; | ||
1947 | u32 save_len = len; | ||
1948 | int rc = 0; | ||
1949 | u32 errcnt; | ||
1950 | |||
1951 | IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); | ||
1952 | |||
1953 | iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, | ||
1954 | IWL39_RTC_INST_LOWER_BOUND); | ||
1955 | |||
1956 | errcnt = 0; | ||
1957 | for (; len > 0; len -= sizeof(u32), image++) { | ||
1958 | /* read data comes through single port, auto-incr addr */ | ||
1959 | /* NOTE: Use the debugless read so we don't flood kernel log | ||
1960 | * if IWL_DL_IO is set */ | ||
1961 | val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); | ||
1962 | if (val != le32_to_cpu(*image)) { | ||
1963 | IWL_ERR(priv, "uCode INST section is invalid at " | ||
1964 | "offset 0x%x, is 0x%x, s/b 0x%x\n", | ||
1965 | save_len - len, val, le32_to_cpu(*image)); | ||
1966 | rc = -EIO; | ||
1967 | errcnt++; | ||
1968 | if (errcnt >= 20) | ||
1969 | break; | ||
1970 | } | ||
1971 | } | ||
1972 | |||
1973 | |||
1974 | if (!errcnt) | ||
1975 | IWL_DEBUG_INFO(priv, | ||
1976 | "ucode image in INSTRUCTION memory is good\n"); | ||
1977 | |||
1978 | return rc; | ||
1979 | } | ||
1980 | |||
1981 | |||
1982 | /** | ||
1983 | * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host, | ||
1984 | * using sample data 100 bytes apart. If these sample points are good, | ||
1985 | * it's a pretty good bet that everything between them is good, too. | ||
1986 | */ | ||
1987 | static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len) | ||
1988 | { | ||
1989 | u32 val; | ||
1990 | int rc = 0; | ||
1991 | u32 errcnt = 0; | ||
1992 | u32 i; | ||
1993 | |||
1994 | IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len); | ||
1995 | |||
1996 | for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { | ||
1997 | /* read data comes through single port, auto-incr addr */ | ||
1998 | /* NOTE: Use the debugless read so we don't flood kernel log | ||
1999 | * if IWL_DL_IO is set */ | ||
2000 | iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, | ||
2001 | i + IWL39_RTC_INST_LOWER_BOUND); | ||
2002 | val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT); | ||
2003 | if (val != le32_to_cpu(*image)) { | ||
2004 | #if 0 /* Enable this if you want to see details */ | ||
2005 | IWL_ERR(priv, "uCode INST section is invalid at " | ||
2006 | "offset 0x%x, is 0x%x, s/b 0x%x\n", | ||
2007 | i, val, *image); | ||
2008 | #endif | ||
2009 | rc = -EIO; | ||
2010 | errcnt++; | ||
2011 | if (errcnt >= 3) | ||
2012 | break; | ||
2013 | } | ||
2014 | } | ||
2015 | |||
2016 | return rc; | ||
2017 | } | ||
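/*
 * Editor's note (illustrative example, not part of the original driver):
 * the sparse check above samples one 32-bit word every 100 bytes --
 * "image" advances by 100 / sizeof(u32) = 25 words per iteration, in
 * step with "i" -- so a hypothetical 90 KiB instruction image is
 * spot-checked with roughly 922 SRAM reads instead of the ~23040 a
 * full word-by-word compare would need, and the loop gives up after
 * 3 mismatches.
 */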
2018 | |||
2019 | |||
2020 | /** | ||
2021 | * iwl3945_verify_ucode - determine which instruction image is in SRAM, | ||
2022 | * and verify its contents | ||
2023 | */ | ||
2024 | static int iwl3945_verify_ucode(struct iwl_priv *priv) | ||
2025 | { | ||
2026 | __le32 *image; | ||
2027 | u32 len; | ||
2028 | int rc = 0; | ||
2029 | |||
2030 | /* Try bootstrap */ | ||
2031 | image = (__le32 *)priv->ucode_boot.v_addr; | ||
2032 | len = priv->ucode_boot.len; | ||
2033 | rc = iwl3945_verify_inst_sparse(priv, image, len); | ||
2034 | if (rc == 0) { | ||
2035 | IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n"); | ||
2036 | return 0; | ||
2037 | } | ||
2038 | |||
2039 | /* Try initialize */ | ||
2040 | image = (__le32 *)priv->ucode_init.v_addr; | ||
2041 | len = priv->ucode_init.len; | ||
2042 | rc = iwl3945_verify_inst_sparse(priv, image, len); | ||
2043 | if (rc == 0) { | ||
2044 | IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n"); | ||
2045 | return 0; | ||
2046 | } | ||
2047 | |||
2048 | /* Try runtime/protocol */ | ||
2049 | image = (__le32 *)priv->ucode_code.v_addr; | ||
2050 | len = priv->ucode_code.len; | ||
2051 | rc = iwl3945_verify_inst_sparse(priv, image, len); | ||
2052 | if (rc == 0) { | ||
2053 | IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n"); | ||
2054 | return 0; | ||
2055 | } | ||
2056 | |||
2057 | IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); | ||
2058 | |||
2059 | /* Since nothing seems to match, show first several data entries in | ||
2060 | * instruction SRAM, so maybe visual inspection will give a clue. | ||
2061 | * Selection of bootstrap image (vs. other images) is arbitrary. */ | ||
2062 | image = (__le32 *)priv->ucode_boot.v_addr; | ||
2063 | len = priv->ucode_boot.len; | ||
2064 | rc = iwl3945_verify_inst_full(priv, image, len); | ||
2065 | |||
2066 | return rc; | ||
2067 | } | ||
2068 | |||
2069 | static void iwl3945_nic_start(struct iwl_priv *priv) | ||
2070 | { | ||
2071 | /* Remove all resets to allow NIC to operate */ | ||
2072 | iwl_write32(priv, CSR_RESET, 0); | ||
2073 | } | ||
2074 | |||
2075 | #define IWL3945_UCODE_GET(item) \ | ||
2076 | static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\ | ||
2077 | { \ | ||
2078 | return le32_to_cpu(ucode->v1.item); \ | ||
2079 | } | ||
2080 | |||
2081 | static u32 iwl3945_ucode_get_header_size(u32 api_ver) | ||
2082 | { | ||
2083 | return 24; | ||
2084 | } | ||
2085 | |||
2086 | static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode) | ||
2087 | { | ||
2088 | return (u8 *) ucode->v1.data; | ||
2089 | } | ||
2090 | |||
2091 | IWL3945_UCODE_GET(inst_size); | ||
2092 | IWL3945_UCODE_GET(data_size); | ||
2093 | IWL3945_UCODE_GET(init_size); | ||
2094 | IWL3945_UCODE_GET(init_data_size); | ||
2095 | IWL3945_UCODE_GET(boot_size); | ||
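/*
 * Editor's sketch (not part of the original driver): each of the
 * IWL3945_UCODE_GET() invocations above expands to a trivial accessor
 * for the v1 firmware header, e.g. for inst_size:
 */
#if 0	/* expansion shown for reference only */
static u32 iwl3945_ucode_get_inst_size(const struct iwl_ucode_header *ucode)
{
	return le32_to_cpu(ucode->v1.inst_size);
}
#endif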
2096 | |||
2097 | /** | ||
2098 | * iwl3945_read_ucode - Read uCode images from disk file. | ||
2099 | * | ||
2100 | * Copy into buffers for card to fetch via bus-mastering | ||
2101 | */ | ||
2102 | static int iwl3945_read_ucode(struct iwl_priv *priv) | ||
2103 | { | ||
2104 | const struct iwl_ucode_header *ucode; | ||
2105 | int ret = -EINVAL, index; | ||
2106 | const struct firmware *ucode_raw; | ||
2107 | /* firmware file name contains uCode/driver compatibility version */ | ||
2108 | const char *name_pre = priv->cfg->fw_name_pre; | ||
2109 | const unsigned int api_max = priv->cfg->ucode_api_max; | ||
2110 | const unsigned int api_min = priv->cfg->ucode_api_min; | ||
2111 | char buf[25]; | ||
2112 | u8 *src; | ||
2113 | size_t len; | ||
2114 | u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size; | ||
2115 | |||
2116 | /* Ask kernel firmware_class module to get the boot firmware off disk. | ||
2117 | * request_firmware() is synchronous, file is in memory on return. */ | ||
2118 | for (index = api_max; index >= api_min; index--) { | ||
2119 | sprintf(buf, "%s%u%s", name_pre, index, ".ucode"); | ||
2120 | ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev); | ||
2121 | if (ret < 0) { | ||
2122 | IWL_ERR(priv, "%s firmware file req failed: %d\n", | ||
2123 | buf, ret); | ||
2124 | if (ret == -ENOENT) | ||
2125 | continue; | ||
2126 | else | ||
2127 | goto error; | ||
2128 | } else { | ||
2129 | if (index < api_max) | ||
2130 | IWL_ERR(priv, "Loaded firmware %s, " | ||
2131 | "which is deprecated. " | ||
2132 | " Please use API v%u instead.\n", | ||
2133 | buf, api_max); | ||
2134 | IWL_DEBUG_INFO(priv, "Got firmware '%s' file " | ||
2135 | "(%zd bytes) from disk\n", | ||
2136 | buf, ucode_raw->size); | ||
2137 | break; | ||
2138 | } | ||
2139 | } | ||
2140 | |||
2141 | if (ret < 0) | ||
2142 | goto error; | ||
2143 | |||
2144 | /* Make sure that we got at least our header! */ | ||
2145 | if (ucode_raw->size < iwl3945_ucode_get_header_size(1)) { | ||
2146 | IWL_ERR(priv, "File size way too small!\n"); | ||
2147 | ret = -EINVAL; | ||
2148 | goto err_release; | ||
2149 | } | ||
2150 | |||
2151 | /* Data from ucode file: header followed by uCode images */ | ||
2152 | ucode = (struct iwl_ucode_header *)ucode_raw->data; | ||
2153 | |||
2154 | priv->ucode_ver = le32_to_cpu(ucode->ver); | ||
2155 | api_ver = IWL_UCODE_API(priv->ucode_ver); | ||
2156 | inst_size = iwl3945_ucode_get_inst_size(ucode); | ||
2157 | data_size = iwl3945_ucode_get_data_size(ucode); | ||
2158 | init_size = iwl3945_ucode_get_init_size(ucode); | ||
2159 | init_data_size = iwl3945_ucode_get_init_data_size(ucode); | ||
2160 | boot_size = iwl3945_ucode_get_boot_size(ucode); | ||
2161 | src = iwl3945_ucode_get_data(ucode); | ||
2162 | |||
2163 | /* api_ver should match the api version forming part of the | ||
2164 | * firmware filename ... but we don't check for that and only rely | ||
2165 | * on the API version read from firmware header from here on forward */ | ||
2166 | |||
2167 | if (api_ver < api_min || api_ver > api_max) { | ||
2168 | IWL_ERR(priv, "Driver unable to support your firmware API. " | ||
2169 | "Driver supports v%u, firmware is v%u.\n", | ||
2170 | api_max, api_ver); | ||
2171 | priv->ucode_ver = 0; | ||
2172 | ret = -EINVAL; | ||
2173 | goto err_release; | ||
2174 | } | ||
2175 | if (api_ver != api_max) | ||
2176 | IWL_ERR(priv, "Firmware has old API version. Expected %u, " | ||
2177 | "got %u. New firmware can be obtained " | ||
2178 | "from http://www.intellinuxwireless.org.\n", | ||
2179 | api_max, api_ver); | ||
2180 | |||
2181 | IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n", | ||
2182 | IWL_UCODE_MAJOR(priv->ucode_ver), | ||
2183 | IWL_UCODE_MINOR(priv->ucode_ver), | ||
2184 | IWL_UCODE_API(priv->ucode_ver), | ||
2185 | IWL_UCODE_SERIAL(priv->ucode_ver)); | ||
2186 | |||
2187 | snprintf(priv->hw->wiphy->fw_version, | ||
2188 | sizeof(priv->hw->wiphy->fw_version), | ||
2189 | "%u.%u.%u.%u", | ||
2190 | IWL_UCODE_MAJOR(priv->ucode_ver), | ||
2191 | IWL_UCODE_MINOR(priv->ucode_ver), | ||
2192 | IWL_UCODE_API(priv->ucode_ver), | ||
2193 | IWL_UCODE_SERIAL(priv->ucode_ver)); | ||
2194 | |||
2195 | IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n", | ||
2196 | priv->ucode_ver); | ||
2197 | IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n", | ||
2198 | inst_size); | ||
2199 | IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n", | ||
2200 | data_size); | ||
2201 | IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n", | ||
2202 | init_size); | ||
2203 | IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n", | ||
2204 | init_data_size); | ||
2205 | IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n", | ||
2206 | boot_size); | ||
2207 | |||
2208 | |||
2209 | /* Verify size of file vs. image size info in file's header */ | ||
2210 | if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) + | ||
2211 | inst_size + data_size + init_size + | ||
2212 | init_data_size + boot_size) { | ||
2213 | |||
2214 | IWL_DEBUG_INFO(priv, | ||
2215 | "uCode file size %zd does not match expected size\n", | ||
2216 | ucode_raw->size); | ||
2217 | ret = -EINVAL; | ||
2218 | goto err_release; | ||
2219 | } | ||
2220 | |||
2221 | /* Verify that uCode images will fit in card's SRAM */ | ||
2222 | if (inst_size > IWL39_MAX_INST_SIZE) { | ||
2223 | IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in\n", | ||
2224 | inst_size); | ||
2225 | ret = -EINVAL; | ||
2226 | goto err_release; | ||
2227 | } | ||
2228 | |||
2229 | if (data_size > IWL39_MAX_DATA_SIZE) { | ||
2230 | IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in\n", | ||
2231 | data_size); | ||
2232 | ret = -EINVAL; | ||
2233 | goto err_release; | ||
2234 | } | ||
2235 | if (init_size > IWL39_MAX_INST_SIZE) { | ||
2236 | IWL_DEBUG_INFO(priv, | ||
2237 | "uCode init instr len %d too large to fit in\n", | ||
2238 | init_size); | ||
2239 | ret = -EINVAL; | ||
2240 | goto err_release; | ||
2241 | } | ||
2242 | if (init_data_size > IWL39_MAX_DATA_SIZE) { | ||
2243 | IWL_DEBUG_INFO(priv, | ||
2244 | "uCode init data len %d too large to fit in\n", | ||
2245 | init_data_size); | ||
2246 | ret = -EINVAL; | ||
2247 | goto err_release; | ||
2248 | } | ||
2249 | if (boot_size > IWL39_MAX_BSM_SIZE) { | ||
2250 | IWL_DEBUG_INFO(priv, | ||
2251 | "uCode boot instr len %d too large to fit in\n", | ||
2252 | boot_size); | ||
2253 | ret = -EINVAL; | ||
2254 | goto err_release; | ||
2255 | } | ||
2256 | |||
2257 | /* Allocate ucode buffers for card's bus-master loading ... */ | ||
2258 | |||
2259 | /* Runtime instructions and 2 copies of data: | ||
2260 | * 1) unmodified from disk | ||
2261 | * 2) backup cache for save/restore during power-downs */ | ||
2262 | priv->ucode_code.len = inst_size; | ||
2263 | iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code); | ||
2264 | |||
2265 | priv->ucode_data.len = data_size; | ||
2266 | iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data); | ||
2267 | |||
2268 | priv->ucode_data_backup.len = data_size; | ||
2269 | iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); | ||
2270 | |||
2271 | if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || | ||
2272 | !priv->ucode_data_backup.v_addr) | ||
2273 | goto err_pci_alloc; | ||
2274 | |||
2275 | /* Initialization instructions and data */ | ||
2276 | if (init_size && init_data_size) { | ||
2277 | priv->ucode_init.len = init_size; | ||
2278 | iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init); | ||
2279 | |||
2280 | priv->ucode_init_data.len = init_data_size; | ||
2281 | iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data); | ||
2282 | |||
2283 | if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr) | ||
2284 | goto err_pci_alloc; | ||
2285 | } | ||
2286 | |||
2287 | /* Bootstrap (instructions only, no data) */ | ||
2288 | if (boot_size) { | ||
2289 | priv->ucode_boot.len = boot_size; | ||
2290 | iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot); | ||
2291 | |||
2292 | if (!priv->ucode_boot.v_addr) | ||
2293 | goto err_pci_alloc; | ||
2294 | } | ||
2295 | |||
2296 | /* Copy images into buffers for card's bus-master reads ... */ | ||
2297 | |||
2298 | /* Runtime instructions (first block of data in file) */ | ||
2299 | len = inst_size; | ||
2300 | IWL_DEBUG_INFO(priv, | ||
2301 | "Copying (but not loading) uCode instr len %zd\n", len); | ||
2302 | memcpy(priv->ucode_code.v_addr, src, len); | ||
2303 | src += len; | ||
2304 | |||
2305 | IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", | ||
2306 | priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); | ||
2307 | |||
2308 | /* Runtime data (2nd block) | ||
2309 | * NOTE: Copy into backup buffer will be done in iwl3945_up() */ | ||
2310 | len = data_size; | ||
2311 | IWL_DEBUG_INFO(priv, | ||
2312 | "Copying (but not loading) uCode data len %zd\n", len); | ||
2313 | memcpy(priv->ucode_data.v_addr, src, len); | ||
2314 | memcpy(priv->ucode_data_backup.v_addr, src, len); | ||
2315 | src += len; | ||
2316 | |||
2317 | /* Initialization instructions (3rd block) */ | ||
2318 | if (init_size) { | ||
2319 | len = init_size; | ||
2320 | IWL_DEBUG_INFO(priv, | ||
2321 | "Copying (but not loading) init instr len %zd\n", len); | ||
2322 | memcpy(priv->ucode_init.v_addr, src, len); | ||
2323 | src += len; | ||
2324 | } | ||
2325 | |||
2326 | /* Initialization data (4th block) */ | ||
2327 | if (init_data_size) { | ||
2328 | len = init_data_size; | ||
2329 | IWL_DEBUG_INFO(priv, | ||
2330 | "Copying (but not loading) init data len %zd\n", len); | ||
2331 | memcpy(priv->ucode_init_data.v_addr, src, len); | ||
2332 | src += len; | ||
2333 | } | ||
2334 | |||
2335 | /* Bootstrap instructions (5th block) */ | ||
2336 | len = boot_size; | ||
2337 | IWL_DEBUG_INFO(priv, | ||
2338 | "Copying (but not loading) boot instr len %zd\n", len); | ||
2339 | memcpy(priv->ucode_boot.v_addr, src, len); | ||
2340 | |||
2341 | /* We have our copies now, allow the OS to release its copy */ | ||
2342 | release_firmware(ucode_raw); | ||
2343 | return 0; | ||
2344 | |||
2345 | err_pci_alloc: | ||
2346 | IWL_ERR(priv, "failed to allocate pci memory\n"); | ||
2347 | ret = -ENOMEM; | ||
2348 | iwl3945_dealloc_ucode_pci(priv); | ||
2349 | |||
2350 | err_release: | ||
2351 | release_firmware(ucode_raw); | ||
2352 | |||
2353 | error: | ||
2354 | return ret; | ||
2355 | } | ||
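/*
 * Editor's note (worked example, not part of the original driver):
 * the .ucode file parsed above is a flat blob: the 24-byte v1 header,
 * then five images in order -- runtime instructions, runtime data,
 * init instructions, init data, bootstrap instructions.  With
 * hypothetical sizes inst = 80 KiB, data = 32 KiB, init = 20 KiB,
 * init_data = 8 KiB and boot = 1 KiB, the size check requires
 *
 *	ucode_raw->size == 24 + 81920 + 32768 + 20480 + 8192 + 1024
 *
 * and "src" walks the blob in exactly that order while copying into
 * the DMA-able buffers.
 */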
2356 | |||
2357 | |||
2358 | /** | ||
2359 | * iwl3945_set_ucode_ptrs - Set uCode address location | ||
2360 | * | ||
2361 | * Tell initialization uCode where to find runtime uCode. | ||
2362 | * | ||
2363 | * BSM registers initially contain pointers to initialization uCode. | ||
2364 | * We need to replace them to load runtime uCode inst and data, | ||
2365 | * and to save runtime data when powering down. | ||
2366 | */ | ||
2367 | static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv) | ||
2368 | { | ||
2369 | dma_addr_t pinst; | ||
2370 | dma_addr_t pdata; | ||
2371 | |||
2372 | /* bits 31:0 for 3945 */ | ||
2373 | pinst = priv->ucode_code.p_addr; | ||
2374 | pdata = priv->ucode_data_backup.p_addr; | ||
2375 | |||
2376 | /* Tell bootstrap uCode where to find image to load */ | ||
2377 | iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); | ||
2378 | iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); | ||
2379 | iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, | ||
2380 | priv->ucode_data.len); | ||
2381 | |||
2382 | /* Inst byte count must be last to set up, bit 31 signals uCode | ||
2383 | * that all new ptr/size info is in place */ | ||
2384 | iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, | ||
2385 | priv->ucode_code.len | BSM_DRAM_INST_LOAD); | ||
2386 | |||
2387 | IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n"); | ||
2388 | |||
2389 | return 0; | ||
2390 | } | ||
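/*
 * Editor's sketch (not part of the original driver): per the comment
 * above, BSM_DRAM_INST_LOAD is the bit-31 "pointers are valid, start
 * loading" flag OR'd into the instruction byte count, so the final
 * write is effectively:
 */
#if 0	/* example only, assuming BSM_DRAM_INST_LOAD == BIT(31) */
	iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
			      priv->ucode_code.len | (1U << 31));
#endif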
2391 | |||
2392 | /** | ||
2393 | * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received | ||
2394 | * | ||
2395 | * Called after REPLY_ALIVE notification received from "initialize" uCode. | ||
2396 | * | ||
2397 | * Tell "initialize" uCode to go ahead and load the runtime uCode. | ||
2398 | */ | ||
2399 | static void iwl3945_init_alive_start(struct iwl_priv *priv) | ||
2400 | { | ||
2401 | /* Check alive response for "valid" sign from uCode */ | ||
2402 | if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { | ||
2403 | /* We had an error bringing up the hardware, so take it | ||
2404 | * all the way back down so we can try again */ | ||
2405 | IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n"); | ||
2406 | goto restart; | ||
2407 | } | ||
2408 | |||
2409 | /* Bootstrap uCode has loaded initialize uCode ... verify inst image. | ||
2410 | * This is a paranoid check, because we would not have gotten the | ||
2411 | * "initialize" alive if code weren't properly loaded. */ | ||
2412 | if (iwl3945_verify_ucode(priv)) { | ||
2413 | /* Runtime instruction load was bad; | ||
2414 | * take it all the way back down so we can try again */ | ||
2415 | IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n"); | ||
2416 | goto restart; | ||
2417 | } | ||
2418 | |||
2419 | /* Send pointers to protocol/runtime uCode image ... init code will | ||
2420 | * load and launch runtime uCode, which will send us another "Alive" | ||
2421 | * notification. */ | ||
2422 | IWL_DEBUG_INFO(priv, "Initialization Alive received.\n"); | ||
2423 | if (iwl3945_set_ucode_ptrs(priv)) { | ||
2424 | /* Runtime instruction load won't happen; | ||
2425 | * take it all the way back down so we can try again */ | ||
2426 | IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n"); | ||
2427 | goto restart; | ||
2428 | } | ||
2429 | return; | ||
2430 | |||
2431 | restart: | ||
2432 | queue_work(priv->workqueue, &priv->restart); | ||
2433 | } | ||
2434 | |||
2435 | /** | ||
2436 | * iwl3945_alive_start - called after REPLY_ALIVE notification received | ||
2437 | * from protocol/runtime uCode (initialization uCode's | ||
2438 | * Alive gets handled by iwl3945_init_alive_start()). | ||
2439 | */ | ||
2440 | static void iwl3945_alive_start(struct iwl_priv *priv) | ||
2441 | { | ||
2442 | int thermal_spin = 0; | ||
2443 | u32 rfkill; | ||
2444 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
2445 | |||
2446 | IWL_DEBUG_INFO(priv, "Runtime Alive received.\n"); | ||
2447 | |||
2448 | if (priv->card_alive.is_valid != UCODE_VALID_OK) { | ||
2449 | /* We had an error bringing up the hardware, so take it | ||
2450 | * all the way back down so we can try again */ | ||
2451 | IWL_DEBUG_INFO(priv, "Alive failed.\n"); | ||
2452 | goto restart; | ||
2453 | } | ||
2454 | |||
2455 | /* Initialize uCode has loaded Runtime uCode ... verify inst image. | ||
2456 | * This is a paranoid check, because we would not have gotten the | ||
2457 | * "runtime" alive if code weren't properly loaded. */ | ||
2458 | if (iwl3945_verify_ucode(priv)) { | ||
2459 | /* Runtime instruction load was bad; | ||
2460 | * take it all the way back down so we can try again */ | ||
2461 | IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n"); | ||
2462 | goto restart; | ||
2463 | } | ||
2464 | |||
2465 | rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG); | ||
2466 | IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill); | ||
2467 | |||
2468 | if (rfkill & 0x1) { | ||
2469 | clear_bit(STATUS_RF_KILL_HW, &priv->status); | ||
2470 | /* if RFKILL is not on, then wait for thermal | ||
2471 | * sensor in adapter to kick in */ | ||
2472 | while (iwl3945_hw_get_temperature(priv) == 0) { | ||
2473 | thermal_spin++; | ||
2474 | udelay(10); | ||
2475 | } | ||
2476 | |||
2477 | if (thermal_spin) | ||
2478 | IWL_DEBUG_INFO(priv, "Thermal calibration took %dus\n", | ||
2479 | thermal_spin * 10); | ||
2480 | } else | ||
2481 | set_bit(STATUS_RF_KILL_HW, &priv->status); | ||
2482 | |||
2483 | /* After the ALIVE response, we can send commands to 3945 uCode */ | ||
2484 | set_bit(STATUS_ALIVE, &priv->status); | ||
2485 | |||
2486 | /* Enable watchdog to monitor the driver tx queues */ | ||
2487 | iwl_legacy_setup_watchdog(priv); | ||
2488 | |||
2489 | if (iwl_legacy_is_rfkill(priv)) | ||
2490 | return; | ||
2491 | |||
2492 | ieee80211_wake_queues(priv->hw); | ||
2493 | |||
2494 | priv->active_rate = IWL_RATES_MASK_3945; | ||
2495 | |||
2496 | iwl_legacy_power_update_mode(priv, true); | ||
2497 | |||
2498 | if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) { | ||
2499 | struct iwl3945_rxon_cmd *active_rxon = | ||
2500 | (struct iwl3945_rxon_cmd *)(&ctx->active); | ||
2501 | |||
2502 | ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; | ||
2503 | active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
2504 | } else { | ||
2505 | /* Initialize our rx_config data */ | ||
2506 | iwl_legacy_connection_init_rx_config(priv, ctx); | ||
2507 | } | ||
2508 | |||
2509 | /* Configure Bluetooth device coexistence support */ | ||
2510 | iwl_legacy_send_bt_config(priv); | ||
2511 | |||
2512 | set_bit(STATUS_READY, &priv->status); | ||
2513 | |||
2514 | /* Configure the adapter for unassociated operation */ | ||
2515 | iwl3945_commit_rxon(priv, ctx); | ||
2516 | |||
2517 | iwl3945_reg_txpower_periodic(priv); | ||
2518 | |||
2519 | IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); | ||
2520 | wake_up_interruptible(&priv->wait_command_queue); | ||
2521 | |||
2522 | return; | ||
2523 | |||
2524 | restart: | ||
2525 | queue_work(priv->workqueue, &priv->restart); | ||
2526 | } | ||
2527 | |||
2528 | static void iwl3945_cancel_deferred_work(struct iwl_priv *priv); | ||
2529 | |||
2530 | static void __iwl3945_down(struct iwl_priv *priv) | ||
2531 | { | ||
2532 | unsigned long flags; | ||
2533 | int exit_pending; | ||
2534 | |||
2535 | IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n"); | ||
2536 | |||
2537 | iwl_legacy_scan_cancel_timeout(priv, 200); | ||
2538 | |||
2539 | exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); | ||
2540 | |||
2541 | /* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set | ||
2542 | * to prevent rearm timer */ | ||
2543 | del_timer_sync(&priv->watchdog); | ||
2544 | |||
2545 | /* Station information will now be cleared in device */ | ||
2546 | iwl_legacy_clear_ucode_stations(priv, NULL); | ||
2547 | iwl_legacy_dealloc_bcast_stations(priv); | ||
2548 | iwl_legacy_clear_driver_stations(priv); | ||
2549 | |||
2550 | /* Unblock any waiting calls */ | ||
2551 | wake_up_interruptible_all(&priv->wait_command_queue); | ||
2552 | |||
2553 | /* Wipe out the EXIT_PENDING status bit if we are not actually | ||
2554 | * exiting the module */ | ||
2555 | if (!exit_pending) | ||
2556 | clear_bit(STATUS_EXIT_PENDING, &priv->status); | ||
2557 | |||
2558 | /* stop and reset the on-board processor */ | ||
2559 | iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); | ||
2560 | |||
2561 | /* tell the device to stop sending interrupts */ | ||
2562 | spin_lock_irqsave(&priv->lock, flags); | ||
2563 | iwl_legacy_disable_interrupts(priv); | ||
2564 | spin_unlock_irqrestore(&priv->lock, flags); | ||
2565 | iwl3945_synchronize_irq(priv); | ||
2566 | |||
2567 | if (priv->mac80211_registered) | ||
2568 | ieee80211_stop_queues(priv->hw); | ||
2569 | |||
2570 | /* If we have not previously called iwl3945_init() then | ||
2571 | * clear all bits but the RF Kill bits and return */ | ||
2572 | if (!iwl_legacy_is_init(priv)) { | ||
2573 | priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << | ||
2574 | STATUS_RF_KILL_HW | | ||
2575 | test_bit(STATUS_GEO_CONFIGURED, &priv->status) << | ||
2576 | STATUS_GEO_CONFIGURED | | ||
2577 | test_bit(STATUS_EXIT_PENDING, &priv->status) << | ||
2578 | STATUS_EXIT_PENDING; | ||
2579 | goto exit; | ||
2580 | } | ||
2581 | |||
2582 | /* ...otherwise clear out all the status bits but the RF Kill | ||
2583 | * bit and continue taking the NIC down. */ | ||
2584 | priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << | ||
2585 | STATUS_RF_KILL_HW | | ||
2586 | test_bit(STATUS_GEO_CONFIGURED, &priv->status) << | ||
2587 | STATUS_GEO_CONFIGURED | | ||
2588 | test_bit(STATUS_FW_ERROR, &priv->status) << | ||
2589 | STATUS_FW_ERROR | | ||
2590 | test_bit(STATUS_EXIT_PENDING, &priv->status) << | ||
2591 | STATUS_EXIT_PENDING; | ||
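/*
 * Editor's note (illustrative example, not part of the original
 * driver): this idiom -- also used in the !iwl_legacy_is_init() branch
 * above -- rebuilds a status word from individual test_bit() results:
 * each test_bit() yields 0 or 1, shifting it back by its own bit
 * number puts it in place, and OR-ing the terms gives a mask holding
 * only the bits to preserve, so the &= clears every other status bit
 * in one assignment.  E.g. if only STATUS_RF_KILL_HW was set, the
 * mask is just BIT(STATUS_RF_KILL_HW).
 */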
2592 | |||
2593 | iwl3945_hw_txq_ctx_stop(priv); | ||
2594 | iwl3945_hw_rxq_stop(priv); | ||
2595 | |||
2596 | /* Power-down device's busmaster DMA clocks */ | ||
2597 | iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); | ||
2598 | udelay(5); | ||
2599 | |||
2600 | /* Stop the device, and put it in low power state */ | ||
2601 | iwl_legacy_apm_stop(priv); | ||
2602 | |||
2603 | exit: | ||
2604 | memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); | ||
2605 | |||
2606 | if (priv->beacon_skb) | ||
2607 | dev_kfree_skb(priv->beacon_skb); | ||
2608 | priv->beacon_skb = NULL; | ||
2609 | |||
2610 | /* clear out any free frames */ | ||
2611 | iwl3945_clear_free_frames(priv); | ||
2612 | } | ||
2613 | |||
2614 | static void iwl3945_down(struct iwl_priv *priv) | ||
2615 | { | ||
2616 | mutex_lock(&priv->mutex); | ||
2617 | __iwl3945_down(priv); | ||
2618 | mutex_unlock(&priv->mutex); | ||
2619 | |||
2620 | iwl3945_cancel_deferred_work(priv); | ||
2621 | } | ||
2622 | |||
2623 | #define MAX_HW_RESTARTS 5 | ||
2624 | |||
2625 | static int iwl3945_alloc_bcast_station(struct iwl_priv *priv) | ||
2626 | { | ||
2627 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
2628 | unsigned long flags; | ||
2629 | u8 sta_id; | ||
2630 | |||
2631 | spin_lock_irqsave(&priv->sta_lock, flags); | ||
2632 | sta_id = iwl_legacy_prep_station(priv, ctx, | ||
2633 | iwlegacy_bcast_addr, false, NULL); | ||
2634 | if (sta_id == IWL_INVALID_STATION) { | ||
2635 | IWL_ERR(priv, "Unable to prepare broadcast station\n"); | ||
2636 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
2637 | |||
2638 | return -EINVAL; | ||
2639 | } | ||
2640 | |||
2641 | priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE; | ||
2642 | priv->stations[sta_id].used |= IWL_STA_BCAST; | ||
2643 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
2644 | |||
2645 | return 0; | ||
2646 | } | ||
2647 | |||
2648 | static int __iwl3945_up(struct iwl_priv *priv) | ||
2649 | { | ||
2650 | int rc, i; | ||
2651 | |||
2652 | rc = iwl3945_alloc_bcast_station(priv); | ||
2653 | if (rc) | ||
2654 | return rc; | ||
2655 | |||
2656 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { | ||
2657 | IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); | ||
2658 | return -EIO; | ||
2659 | } | ||
2660 | |||
2661 | if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) { | ||
2662 | IWL_ERR(priv, "ucode not available for device bring up\n"); | ||
2663 | return -EIO; | ||
2664 | } | ||
2665 | |||
2666 | /* If platform's RF_KILL switch is NOT set to KILL */ | ||
2667 | if (iwl_read32(priv, CSR_GP_CNTRL) & | ||
2668 | CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) | ||
2669 | clear_bit(STATUS_RF_KILL_HW, &priv->status); | ||
2670 | else { | ||
2671 | set_bit(STATUS_RF_KILL_HW, &priv->status); | ||
2672 | IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n"); | ||
2673 | return -ENODEV; | ||
2674 | } | ||
2675 | |||
2676 | iwl_write32(priv, CSR_INT, 0xFFFFFFFF); | ||
2677 | |||
2678 | rc = iwl3945_hw_nic_init(priv); | ||
2679 | if (rc) { | ||
2680 | IWL_ERR(priv, "Unable to int nic\n"); | ||
2681 | return rc; | ||
2682 | } | ||
2683 | |||
2684 | /* make sure rfkill handshake bits are cleared */ | ||
2685 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
2686 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, | ||
2687 | CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); | ||
2688 | |||
2689 | /* clear (again), then enable host interrupts */ | ||
2690 | iwl_write32(priv, CSR_INT, 0xFFFFFFFF); | ||
2691 | iwl_legacy_enable_interrupts(priv); | ||
2692 | |||
2693 | /* really make sure rfkill handshake bits are cleared */ | ||
2694 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
2695 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
2696 | |||
2697 | /* Copy original ucode data image from disk into backup cache. | ||
2698 | * This will be used to initialize the on-board processor's | ||
2699 | * data SRAM for a clean start when the runtime program first loads. */ | ||
2700 | memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, | ||
2701 | priv->ucode_data.len); | ||
2702 | |||
2703 | /* We return success when we resume from suspend and rf_kill is on. */ | ||
2704 | if (test_bit(STATUS_RF_KILL_HW, &priv->status)) | ||
2705 | return 0; | ||
2706 | |||
2707 | for (i = 0; i < MAX_HW_RESTARTS; i++) { | ||
2708 | |||
2709 | /* load bootstrap state machine, | ||
2710 | * load bootstrap program into processor's memory, | ||
2711 | * prepare to load the "initialize" uCode */ | ||
2712 | rc = priv->cfg->ops->lib->load_ucode(priv); | ||
2713 | |||
2714 | if (rc) { | ||
2715 | IWL_ERR(priv, | ||
2716 | "Unable to set up bootstrap uCode: %d\n", rc); | ||
2717 | continue; | ||
2718 | } | ||
2719 | |||
2720 | /* start card; "initialize" will load runtime ucode */ | ||
2721 | iwl3945_nic_start(priv); | ||
2722 | |||
2723 | IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n"); | ||
2724 | |||
2725 | return 0; | ||
2726 | } | ||
2727 | |||
2728 | set_bit(STATUS_EXIT_PENDING, &priv->status); | ||
2729 | __iwl3945_down(priv); | ||
2730 | clear_bit(STATUS_EXIT_PENDING, &priv->status); | ||
2731 | |||
2732 | /* tried to restart and configure the device for as long as our | ||
2733 | * patience could withstand */ | ||
2734 | IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i); | ||
2735 | return -EIO; | ||
2736 | } | ||
2737 | |||
2738 | |||
2739 | /***************************************************************************** | ||
2740 | * | ||
2741 | * Workqueue callbacks | ||
2742 | * | ||
2743 | *****************************************************************************/ | ||
2744 | |||
2745 | static void iwl3945_bg_init_alive_start(struct work_struct *data) | ||
2746 | { | ||
2747 | struct iwl_priv *priv = | ||
2748 | container_of(data, struct iwl_priv, init_alive_start.work); | ||
2749 | |||
2750 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
2751 | return; | ||
2752 | |||
2753 | mutex_lock(&priv->mutex); | ||
2754 | iwl3945_init_alive_start(priv); | ||
2755 | mutex_unlock(&priv->mutex); | ||
2756 | } | ||
2757 | |||
2758 | static void iwl3945_bg_alive_start(struct work_struct *data) | ||
2759 | { | ||
2760 | struct iwl_priv *priv = | ||
2761 | container_of(data, struct iwl_priv, alive_start.work); | ||
2762 | |||
2763 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
2764 | return; | ||
2765 | |||
2766 | mutex_lock(&priv->mutex); | ||
2767 | iwl3945_alive_start(priv); | ||
2768 | mutex_unlock(&priv->mutex); | ||
2769 | } | ||
2770 | |||
2771 | /* | ||
2772 | * 3945 cannot interrupt driver when hardware rf kill switch toggles; | ||
2773 | * driver must poll CSR_GP_CNTRL_REG register for change. This register | ||
2774 | * *is* readable even when device has been SW_RESET into low power mode | ||
2775 | * (e.g. during RF KILL). | ||
2776 | */ | ||
2777 | static void iwl3945_rfkill_poll(struct work_struct *data) | ||
2778 | { | ||
2779 | struct iwl_priv *priv = | ||
2780 | container_of(data, struct iwl_priv, _3945.rfkill_poll.work); | ||
2781 | bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status); | ||
2782 | bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL) | ||
2783 | & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); | ||
2784 | |||
2785 | if (new_rfkill != old_rfkill) { | ||
2786 | if (new_rfkill) | ||
2787 | set_bit(STATUS_RF_KILL_HW, &priv->status); | ||
2788 | else | ||
2789 | clear_bit(STATUS_RF_KILL_HW, &priv->status); | ||
2790 | |||
2791 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill); | ||
2792 | |||
2793 | IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n", | ||
2794 | new_rfkill ? "disable radio" : "enable radio"); | ||
2795 | } | ||
2796 | |||
2797 | /* Keep this running, even if radio now enabled. This will be | ||
2798 | * cancelled in mac_start() if system decides to start again */ | ||
2799 | queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll, | ||
2800 | round_jiffies_relative(2 * HZ)); | ||
2801 | |||
2802 | } | ||
2803 | |||
2804 | int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif) | ||
2805 | { | ||
2806 | struct iwl_host_cmd cmd = { | ||
2807 | .id = REPLY_SCAN_CMD, | ||
2808 | .len = sizeof(struct iwl3945_scan_cmd), | ||
2809 | .flags = CMD_SIZE_HUGE, | ||
2810 | }; | ||
2811 | struct iwl3945_scan_cmd *scan; | ||
2812 | u8 n_probes = 0; | ||
2813 | enum ieee80211_band band; | ||
2814 | bool is_active = false; | ||
2815 | int ret; | ||
2816 | |||
2817 | lockdep_assert_held(&priv->mutex); | ||
2818 | |||
2819 | if (!priv->scan_cmd) { | ||
2820 | priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) + | ||
2821 | IWL_MAX_SCAN_SIZE, GFP_KERNEL); | ||
2822 | if (!priv->scan_cmd) { | ||
2823 | IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n"); | ||
2824 | return -ENOMEM; | ||
2825 | } | ||
2826 | } | ||
2827 | scan = priv->scan_cmd; | ||
2828 | memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE); | ||
2829 | |||
2830 | scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; | ||
2831 | scan->quiet_time = IWL_ACTIVE_QUIET_TIME; | ||
2832 | |||
2833 | if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) { | ||
2834 | u16 interval = 0; | ||
2835 | u32 extra; | ||
2836 | u32 suspend_time = 100; | ||
2837 | u32 scan_suspend_time = 100; | ||
2838 | |||
2839 | IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); | ||
2840 | |||
2841 | if (priv->is_internal_short_scan) | ||
2842 | interval = 0; | ||
2843 | else | ||
2844 | interval = vif->bss_conf.beacon_int; | ||
2845 | |||
2846 | scan->suspend_time = 0; | ||
2847 | scan->max_out_time = cpu_to_le32(200 * 1024); | ||
2848 | if (!interval) | ||
2849 | interval = suspend_time; | ||
2850 | /* | ||
2851 | * suspend time format: | ||
2852 | * 0-19: beacon interval in usec (time before exec.) | ||
2853 | * 20-23: 0 | ||
2854 | * 24-31: number of beacons (suspend between channels) | ||
2855 | */ | ||
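/*
 * Worked example with the defaults above: suspend_time = 100 and
 * interval = 100 give extra = (100 / 100) << 24 = 0x01000000 and a zero
 * remainder term, so scan_suspend_time = 0x01000000, i.e. suspend for one
 * beacon interval between channels with no additional usec offset.
 */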
2856 | |||
2857 | extra = (suspend_time / interval) << 24; | ||
2858 | scan_suspend_time = 0xFF0FFFFF & | ||
2859 | (extra | ((suspend_time % interval) * 1024)); | ||
2860 | |||
2861 | scan->suspend_time = cpu_to_le32(scan_suspend_time); | ||
2862 | IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n", | ||
2863 | scan_suspend_time, interval); | ||
2864 | } | ||
2865 | |||
2866 | if (priv->is_internal_short_scan) { | ||
2867 | IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n"); | ||
2868 | } else if (priv->scan_request->n_ssids) { | ||
2869 | int i, p = 0; | ||
2870 | IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); | ||
2871 | for (i = 0; i < priv->scan_request->n_ssids; i++) { | ||
2872 | /* always does wildcard anyway */ | ||
2873 | if (!priv->scan_request->ssids[i].ssid_len) | ||
2874 | continue; | ||
2875 | scan->direct_scan[p].id = WLAN_EID_SSID; | ||
2876 | scan->direct_scan[p].len = | ||
2877 | priv->scan_request->ssids[i].ssid_len; | ||
2878 | memcpy(scan->direct_scan[p].ssid, | ||
2879 | priv->scan_request->ssids[i].ssid, | ||
2880 | priv->scan_request->ssids[i].ssid_len); | ||
2881 | n_probes++; | ||
2882 | p++; | ||
2883 | } | ||
2884 | is_active = true; | ||
2885 | } else | ||
2886 | IWL_DEBUG_SCAN(priv, "Kicking off passive scan.\n"); | ||
2887 | |||
2888 | /* We don't build a direct scan probe request; the uCode will do | ||
2889 | * that based on the direct_mask added to each channel entry */ | ||
2890 | scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; | ||
2891 | scan->tx_cmd.sta_id = priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id; | ||
2892 | scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; | ||
2893 | |||
2894 | /* flags + rate selection */ | ||
2895 | |||
2896 | switch (priv->scan_band) { | ||
2897 | case IEEE80211_BAND_2GHZ: | ||
2898 | scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; | ||
2899 | scan->tx_cmd.rate = IWL_RATE_1M_PLCP; | ||
2900 | band = IEEE80211_BAND_2GHZ; | ||
2901 | break; | ||
2902 | case IEEE80211_BAND_5GHZ: | ||
2903 | scan->tx_cmd.rate = IWL_RATE_6M_PLCP; | ||
2904 | band = IEEE80211_BAND_5GHZ; | ||
2905 | break; | ||
2906 | default: | ||
2907 | IWL_WARN(priv, "Invalid scan band\n"); | ||
2908 | return -EIO; | ||
2909 | } | ||
2910 | |||
2911 | /* | ||
2912 | * If active scanning is requested but a certain channel | ||
2913 | * is marked passive, we can do active scanning if we | ||
2914 | * detect transmissions. | ||
2915 | */ | ||
2916 | scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT : | ||
2917 | IWL_GOOD_CRC_TH_DISABLED; | ||
2918 | |||
2919 | if (!priv->is_internal_short_scan) { | ||
2920 | scan->tx_cmd.len = cpu_to_le16( | ||
2921 | iwl_legacy_fill_probe_req(priv, | ||
2922 | (struct ieee80211_mgmt *)scan->data, | ||
2923 | vif->addr, | ||
2924 | priv->scan_request->ie, | ||
2925 | priv->scan_request->ie_len, | ||
2926 | IWL_MAX_SCAN_SIZE - sizeof(*scan))); | ||
2927 | } else { | ||
2928 | /* use bcast addr, will not be transmitted but must be valid */ | ||
2929 | scan->tx_cmd.len = cpu_to_le16( | ||
2930 | iwl_legacy_fill_probe_req(priv, | ||
2931 | (struct ieee80211_mgmt *)scan->data, | ||
2932 | iwlegacy_bcast_addr, NULL, 0, | ||
2933 | IWL_MAX_SCAN_SIZE - sizeof(*scan))); | ||
2934 | } | ||
2935 | /* select Rx antennas */ | ||
2936 | scan->flags |= iwl3945_get_antenna_flags(priv); | ||
2937 | |||
2938 | if (priv->is_internal_short_scan) { | ||
2939 | scan->channel_count = | ||
2940 | iwl3945_get_single_channel_for_scan(priv, vif, band, | ||
2941 | (void *)&scan->data[le16_to_cpu( | ||
2942 | scan->tx_cmd.len)]); | ||
2943 | } else { | ||
2944 | scan->channel_count = | ||
2945 | iwl3945_get_channels_for_scan(priv, band, is_active, n_probes, | ||
2946 | (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)], vif); | ||
2947 | } | ||
2948 | |||
2949 | if (scan->channel_count == 0) { | ||
2950 | IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); | ||
2951 | return -EIO; | ||
2952 | } | ||
2953 | |||
2954 | cmd.len += le16_to_cpu(scan->tx_cmd.len) + | ||
2955 | scan->channel_count * sizeof(struct iwl3945_scan_channel); | ||
2956 | cmd.data = scan; | ||
2957 | scan->len = cpu_to_le16(cmd.len); | ||
2958 | |||
2959 | set_bit(STATUS_SCAN_HW, &priv->status); | ||
2960 | ret = iwl_legacy_send_cmd_sync(priv, &cmd); | ||
2961 | if (ret) | ||
2962 | clear_bit(STATUS_SCAN_HW, &priv->status); | ||
2963 | return ret; | ||
2964 | } | ||
2965 | |||
2966 | void iwl3945_post_scan(struct iwl_priv *priv) | ||
2967 | { | ||
2968 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
2969 | |||
2970 | /* | ||
2971 | * Since setting the RXON may have been deferred while | ||
2972 | * performing the scan, fire one off if needed | ||
2973 | */ | ||
2974 | if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) | ||
2975 | iwl3945_commit_rxon(priv, ctx); | ||
2976 | } | ||
2977 | |||
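/*
 * Restart worker: after a firmware error (STATUS_FW_ERROR) the interfaces
 * are detached, the device is brought down and mac80211 is asked to
 * restart the hardware; otherwise the device is simply cycled down and up.
 */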
2978 | static void iwl3945_bg_restart(struct work_struct *data) | ||
2979 | { | ||
2980 | struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); | ||
2981 | |||
2982 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
2983 | return; | ||
2984 | |||
2985 | if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) { | ||
2986 | struct iwl_rxon_context *ctx; | ||
2987 | mutex_lock(&priv->mutex); | ||
2988 | for_each_context(priv, ctx) | ||
2989 | ctx->vif = NULL; | ||
2990 | priv->is_open = 0; | ||
2991 | mutex_unlock(&priv->mutex); | ||
2992 | iwl3945_down(priv); | ||
2993 | ieee80211_restart_hw(priv->hw); | ||
2994 | } else { | ||
2995 | iwl3945_down(priv); | ||
2996 | |||
2997 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
2998 | return; | ||
2999 | |||
3000 | mutex_lock(&priv->mutex); | ||
3001 | __iwl3945_up(priv); | ||
3002 | mutex_unlock(&priv->mutex); | ||
3003 | } | ||
3004 | } | ||
3005 | |||
3006 | static void iwl3945_bg_rx_replenish(struct work_struct *data) | ||
3007 | { | ||
3008 | struct iwl_priv *priv = | ||
3009 | container_of(data, struct iwl_priv, rx_replenish); | ||
3010 | |||
3011 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
3012 | return; | ||
3013 | |||
3014 | mutex_lock(&priv->mutex); | ||
3015 | iwl3945_rx_replenish(priv); | ||
3016 | mutex_unlock(&priv->mutex); | ||
3017 | } | ||
3018 | |||
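/*
 * Called once mac80211 reports association: cancel any scan in progress,
 * commit RXON without the ASSOC bit so the timing command is accepted,
 * then commit RXON again with the ASSOC bit, AID and preamble/slot flags,
 * and finally start rate scaling (station) or beaconing (IBSS).
 */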
3019 | void iwl3945_post_associate(struct iwl_priv *priv) | ||
3020 | { | ||
3021 | int rc = 0; | ||
3022 | struct ieee80211_conf *conf = NULL; | ||
3023 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
3024 | |||
3025 | if (!ctx->vif || !priv->is_open) | ||
3026 | return; | ||
3027 | |||
3028 | IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", | ||
3029 | ctx->vif->bss_conf.aid, ctx->active.bssid_addr); | ||
3030 | |||
3031 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
3032 | return; | ||
3033 | |||
3034 | iwl_legacy_scan_cancel_timeout(priv, 200); | ||
3035 | |||
3036 | conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw); | ||
3037 | |||
3038 | ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
3039 | iwl3945_commit_rxon(priv, ctx); | ||
3040 | |||
3041 | rc = iwl_legacy_send_rxon_timing(priv, ctx); | ||
3042 | if (rc) | ||
3043 | IWL_WARN(priv, "REPLY_RXON_TIMING failed - " | ||
3044 | "Attempting to continue.\n"); | ||
3045 | |||
3046 | ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; | ||
3047 | |||
3048 | ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid); | ||
3049 | |||
3050 | IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", | ||
3051 | ctx->vif->bss_conf.aid, ctx->vif->bss_conf.beacon_int); | ||
3052 | |||
3053 | if (ctx->vif->bss_conf.use_short_preamble) | ||
3054 | ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; | ||
3055 | else | ||
3056 | ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; | ||
3057 | |||
3058 | if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { | ||
3059 | if (ctx->vif->bss_conf.use_short_slot) | ||
3060 | ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; | ||
3061 | else | ||
3062 | ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; | ||
3063 | } | ||
3064 | |||
3065 | iwl3945_commit_rxon(priv, ctx); | ||
3066 | |||
3067 | switch (ctx->vif->type) { | ||
3068 | case NL80211_IFTYPE_STATION: | ||
3069 | iwl3945_rate_scale_init(priv->hw, IWL_AP_ID); | ||
3070 | break; | ||
3071 | case NL80211_IFTYPE_ADHOC: | ||
3072 | iwl3945_send_beacon_cmd(priv); | ||
3073 | break; | ||
3074 | default: | ||
3075 | IWL_ERR(priv, "%s Should not be called in %d mode\n", | ||
3076 | __func__, ctx->vif->type); | ||
3077 | break; | ||
3078 | } | ||
3079 | } | ||
3080 | |||
3081 | /***************************************************************************** | ||
3082 | * | ||
3083 | * mac80211 entry point functions | ||
3084 | * | ||
3085 | *****************************************************************************/ | ||
3086 | |||
3087 | #define UCODE_READY_TIMEOUT (2 * HZ) | ||
3088 | |||
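/*
 * mac80211 start callback: read the microcode from disk on the first open,
 * bring the device up and wait up to UCODE_READY_TIMEOUT for STATUS_READY,
 * which is set once the uCode has reported START_ALIVE. A running uCode
 * reports rfkill changes itself, so the rfkill polling work is cancelled.
 */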
3089 | static int iwl3945_mac_start(struct ieee80211_hw *hw) | ||
3090 | { | ||
3091 | struct iwl_priv *priv = hw->priv; | ||
3092 | int ret; | ||
3093 | |||
3094 | IWL_DEBUG_MAC80211(priv, "enter\n"); | ||
3095 | |||
3096 | /* we should be verifying the device is ready to be opened */ | ||
3097 | mutex_lock(&priv->mutex); | ||
3098 | |||
3099 | /* fetch ucode file from disk, alloc and copy to bus-master buffers ... | ||
3100 | * ucode filename and max sizes are card-specific. */ | ||
3101 | |||
3102 | if (!priv->ucode_code.len) { | ||
3103 | ret = iwl3945_read_ucode(priv); | ||
3104 | if (ret) { | ||
3105 | IWL_ERR(priv, "Could not read microcode: %d\n", ret); | ||
3106 | mutex_unlock(&priv->mutex); | ||
3107 | goto out_release_irq; | ||
3108 | } | ||
3109 | } | ||
3110 | |||
3111 | ret = __iwl3945_up(priv); | ||
3112 | |||
3113 | mutex_unlock(&priv->mutex); | ||
3114 | |||
3115 | if (ret) | ||
3116 | goto out_release_irq; | ||
3117 | |||
3118 | IWL_DEBUG_INFO(priv, "Start UP work.\n"); | ||
3119 | |||
3120 | /* Wait for START_ALIVE from ucode. Otherwise callbacks from | ||
3121 | * mac80211 will not be run successfully. */ | ||
3122 | ret = wait_event_interruptible_timeout(priv->wait_command_queue, | ||
3123 | test_bit(STATUS_READY, &priv->status), | ||
3124 | UCODE_READY_TIMEOUT); | ||
3125 | if (!ret) { | ||
3126 | if (!test_bit(STATUS_READY, &priv->status)) { | ||
3127 | IWL_ERR(priv, | ||
3128 | "Wait for START_ALIVE timeout after %dms.\n", | ||
3129 | jiffies_to_msecs(UCODE_READY_TIMEOUT)); | ||
3130 | ret = -ETIMEDOUT; | ||
3131 | goto out_release_irq; | ||
3132 | } | ||
3133 | } | ||
3134 | |||
3135 | /* ucode is running and will send rfkill notifications, | ||
3136 | * no need to poll the killswitch state anymore */ | ||
3137 | cancel_delayed_work(&priv->_3945.rfkill_poll); | ||
3138 | |||
3139 | priv->is_open = 1; | ||
3140 | IWL_DEBUG_MAC80211(priv, "leave\n"); | ||
3141 | return 0; | ||
3142 | |||
3143 | out_release_irq: | ||
3144 | priv->is_open = 0; | ||
3145 | IWL_DEBUG_MAC80211(priv, "leave - failed\n"); | ||
3146 | return ret; | ||
3147 | } | ||
3148 | |||
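/*
 * mac80211 stop callback: mark the interface closed, bring the device
 * down, flush pending work and resume polling the hardware rfkill switch.
 */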
3149 | static void iwl3945_mac_stop(struct ieee80211_hw *hw) | ||
3150 | { | ||
3151 | struct iwl_priv *priv = hw->priv; | ||
3152 | |||
3153 | IWL_DEBUG_MAC80211(priv, "enter\n"); | ||
3154 | |||
3155 | if (!priv->is_open) { | ||
3156 | IWL_DEBUG_MAC80211(priv, "leave - skip\n"); | ||
3157 | return; | ||
3158 | } | ||
3159 | |||
3160 | priv->is_open = 0; | ||
3161 | |||
3162 | iwl3945_down(priv); | ||
3163 | |||
3164 | flush_workqueue(priv->workqueue); | ||
3165 | |||
3166 | /* start polling the killswitch state again */ | ||
3167 | queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll, | ||
3168 | round_jiffies_relative(2 * HZ)); | ||
3169 | |||
3170 | IWL_DEBUG_MAC80211(priv, "leave\n"); | ||
3171 | } | ||
3172 | |||
3173 | static void iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) | ||
3174 | { | ||
3175 | struct iwl_priv *priv = hw->priv; | ||
3176 | |||
3177 | IWL_DEBUG_MAC80211(priv, "enter\n"); | ||
3178 | |||
3179 | IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, | ||
3180 | ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); | ||
3181 | |||
3182 | if (iwl3945_tx_skb(priv, skb)) | ||
3183 | dev_kfree_skb_any(skb); | ||
3184 | |||
3185 | IWL_DEBUG_MAC80211(priv, "leave\n"); | ||
3186 | } | ||
3187 | |||
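/*
 * Bring up beaconing for the BSS context: if the device is not yet marked
 * associated, send an unassociated RXON so the timing command is accepted,
 * program the preamble/slot flags, restore the ASSOC RXON, and then send
 * the beacon command.
 */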
3188 | void iwl3945_config_ap(struct iwl_priv *priv) | ||
3189 | { | ||
3190 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
3191 | struct ieee80211_vif *vif = ctx->vif; | ||
3192 | int rc = 0; | ||
3193 | |||
3194 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
3195 | return; | ||
3196 | |||
3197 | /* The following should be done only at AP bring up */ | ||
3198 | if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) { | ||
3199 | |||
3200 | /* RXON - unassoc (to set timing command) */ | ||
3201 | ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
3202 | iwl3945_commit_rxon(priv, ctx); | ||
3203 | |||
3204 | /* RXON Timing */ | ||
3205 | rc = iwl_legacy_send_rxon_timing(priv, ctx); | ||
3206 | if (rc) | ||
3207 | IWL_WARN(priv, "REPLY_RXON_TIMING failed - " | ||
3208 | "Attempting to continue.\n"); | ||
3209 | |||
3210 | ctx->staging.assoc_id = 0; | ||
3211 | |||
3212 | if (vif->bss_conf.use_short_preamble) | ||
3213 | ctx->staging.flags |= | ||
3214 | RXON_FLG_SHORT_PREAMBLE_MSK; | ||
3215 | else | ||
3216 | ctx->staging.flags &= | ||
3217 | ~RXON_FLG_SHORT_PREAMBLE_MSK; | ||
3218 | |||
3219 | if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { | ||
3220 | if (vif->bss_conf.use_short_slot) | ||
3221 | ctx->staging.flags |= | ||
3222 | RXON_FLG_SHORT_SLOT_MSK; | ||
3223 | else | ||
3224 | ctx->staging.flags &= | ||
3225 | ~RXON_FLG_SHORT_SLOT_MSK; | ||
3226 | } | ||
3227 | /* restore RXON assoc */ | ||
3228 | ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; | ||
3229 | iwl3945_commit_rxon(priv, ctx); | ||
3230 | } | ||
3231 | iwl3945_send_beacon_cmd(priv); | ||
3232 | } | ||
3233 | |||
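/*
 * mac80211 set_key callback. Hardware crypto is refused entirely when the
 * swcrypto module parameter is set, and IBSS group keys are left to
 * software so that IBSS RSN keeps working. Before association, keys are
 * installed as "static" (global) keys; once associated they are programmed
 * per station ("dynamic" keys).
 */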
3234 | static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, | ||
3235 | struct ieee80211_vif *vif, | ||
3236 | struct ieee80211_sta *sta, | ||
3237 | struct ieee80211_key_conf *key) | ||
3238 | { | ||
3239 | struct iwl_priv *priv = hw->priv; | ||
3240 | int ret = 0; | ||
3241 | u8 sta_id = IWL_INVALID_STATION; | ||
3242 | u8 static_key; | ||
3243 | |||
3244 | IWL_DEBUG_MAC80211(priv, "enter\n"); | ||
3245 | |||
3246 | if (iwl3945_mod_params.sw_crypto) { | ||
3247 | IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); | ||
3248 | return -EOPNOTSUPP; | ||
3249 | } | ||
3250 | |||
3251 | /* | ||
3252 | * To support IBSS RSN, don't program group keys in IBSS, the | ||
3253 | * hardware will then not attempt to decrypt the frames. | ||
3254 | */ | ||
3255 | if (vif->type == NL80211_IFTYPE_ADHOC && | ||
3256 | !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) | ||
3257 | return -EOPNOTSUPP; | ||
3258 | |||
3259 | static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS); | ||
3260 | |||
3261 | if (!static_key) { | ||
3262 | sta_id = iwl_legacy_sta_id_or_broadcast( | ||
3263 | priv, &priv->contexts[IWL_RXON_CTX_BSS], sta); | ||
3264 | if (sta_id == IWL_INVALID_STATION) | ||
3265 | return -EINVAL; | ||
3266 | } | ||
3267 | |||
3268 | mutex_lock(&priv->mutex); | ||
3269 | iwl_legacy_scan_cancel_timeout(priv, 100); | ||
3270 | |||
3271 | switch (cmd) { | ||
3272 | case SET_KEY: | ||
3273 | if (static_key) | ||
3274 | ret = iwl3945_set_static_key(priv, key); | ||
3275 | else | ||
3276 | ret = iwl3945_set_dynamic_key(priv, key, sta_id); | ||
3277 | IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n"); | ||
3278 | break; | ||
3279 | case DISABLE_KEY: | ||
3280 | if (static_key) | ||
3281 | ret = iwl3945_remove_static_key(priv); | ||
3282 | else | ||
3283 | ret = iwl3945_clear_sta_key_info(priv, sta_id); | ||
3284 | IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n"); | ||
3285 | break; | ||
3286 | default: | ||
3287 | ret = -EINVAL; | ||
3288 | } | ||
3289 | |||
3290 | mutex_unlock(&priv->mutex); | ||
3291 | IWL_DEBUG_MAC80211(priv, "leave\n"); | ||
3292 | |||
3293 | return ret; | ||
3294 | } | ||
3295 | |||
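/*
 * mac80211 sta_add callback: add the peer to the device's station table
 * via iwl_legacy_add_station_common() (is_ap is true when our interface is
 * a station, i.e. the peer being added is our AP) and initialize 3945 rate
 * scaling for it.
 */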
3296 | static int iwl3945_mac_sta_add(struct ieee80211_hw *hw, | ||
3297 | struct ieee80211_vif *vif, | ||
3298 | struct ieee80211_sta *sta) | ||
3299 | { | ||
3300 | struct iwl_priv *priv = hw->priv; | ||
3301 | struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv; | ||
3302 | int ret; | ||
3303 | bool is_ap = vif->type == NL80211_IFTYPE_STATION; | ||
3304 | u8 sta_id; | ||
3305 | |||
3306 | IWL_DEBUG_INFO(priv, "received request to add station %pM\n", | ||
3307 | sta->addr); | ||
3308 | mutex_lock(&priv->mutex); | ||
3309 | IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n", | ||
3310 | sta->addr); | ||
3311 | sta_priv->common.sta_id = IWL_INVALID_STATION; | ||
3312 | |||
3313 | |||
3314 | ret = iwl_legacy_add_station_common(priv, | ||
3315 | &priv->contexts[IWL_RXON_CTX_BSS], | ||
3316 | sta->addr, is_ap, sta, &sta_id); | ||
3317 | if (ret) { | ||
3318 | IWL_ERR(priv, "Unable to add station %pM (%d)\n", | ||
3319 | sta->addr, ret); | ||
3320 | /* Should we return success if return code is EEXIST ? */ | ||
3321 | mutex_unlock(&priv->mutex); | ||
3322 | return ret; | ||
3323 | } | ||
3324 | |||
3325 | sta_priv->common.sta_id = sta_id; | ||
3326 | |||
3327 | /* Initialize rate scaling */ | ||
3328 | IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n", | ||
3329 | sta->addr); | ||
3330 | iwl3945_rs_rate_init(priv, sta, sta_id); | ||
3331 | mutex_unlock(&priv->mutex); | ||
3332 | |||
3333 | return 0; | ||
3334 | } | ||
3335 | |||
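/*
 * mac80211 configure_filter callback: translate the FIF_* filter flags
 * into RXON filter bits via CHK() and stage them; the RXON itself is
 * committed later, and multicast is always accepted since the device's
 * multicast filter is not programmed.
 */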
3336 | static void iwl3945_configure_filter(struct ieee80211_hw *hw, | ||
3337 | unsigned int changed_flags, | ||
3338 | unsigned int *total_flags, | ||
3339 | u64 multicast) | ||
3340 | { | ||
3341 | struct iwl_priv *priv = hw->priv; | ||
3342 | __le32 filter_or = 0, filter_nand = 0; | ||
3343 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
3344 | |||
3345 | #define CHK(test, flag) do { \ | ||
3346 | if (*total_flags & (test)) \ | ||
3347 | filter_or |= (flag); \ | ||
3348 | else \ | ||
3349 | filter_nand |= (flag); \ | ||
3350 | } while (0) | ||
3351 | |||
3352 | IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", | ||
3353 | changed_flags, *total_flags); | ||
3354 | |||
3355 | CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); | ||
3356 | CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK); | ||
3357 | CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); | ||
3358 | |||
3359 | #undef CHK | ||
3360 | |||
3361 | mutex_lock(&priv->mutex); | ||
3362 | |||
3363 | ctx->staging.filter_flags &= ~filter_nand; | ||
3364 | ctx->staging.filter_flags |= filter_or; | ||
3365 | |||
3366 | /* | ||
3367 | * Not committing directly because the hardware may be performing a | ||
3368 | * scan; even when the hardware is ready, committing here breaks for | ||
3369 | * reasons not yet understood, so the change is committed later anyway. | ||
3370 | */ | ||
3371 | |||
3372 | mutex_unlock(&priv->mutex); | ||
3373 | |||
3374 | /* | ||
3375 | * Receiving all multicast frames is always enabled by the | ||
3376 | * default flags setup in iwl_legacy_connection_init_rx_config() | ||
3377 | * since we currently do not support programming multicast | ||
3378 | * filters into the device. | ||
3379 | */ | ||
3380 | *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | | ||
3381 | FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; | ||
3382 | } | ||
3383 | |||
3384 | |||
3385 | /***************************************************************************** | ||
3386 | * | ||
3387 | * sysfs attributes | ||
3388 | * | ||
3389 | *****************************************************************************/ | ||
3390 | |||
3391 | #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG | ||
3392 | |||
3393 | /* | ||
3394 | * The following adds a new attribute to the sysfs representation | ||
3395 | * of this device (i.e. a new debug_level file in the PCI device's | ||
3396 | * sysfs directory) used for controlling the debug level. | ||
3397 | * | ||
3398 | * See the debug level definitions in the driver's debug header for details. | ||
3399 | * | ||
3400 | * The debug_level managed through sysfs below is a per-device debug | ||
3401 | * level that is used instead of the global debug level whenever the | ||
3402 | * per-device level is set. | ||
3403 | */ | ||
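/*
 * Example usage from userspace (the PCI address and mask value below are
 * purely illustrative):
 *
 *   echo 0x47 > /sys/bus/pci/devices/0000:03:00.0/debug_level
 *   cat /sys/bus/pci/devices/0000:03:00.0/debug_level
 */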
3404 | static ssize_t iwl3945_show_debug_level(struct device *d, | ||
3405 | struct device_attribute *attr, char *buf) | ||
3406 | { | ||
3407 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3408 | return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv)); | ||
3409 | } | ||
3410 | static ssize_t iwl3945_store_debug_level(struct device *d, | ||
3411 | struct device_attribute *attr, | ||
3412 | const char *buf, size_t count) | ||
3413 | { | ||
3414 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3415 | unsigned long val; | ||
3416 | int ret; | ||
3417 | |||
3418 | ret = strict_strtoul(buf, 0, &val); | ||
3419 | if (ret) | ||
3420 | IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf); | ||
3421 | else { | ||
3422 | priv->debug_level = val; | ||
3423 | if (iwl_legacy_alloc_traffic_mem(priv)) | ||
3424 | IWL_ERR(priv, | ||
3425 | "Not enough memory to generate traffic log\n"); | ||
3426 | } | ||
3427 | return strnlen(buf, count); | ||
3428 | } | ||
3429 | |||
3430 | static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, | ||
3431 | iwl3945_show_debug_level, iwl3945_store_debug_level); | ||
3432 | |||
3433 | #endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */ | ||
3434 | |||
3435 | static ssize_t iwl3945_show_temperature(struct device *d, | ||
3436 | struct device_attribute *attr, char *buf) | ||
3437 | { | ||
3438 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3439 | |||
3440 | if (!iwl_legacy_is_alive(priv)) | ||
3441 | return -EAGAIN; | ||
3442 | |||
3443 | return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv)); | ||
3444 | } | ||
3445 | |||
3446 | static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL); | ||
3447 | |||
3448 | static ssize_t iwl3945_show_tx_power(struct device *d, | ||
3449 | struct device_attribute *attr, char *buf) | ||
3450 | { | ||
3451 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3452 | return sprintf(buf, "%d\n", priv->tx_power_user_lmt); | ||
3453 | } | ||
3454 | |||
3455 | static ssize_t iwl3945_store_tx_power(struct device *d, | ||
3456 | struct device_attribute *attr, | ||
3457 | const char *buf, size_t count) | ||
3458 | { | ||
3459 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3460 | char *p = (char *)buf; | ||
3461 | u32 val; | ||
3462 | |||
3463 | val = simple_strtoul(p, &p, 10); | ||
3464 | if (p == buf) | ||
3465 | IWL_INFO(priv, "%s is not in decimal form.\n", buf); | ||
3466 | else | ||
3467 | iwl3945_hw_reg_set_txpower(priv, val); | ||
3468 | |||
3469 | return count; | ||
3470 | } | ||
3471 | |||
3472 | static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, iwl3945_show_tx_power, iwl3945_store_tx_power); | ||
3473 | |||
3474 | static ssize_t iwl3945_show_flags(struct device *d, | ||
3475 | struct device_attribute *attr, char *buf) | ||
3476 | { | ||
3477 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3478 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
3479 | |||
3480 | return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.flags)); | ||
3481 | } | ||
3482 | |||
3483 | static ssize_t iwl3945_store_flags(struct device *d, | ||
3484 | struct device_attribute *attr, | ||
3485 | const char *buf, size_t count) | ||
3486 | { | ||
3487 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3488 | u32 flags = simple_strtoul(buf, NULL, 0); | ||
3489 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
3490 | |||
3491 | mutex_lock(&priv->mutex); | ||
3492 | if (le32_to_cpu(ctx->staging.flags) != flags) { | ||
3493 | /* Cancel any currently running scans... */ | ||
3494 | if (iwl_legacy_scan_cancel_timeout(priv, 100)) | ||
3495 | IWL_WARN(priv, "Could not cancel scan.\n"); | ||
3496 | else { | ||
3497 | IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n", | ||
3498 | flags); | ||
3499 | ctx->staging.flags = cpu_to_le32(flags); | ||
3500 | iwl3945_commit_rxon(priv, ctx); | ||
3501 | } | ||
3502 | } | ||
3503 | mutex_unlock(&priv->mutex); | ||
3504 | |||
3505 | return count; | ||
3506 | } | ||
3507 | |||
3508 | static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, iwl3945_show_flags, iwl3945_store_flags); | ||
3509 | |||
3510 | static ssize_t iwl3945_show_filter_flags(struct device *d, | ||
3511 | struct device_attribute *attr, char *buf) | ||
3512 | { | ||
3513 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3514 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
3515 | |||
3516 | return sprintf(buf, "0x%04X\n", | ||
3517 | le32_to_cpu(ctx->active.filter_flags)); | ||
3518 | } | ||
3519 | |||
3520 | static ssize_t iwl3945_store_filter_flags(struct device *d, | ||
3521 | struct device_attribute *attr, | ||
3522 | const char *buf, size_t count) | ||
3523 | { | ||
3524 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3525 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
3526 | u32 filter_flags = simple_strtoul(buf, NULL, 0); | ||
3527 | |||
3528 | mutex_lock(&priv->mutex); | ||
3529 | if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) { | ||
3530 | /* Cancel any currently running scans... */ | ||
3531 | if (iwl_legacy_scan_cancel_timeout(priv, 100)) | ||
3532 | IWL_WARN(priv, "Could not cancel scan.\n"); | ||
3533 | else { | ||
3534 | IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = " | ||
3535 | "0x%04X\n", filter_flags); | ||
3536 | ctx->staging.filter_flags = | ||
3537 | cpu_to_le32(filter_flags); | ||
3538 | iwl3945_commit_rxon(priv, ctx); | ||
3539 | } | ||
3540 | } | ||
3541 | mutex_unlock(&priv->mutex); | ||
3542 | |||
3543 | return count; | ||
3544 | } | ||
3545 | |||
3546 | static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags, | ||
3547 | iwl3945_store_filter_flags); | ||
3548 | |||
3549 | static ssize_t iwl3945_show_measurement(struct device *d, | ||
3550 | struct device_attribute *attr, char *buf) | ||
3551 | { | ||
3552 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3553 | struct iwl_spectrum_notification measure_report; | ||
3554 | u32 size = sizeof(measure_report), len = 0, ofs = 0; | ||
3555 | u8 *data = (u8 *)&measure_report; | ||
3556 | unsigned long flags; | ||
3557 | |||
3558 | spin_lock_irqsave(&priv->lock, flags); | ||
3559 | if (!(priv->measurement_status & MEASUREMENT_READY)) { | ||
3560 | spin_unlock_irqrestore(&priv->lock, flags); | ||
3561 | return 0; | ||
3562 | } | ||
3563 | memcpy(&measure_report, &priv->measure_report, size); | ||
3564 | priv->measurement_status = 0; | ||
3565 | spin_unlock_irqrestore(&priv->lock, flags); | ||
3566 | |||
3567 | while (size && (PAGE_SIZE - len)) { | ||
3568 | hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, | ||
3569 | PAGE_SIZE - len, 1); | ||
3570 | len = strlen(buf); | ||
3571 | if (PAGE_SIZE - len) | ||
3572 | buf[len++] = '\n'; | ||
3573 | |||
3574 | ofs += 16; | ||
3575 | size -= min(size, 16U); | ||
3576 | } | ||
3577 | |||
3578 | return len; | ||
3579 | } | ||
3580 | |||
3581 | static ssize_t iwl3945_store_measurement(struct device *d, | ||
3582 | struct device_attribute *attr, | ||
3583 | const char *buf, size_t count) | ||
3584 | { | ||
3585 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3586 | struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; | ||
3587 | struct ieee80211_measurement_params params = { | ||
3588 | .channel = le16_to_cpu(ctx->active.channel), | ||
3589 | .start_time = cpu_to_le64(priv->_3945.last_tsf), | ||
3590 | .duration = cpu_to_le16(1), | ||
3591 | }; | ||
3592 | u8 type = IWL_MEASURE_BASIC; | ||
3593 | u8 buffer[32]; | ||
3594 | u8 channel; | ||
3595 | |||
3596 | if (count) { | ||
3597 | char *p = buffer; | ||
3598 | strlcpy(buffer, buf, min(sizeof(buffer), count + 1)); | ||
3599 | channel = simple_strtoul(p, NULL, 0); | ||
3600 | if (channel) | ||
3601 | params.channel = channel; | ||
3602 | |||
3603 | p = buffer; | ||
3604 | while (*p && *p != ' ') | ||
3605 | p++; | ||
3606 | if (*p) | ||
3607 | type = simple_strtoul(p + 1, NULL, 0); | ||
3608 | } | ||
3609 | |||
3610 | IWL_DEBUG_INFO(priv, "Invoking measurement of type %d on " | ||
3611 | "channel %d (for '%s')\n", type, params.channel, buf); | ||
3612 | iwl3945_get_measurement(priv, ¶ms, type); | ||
3613 | |||
3614 | return count; | ||
3615 | } | ||
3616 | |||
3617 | static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, | ||
3618 | iwl3945_show_measurement, iwl3945_store_measurement); | ||
3619 | |||
3620 | static ssize_t iwl3945_store_retry_rate(struct device *d, | ||
3621 | struct device_attribute *attr, | ||
3622 | const char *buf, size_t count) | ||
3623 | { | ||
3624 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3625 | |||
3626 | priv->retry_rate = simple_strtoul(buf, NULL, 0); | ||
3627 | if (priv->retry_rate <= 0) | ||
3628 | priv->retry_rate = 1; | ||
3629 | |||
3630 | return count; | ||
3631 | } | ||
3632 | |||
3633 | static ssize_t iwl3945_show_retry_rate(struct device *d, | ||
3634 | struct device_attribute *attr, char *buf) | ||
3635 | { | ||
3636 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3637 | return sprintf(buf, "%d", priv->retry_rate); | ||
3638 | } | ||
3639 | |||
3640 | static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate, | ||
3641 | iwl3945_store_retry_rate); | ||
3642 | |||
3643 | |||
3644 | static ssize_t iwl3945_show_channels(struct device *d, | ||
3645 | struct device_attribute *attr, char *buf) | ||
3646 | { | ||
3647 | /* none of this really belongs in sysfs anyway; left as a no-op */ | ||
3648 | return 0; | ||
3649 | } | ||
3650 | |||
3651 | static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL); | ||
3652 | |||
3653 | static ssize_t iwl3945_show_antenna(struct device *d, | ||
3654 | struct device_attribute *attr, char *buf) | ||
3655 | { | ||
3656 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3657 | |||
3658 | if (!iwl_legacy_is_alive(priv)) | ||
3659 | return -EAGAIN; | ||
3660 | |||
3661 | return sprintf(buf, "%d\n", iwl3945_mod_params.antenna); | ||
3662 | } | ||
3663 | |||
3664 | static ssize_t iwl3945_store_antenna(struct device *d, | ||
3665 | struct device_attribute *attr, | ||
3666 | const char *buf, size_t count) | ||
3667 | { | ||
3668 | struct iwl_priv *priv __maybe_unused = dev_get_drvdata(d); | ||
3669 | int ant; | ||
3670 | |||
3671 | if (count == 0) | ||
3672 | return 0; | ||
3673 | |||
3674 | if (sscanf(buf, "%1i", &ant) != 1) { | ||
3675 | IWL_DEBUG_INFO(priv, "not in hex or decimal form.\n"); | ||
3676 | return count; | ||
3677 | } | ||
3678 | |||
3679 | if ((ant >= 0) && (ant <= 2)) { | ||
3680 | IWL_DEBUG_INFO(priv, "Setting antenna select to %d.\n", ant); | ||
3681 | iwl3945_mod_params.antenna = (enum iwl3945_antenna)ant; | ||
3682 | } else | ||
3683 | IWL_DEBUG_INFO(priv, "Bad antenna select value %d.\n", ant); | ||
3684 | |||
3685 | |||
3686 | return count; | ||
3687 | } | ||
3688 | |||
3689 | static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, iwl3945_show_antenna, iwl3945_store_antenna); | ||
3690 | |||
3691 | static ssize_t iwl3945_show_status(struct device *d, | ||
3692 | struct device_attribute *attr, char *buf) | ||
3693 | { | ||
3694 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3695 | if (!iwl_legacy_is_alive(priv)) | ||
3696 | return -EAGAIN; | ||
3697 | return sprintf(buf, "0x%08x\n", (int)priv->status); | ||
3698 | } | ||
3699 | |||
3700 | static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL); | ||
3701 | |||
3702 | static ssize_t iwl3945_dump_error_log(struct device *d, | ||
3703 | struct device_attribute *attr, | ||
3704 | const char *buf, size_t count) | ||
3705 | { | ||
3706 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
3707 | char *p = (char *)buf; | ||
3708 | |||
3709 | if (p[0] == '1') | ||
3710 | iwl3945_dump_nic_error_log(priv); | ||
3711 | |||
3712 | return strnlen(buf, count); | ||
3713 | } | ||
3714 | |||
3715 | static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log); | ||
3716 | |||
3717 | /***************************************************************************** | ||
3718 | * | ||
3719 | * driver setup and tear down | ||
3720 | * | ||
3721 | *****************************************************************************/ | ||
3722 | |||
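/*
 * Create the driver's single-threaded workqueue and wire up the deferred
 * work: the restart and rx_replenish work items, the (init_)alive_start
 * and rfkill_poll delayed work, the scan work, the watchdog timer and the
 * interrupt tasklet.
 */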
3723 | static void iwl3945_setup_deferred_work(struct iwl_priv *priv) | ||
3724 | { | ||
3725 | priv->workqueue = create_singlethread_workqueue(DRV_NAME); | ||
3726 | |||
3727 | init_waitqueue_head(&priv->wait_command_queue); | ||
3728 | |||
3729 | INIT_WORK(&priv->restart, iwl3945_bg_restart); | ||
3730 | INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish); | ||
3731 | INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start); | ||
3732 | INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); | ||
3733 | INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll); | ||
3734 | |||
3735 | iwl_legacy_setup_scan_deferred_work(priv); | ||
3736 | |||
3737 | iwl3945_hw_setup_deferred_work(priv); | ||
3738 | |||
3739 | init_timer(&priv->watchdog); | ||
3740 | priv->watchdog.data = (unsigned long)priv; | ||
3741 | priv->watchdog.function = iwl_legacy_bg_watchdog; | ||
3742 | |||
3743 | tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) | ||
3744 | iwl3945_irq_tasklet, (unsigned long)priv); | ||
3745 | } | ||
3746 | |||
3747 | static void iwl3945_cancel_deferred_work(struct iwl_priv *priv) | ||
3748 | { | ||
3749 | iwl3945_hw_cancel_deferred_work(priv); | ||
3750 | |||
3751 | cancel_delayed_work_sync(&priv->init_alive_start); | ||
3752 | cancel_delayed_work(&priv->alive_start); | ||
3753 | |||
3754 | iwl_legacy_cancel_scan_deferred_work(priv); | ||
3755 | } | ||
3756 | |||
3757 | static struct attribute *iwl3945_sysfs_entries[] = { | ||
3758 | &dev_attr_antenna.attr, | ||
3759 | &dev_attr_channels.attr, | ||
3760 | &dev_attr_dump_errors.attr, | ||
3761 | &dev_attr_flags.attr, | ||
3762 | &dev_attr_filter_flags.attr, | ||
3763 | &dev_attr_measurement.attr, | ||
3764 | &dev_attr_retry_rate.attr, | ||
3765 | &dev_attr_status.attr, | ||
3766 | &dev_attr_temperature.attr, | ||
3767 | &dev_attr_tx_power.attr, | ||
3768 | #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG | ||
3769 | &dev_attr_debug_level.attr, | ||
3770 | #endif | ||
3771 | NULL | ||
3772 | }; | ||
3773 | |||
3774 | static struct attribute_group iwl3945_attribute_group = { | ||
3775 | .name = NULL, /* put in device directory */ | ||
3776 | .attrs = iwl3945_sysfs_entries, | ||
3777 | }; | ||
3778 | |||
3779 | struct ieee80211_ops iwl3945_hw_ops = { | ||
3780 | .tx = iwl3945_mac_tx, | ||
3781 | .start = iwl3945_mac_start, | ||
3782 | .stop = iwl3945_mac_stop, | ||
3783 | .add_interface = iwl_legacy_mac_add_interface, | ||
3784 | .remove_interface = iwl_legacy_mac_remove_interface, | ||
3785 | .change_interface = iwl_legacy_mac_change_interface, | ||
3786 | .config = iwl_legacy_mac_config, | ||
3787 | .configure_filter = iwl3945_configure_filter, | ||
3788 | .set_key = iwl3945_mac_set_key, | ||
3789 | .conf_tx = iwl_legacy_mac_conf_tx, | ||
3790 | .reset_tsf = iwl_legacy_mac_reset_tsf, | ||
3791 | .bss_info_changed = iwl_legacy_mac_bss_info_changed, | ||
3792 | .hw_scan = iwl_legacy_mac_hw_scan, | ||
3793 | .sta_add = iwl3945_mac_sta_add, | ||
3794 | .sta_remove = iwl_legacy_mac_sta_remove, | ||
3795 | .tx_last_beacon = iwl_legacy_mac_tx_last_beacon, | ||
3796 | }; | ||
3797 | |||
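/*
 * One-time driver state initialization: locks, lists and mutexes, default
 * operating parameters and TX power limits, EEPROM version check, channel
 * map, per-channel TX power from EEPROM, geography (bands) and rate table.
 */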
3798 | static int iwl3945_init_drv(struct iwl_priv *priv) | ||
3799 | { | ||
3800 | int ret; | ||
3801 | struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom; | ||
3802 | |||
3803 | priv->retry_rate = 1; | ||
3804 | priv->beacon_skb = NULL; | ||
3805 | |||
3806 | spin_lock_init(&priv->sta_lock); | ||
3807 | spin_lock_init(&priv->hcmd_lock); | ||
3808 | |||
3809 | INIT_LIST_HEAD(&priv->free_frames); | ||
3810 | |||
3811 | mutex_init(&priv->mutex); | ||
3812 | mutex_init(&priv->sync_cmd_mutex); | ||
3813 | |||
3814 | priv->ieee_channels = NULL; | ||
3815 | priv->ieee_rates = NULL; | ||
3816 | priv->band = IEEE80211_BAND_2GHZ; | ||
3817 | |||
3818 | priv->iw_mode = NL80211_IFTYPE_STATION; | ||
3819 | priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; | ||
3820 | |||
3821 | /* initialize force reset */ | ||
3822 | priv->force_reset[IWL_RF_RESET].reset_duration = | ||
3823 | IWL_DELAY_NEXT_FORCE_RF_RESET; | ||
3824 | priv->force_reset[IWL_FW_RESET].reset_duration = | ||
3825 | IWL_DELAY_NEXT_FORCE_FW_RELOAD; | ||
3826 | |||
3827 | |||
3828 | priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER; | ||
3829 | priv->tx_power_next = IWL_DEFAULT_TX_POWER; | ||
3830 | |||
3831 | if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { | ||
3832 | IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n", | ||
3833 | eeprom->version); | ||
3834 | ret = -EINVAL; | ||
3835 | goto err; | ||
3836 | } | ||
3837 | ret = iwl_legacy_init_channel_map(priv); | ||
3838 | if (ret) { | ||
3839 | IWL_ERR(priv, "initializing regulatory failed: %d\n", ret); | ||
3840 | goto err; | ||
3841 | } | ||
3842 | |||
3843 | /* Set up txpower settings in driver for all channels */ | ||
3844 | if (iwl3945_txpower_set_from_eeprom(priv)) { | ||
3845 | ret = -EIO; | ||
3846 | goto err_free_channel_map; | ||
3847 | } | ||
3848 | |||
3849 | ret = iwl_legacy_init_geos(priv); | ||
3850 | if (ret) { | ||
3851 | IWL_ERR(priv, "initializing geos failed: %d\n", ret); | ||
3852 | goto err_free_channel_map; | ||
3853 | } | ||
3854 | iwl3945_init_hw_rates(priv, priv->ieee_rates); | ||
3855 | |||
3856 | return 0; | ||
3857 | |||
3858 | err_free_channel_map: | ||
3859 | iwl_legacy_free_channel_map(priv); | ||
3860 | err: | ||
3861 | return ret; | ||
3862 | } | ||
3863 | |||
3864 | #define IWL3945_MAX_PROBE_REQUEST 200 | ||
3865 | |||
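/*
 * Describe the hardware to mac80211 (rate-control algorithm, per-station
 * and per-vif private data sizes, hw flags, interface modes, scan limits
 * derived from IWL3945_MAX_PROBE_REQUEST, queues and supported bands) and
 * register it with ieee80211_register_hw().
 */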
3866 | static int iwl3945_setup_mac(struct iwl_priv *priv) | ||
3867 | { | ||
3868 | int ret; | ||
3869 | struct ieee80211_hw *hw = priv->hw; | ||
3870 | |||
3871 | hw->rate_control_algorithm = "iwl-3945-rs"; | ||
3872 | hw->sta_data_size = sizeof(struct iwl3945_sta_priv); | ||
3873 | hw->vif_data_size = sizeof(struct iwl_vif_priv); | ||
3874 | |||
3875 | /* Tell mac80211 our characteristics */ | ||
3876 | hw->flags = IEEE80211_HW_SIGNAL_DBM | | ||
3877 | IEEE80211_HW_SPECTRUM_MGMT; | ||
3878 | |||
3879 | hw->wiphy->interface_modes = | ||
3880 | priv->contexts[IWL_RXON_CTX_BSS].interface_modes; | ||
3881 | |||
3882 | hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY | | ||
3883 | WIPHY_FLAG_DISABLE_BEACON_HINTS | | ||
3884 | WIPHY_FLAG_IBSS_RSN; | ||
3885 | |||
3886 | hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; | ||
3887 | /* we create the 802.11 header and a zero-length SSID element */ | ||
3888 | hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2; | ||
3889 | |||
3890 | /* Default value; 4 EDCA QOS priorities */ | ||
3891 | hw->queues = 4; | ||
3892 | |||
3893 | if (priv->bands[IEEE80211_BAND_2GHZ].n_channels) | ||
3894 | priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = | ||
3895 | &priv->bands[IEEE80211_BAND_2GHZ]; | ||
3896 | |||
3897 | if (priv->bands[IEEE80211_BAND_5GHZ].n_channels) | ||
3898 | priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = | ||
3899 | &priv->bands[IEEE80211_BAND_5GHZ]; | ||
3900 | |||
3901 | iwl_legacy_leds_init(priv); | ||
3902 | |||
3903 | ret = ieee80211_register_hw(priv->hw); | ||
3904 | if (ret) { | ||
3905 | IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); | ||
3906 | return ret; | ||
3907 | } | ||
3908 | priv->mac80211_registered = 1; | ||
3909 | |||
3910 | return 0; | ||
3911 | } | ||
3912 | |||
3913 | static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
3914 | { | ||
3915 | int err = 0, i; | ||
3916 | struct iwl_priv *priv; | ||
3917 | struct ieee80211_hw *hw; | ||
3918 | struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); | ||
3919 | struct iwl3945_eeprom *eeprom; | ||
3920 | unsigned long flags; | ||
3921 | |||
3922 | /*********************** | ||
3923 | * 1. Allocating HW data | ||
3924 | * ********************/ | ||
3925 | |||
3926 | /* mac80211 allocates memory for this device instance, including | ||
3927 | * space for this driver's private structure */ | ||
3928 | hw = iwl_legacy_alloc_all(cfg); | ||
3929 | if (hw == NULL) { | ||
3930 | pr_err("Can not allocate network device\n"); | ||
3931 | err = -ENOMEM; | ||
3932 | goto out; | ||
3933 | } | ||
3934 | priv = hw->priv; | ||
3935 | SET_IEEE80211_DEV(hw, &pdev->dev); | ||
3936 | |||
3937 | priv->cmd_queue = IWL39_CMD_QUEUE_NUM; | ||
3938 | |||
3939 | /* 3945 has only one valid context */ | ||
3940 | priv->valid_contexts = BIT(IWL_RXON_CTX_BSS); | ||
3941 | |||
3942 | for (i = 0; i < NUM_IWL_RXON_CTX; i++) | ||
3943 | priv->contexts[i].ctxid = i; | ||
3944 | |||
3945 | priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON; | ||
3946 | priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING; | ||
3947 | priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC; | ||
3948 | priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM; | ||
3949 | priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID; | ||
3950 | priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY; | ||
3951 | priv->contexts[IWL_RXON_CTX_BSS].interface_modes = | ||
3952 | BIT(NL80211_IFTYPE_STATION) | | ||
3953 | BIT(NL80211_IFTYPE_ADHOC); | ||
3954 | priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS; | ||
3955 | priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS; | ||
3956 | priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS; | ||
3957 | |||
3958 | /* | ||
3959 | * Disabling hardware scan means that mac80211 will perform scans | ||
3960 | * "the hard way", rather than using device's scan. | ||
3961 | */ | ||
3962 | if (iwl3945_mod_params.disable_hw_scan) { | ||
3963 | dev_printk(KERN_DEBUG, &(pdev->dev), | ||
3964 | "sw scan support is deprecated\n"); | ||
3965 | iwl3945_hw_ops.hw_scan = NULL; | ||
3966 | } | ||
3967 | |||
3968 | IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n"); | ||
3969 | priv->cfg = cfg; | ||
3970 | priv->pci_dev = pdev; | ||
3971 | priv->inta_mask = CSR_INI_SET_MASK; | ||
3972 | |||
3973 | if (iwl_legacy_alloc_traffic_mem(priv)) | ||
3974 | IWL_ERR(priv, "Not enough memory to generate traffic log\n"); | ||
3975 | |||
3976 | /*************************** | ||
3977 | * 2. Initializing PCI bus | ||
3978 | * *************************/ | ||
3979 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | | ||
3980 | PCIE_LINK_STATE_CLKPM); | ||
3981 | |||
3982 | if (pci_enable_device(pdev)) { | ||
3983 | err = -ENODEV; | ||
3984 | goto out_ieee80211_free_hw; | ||
3985 | } | ||
3986 | |||
3987 | pci_set_master(pdev); | ||
3988 | |||
3989 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
3990 | if (!err) | ||
3991 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); | ||
3992 | if (err) { | ||
3993 | IWL_WARN(priv, "No suitable DMA available.\n"); | ||
3994 | goto out_pci_disable_device; | ||
3995 | } | ||
3996 | |||
3997 | pci_set_drvdata(pdev, priv); | ||
3998 | err = pci_request_regions(pdev, DRV_NAME); | ||
3999 | if (err) | ||
4000 | goto out_pci_disable_device; | ||
4001 | |||
4002 | /*********************** | ||
4003 | * 3. Read REV Register | ||
4004 | * ********************/ | ||
4005 | priv->hw_base = pci_iomap(pdev, 0, 0); | ||
4006 | if (!priv->hw_base) { | ||
4007 | err = -ENODEV; | ||
4008 | goto out_pci_release_regions; | ||
4009 | } | ||
4010 | |||
4011 | IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n", | ||
4012 | (unsigned long long) pci_resource_len(pdev, 0)); | ||
4013 | IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base); | ||
4014 | |||
4015 | /* We disable the RETRY_TIMEOUT register (0x41) to keep | ||
4016 | * PCI Tx retries from interfering with C3 CPU state */ | ||
4017 | pci_write_config_byte(pdev, 0x41, 0x00); | ||
4018 | |||
4019 | /* these spin locks will be used in apm_ops.init and EEPROM access | ||
4020 | * we should init now | ||
4021 | */ | ||
4022 | spin_lock_init(&priv->reg_lock); | ||
4023 | spin_lock_init(&priv->lock); | ||
4024 | |||
4025 | /* | ||
4026 | * stop and reset the on-board processor just in case it is in a | ||
4027 | * strange state ... like being left stranded by a primary kernel | ||
4028 | * and this is now the kdump kernel trying to start up | ||
4029 | */ | ||
4030 | iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); | ||
4031 | |||
4032 | /*********************** | ||
4033 | * 4. Read EEPROM | ||
4034 | * ********************/ | ||
4035 | |||
4036 | /* Read the EEPROM */ | ||
4037 | err = iwl_legacy_eeprom_init(priv); | ||
4038 | if (err) { | ||
4039 | IWL_ERR(priv, "Unable to init EEPROM\n"); | ||
4040 | goto out_iounmap; | ||
4041 | } | ||
4042 | /* MAC Address location in EEPROM same for 3945/4965 */ | ||
4043 | eeprom = (struct iwl3945_eeprom *)priv->eeprom; | ||
4044 | IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address); | ||
4045 | SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address); | ||
4046 | |||
4047 | /*********************** | ||
4048 | * 5. Setup HW Constants | ||
4049 | * ********************/ | ||
4050 | /* Device-specific setup */ | ||
4051 | if (iwl3945_hw_set_hw_params(priv)) { | ||
4052 | IWL_ERR(priv, "failed to set hw settings\n"); | ||
4053 | goto out_eeprom_free; | ||
4054 | } | ||
4055 | |||
4056 | /*********************** | ||
4057 | * 6. Setup priv | ||
4058 | * ********************/ | ||
4059 | |||
4060 | err = iwl3945_init_drv(priv); | ||
4061 | if (err) { | ||
4062 | IWL_ERR(priv, "initializing driver failed\n"); | ||
4063 | goto out_unset_hw_params; | ||
4064 | } | ||
4065 | |||
4066 | IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n", | ||
4067 | priv->cfg->name); | ||
4068 | |||
4069 | /*********************** | ||
4070 | * 7. Setup Services | ||
4071 | * ********************/ | ||
4072 | |||
4073 | spin_lock_irqsave(&priv->lock, flags); | ||
4074 | iwl_legacy_disable_interrupts(priv); | ||
4075 | spin_unlock_irqrestore(&priv->lock, flags); | ||
4076 | |||
4077 | pci_enable_msi(priv->pci_dev); | ||
4078 | |||
4079 | err = request_irq(priv->pci_dev->irq, iwl_legacy_isr, | ||
4080 | IRQF_SHARED, DRV_NAME, priv); | ||
4081 | if (err) { | ||
4082 | IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); | ||
4083 | goto out_disable_msi; | ||
4084 | } | ||
4085 | |||
4086 | err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group); | ||
4087 | if (err) { | ||
4088 | IWL_ERR(priv, "failed to create sysfs device attributes\n"); | ||
4089 | goto out_release_irq; | ||
4090 | } | ||
4091 | |||
4092 | iwl_legacy_set_rxon_channel(priv, | ||
4093 | &priv->bands[IEEE80211_BAND_2GHZ].channels[5], | ||
4094 | &priv->contexts[IWL_RXON_CTX_BSS]); | ||
4095 | iwl3945_setup_deferred_work(priv); | ||
4096 | iwl3945_setup_rx_handlers(priv); | ||
4097 | iwl_legacy_power_initialize(priv); | ||
4098 | |||
4099 | /********************************* | ||
4100 | * 8. Setup and Register mac80211 | ||
4101 | * *******************************/ | ||
4102 | |||
4103 | iwl_legacy_enable_interrupts(priv); | ||
4104 | |||
4105 | err = iwl3945_setup_mac(priv); | ||
4106 | if (err) | ||
4107 | goto out_remove_sysfs; | ||
4108 | |||
4109 | err = iwl_legacy_dbgfs_register(priv, DRV_NAME); | ||
4110 | if (err) | ||
4111 | IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err); | ||
4112 | |||
4113 | /* Start monitoring the killswitch */ | ||
4114 | queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll, | ||
4115 | 2 * HZ); | ||
4116 | |||
4117 | return 0; | ||
4118 | |||
4119 | out_remove_sysfs: | ||
4120 | destroy_workqueue(priv->workqueue); | ||
4121 | priv->workqueue = NULL; | ||
4122 | sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); | ||
4123 | out_release_irq: | ||
4124 | free_irq(priv->pci_dev->irq, priv); | ||
4125 | out_disable_msi: | ||
4126 | pci_disable_msi(priv->pci_dev); | ||
4127 | iwl_legacy_free_geos(priv); | ||
4128 | iwl_legacy_free_channel_map(priv); | ||
4129 | out_unset_hw_params: | ||
4130 | iwl3945_unset_hw_params(priv); | ||
4131 | out_eeprom_free: | ||
4132 | iwl_legacy_eeprom_free(priv); | ||
4133 | out_iounmap: | ||
4134 | pci_iounmap(pdev, priv->hw_base); | ||
4135 | out_pci_release_regions: | ||
4136 | pci_release_regions(pdev); | ||
4137 | out_pci_disable_device: | ||
4138 | pci_set_drvdata(pdev, NULL); | ||
4139 | pci_disable_device(pdev); | ||
4140 | out_ieee80211_free_hw: | ||
4141 | iwl_legacy_free_traffic_mem(priv); | ||
4142 | ieee80211_free_hw(priv->hw); | ||
4143 | out: | ||
4144 | return err; | ||
4145 | } | ||
4146 | |||
4147 | static void __devexit iwl3945_pci_remove(struct pci_dev *pdev) | ||
4148 | { | ||
4149 | struct iwl_priv *priv = pci_get_drvdata(pdev); | ||
4150 | unsigned long flags; | ||
4151 | |||
4152 | if (!priv) | ||
4153 | return; | ||
4154 | |||
4155 | IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); | ||
4156 | |||
4157 | iwl_legacy_dbgfs_unregister(priv); | ||
4158 | |||
4159 | set_bit(STATUS_EXIT_PENDING, &priv->status); | ||
4160 | |||
4161 | iwl_legacy_leds_exit(priv); | ||
4162 | |||
4163 | if (priv->mac80211_registered) { | ||
4164 | ieee80211_unregister_hw(priv->hw); | ||
4165 | priv->mac80211_registered = 0; | ||
4166 | } else { | ||
4167 | iwl3945_down(priv); | ||
4168 | } | ||
4169 | |||
4170 | /* | ||
4171 | * Make sure device is reset to low power before unloading driver. | ||
4172 | * This may be redundant with iwl_down(), but there are paths to | ||
4173 | * run iwl_down() without calling apm_ops.stop(), and there are | ||
4174 | * paths to avoid running iwl_down() at all before leaving driver. | ||
4175 | * This (inexpensive) call *makes sure* device is reset. | ||
4176 | */ | ||
4177 | iwl_legacy_apm_stop(priv); | ||
4178 | |||
4179 | /* make sure we flush any pending irq or | ||
4180 | * tasklet for the driver | ||
4181 | */ | ||
4182 | spin_lock_irqsave(&priv->lock, flags); | ||
4183 | iwl_legacy_disable_interrupts(priv); | ||
4184 | spin_unlock_irqrestore(&priv->lock, flags); | ||
4185 | |||
4186 | iwl3945_synchronize_irq(priv); | ||
4187 | |||
4188 | sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); | ||
4189 | |||
4190 | cancel_delayed_work_sync(&priv->_3945.rfkill_poll); | ||
4191 | |||
4192 | iwl3945_dealloc_ucode_pci(priv); | ||
4193 | |||
4194 | if (priv->rxq.bd) | ||
4195 | iwl3945_rx_queue_free(priv, &priv->rxq); | ||
4196 | iwl3945_hw_txq_ctx_free(priv); | ||
4197 | |||
4198 | iwl3945_unset_hw_params(priv); | ||
4199 | |||
4200 | /*netif_stop_queue(dev); */ | ||
4201 | flush_workqueue(priv->workqueue); | ||
4202 | |||
4203 | /* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes | ||
4204 | * priv->workqueue... so we can't take down the workqueue | ||
4205 | * until now... */ | ||
4206 | destroy_workqueue(priv->workqueue); | ||
4207 | priv->workqueue = NULL; | ||
4208 | iwl_legacy_free_traffic_mem(priv); | ||
4209 | |||
4210 | free_irq(pdev->irq, priv); | ||
4211 | pci_disable_msi(pdev); | ||
4212 | |||
4213 | pci_iounmap(pdev, priv->hw_base); | ||
4214 | pci_release_regions(pdev); | ||
4215 | pci_disable_device(pdev); | ||
4216 | pci_set_drvdata(pdev, NULL); | ||
4217 | |||
4218 | iwl_legacy_free_channel_map(priv); | ||
4219 | iwl_legacy_free_geos(priv); | ||
4220 | kfree(priv->scan_cmd); | ||
4221 | if (priv->beacon_skb) | ||
4222 | dev_kfree_skb(priv->beacon_skb); | ||
4223 | |||
4224 | ieee80211_free_hw(priv->hw); | ||
4225 | } | ||
4226 | |||
4227 | |||
4228 | /***************************************************************************** | ||
4229 | * | ||
4230 | * driver and module entry point | ||
4231 | * | ||
4232 | *****************************************************************************/ | ||
4233 | |||
4234 | static struct pci_driver iwl3945_driver = { | ||
4235 | .name = DRV_NAME, | ||
4236 | .id_table = iwl3945_hw_card_ids, | ||
4237 | .probe = iwl3945_pci_probe, | ||
4238 | .remove = __devexit_p(iwl3945_pci_remove), | ||
4239 | .driver.pm = IWL_LEGACY_PM_OPS, | ||
4240 | }; | ||
4241 | |||
4242 | static int __init iwl3945_init(void) | ||
4243 | { | ||
4244 | |||
4245 | int ret; | ||
4246 | pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n"); | ||
4247 | pr_info(DRV_COPYRIGHT "\n"); | ||
4248 | |||
4249 | ret = iwl3945_rate_control_register(); | ||
4250 | if (ret) { | ||
4251 | pr_err("Unable to register rate control algorithm: %d\n", ret); | ||
4252 | return ret; | ||
4253 | } | ||
4254 | |||
4255 | ret = pci_register_driver(&iwl3945_driver); | ||
4256 | if (ret) { | ||
4257 | pr_err("Unable to initialize PCI module\n"); | ||
4258 | goto error_register; | ||
4259 | } | ||
4260 | |||
4261 | return ret; | ||
4262 | |||
4263 | error_register: | ||
4264 | iwl3945_rate_control_unregister(); | ||
4265 | return ret; | ||
4266 | } | ||
4267 | |||
4268 | static void __exit iwl3945_exit(void) | ||
4269 | { | ||
4270 | pci_unregister_driver(&iwl3945_driver); | ||
4271 | iwl3945_rate_control_unregister(); | ||
4272 | } | ||
4273 | |||
4274 | MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX)); | ||
4275 | |||
4276 | module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO); | ||
4277 | MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); | ||
4278 | module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO); | ||
4279 | MODULE_PARM_DESC(swcrypto, | ||
4280 | "using software crypto (default 1 [software])"); | ||
4281 | module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, | ||
4282 | int, S_IRUGO); | ||
4283 | MODULE_PARM_DESC(disable_hw_scan, | ||
4284 | "disable hardware scanning (default 0) (deprecated)"); | ||
4285 | #ifdef CONFIG_IWLWIFI_LEGACY_DEBUG | ||
4286 | module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR); | ||
4287 | MODULE_PARM_DESC(debug, "debug output mask"); | ||
4288 | #endif | ||
4289 | module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO); | ||
4290 | MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); | ||
4291 | |||
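/*
 * Example module load (illustrative values only; the debug parameter is
 * present only when CONFIG_IWLWIFI_LEGACY_DEBUG is enabled):
 *
 *   modprobe iwl3945 swcrypto=0 fw_restart=1 debug=0x1
 */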
4292 | module_exit(iwl3945_exit); | ||
4293 | module_init(iwl3945_init); | ||