Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl3945-base.c')
-rw-r--r-- | drivers/net/wireless/iwlwifi/iwl3945-base.c | 8732 |
1 files changed, 8732 insertions, 0 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c new file mode 100644 index 000000000000..474b6402040c --- /dev/null +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c | |||
@@ -0,0 +1,8732 @@ | |||
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. | ||
4 | * | ||
5 | * Portions of this file are derived from the ipw3945 project, as well | ||
6 | * as portions of the ieee80211 subsystem header files. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of version 2 of the GNU General Public License as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., | ||
19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA | ||
20 | * | ||
21 | * The full GNU General Public License is included in this distribution in the | ||
22 | * file called LICENSE. | ||
23 | * | ||
24 | * Contact Information: | ||
25 | * James P. Ketrenos <ipw2100-admin@linux.intel.com> | ||
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
27 | * | ||
28 | *****************************************************************************/ | ||
29 | |||
30 | /* | ||
31 | * NOTE: This file (iwl-base.c) is used to build for multiple hardware targets | ||
32 | * by defining IWL to either 3945 or 4965. The Makefile used when building | ||
33 | * the base targets will create base-3945.o and base-4965.o | ||
34 | * | ||
35 | * The eventual goal is to move as many of the #if IWL / #endif blocks out of | ||
36 | * this file and into the hardware specific implementation files (iwl-XXXX.c) | ||
37 | * and leave only the common (non #ifdef sprinkled) code in this file | ||
38 | */ | ||
39 | |||
40 | #include <linux/kernel.h> | ||
41 | #include <linux/module.h> | ||
42 | #include <linux/version.h> | ||
43 | #include <linux/init.h> | ||
44 | #include <linux/pci.h> | ||
45 | #include <linux/dma-mapping.h> | ||
46 | #include <linux/delay.h> | ||
47 | #include <linux/skbuff.h> | ||
48 | #include <linux/netdevice.h> | ||
49 | #include <linux/wireless.h> | ||
50 | #include <linux/firmware.h> | ||
51 | #include <linux/skbuff.h> | ||
52 | #include <linux/netdevice.h> | ||
53 | #include <linux/etherdevice.h> | ||
54 | #include <linux/if_arp.h> | ||
55 | |||
56 | #include <net/ieee80211_radiotap.h> | ||
57 | #include <net/mac80211.h> | ||
58 | |||
59 | #include <asm/div64.h> | ||
60 | |||
61 | #include "iwlwifi.h" | ||
62 | #include "iwl-3945.h" | ||
63 | #include "iwl-helpers.h" | ||
64 | |||
65 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
66 | u32 iwl_debug_level; | ||
67 | #endif | ||
68 | |||
69 | /****************************************************************************** | ||
70 | * | ||
71 | * module boiler plate | ||
72 | * | ||
73 | ******************************************************************************/ | ||
74 | |||
75 | /* module parameters */ | ||
76 | int iwl_param_disable_hw_scan; | ||
77 | int iwl_param_debug; | ||
78 | int iwl_param_disable; /* def: enable radio */ | ||
79 | int iwl_param_antenna; /* def: 0 = both antennas (use diversity) */ | ||
80 | int iwl_param_hwcrypto; /* def: using software encryption */ | ||
81 | int iwl_param_qos_enable = 1; | ||
82 | int iwl_param_queues_num = IWL_MAX_NUM_QUEUES; | ||
83 | |||
84 | /* | ||
85 | * module name, copyright, version, etc. | ||
86 | * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk | ||
87 | */ | ||
88 | |||
89 | #define DRV_DESCRIPTION \ | ||
90 | "Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux" | ||
91 | |||
92 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
93 | #define VD "d" | ||
94 | #else | ||
95 | #define VD | ||
96 | #endif | ||
97 | |||
98 | #ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT | ||
99 | #define VS "s" | ||
100 | #else | ||
101 | #define VS | ||
102 | #endif | ||
103 | |||
104 | #define IWLWIFI_VERSION "0.1.15k" VD VS | ||
105 | #define DRV_COPYRIGHT "Copyright(c) 2003-2007 Intel Corporation" | ||
106 | #define DRV_VERSION IWLWIFI_VERSION | ||
107 | |||
108 | /* Change the firmware file name, using "-" and an incrementing number, | ||
109 | * *only* when the uCode interface or architecture changes so that it | ||
110 | * is not compatible with earlier drivers. | ||
111 | * This number also appears in the << 8 position of the 1st dword of the uCode file */ | ||
112 | #define IWL3945_UCODE_API "-1" | ||
113 | |||
114 | MODULE_DESCRIPTION(DRV_DESCRIPTION); | ||
115 | MODULE_VERSION(DRV_VERSION); | ||
116 | MODULE_AUTHOR(DRV_COPYRIGHT); | ||
117 | MODULE_LICENSE("GPL"); | ||
118 | |||
119 | __le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr) | ||
120 | { | ||
121 | u16 fc = le16_to_cpu(hdr->frame_control); | ||
122 | int hdr_len = ieee80211_get_hdrlen(fc); | ||
123 | |||
124 | if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA)) | ||
125 | return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN); | ||
126 | return NULL; | ||
127 | } | ||
128 | |||
129 | static const struct ieee80211_hw_mode *iwl_get_hw_mode( | ||
130 | struct iwl_priv *priv, int mode) | ||
131 | { | ||
132 | int i; | ||
133 | |||
134 | for (i = 0; i < 3; i++) | ||
135 | if (priv->modes[i].mode == mode) | ||
136 | return &priv->modes[i]; | ||
137 | |||
138 | return NULL; | ||
139 | } | ||
140 | |||
141 | static int iwl_is_empty_essid(const char *essid, int essid_len) | ||
142 | { | ||
143 | /* Single white space is for Linksys APs */ | ||
144 | if (essid_len == 1 && essid[0] == ' ') | ||
145 | return 1; | ||
146 | |||
147 | /* Otherwise, if the entire essid is 0, we assume it is hidden */ | ||
148 | while (essid_len) { | ||
149 | essid_len--; | ||
150 | if (essid[essid_len] != '\0') | ||
151 | return 0; | ||
152 | } | ||
153 | |||
154 | return 1; | ||
155 | } | ||
156 | |||
157 | static const char *iwl_escape_essid(const char *essid, u8 essid_len) | ||
158 | { | ||
159 | static char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; | ||
160 | const char *s = essid; | ||
161 | char *d = escaped; | ||
162 | |||
163 | if (iwl_is_empty_essid(essid, essid_len)) { | ||
164 | memcpy(escaped, "<hidden>", sizeof("<hidden>")); | ||
165 | return escaped; | ||
166 | } | ||
167 | |||
168 | essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE); | ||
169 | while (essid_len--) { | ||
170 | if (*s == '\0') { | ||
171 | *d++ = '\\'; | ||
172 | *d++ = '0'; | ||
173 | s++; | ||
174 | } else | ||
175 | *d++ = *s++; | ||
176 | } | ||
177 | *d = '\0'; | ||
178 | return escaped; | ||
179 | } | ||
180 | |||
181 | static void iwl_print_hex_dump(int level, void *p, u32 len) | ||
182 | { | ||
183 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
184 | if (!(iwl_debug_level & level)) | ||
185 | return; | ||
186 | |||
187 | print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1, | ||
188 | p, len, 1); | ||
189 | #endif | ||
190 | } | ||
191 | |||
192 | /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** | ||
193 | * DMA services | ||
194 | * | ||
195 | * Theory of operation | ||
196 | * | ||
197 | * A queue is a circular buffer with 'Read' and 'Write' pointers. | ||
198 | * Two empty entries are always kept in the buffer to protect against overflow. | ||
199 | * | ||
200 | * For the Tx queue, there are low mark and high mark limits. If, after queuing | ||
201 | * a packet for Tx, free space becomes < low mark, the Tx queue is stopped. When | ||
202 | * reclaiming packets (on the 'tx done' IRQ), if free space becomes > high mark, | ||
203 | * the Tx queue is resumed. | ||
204 | * | ||
205 | * The IPW operates with six queues, one receive queue in the device's | ||
206 | * sram, one transmit queue for sending commands to the device firmware, | ||
207 | * and four transmit queues for data. | ||
208 | ***************************************************/ | ||
209 | |||
210 | static int iwl_queue_space(const struct iwl_queue *q) | ||
211 | { | ||
212 | int s = q->last_used - q->first_empty; | ||
213 | |||
214 | if (q->last_used > q->first_empty) | ||
215 | s -= q->n_bd; | ||
216 | |||
217 | if (s <= 0) | ||
218 | s += q->n_window; | ||
219 | /* keep some reserve to not confuse empty and full situations */ | ||
220 | s -= 2; | ||
221 | if (s < 0) | ||
222 | s = 0; | ||
223 | return s; | ||
224 | } | ||
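/*
 * Illustrative walk-through of iwl_queue_space() with hypothetical values
 * (taking n_bd == n_window == 64 for simplicity):
 *   first_empty = 10 (write pointer), last_used = 50 (read pointer)
 *   s = 50 - 10 = 40;  last_used > first_empty, so s -= 64  =>  -24;
 *   s <= 0, so s += 64  =>  40;  minus the 2-entry reserve  =>  38.
 * The 24 descriptors from index 50 wrapping around to index 9 are still
 * in flight, leaving 38 usable slots plus the overflow-protection reserve.
 */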
225 | |||
226 | /* XXX: n_bd must be power-of-two size */ | ||
227 | static inline int iwl_queue_inc_wrap(int index, int n_bd) | ||
228 | { | ||
229 | return ++index & (n_bd - 1); | ||
230 | } | ||
231 | |||
232 | /* XXX: n_bd must be power-of-two size */ | ||
233 | static inline int iwl_queue_dec_wrap(int index, int n_bd) | ||
234 | { | ||
235 | return --index & (n_bd - 1); | ||
236 | } | ||
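/*
 * With a power-of-two n_bd the wrap helpers reduce to a single mask, e.g.
 * for a hypothetical n_bd = 8: iwl_queue_inc_wrap(7, 8) == 8 & 7 == 0 and
 * iwl_queue_dec_wrap(0, 8) == -1 & 7 == 7 (two's-complement wrap-around).
 */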
237 | |||
238 | static inline int x2_queue_used(const struct iwl_queue *q, int i) | ||
239 | { | ||
240 | return q->first_empty > q->last_used ? | ||
241 | (i >= q->last_used && i < q->first_empty) : | ||
242 | !(i < q->last_used && i >= q->first_empty); | ||
243 | } | ||
244 | |||
245 | static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge) | ||
246 | { | ||
247 | if (is_huge) | ||
248 | return q->n_window; | ||
249 | |||
250 | return index & (q->n_window - 1); | ||
251 | } | ||
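/*
 * Note on the 'huge' slot: for CMD_SIZE_HUGE commands, get_cmd_index()
 * returns q->n_window, i.e. one entry past the normal command window.
 * iwl_tx_queue_init() (below) allocates an extra IWL_MAX_SCAN_SIZE bytes
 * after the sizeof(struct iwl_cmd) * slots_num array for the command
 * queue, so a huge command (the scan command) lands in that scratch area
 * instead of overwriting a regular slot.
 */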
252 | |||
253 | static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q, | ||
254 | int count, int slots_num, u32 id) | ||
255 | { | ||
256 | q->n_bd = count; | ||
257 | q->n_window = slots_num; | ||
258 | q->id = id; | ||
259 | |||
260 | /* count must be power-of-two size, otherwise iwl_queue_inc_wrap | ||
261 | * and iwl_queue_dec_wrap are broken. */ | ||
262 | BUG_ON(!is_power_of_2(count)); | ||
263 | |||
264 | /* slots_num must be power-of-two size, otherwise | ||
265 | * get_cmd_index is broken. */ | ||
266 | BUG_ON(!is_power_of_2(slots_num)); | ||
267 | |||
268 | q->low_mark = q->n_window / 4; | ||
269 | if (q->low_mark < 4) | ||
270 | q->low_mark = 4; | ||
271 | |||
272 | q->high_mark = q->n_window / 8; | ||
273 | if (q->high_mark < 2) | ||
274 | q->high_mark = 2; | ||
275 | |||
276 | q->first_empty = q->last_used = 0; | ||
277 | |||
278 | return 0; | ||
279 | } | ||
280 | |||
281 | static int iwl_tx_queue_alloc(struct iwl_priv *priv, | ||
282 | struct iwl_tx_queue *txq, u32 id) | ||
283 | { | ||
284 | struct pci_dev *dev = priv->pci_dev; | ||
285 | |||
286 | if (id != IWL_CMD_QUEUE_NUM) { | ||
287 | txq->txb = kmalloc(sizeof(txq->txb[0]) * | ||
288 | TFD_QUEUE_SIZE_MAX, GFP_KERNEL); | ||
289 | if (!txq->txb) { | ||
290 | IWL_ERROR("kmalloc for auxiliary BD " | ||
291 | "structures failed\n"); | ||
292 | goto error; | ||
293 | } | ||
294 | } else | ||
295 | txq->txb = NULL; | ||
296 | |||
297 | txq->bd = pci_alloc_consistent(dev, | ||
298 | sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX, | ||
299 | &txq->q.dma_addr); | ||
300 | |||
301 | if (!txq->bd) { | ||
302 | IWL_ERROR("pci_alloc_consistent(%zd) failed\n", | ||
303 | sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX); | ||
304 | goto error; | ||
305 | } | ||
306 | txq->q.id = id; | ||
307 | |||
308 | return 0; | ||
309 | |||
310 | error: | ||
311 | if (txq->txb) { | ||
312 | kfree(txq->txb); | ||
313 | txq->txb = NULL; | ||
314 | } | ||
315 | |||
316 | return -ENOMEM; | ||
317 | } | ||
318 | |||
319 | int iwl_tx_queue_init(struct iwl_priv *priv, | ||
320 | struct iwl_tx_queue *txq, int slots_num, u32 txq_id) | ||
321 | { | ||
322 | struct pci_dev *dev = priv->pci_dev; | ||
323 | int len; | ||
324 | int rc = 0; | ||
325 | |||
326 | /* allocate command space + one big command for scan, since the scan | ||
327 | * command is very large and the system will not run two scans at the | ||
328 | * same time */ | ||
329 | len = sizeof(struct iwl_cmd) * slots_num; | ||
330 | if (txq_id == IWL_CMD_QUEUE_NUM) | ||
331 | len += IWL_MAX_SCAN_SIZE; | ||
332 | txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd); | ||
333 | if (!txq->cmd) | ||
334 | return -ENOMEM; | ||
335 | |||
336 | rc = iwl_tx_queue_alloc(priv, txq, txq_id); | ||
337 | if (rc) { | ||
338 | pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd); | ||
339 | |||
340 | return -ENOMEM; | ||
341 | } | ||
342 | txq->need_update = 0; | ||
343 | |||
344 | /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise | ||
345 | * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ | ||
346 | BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); | ||
347 | iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); | ||
348 | |||
349 | iwl_hw_tx_queue_init(priv, txq); | ||
350 | |||
351 | return 0; | ||
352 | } | ||
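/*
 * Typical call pattern (sketch only; the actual slot-count constants are
 * defined elsewhere in the driver and the names below are placeholders):
 *
 *   iwl_tx_queue_init(priv, &priv->txq[IWL_CMD_QUEUE_NUM],
 *                     cmd_queue_slots, IWL_CMD_QUEUE_NUM);
 *   iwl_tx_queue_init(priv, &priv->txq[txq_id], data_queue_slots, txq_id);
 *
 * Only the command queue receives the extra IWL_MAX_SCAN_SIZE scratch
 * space allocated above.
 */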
353 | |||
354 | /** | ||
355 | * iwl_tx_queue_free - Deallocate DMA queue. | ||
356 | * @txq: Transmit queue to deallocate. | ||
357 | * | ||
358 | * Empty the queue by removing and destroying all BDs. | ||
359 | * Free all buffers. txq itself is not freed. | ||
360 | * | ||
361 | */ | ||
362 | void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq) | ||
363 | { | ||
364 | struct iwl_queue *q = &txq->q; | ||
365 | struct pci_dev *dev = priv->pci_dev; | ||
366 | int len; | ||
367 | |||
368 | if (q->n_bd == 0) | ||
369 | return; | ||
370 | |||
371 | /* first, empty all BD's */ | ||
372 | for (; q->first_empty != q->last_used; | ||
373 | q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd)) | ||
374 | iwl_hw_txq_free_tfd(priv, txq); | ||
375 | |||
376 | len = sizeof(struct iwl_cmd) * q->n_window; | ||
377 | if (q->id == IWL_CMD_QUEUE_NUM) | ||
378 | len += IWL_MAX_SCAN_SIZE; | ||
379 | |||
380 | pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd); | ||
381 | |||
382 | /* free buffers belonging to queue itself */ | ||
383 | if (txq->q.n_bd) | ||
384 | pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) * | ||
385 | txq->q.n_bd, txq->bd, txq->q.dma_addr); | ||
386 | |||
387 | if (txq->txb) { | ||
388 | kfree(txq->txb); | ||
389 | txq->txb = NULL; | ||
390 | } | ||
391 | |||
392 | /* 0 fill whole structure */ | ||
393 | memset(txq, 0, sizeof(*txq)); | ||
394 | } | ||
395 | |||
396 | const u8 BROADCAST_ADDR[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; | ||
397 | |||
398 | /*************** STATION TABLE MANAGEMENT **** | ||
399 | * | ||
400 | * NOTE: This needs to be overhauled to better synchronize | ||
401 | * how iwl-4965.c uses iwl_hw_find_station vs. how iwl-3945.c does | ||
402 | * | ||
403 | * mac80211 should also be examined to determine if sta_info is duplicating | ||
404 | * the functionality provided here | ||
405 | */ | ||
406 | |||
407 | /**************************************************************/ | ||
408 | static u8 iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap) | ||
409 | { | ||
410 | int index = IWL_INVALID_STATION; | ||
411 | int i; | ||
412 | unsigned long flags; | ||
413 | |||
414 | spin_lock_irqsave(&priv->sta_lock, flags); | ||
415 | |||
416 | if (is_ap) | ||
417 | index = IWL_AP_ID; | ||
418 | else if (is_broadcast_ether_addr(addr)) | ||
419 | index = priv->hw_setting.bcast_sta_id; | ||
420 | else | ||
421 | for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) | ||
422 | if (priv->stations[i].used && | ||
423 | !compare_ether_addr(priv->stations[i].sta.sta.addr, | ||
424 | addr)) { | ||
425 | index = i; | ||
426 | break; | ||
427 | } | ||
428 | |||
429 | if (unlikely(index == IWL_INVALID_STATION)) | ||
430 | goto out; | ||
431 | |||
432 | if (priv->stations[index].used) { | ||
433 | priv->stations[index].used = 0; | ||
434 | priv->num_stations--; | ||
435 | } | ||
436 | |||
437 | BUG_ON(priv->num_stations < 0); | ||
438 | |||
439 | out: | ||
440 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
441 | return 0; | ||
442 | } | ||
443 | |||
444 | static void iwl_clear_stations_table(struct iwl_priv *priv) | ||
445 | { | ||
446 | unsigned long flags; | ||
447 | |||
448 | spin_lock_irqsave(&priv->sta_lock, flags); | ||
449 | |||
450 | priv->num_stations = 0; | ||
451 | memset(priv->stations, 0, sizeof(priv->stations)); | ||
452 | |||
453 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
454 | } | ||
455 | |||
456 | |||
457 | u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap, u8 flags) | ||
458 | { | ||
459 | int i; | ||
460 | int index = IWL_INVALID_STATION; | ||
461 | struct iwl_station_entry *station; | ||
462 | unsigned long flags_spin; | ||
463 | |||
464 | spin_lock_irqsave(&priv->sta_lock, flags_spin); | ||
465 | if (is_ap) | ||
466 | index = IWL_AP_ID; | ||
467 | else if (is_broadcast_ether_addr(addr)) | ||
468 | index = priv->hw_setting.bcast_sta_id; | ||
469 | else | ||
470 | for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) { | ||
471 | if (!compare_ether_addr(priv->stations[i].sta.sta.addr, | ||
472 | addr)) { | ||
473 | index = i; | ||
474 | break; | ||
475 | } | ||
476 | |||
477 | if (!priv->stations[i].used && | ||
478 | index == IWL_INVALID_STATION) | ||
479 | index = i; | ||
480 | } | ||
481 | |||
482 | /* These two conditions have the same outcome but keep them separate | ||
483 | since they have different meanings */ | ||
484 | if (unlikely(index == IWL_INVALID_STATION)) { | ||
485 | spin_unlock_irqrestore(&priv->sta_lock, flags_spin); | ||
486 | return index; | ||
487 | } | ||
488 | |||
489 | if (priv->stations[index].used && | ||
490 | !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) { | ||
491 | spin_unlock_irqrestore(&priv->sta_lock, flags_spin); | ||
492 | return index; | ||
493 | } | ||
494 | |||
495 | IWL_DEBUG_ASSOC("Add STA ID %d: " MAC_FMT "\n", index, MAC_ARG(addr)); | ||
496 | station = &priv->stations[index]; | ||
497 | station->used = 1; | ||
498 | priv->num_stations++; | ||
499 | |||
500 | memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd)); | ||
501 | memcpy(station->sta.sta.addr, addr, ETH_ALEN); | ||
502 | station->sta.mode = 0; | ||
503 | station->sta.sta.sta_id = index; | ||
504 | station->sta.station_flags = 0; | ||
505 | |||
506 | spin_unlock_irqrestore(&priv->sta_lock, flags_spin); | ||
507 | iwl_send_add_station(priv, &station->sta, flags); | ||
508 | return index; | ||
509 | |||
510 | } | ||
511 | |||
512 | /*************** DRIVER STATUS FUNCTIONS *****/ | ||
513 | |||
514 | static inline int iwl_is_ready(struct iwl_priv *priv) | ||
515 | { | ||
516 | /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are | ||
517 | * set but EXIT_PENDING is not */ | ||
518 | return test_bit(STATUS_READY, &priv->status) && | ||
519 | test_bit(STATUS_GEO_CONFIGURED, &priv->status) && | ||
520 | !test_bit(STATUS_EXIT_PENDING, &priv->status); | ||
521 | } | ||
522 | |||
523 | static inline int iwl_is_alive(struct iwl_priv *priv) | ||
524 | { | ||
525 | return test_bit(STATUS_ALIVE, &priv->status); | ||
526 | } | ||
527 | |||
528 | static inline int iwl_is_init(struct iwl_priv *priv) | ||
529 | { | ||
530 | return test_bit(STATUS_INIT, &priv->status); | ||
531 | } | ||
532 | |||
533 | static inline int iwl_is_rfkill(struct iwl_priv *priv) | ||
534 | { | ||
535 | return test_bit(STATUS_RF_KILL_HW, &priv->status) || | ||
536 | test_bit(STATUS_RF_KILL_SW, &priv->status); | ||
537 | } | ||
538 | |||
539 | static inline int iwl_is_ready_rf(struct iwl_priv *priv) | ||
540 | { | ||
541 | |||
542 | if (iwl_is_rfkill(priv)) | ||
543 | return 0; | ||
544 | |||
545 | return iwl_is_ready(priv); | ||
546 | } | ||
547 | |||
548 | /*************** HOST COMMAND QUEUE FUNCTIONS *****/ | ||
549 | |||
550 | #define IWL_CMD(x) case x : return #x | ||
551 | |||
552 | static const char *get_cmd_string(u8 cmd) | ||
553 | { | ||
554 | switch (cmd) { | ||
555 | IWL_CMD(REPLY_ALIVE); | ||
556 | IWL_CMD(REPLY_ERROR); | ||
557 | IWL_CMD(REPLY_RXON); | ||
558 | IWL_CMD(REPLY_RXON_ASSOC); | ||
559 | IWL_CMD(REPLY_QOS_PARAM); | ||
560 | IWL_CMD(REPLY_RXON_TIMING); | ||
561 | IWL_CMD(REPLY_ADD_STA); | ||
562 | IWL_CMD(REPLY_REMOVE_STA); | ||
563 | IWL_CMD(REPLY_REMOVE_ALL_STA); | ||
564 | IWL_CMD(REPLY_3945_RX); | ||
565 | IWL_CMD(REPLY_TX); | ||
566 | IWL_CMD(REPLY_RATE_SCALE); | ||
567 | IWL_CMD(REPLY_LEDS_CMD); | ||
568 | IWL_CMD(REPLY_TX_LINK_QUALITY_CMD); | ||
569 | IWL_CMD(RADAR_NOTIFICATION); | ||
570 | IWL_CMD(REPLY_QUIET_CMD); | ||
571 | IWL_CMD(REPLY_CHANNEL_SWITCH); | ||
572 | IWL_CMD(CHANNEL_SWITCH_NOTIFICATION); | ||
573 | IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD); | ||
574 | IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION); | ||
575 | IWL_CMD(POWER_TABLE_CMD); | ||
576 | IWL_CMD(PM_SLEEP_NOTIFICATION); | ||
577 | IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC); | ||
578 | IWL_CMD(REPLY_SCAN_CMD); | ||
579 | IWL_CMD(REPLY_SCAN_ABORT_CMD); | ||
580 | IWL_CMD(SCAN_START_NOTIFICATION); | ||
581 | IWL_CMD(SCAN_RESULTS_NOTIFICATION); | ||
582 | IWL_CMD(SCAN_COMPLETE_NOTIFICATION); | ||
583 | IWL_CMD(BEACON_NOTIFICATION); | ||
584 | IWL_CMD(REPLY_TX_BEACON); | ||
585 | IWL_CMD(WHO_IS_AWAKE_NOTIFICATION); | ||
586 | IWL_CMD(QUIET_NOTIFICATION); | ||
587 | IWL_CMD(REPLY_TX_PWR_TABLE_CMD); | ||
588 | IWL_CMD(MEASURE_ABORT_NOTIFICATION); | ||
589 | IWL_CMD(REPLY_BT_CONFIG); | ||
590 | IWL_CMD(REPLY_STATISTICS_CMD); | ||
591 | IWL_CMD(STATISTICS_NOTIFICATION); | ||
592 | IWL_CMD(REPLY_CARD_STATE_CMD); | ||
593 | IWL_CMD(CARD_STATE_NOTIFICATION); | ||
594 | IWL_CMD(MISSED_BEACONS_NOTIFICATION); | ||
595 | default: | ||
596 | return "UNKNOWN"; | ||
597 | |||
598 | } | ||
599 | } | ||
600 | |||
601 | #define HOST_COMPLETE_TIMEOUT (HZ / 2) | ||
602 | |||
603 | /** | ||
604 | * iwl_enqueue_hcmd - enqueue a uCode command | ||
605 | * @priv: device private data pointer | ||
606 | * @cmd: a pointer to the uCode command structure | ||
607 | * | ||
608 | * The function returns a value < 0 to indicate that the operation | ||
609 | * failed. On success, it returns the index (> 0) of the command in the | ||
610 | * command queue. | ||
611 | */ | ||
612 | static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | ||
613 | { | ||
614 | struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; | ||
615 | struct iwl_queue *q = &txq->q; | ||
616 | struct iwl_tfd_frame *tfd; | ||
617 | u32 *control_flags; | ||
618 | struct iwl_cmd *out_cmd; | ||
619 | u32 idx; | ||
620 | u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr)); | ||
621 | dma_addr_t phys_addr; | ||
622 | int pad; | ||
623 | u16 count; | ||
624 | int ret; | ||
625 | unsigned long flags; | ||
626 | |||
627 | /* If any of the command structures end up being larger than | ||
628 | * the TFD_MAX_PAYLOAD_SIZE and is sent as a 'small' command, then | ||
629 | * we will need to increase the size of the TFD entries */ | ||
630 | BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && | ||
631 | !(cmd->meta.flags & CMD_SIZE_HUGE)); | ||
632 | |||
633 | if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) { | ||
634 | IWL_ERROR("No space for Tx\n"); | ||
635 | return -ENOSPC; | ||
636 | } | ||
637 | |||
638 | spin_lock_irqsave(&priv->hcmd_lock, flags); | ||
639 | |||
640 | tfd = &txq->bd[q->first_empty]; | ||
641 | memset(tfd, 0, sizeof(*tfd)); | ||
642 | |||
643 | control_flags = (u32 *) tfd; | ||
644 | |||
645 | idx = get_cmd_index(q, q->first_empty, cmd->meta.flags & CMD_SIZE_HUGE); | ||
646 | out_cmd = &txq->cmd[idx]; | ||
647 | |||
648 | out_cmd->hdr.cmd = cmd->id; | ||
649 | memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta)); | ||
650 | memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len); | ||
651 | |||
652 | /* At this point, the out_cmd now has all of the incoming cmd | ||
653 | * information */ | ||
654 | |||
655 | out_cmd->hdr.flags = 0; | ||
656 | out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) | | ||
657 | INDEX_TO_SEQ(q->first_empty)); | ||
658 | if (out_cmd->meta.flags & CMD_SIZE_HUGE) | ||
659 | out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME); | ||
660 | |||
661 | phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx + | ||
662 | offsetof(struct iwl_cmd, hdr); | ||
663 | iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size); | ||
664 | |||
665 | pad = U32_PAD(cmd->len); | ||
666 | count = TFD_CTL_COUNT_GET(*control_flags); | ||
667 | *control_flags = TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad); | ||
668 | |||
669 | IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, " | ||
670 | "%d bytes at %d[%d]:%d\n", | ||
671 | get_cmd_string(out_cmd->hdr.cmd), | ||
672 | out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), | ||
673 | fix_size, q->first_empty, idx, IWL_CMD_QUEUE_NUM); | ||
674 | |||
675 | txq->need_update = 1; | ||
676 | q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd); | ||
677 | ret = iwl_tx_queue_update_write_ptr(priv, txq); | ||
678 | |||
679 | spin_unlock_irqrestore(&priv->hcmd_lock, flags); | ||
680 | return ret ? ret : idx; | ||
681 | } | ||
682 | |||
683 | int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | ||
684 | { | ||
685 | int ret; | ||
686 | |||
687 | BUG_ON(!(cmd->meta.flags & CMD_ASYNC)); | ||
688 | |||
689 | /* An asynchronous command can not expect an SKB to be set. */ | ||
690 | BUG_ON(cmd->meta.flags & CMD_WANT_SKB); | ||
691 | |||
692 | /* An asynchronous command MUST have a callback. */ | ||
693 | BUG_ON(!cmd->meta.u.callback); | ||
694 | |||
695 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
696 | return -EBUSY; | ||
697 | |||
698 | ret = iwl_enqueue_hcmd(priv, cmd); | ||
699 | if (ret < 0) { | ||
700 | IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n", | ||
701 | get_cmd_string(cmd->id), ret); | ||
702 | return ret; | ||
703 | } | ||
704 | return 0; | ||
705 | } | ||
706 | |||
707 | int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | ||
708 | { | ||
709 | int cmd_idx; | ||
710 | int ret; | ||
711 | static atomic_t entry = ATOMIC_INIT(0); /* reentrance protection */ | ||
712 | |||
713 | BUG_ON(cmd->meta.flags & CMD_ASYNC); | ||
714 | |||
715 | /* A synchronous command can not have a callback set. */ | ||
716 | BUG_ON(cmd->meta.u.callback != NULL); | ||
717 | |||
718 | if (atomic_xchg(&entry, 1)) { | ||
719 | IWL_ERROR("Error sending %s: Already sending a host command\n", | ||
720 | get_cmd_string(cmd->id)); | ||
721 | return -EBUSY; | ||
722 | } | ||
723 | |||
724 | set_bit(STATUS_HCMD_ACTIVE, &priv->status); | ||
725 | |||
726 | if (cmd->meta.flags & CMD_WANT_SKB) | ||
727 | cmd->meta.source = &cmd->meta; | ||
728 | |||
729 | cmd_idx = iwl_enqueue_hcmd(priv, cmd); | ||
730 | if (cmd_idx < 0) { | ||
731 | ret = cmd_idx; | ||
732 | IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n", | ||
733 | get_cmd_string(cmd->id), ret); | ||
734 | goto out; | ||
735 | } | ||
736 | |||
737 | ret = wait_event_interruptible_timeout(priv->wait_command_queue, | ||
738 | !test_bit(STATUS_HCMD_ACTIVE, &priv->status), | ||
739 | HOST_COMPLETE_TIMEOUT); | ||
740 | if (!ret) { | ||
741 | if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) { | ||
742 | IWL_ERROR("Error sending %s: time out after %dms.\n", | ||
743 | get_cmd_string(cmd->id), | ||
744 | jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); | ||
745 | |||
746 | clear_bit(STATUS_HCMD_ACTIVE, &priv->status); | ||
747 | ret = -ETIMEDOUT; | ||
748 | goto cancel; | ||
749 | } | ||
750 | } | ||
751 | |||
752 | if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { | ||
753 | IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n", | ||
754 | get_cmd_string(cmd->id)); | ||
755 | ret = -ECANCELED; | ||
756 | goto fail; | ||
757 | } | ||
758 | if (test_bit(STATUS_FW_ERROR, &priv->status)) { | ||
759 | IWL_DEBUG_INFO("Command %s failed: FW Error\n", | ||
760 | get_cmd_string(cmd->id)); | ||
761 | ret = -EIO; | ||
762 | goto fail; | ||
763 | } | ||
764 | if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) { | ||
765 | IWL_ERROR("Error: Response NULL in '%s'\n", | ||
766 | get_cmd_string(cmd->id)); | ||
767 | ret = -EIO; | ||
768 | goto out; | ||
769 | } | ||
770 | |||
771 | ret = 0; | ||
772 | goto out; | ||
773 | |||
774 | cancel: | ||
775 | if (cmd->meta.flags & CMD_WANT_SKB) { | ||
776 | struct iwl_cmd *qcmd; | ||
777 | |||
778 | /* Cancel the CMD_WANT_SKB flag for the cmd in the | ||
779 | * TX cmd queue. Otherwise, if the cmd comes | ||
780 | * in later, it could set an invalid | ||
781 | * address (cmd->meta.source). */ | ||
782 | qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx]; | ||
783 | qcmd->meta.flags &= ~CMD_WANT_SKB; | ||
784 | } | ||
785 | fail: | ||
786 | if (cmd->meta.u.skb) { | ||
787 | dev_kfree_skb_any(cmd->meta.u.skb); | ||
788 | cmd->meta.u.skb = NULL; | ||
789 | } | ||
790 | out: | ||
791 | atomic_set(&entry, 0); | ||
792 | return ret; | ||
793 | } | ||
794 | |||
795 | int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | ||
796 | { | ||
797 | /* A command can not be asynchronous AND expect an SKB to be set. */ | ||
798 | BUG_ON((cmd->meta.flags & CMD_ASYNC) && | ||
799 | (cmd->meta.flags & CMD_WANT_SKB)); | ||
800 | |||
801 | if (cmd->meta.flags & CMD_ASYNC) | ||
802 | return iwl_send_cmd_async(priv, cmd); | ||
803 | |||
804 | return iwl_send_cmd_sync(priv, cmd); | ||
805 | } | ||
806 | |||
807 | int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data) | ||
808 | { | ||
809 | struct iwl_host_cmd cmd = { | ||
810 | .id = id, | ||
811 | .len = len, | ||
812 | .data = data, | ||
813 | }; | ||
814 | |||
815 | return iwl_send_cmd_sync(priv, &cmd); | ||
816 | } | ||
817 | |||
818 | static int __must_check iwl_send_cmd_u32(struct iwl_priv *priv, u8 id, u32 val) | ||
819 | { | ||
820 | struct iwl_host_cmd cmd = { | ||
821 | .id = id, | ||
822 | .len = sizeof(val), | ||
823 | .data = &val, | ||
824 | }; | ||
825 | |||
826 | return iwl_send_cmd_sync(priv, &cmd); | ||
827 | } | ||
828 | |||
829 | int iwl_send_statistics_request(struct iwl_priv *priv) | ||
830 | { | ||
831 | return iwl_send_cmd_u32(priv, REPLY_STATISTICS_CMD, 0); | ||
832 | } | ||
833 | |||
834 | /** | ||
835 | * iwl_rxon_add_station - add a station to the station table | ||
836 | * | ||
837 | * There is only one AP station, with id = IWL_AP_ID. | ||
838 | * NOTE: the mutex must be held before calling this function | ||
839 | */ | ||
840 | static int iwl_rxon_add_station(struct iwl_priv *priv, | ||
841 | const u8 *addr, int is_ap) | ||
842 | { | ||
843 | u8 rc; | ||
844 | |||
845 | /* Remove this station if it happens to already exist */ | ||
846 | iwl_remove_station(priv, addr, is_ap); | ||
847 | |||
848 | rc = iwl_add_station(priv, addr, is_ap, 0); | ||
849 | |||
850 | return rc; | ||
851 | } | ||
852 | |||
853 | /** | ||
854 | * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON | ||
855 | * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz | ||
856 | * @channel: Any channel valid for the requested phymode | ||
857 | * | ||
858 | * In addition to setting the staging RXON, priv->phymode is also set. | ||
859 | * | ||
860 | * NOTE: Does not commit to the hardware; it sets appropriate bit fields | ||
861 | * in the staging RXON flag structure based on the phymode | ||
862 | */ | ||
863 | static int iwl_set_rxon_channel(struct iwl_priv *priv, u8 phymode, u16 channel) | ||
864 | { | ||
865 | if (!iwl_get_channel_info(priv, phymode, channel)) { | ||
866 | IWL_DEBUG_INFO("Could not set channel to %d [%d]\n", | ||
867 | channel, phymode); | ||
868 | return -EINVAL; | ||
869 | } | ||
870 | |||
871 | if ((le16_to_cpu(priv->staging_rxon.channel) == channel) && | ||
872 | (priv->phymode == phymode)) | ||
873 | return 0; | ||
874 | |||
875 | priv->staging_rxon.channel = cpu_to_le16(channel); | ||
876 | if (phymode == MODE_IEEE80211A) | ||
877 | priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK; | ||
878 | else | ||
879 | priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK; | ||
880 | |||
881 | priv->phymode = phymode; | ||
882 | |||
883 | IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, phymode); | ||
884 | |||
885 | return 0; | ||
886 | } | ||
887 | |||
888 | /** | ||
889 | * iwl_check_rxon_cmd - validate RXON structure is valid | ||
890 | * | ||
891 | * NOTE: This is really only useful during development and can eventually | ||
892 | * be #ifdef'd out once the driver is stable and folks aren't actively | ||
893 | * making changes | ||
894 | */ | ||
895 | static int iwl_check_rxon_cmd(struct iwl_rxon_cmd *rxon) | ||
896 | { | ||
897 | int error = 0; | ||
898 | int counter = 1; | ||
899 | |||
900 | if (rxon->flags & RXON_FLG_BAND_24G_MSK) { | ||
901 | error |= le32_to_cpu(rxon->flags & | ||
902 | (RXON_FLG_TGJ_NARROW_BAND_MSK | | ||
903 | RXON_FLG_RADAR_DETECT_MSK)); | ||
904 | if (error) | ||
905 | IWL_WARNING("check 24G fields %d | %d\n", | ||
906 | counter++, error); | ||
907 | } else { | ||
908 | error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ? | ||
909 | 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK); | ||
910 | if (error) | ||
911 | IWL_WARNING("check 52 fields %d | %d\n", | ||
912 | counter++, error); | ||
913 | error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK); | ||
914 | if (error) | ||
915 | IWL_WARNING("check 52 CCK %d | %d\n", | ||
916 | counter++, error); | ||
917 | } | ||
918 | error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1; | ||
919 | if (error) | ||
920 | IWL_WARNING("check mac addr %d | %d\n", counter++, error); | ||
921 | |||
922 | /* make sure basic rates 6Mbps and 1Mbps are supported */ | ||
923 | error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) && | ||
924 | ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0)); | ||
925 | if (error) | ||
926 | IWL_WARNING("check basic rate %d | %d\n", counter++, error); | ||
927 | |||
928 | error |= (le16_to_cpu(rxon->assoc_id) > 2007); | ||
929 | if (error) | ||
930 | IWL_WARNING("check assoc id %d | %d\n", counter++, error); | ||
931 | |||
932 | error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) | ||
933 | == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)); | ||
934 | if (error) | ||
935 | IWL_WARNING("check CCK and short slot %d | %d\n", | ||
936 | counter++, error); | ||
937 | |||
938 | error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) | ||
939 | == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)); | ||
940 | if (error) | ||
941 | IWL_WARNING("check CCK & auto detect %d | %d\n", | ||
942 | counter++, error); | ||
943 | |||
944 | error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK | | ||
945 | RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK); | ||
946 | if (error) | ||
947 | IWL_WARNING("check TGG and auto detect %d | %d\n", | ||
948 | counter++, error); | ||
949 | |||
950 | if ((rxon->flags & RXON_FLG_DIS_DIV_MSK)) | ||
951 | error |= ((rxon->flags & (RXON_FLG_ANT_B_MSK | | ||
952 | RXON_FLG_ANT_A_MSK)) == 0); | ||
953 | if (error) | ||
954 | IWL_WARNING("check antenna %d %d\n", counter++, error); | ||
955 | |||
956 | if (error) | ||
957 | IWL_WARNING("Tuning to channel %d\n", | ||
958 | le16_to_cpu(rxon->channel)); | ||
959 | |||
960 | if (error) { | ||
961 | IWL_ERROR("Invalid iwl_rxon_assoc_cmd field values\n"); | ||
962 | return -1; | ||
963 | } | ||
964 | return 0; | ||
965 | } | ||
966 | |||
967 | /** | ||
968 | * iwl_full_rxon_required - determine if RXON_ASSOC can be used in RXON commit | ||
969 | * @priv: staging_rxon is compared to active_rxon | ||
970 | * | ||
971 | * If the RXON structure is changing sufficiently to require a new | ||
972 | * tune or to clear and reset the RXON_FILTER_ASSOC_MSK then return 1 | ||
973 | * to indicate a new tune is required. | ||
974 | */ | ||
975 | static int iwl_full_rxon_required(struct iwl_priv *priv) | ||
976 | { | ||
977 | |||
978 | /* These items are only settable from the full RXON command */ | ||
979 | if (!(priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) || | ||
980 | compare_ether_addr(priv->staging_rxon.bssid_addr, | ||
981 | priv->active_rxon.bssid_addr) || | ||
982 | compare_ether_addr(priv->staging_rxon.node_addr, | ||
983 | priv->active_rxon.node_addr) || | ||
984 | compare_ether_addr(priv->staging_rxon.wlap_bssid_addr, | ||
985 | priv->active_rxon.wlap_bssid_addr) || | ||
986 | (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) || | ||
987 | (priv->staging_rxon.channel != priv->active_rxon.channel) || | ||
988 | (priv->staging_rxon.air_propagation != | ||
989 | priv->active_rxon.air_propagation) || | ||
990 | (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id)) | ||
991 | return 1; | ||
992 | |||
993 | /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can | ||
994 | * be updated with the RXON_ASSOC command -- however only some | ||
995 | * flag transitions are allowed using RXON_ASSOC */ | ||
996 | |||
997 | /* Check if we are not switching bands */ | ||
998 | if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) != | ||
999 | (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)) | ||
1000 | return 1; | ||
1001 | |||
1002 | /* Check if we are switching association toggle */ | ||
1003 | if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) != | ||
1004 | (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) | ||
1005 | return 1; | ||
1006 | |||
1007 | return 0; | ||
1008 | } | ||
1009 | |||
1010 | static int iwl_send_rxon_assoc(struct iwl_priv *priv) | ||
1011 | { | ||
1012 | int rc = 0; | ||
1013 | struct iwl_rx_packet *res = NULL; | ||
1014 | struct iwl_rxon_assoc_cmd rxon_assoc; | ||
1015 | struct iwl_host_cmd cmd = { | ||
1016 | .id = REPLY_RXON_ASSOC, | ||
1017 | .len = sizeof(rxon_assoc), | ||
1018 | .meta.flags = CMD_WANT_SKB, | ||
1019 | .data = &rxon_assoc, | ||
1020 | }; | ||
1021 | const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon; | ||
1022 | const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon; | ||
1023 | |||
1024 | if ((rxon1->flags == rxon2->flags) && | ||
1025 | (rxon1->filter_flags == rxon2->filter_flags) && | ||
1026 | (rxon1->cck_basic_rates == rxon2->cck_basic_rates) && | ||
1027 | (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { | ||
1028 | IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n"); | ||
1029 | return 0; | ||
1030 | } | ||
1031 | |||
1032 | rxon_assoc.flags = priv->staging_rxon.flags; | ||
1033 | rxon_assoc.filter_flags = priv->staging_rxon.filter_flags; | ||
1034 | rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates; | ||
1035 | rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates; | ||
1036 | rxon_assoc.reserved = 0; | ||
1037 | |||
1038 | rc = iwl_send_cmd_sync(priv, &cmd); | ||
1039 | if (rc) | ||
1040 | return rc; | ||
1041 | |||
1042 | res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; | ||
1043 | if (res->hdr.flags & IWL_CMD_FAILED_MSK) { | ||
1044 | IWL_ERROR("Bad return from REPLY_RXON_ASSOC command\n"); | ||
1045 | rc = -EIO; | ||
1046 | } | ||
1047 | |||
1048 | priv->alloc_rxb_skb--; | ||
1049 | dev_kfree_skb_any(cmd.meta.u.skb); | ||
1050 | |||
1051 | return rc; | ||
1052 | } | ||
1053 | |||
1054 | /** | ||
1055 | * iwl_commit_rxon - commit staging_rxon to hardware | ||
1056 | * | ||
1057 | * The RXON command in staging_rxon is committed to the hardware and | ||
1058 | * the active_rxon structure is updated with the new data. This | ||
1059 | * function correctly transitions out of the RXON_ASSOC_MSK state if | ||
1060 | * a HW tune is required based on the RXON structure changes. | ||
1061 | */ | ||
1062 | static int iwl_commit_rxon(struct iwl_priv *priv) | ||
1063 | { | ||
1064 | /* cast away the const for active_rxon in this function */ | ||
1065 | struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon; | ||
1066 | int rc = 0; | ||
1067 | |||
1068 | if (!iwl_is_alive(priv)) | ||
1069 | return -1; | ||
1070 | |||
1071 | /* always get timestamp with Rx frame */ | ||
1072 | priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK; | ||
1073 | |||
1074 | /* select antenna */ | ||
1075 | priv->staging_rxon.flags &= | ||
1076 | ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK); | ||
1077 | priv->staging_rxon.flags |= iwl3945_get_antenna_flags(priv); | ||
1078 | |||
1079 | rc = iwl_check_rxon_cmd(&priv->staging_rxon); | ||
1080 | if (rc) { | ||
1081 | IWL_ERROR("Invalid RXON configuration. Not committing.\n"); | ||
1082 | return -EINVAL; | ||
1083 | } | ||
1084 | |||
1085 | /* If we don't need to send a full RXON, we can use | ||
1086 | * iwl_rxon_assoc_cmd which is used to reconfigure filter | ||
1087 | * and other flags for the current radio configuration. */ | ||
1088 | if (!iwl_full_rxon_required(priv)) { | ||
1089 | rc = iwl_send_rxon_assoc(priv); | ||
1090 | if (rc) { | ||
1091 | IWL_ERROR("Error setting RXON_ASSOC " | ||
1092 | "configuration (%d).\n", rc); | ||
1093 | return rc; | ||
1094 | } | ||
1095 | |||
1096 | memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); | ||
1097 | |||
1098 | return 0; | ||
1099 | } | ||
1100 | |||
1101 | /* If we are currently associated and the new config requires | ||
1102 | * an RXON_ASSOC and the new config wants the associated mask enabled, | ||
1103 | * we must clear the associated bit from the active configuration | ||
1104 | * before we apply the new config */ | ||
1105 | if (iwl_is_associated(priv) && | ||
1106 | (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) { | ||
1107 | IWL_DEBUG_INFO("Toggling associated bit on current RXON\n"); | ||
1108 | active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
1109 | |||
1110 | rc = iwl_send_cmd_pdu(priv, REPLY_RXON, | ||
1111 | sizeof(struct iwl_rxon_cmd), | ||
1112 | &priv->active_rxon); | ||
1113 | |||
1114 | /* If the mask clearing failed then we set | ||
1115 | * active_rxon back to what it was previously */ | ||
1116 | if (rc) { | ||
1117 | active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; | ||
1118 | IWL_ERROR("Error clearing ASSOC_MSK on current " | ||
1119 | "configuration (%d).\n", rc); | ||
1120 | return rc; | ||
1121 | } | ||
1122 | |||
1123 | /* The RXON bit toggling will have cleared out the | ||
1124 | * station table in the uCode, so blank it in the driver | ||
1125 | * as well */ | ||
1126 | iwl_clear_stations_table(priv); | ||
1127 | } else if (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) { | ||
1128 | /* When switching from non-associated to associated, the | ||
1129 | * uCode clears out the station table; so clear it in the | ||
1130 | * driver as well */ | ||
1131 | iwl_clear_stations_table(priv); | ||
1132 | } | ||
1133 | |||
1134 | IWL_DEBUG_INFO("Sending RXON\n" | ||
1135 | "* with%s RXON_FILTER_ASSOC_MSK\n" | ||
1136 | "* channel = %d\n" | ||
1137 | "* bssid = " MAC_FMT "\n", | ||
1138 | ((priv->staging_rxon.filter_flags & | ||
1139 | RXON_FILTER_ASSOC_MSK) ? "" : "out"), | ||
1140 | le16_to_cpu(priv->staging_rxon.channel), | ||
1141 | MAC_ARG(priv->staging_rxon.bssid_addr)); | ||
1142 | |||
1143 | /* Apply the new configuration */ | ||
1144 | rc = iwl_send_cmd_pdu(priv, REPLY_RXON, | ||
1145 | sizeof(struct iwl_rxon_cmd), &priv->staging_rxon); | ||
1146 | if (rc) { | ||
1147 | IWL_ERROR("Error setting new configuration (%d).\n", rc); | ||
1148 | return rc; | ||
1149 | } | ||
1150 | |||
1151 | memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); | ||
1152 | |||
1153 | /* If we issue a new RXON command which required a tune then we must | ||
1154 | * send a new TXPOWER command or we won't be able to Tx any frames */ | ||
1155 | rc = iwl_hw_reg_send_txpower(priv); | ||
1156 | if (rc) { | ||
1157 | IWL_ERROR("Error setting Tx power (%d).\n", rc); | ||
1158 | return rc; | ||
1159 | } | ||
1160 | |||
1161 | /* Add the broadcast address so we can send broadcast frames */ | ||
1162 | if (iwl_rxon_add_station(priv, BROADCAST_ADDR, 0) == | ||
1163 | IWL_INVALID_STATION) { | ||
1164 | IWL_ERROR("Error adding BROADCAST address for transmit.\n"); | ||
1165 | return -EIO; | ||
1166 | } | ||
1167 | |||
1168 | /* If we have set the ASSOC_MSK and we are in BSS mode then | ||
1169 | * add the IWL_AP_ID to the station rate table */ | ||
1170 | if (iwl_is_associated(priv) && | ||
1171 | (priv->iw_mode == IEEE80211_IF_TYPE_STA)) | ||
1172 | if (iwl_rxon_add_station(priv, priv->active_rxon.bssid_addr, 1) | ||
1173 | == IWL_INVALID_STATION) { | ||
1174 | IWL_ERROR("Error adding AP address for transmit.\n"); | ||
1175 | return -EIO; | ||
1176 | } | ||
1177 | |||
1178 | /* Init the hardware's rate fallback order based on the | ||
1179 | * phymode */ | ||
1180 | rc = iwl3945_init_hw_rate_table(priv); | ||
1181 | if (rc) { | ||
1182 | IWL_ERROR("Error setting HW rate table: %02X\n", rc); | ||
1183 | return -EIO; | ||
1184 | } | ||
1185 | |||
1186 | return 0; | ||
1187 | } | ||
1188 | |||
1189 | static int iwl_send_bt_config(struct iwl_priv *priv) | ||
1190 | { | ||
1191 | struct iwl_bt_cmd bt_cmd = { | ||
1192 | .flags = 3, | ||
1193 | .lead_time = 0xAA, | ||
1194 | .max_kill = 1, | ||
1195 | .kill_ack_mask = 0, | ||
1196 | .kill_cts_mask = 0, | ||
1197 | }; | ||
1198 | |||
1199 | return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, | ||
1200 | sizeof(struct iwl_bt_cmd), &bt_cmd); | ||
1201 | } | ||
1202 | |||
1203 | static int iwl_send_scan_abort(struct iwl_priv *priv) | ||
1204 | { | ||
1205 | int rc = 0; | ||
1206 | struct iwl_rx_packet *res; | ||
1207 | struct iwl_host_cmd cmd = { | ||
1208 | .id = REPLY_SCAN_ABORT_CMD, | ||
1209 | .meta.flags = CMD_WANT_SKB, | ||
1210 | }; | ||
1211 | |||
1212 | /* If there isn't a scan actively going on in the hardware | ||
1213 | * then we are in between scan bands and not actually | ||
1214 | * actively scanning, so don't send the abort command */ | ||
1215 | if (!test_bit(STATUS_SCAN_HW, &priv->status)) { | ||
1216 | clear_bit(STATUS_SCAN_ABORTING, &priv->status); | ||
1217 | return 0; | ||
1218 | } | ||
1219 | |||
1220 | rc = iwl_send_cmd_sync(priv, &cmd); | ||
1221 | if (rc) { | ||
1222 | clear_bit(STATUS_SCAN_ABORTING, &priv->status); | ||
1223 | return rc; | ||
1224 | } | ||
1225 | |||
1226 | res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; | ||
1227 | if (res->u.status != CAN_ABORT_STATUS) { | ||
1228 | /* The scan abort will return 1 for success or | ||
1229 | * 2 for "failure". A failure condition can be | ||
1230 | * due to simply not being in an active scan which | ||
1231 | * can occur if we send the scan abort before | ||
1232 | * the microcode has notified us that a scan is | ||
1233 | * completed. */ | ||
1234 | IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status); | ||
1235 | clear_bit(STATUS_SCAN_ABORTING, &priv->status); | ||
1236 | clear_bit(STATUS_SCAN_HW, &priv->status); | ||
1237 | } | ||
1238 | |||
1239 | dev_kfree_skb_any(cmd.meta.u.skb); | ||
1240 | |||
1241 | return rc; | ||
1242 | } | ||
1243 | |||
1244 | static int iwl_card_state_sync_callback(struct iwl_priv *priv, | ||
1245 | struct iwl_cmd *cmd, | ||
1246 | struct sk_buff *skb) | ||
1247 | { | ||
1248 | return 1; | ||
1249 | } | ||
1250 | |||
1251 | /* | ||
1252 | * CARD_STATE_CMD | ||
1253 | * | ||
1254 | * Use: Sets the internal card state to enable, disable, or halt | ||
1255 | * | ||
1256 | * When in the 'enable' state the card operates as normal. | ||
1257 | * When in the 'disable' state, the card enters into a low power mode. | ||
1258 | * When in the 'halt' state, the card is shut down and must be fully | ||
1259 | * restarted to come back on. | ||
1260 | */ | ||
1261 | static int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag) | ||
1262 | { | ||
1263 | struct iwl_host_cmd cmd = { | ||
1264 | .id = REPLY_CARD_STATE_CMD, | ||
1265 | .len = sizeof(u32), | ||
1266 | .data = &flags, | ||
1267 | .meta.flags = meta_flag, | ||
1268 | }; | ||
1269 | |||
1270 | if (meta_flag & CMD_ASYNC) | ||
1271 | cmd.meta.u.callback = iwl_card_state_sync_callback; | ||
1272 | |||
1273 | return iwl_send_cmd(priv, &cmd); | ||
1274 | } | ||
1275 | |||
1276 | static int iwl_add_sta_sync_callback(struct iwl_priv *priv, | ||
1277 | struct iwl_cmd *cmd, struct sk_buff *skb) | ||
1278 | { | ||
1279 | struct iwl_rx_packet *res = NULL; | ||
1280 | |||
1281 | if (!skb) { | ||
1282 | IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n"); | ||
1283 | return 1; | ||
1284 | } | ||
1285 | |||
1286 | res = (struct iwl_rx_packet *)skb->data; | ||
1287 | if (res->hdr.flags & IWL_CMD_FAILED_MSK) { | ||
1288 | IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n", | ||
1289 | res->hdr.flags); | ||
1290 | return 1; | ||
1291 | } | ||
1292 | |||
1293 | switch (res->u.add_sta.status) { | ||
1294 | case ADD_STA_SUCCESS_MSK: | ||
1295 | break; | ||
1296 | default: | ||
1297 | break; | ||
1298 | } | ||
1299 | |||
1300 | /* We didn't cache the SKB; let the caller free it */ | ||
1301 | return 1; | ||
1302 | } | ||
1303 | |||
1304 | int iwl_send_add_station(struct iwl_priv *priv, | ||
1305 | struct iwl_addsta_cmd *sta, u8 flags) | ||
1306 | { | ||
1307 | struct iwl_rx_packet *res = NULL; | ||
1308 | int rc = 0; | ||
1309 | struct iwl_host_cmd cmd = { | ||
1310 | .id = REPLY_ADD_STA, | ||
1311 | .len = sizeof(struct iwl_addsta_cmd), | ||
1312 | .meta.flags = flags, | ||
1313 | .data = sta, | ||
1314 | }; | ||
1315 | |||
1316 | if (flags & CMD_ASYNC) | ||
1317 | cmd.meta.u.callback = iwl_add_sta_sync_callback; | ||
1318 | else | ||
1319 | cmd.meta.flags |= CMD_WANT_SKB; | ||
1320 | |||
1321 | rc = iwl_send_cmd(priv, &cmd); | ||
1322 | |||
1323 | if (rc || (flags & CMD_ASYNC)) | ||
1324 | return rc; | ||
1325 | |||
1326 | res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; | ||
1327 | if (res->hdr.flags & IWL_CMD_FAILED_MSK) { | ||
1328 | IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n", | ||
1329 | res->hdr.flags); | ||
1330 | rc = -EIO; | ||
1331 | } | ||
1332 | |||
1333 | if (rc == 0) { | ||
1334 | switch (res->u.add_sta.status) { | ||
1335 | case ADD_STA_SUCCESS_MSK: | ||
1336 | IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n"); | ||
1337 | break; | ||
1338 | default: | ||
1339 | rc = -EIO; | ||
1340 | IWL_WARNING("REPLY_ADD_STA failed\n"); | ||
1341 | break; | ||
1342 | } | ||
1343 | } | ||
1344 | |||
1345 | priv->alloc_rxb_skb--; | ||
1346 | dev_kfree_skb_any(cmd.meta.u.skb); | ||
1347 | |||
1348 | return rc; | ||
1349 | } | ||
1350 | |||
1351 | static int iwl_update_sta_key_info(struct iwl_priv *priv, | ||
1352 | struct ieee80211_key_conf *keyconf, | ||
1353 | u8 sta_id) | ||
1354 | { | ||
1355 | unsigned long flags; | ||
1356 | __le16 key_flags = 0; | ||
1357 | |||
1358 | switch (keyconf->alg) { | ||
1359 | case ALG_CCMP: | ||
1360 | key_flags |= STA_KEY_FLG_CCMP; | ||
1361 | key_flags |= cpu_to_le16( | ||
1362 | keyconf->keyidx << STA_KEY_FLG_KEYID_POS); | ||
1363 | key_flags &= ~STA_KEY_FLG_INVALID; | ||
1364 | break; | ||
1365 | case ALG_TKIP: | ||
1366 | case ALG_WEP: | ||
1367 | return -EINVAL; | ||
1368 | default: | ||
1369 | return -EINVAL; | ||
1370 | } | ||
1371 | spin_lock_irqsave(&priv->sta_lock, flags); | ||
1372 | priv->stations[sta_id].keyinfo.alg = keyconf->alg; | ||
1373 | priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; | ||
1374 | memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, | ||
1375 | keyconf->keylen); | ||
1376 | |||
1377 | memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, | ||
1378 | keyconf->keylen); | ||
1379 | priv->stations[sta_id].sta.key.key_flags = key_flags; | ||
1380 | priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; | ||
1381 | priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; | ||
1382 | |||
1383 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
1384 | |||
1385 | IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n"); | ||
1386 | iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0); | ||
1387 | return 0; | ||
1388 | } | ||
1389 | |||
1390 | static int iwl_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id) | ||
1391 | { | ||
1392 | unsigned long flags; | ||
1393 | |||
1394 | spin_lock_irqsave(&priv->sta_lock, flags); | ||
1395 | memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key)); | ||
1396 | memset(&priv->stations[sta_id].sta.key, 0, sizeof(struct iwl_keyinfo)); | ||
1397 | priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; | ||
1398 | priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; | ||
1399 | priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; | ||
1400 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
1401 | |||
1402 | IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n"); | ||
1403 | iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0); | ||
1404 | return 0; | ||
1405 | } | ||
1406 | |||
1407 | static void iwl_clear_free_frames(struct iwl_priv *priv) | ||
1408 | { | ||
1409 | struct list_head *element; | ||
1410 | |||
1411 | IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n", | ||
1412 | priv->frames_count); | ||
1413 | |||
1414 | while (!list_empty(&priv->free_frames)) { | ||
1415 | element = priv->free_frames.next; | ||
1416 | list_del(element); | ||
1417 | kfree(list_entry(element, struct iwl_frame, list)); | ||
1418 | priv->frames_count--; | ||
1419 | } | ||
1420 | |||
1421 | if (priv->frames_count) { | ||
1422 | IWL_WARNING("%d frames still in use. Did we lose one?\n", | ||
1423 | priv->frames_count); | ||
1424 | priv->frames_count = 0; | ||
1425 | } | ||
1426 | } | ||
1427 | |||
1428 | static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv) | ||
1429 | { | ||
1430 | struct iwl_frame *frame; | ||
1431 | struct list_head *element; | ||
1432 | if (list_empty(&priv->free_frames)) { | ||
1433 | frame = kzalloc(sizeof(*frame), GFP_KERNEL); | ||
1434 | if (!frame) { | ||
1435 | IWL_ERROR("Could not allocate frame!\n"); | ||
1436 | return NULL; | ||
1437 | } | ||
1438 | |||
1439 | priv->frames_count++; | ||
1440 | return frame; | ||
1441 | } | ||
1442 | |||
1443 | element = priv->free_frames.next; | ||
1444 | list_del(element); | ||
1445 | return list_entry(element, struct iwl_frame, list); | ||
1446 | } | ||
1447 | |||
1448 | static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame) | ||
1449 | { | ||
1450 | memset(frame, 0, sizeof(*frame)); | ||
1451 | list_add(&frame->list, &priv->free_frames); | ||
1452 | } | ||
1453 | |||
1454 | unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv, | ||
1455 | struct ieee80211_hdr *hdr, | ||
1456 | const u8 *dest, int left) | ||
1457 | { | ||
1458 | |||
1459 | if (!iwl_is_associated(priv) || !priv->ibss_beacon || | ||
1460 | ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) && | ||
1461 | (priv->iw_mode != IEEE80211_IF_TYPE_AP))) | ||
1462 | return 0; | ||
1463 | |||
1464 | if (priv->ibss_beacon->len > left) | ||
1465 | return 0; | ||
1466 | |||
1467 | memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len); | ||
1468 | |||
1469 | return priv->ibss_beacon->len; | ||
1470 | } | ||
1471 | |||
1472 | static int iwl_rate_index_from_plcp(int plcp) | ||
1473 | { | ||
1474 | int i = 0; | ||
1475 | |||
1476 | for (i = 0; i < IWL_RATE_COUNT; i++) | ||
1477 | if (iwl_rates[i].plcp == plcp) | ||
1478 | return i; | ||
1479 | return -1; | ||
1480 | } | ||
1481 | |||
1482 | static u8 iwl_rate_get_lowest_plcp(int rate_mask) | ||
1483 | { | ||
1484 | u8 i; | ||
1485 | |||
1486 | for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID; | ||
1487 | i = iwl_rates[i].next_ieee) { | ||
1488 | if (rate_mask & (1 << i)) | ||
1489 | return iwl_rates[i].plcp; | ||
1490 | } | ||
1491 | |||
1492 | return IWL_RATE_INVALID; | ||
1493 | } | ||
1494 | |||
1495 | static int iwl_send_beacon_cmd(struct iwl_priv *priv) | ||
1496 | { | ||
1497 | struct iwl_frame *frame; | ||
1498 | unsigned int frame_size; | ||
1499 | int rc; | ||
1500 | u8 rate; | ||
1501 | |||
1502 | frame = iwl_get_free_frame(priv); | ||
1503 | |||
1504 | if (!frame) { | ||
1505 | IWL_ERROR("Could not obtain free frame buffer for beacon " | ||
1506 | "command.\n"); | ||
1507 | return -ENOMEM; | ||
1508 | } | ||
1509 | |||
1510 | if (!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)) { | ||
1511 | rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic & | ||
1512 | 0xFF0); | ||
1513 | if (rate == IWL_INVALID_RATE) | ||
1514 | rate = IWL_RATE_6M_PLCP; | ||
1515 | } else { | ||
1516 | rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic & 0xF); | ||
1517 | if (rate == IWL_INVALID_RATE) | ||
1518 | rate = IWL_RATE_1M_PLCP; | ||
1519 | } | ||
1520 | |||
1521 | frame_size = iwl_hw_get_beacon_cmd(priv, frame, rate); | ||
1522 | |||
1523 | rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, | ||
1524 | &frame->u.cmd[0]); | ||
1525 | |||
1526 | iwl_free_frame(priv, frame); | ||
1527 | |||
1528 | return rc; | ||
1529 | } | ||
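/*
 * On the rate-mask arithmetic above: priv->active_rate_basic is indexed by
 * the driver's rate table, where the low four bits cover the CCK rates
 * (1/2/5.5/11M) and the higher bits the OFDM rates, consistent with the
 * 1M PLCP fallback chosen for the 0xF (2.4 GHz) branch and the 6M PLCP
 * fallback chosen for the 0xFF0 branch.
 */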
1530 | |||
1531 | /****************************************************************************** | ||
1532 | * | ||
1533 | * EEPROM related functions | ||
1534 | * | ||
1535 | ******************************************************************************/ | ||
1536 | |||
1537 | static void get_eeprom_mac(struct iwl_priv *priv, u8 *mac) | ||
1538 | { | ||
1539 | memcpy(mac, priv->eeprom.mac_address, 6); | ||
1540 | } | ||
1541 | |||
1542 | /** | ||
1543 | * iwl_eeprom_init - read EEPROM contents | ||
1544 | * | ||
1545 | * Load the EEPROM from adapter into priv->eeprom | ||
1546 | * | ||
1547 | * NOTE: This routine uses the non-debug IO access functions. | ||
1548 | */ | ||
1549 | int iwl_eeprom_init(struct iwl_priv *priv) | ||
1550 | { | ||
1551 | u16 *e = (u16 *)&priv->eeprom; | ||
1552 | u32 gp = iwl_read32(priv, CSR_EEPROM_GP); | ||
1553 | u32 r; | ||
1554 | int sz = sizeof(priv->eeprom); | ||
1555 | int rc; | ||
1556 | int i; | ||
1557 | u16 addr; | ||
1558 | |||
1559 | /* The EEPROM structure has several padding buffers within it | ||
1560 | * and, when adding new EEPROM maps, is subject to programmer errors | ||
1561 | * which may be very difficult to identify without explicitly | ||
1562 | * checking the resulting size of the eeprom map. */ | ||
1563 | BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE); | ||
1564 | |||
1565 | if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) { | ||
1566 | IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp); | ||
1567 | return -ENOENT; | ||
1568 | } | ||
1569 | |||
1570 | rc = iwl_eeprom_aqcuire_semaphore(priv); | ||
1571 | if (rc < 0) { | ||
1572 | IWL_ERROR("Failed to aqcuire EEPROM semaphore.\n"); | ||
1573 | return -ENOENT; | ||
1574 | } | ||
1575 | |||
1576 | /* eeprom is an array of 16bit values */ | ||
1577 | for (addr = 0; addr < sz; addr += sizeof(u16)) { | ||
1578 | _iwl_write32(priv, CSR_EEPROM_REG, addr << 1); | ||
1579 | _iwl_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD); | ||
1580 | |||
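     | /* poll for the read-valid bit, checking every | ||
     |  * IWL_EEPROM_ACCESS_DELAY usec up to IWL_EEPROM_ACCESS_TIMEOUT */ | ||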
1581 | for (i = 0; i < IWL_EEPROM_ACCESS_TIMEOUT; | ||
1582 | i += IWL_EEPROM_ACCESS_DELAY) { | ||
1583 | r = _iwl_read_restricted(priv, CSR_EEPROM_REG); | ||
1584 | if (r & CSR_EEPROM_REG_READ_VALID_MSK) | ||
1585 | break; | ||
1586 | udelay(IWL_EEPROM_ACCESS_DELAY); | ||
1587 | } | ||
1588 | |||
1589 | if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) { | ||
1590 | IWL_ERROR("Time out reading EEPROM[%d]", addr); | ||
1591 | return -ETIMEDOUT; | ||
1592 | } | ||
1593 | e[addr / 2] = le16_to_cpu(r >> 16); | ||
1594 | } | ||
1595 | |||
1596 | return 0; | ||
1597 | } | ||
1598 | |||
1599 | /****************************************************************************** | ||
1600 | * | ||
1601 | * Misc. internal state and helper functions | ||
1602 | * | ||
1603 | ******************************************************************************/ | ||
1604 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1605 | |||
1606 | /** | ||
1607 | * iwl_report_frame - dump frame to syslog during debug sessions | ||
1608 | * | ||
1609 | * hack this function to show different aspects of received frames, | ||
1610 | * including selective frame dumps. | ||
1611 | * group100 parameter selects whether to show 1 out of 100 good frames. | ||
1612 | * | ||
1613 | * TODO: ieee80211_hdr stuff is common to 3945 and 4965, so frame type | ||
1614 | * info output is okay, but some of this stuff (e.g. iwl_rx_frame_stats) | ||
1615 | * is 3945-specific and gives bad output for 4965. Need to split the | ||
1616 | * functionality, keep common stuff here. | ||
1617 | */ | ||
1618 | void iwl_report_frame(struct iwl_priv *priv, | ||
1619 | struct iwl_rx_packet *pkt, | ||
1620 | struct ieee80211_hdr *header, int group100) | ||
1621 | { | ||
1622 | u32 to_us; | ||
1623 | u32 print_summary = 0; | ||
1624 | u32 print_dump = 0; /* set to 1 to dump all frames' contents */ | ||
1625 | u32 hundred = 0; | ||
1626 | u32 dataframe = 0; | ||
1627 | u16 fc; | ||
1628 | u16 seq_ctl; | ||
1629 | u16 channel; | ||
1630 | u16 phy_flags; | ||
1631 | int rate_sym; | ||
1632 | u16 length; | ||
1633 | u16 status; | ||
1634 | u16 bcn_tmr; | ||
1635 | u32 tsf_low; | ||
1636 | u64 tsf; | ||
1637 | u8 rssi; | ||
1638 | u8 agc; | ||
1639 | u16 sig_avg; | ||
1640 | u16 noise_diff; | ||
1641 | struct iwl_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); | ||
1642 | struct iwl_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); | ||
1643 | struct iwl_rx_frame_end *rx_end = IWL_RX_END(pkt); | ||
1644 | u8 *data = IWL_RX_DATA(pkt); | ||
1645 | |||
1646 | /* MAC header */ | ||
1647 | fc = le16_to_cpu(header->frame_control); | ||
1648 | seq_ctl = le16_to_cpu(header->seq_ctrl); | ||
1649 | |||
1650 | /* metadata */ | ||
1651 | channel = le16_to_cpu(rx_hdr->channel); | ||
1652 | phy_flags = le16_to_cpu(rx_hdr->phy_flags); | ||
1653 | rate_sym = rx_hdr->rate; | ||
1654 | length = le16_to_cpu(rx_hdr->len); | ||
1655 | |||
1656 | /* end-of-frame status and timestamp */ | ||
1657 | status = le32_to_cpu(rx_end->status); | ||
1658 | bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp); | ||
1659 | tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff; | ||
1660 | tsf = le64_to_cpu(rx_end->timestamp); | ||
1661 | |||
1662 | /* signal statistics */ | ||
1663 | rssi = rx_stats->rssi; | ||
1664 | agc = rx_stats->agc; | ||
1665 | sig_avg = le16_to_cpu(rx_stats->sig_avg); | ||
1666 | noise_diff = le16_to_cpu(rx_stats->noise_diff); | ||
1667 | |||
1668 | to_us = !compare_ether_addr(header->addr1, priv->mac_addr); | ||
1669 | |||
1670 | /* if data frame is to us and all is good, | ||
1671 | * (optionally) print summary for only 1 out of every 100 */ | ||
1672 | if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) == | ||
1673 | (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) { | ||
1674 | dataframe = 1; | ||
1675 | if (!group100) | ||
1676 | print_summary = 1; /* print each frame */ | ||
1677 | else if (priv->framecnt_to_us < 100) { | ||
1678 | priv->framecnt_to_us++; | ||
1679 | print_summary = 0; | ||
1680 | } else { | ||
1681 | priv->framecnt_to_us = 0; | ||
1682 | print_summary = 1; | ||
1683 | hundred = 1; | ||
1684 | } | ||
1685 | } else { | ||
1686 | /* print summary for all other frames */ | ||
1687 | print_summary = 1; | ||
1688 | } | ||
1689 | |||
1690 | if (print_summary) { | ||
1691 | char *title; | ||
1692 | u32 rate; | ||
1693 | |||
1694 | if (hundred) | ||
1695 | title = "100Frames"; | ||
1696 | else if (fc & IEEE80211_FCTL_RETRY) | ||
1697 | title = "Retry"; | ||
1698 | else if (ieee80211_is_assoc_response(fc)) | ||
1699 | title = "AscRsp"; | ||
1700 | else if (ieee80211_is_reassoc_response(fc)) | ||
1701 | title = "RasRsp"; | ||
1702 | else if (ieee80211_is_probe_response(fc)) { | ||
1703 | title = "PrbRsp"; | ||
1704 | print_dump = 1; /* dump frame contents */ | ||
1705 | } else if (ieee80211_is_beacon(fc)) { | ||
1706 | title = "Beacon"; | ||
1707 | print_dump = 1; /* dump frame contents */ | ||
1708 | } else if (ieee80211_is_atim(fc)) | ||
1709 | title = "ATIM"; | ||
1710 | else if (ieee80211_is_auth(fc)) | ||
1711 | title = "Auth"; | ||
1712 | else if (ieee80211_is_deauth(fc)) | ||
1713 | title = "DeAuth"; | ||
1714 | else if (ieee80211_is_disassoc(fc)) | ||
1715 | title = "DisAssoc"; | ||
1716 | else | ||
1717 | title = "Frame"; | ||
1718 | |||
1719 | rate = iwl_rate_index_from_plcp(rate_sym); | ||
1720 | if (rate == -1) | ||
1721 | rate = 0; | ||
1722 | else | ||
1723 | rate = iwl_rates[rate].ieee / 2; | ||
1724 | |||
1725 | /* print frame summary. | ||
1726 | * MAC addresses show just the last byte (for brevity), | ||
1727 | * but you can hack it to show more, if you'd like to. */ | ||
1728 | if (dataframe) | ||
1729 | IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, " | ||
1730 | "len=%u, rssi=%d, chnl=%d, rate=%u, \n", | ||
1731 | title, fc, header->addr1[5], | ||
1732 | length, rssi, channel, rate); | ||
1733 | else { | ||
1734 | /* src/dst addresses assume managed mode */ | ||
1735 | IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, " | ||
1736 | "src=0x%02x, rssi=%u, tim=%lu usec, " | ||
1737 | "phy=0x%02x, chnl=%d\n", | ||
1738 | title, fc, header->addr1[5], | ||
1739 | header->addr3[5], rssi, | ||
1740 | tsf_low - priv->scan_start_tsf, | ||
1741 | phy_flags, channel); | ||
1742 | } | ||
1743 | } | ||
1744 | if (print_dump) | ||
1745 | iwl_print_hex_dump(IWL_DL_RX, data, length); | ||
1746 | } | ||
1747 | #endif | ||
1748 | |||
1749 | static void iwl_unset_hw_setting(struct iwl_priv *priv) | ||
1750 | { | ||
1751 | if (priv->hw_setting.shared_virt) | ||
1752 | pci_free_consistent(priv->pci_dev, | ||
1753 | sizeof(struct iwl_shared), | ||
1754 | priv->hw_setting.shared_virt, | ||
1755 | priv->hw_setting.shared_phys); | ||
1756 | } | ||
1757 | |||
1758 | /** | ||
1759 | * iwl_supported_rate_to_ie - fill in the supported rate in IE field | ||
1760 | * | ||
1761 | * Return: bitmask with a bit set for each supported rate inserted in the IE | ||
1762 | */ | ||
1763 | static u16 iwl_supported_rate_to_ie(u8 *ie, u16 supported_rate, | ||
1764 | u16 basic_rate, int max_count) | ||
1765 | { | ||
1766 | u16 ret_rates = 0, bit; | ||
1767 | int i; | ||
1768 | u8 *rates; | ||
1769 | |||
1770 | rates = &(ie[1]); | ||
1771 | |||
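     | /* the high bit of each rate octet in the (Ext) Supported Rates IE | ||
     |  * marks that rate as a basic rate */ | ||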
1772 | for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) { | ||
1773 | if (bit & supported_rate) { | ||
1774 | ret_rates |= bit; | ||
1775 | rates[*ie] = iwl_rates[i].ieee | | ||
1776 | ((bit & basic_rate) ? 0x80 : 0x00); | ||
1777 | *ie = *ie + 1; | ||
1778 | if (*ie >= max_count) | ||
1779 | break; | ||
1780 | } | ||
1781 | } | ||
1782 | |||
1783 | return ret_rates; | ||
1784 | } | ||
1785 | |||
1786 | /** | ||
1787 | * iwl_fill_probe_req - fill in all required fields and IE for probe request | ||
1788 | */ | ||
1789 | static u16 iwl_fill_probe_req(struct iwl_priv *priv, | ||
1790 | struct ieee80211_mgmt *frame, | ||
1791 | int left, int is_direct) | ||
1792 | { | ||
1793 | int len = 0; | ||
1794 | u8 *pos = NULL; | ||
1795 | u16 ret_rates; | ||
1796 | |||
1797 | /* Make sure there is enough space for the probe request, | ||
1798 | * two mandatory IEs and the data */ | ||
1799 | left -= 24; | ||
1800 | if (left < 0) | ||
1801 | return 0; | ||
1802 | len += 24; | ||
1803 | |||
1804 | frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); | ||
1805 | memcpy(frame->da, BROADCAST_ADDR, ETH_ALEN); | ||
1806 | memcpy(frame->sa, priv->mac_addr, ETH_ALEN); | ||
1807 | memcpy(frame->bssid, BROADCAST_ADDR, ETH_ALEN); | ||
1808 | frame->seq_ctrl = 0; | ||
1809 | |||
1810 | /* fill in our indirect SSID IE */ | ||
1811 | /* ...next IE... */ | ||
1812 | |||
1813 | left -= 2; | ||
1814 | if (left < 0) | ||
1815 | return 0; | ||
1816 | len += 2; | ||
1817 | pos = &(frame->u.probe_req.variable[0]); | ||
1818 | *pos++ = WLAN_EID_SSID; | ||
1819 | *pos++ = 0; | ||
1820 | |||
1821 | /* fill in our direct SSID IE... */ | ||
1822 | if (is_direct) { | ||
1823 | /* ...next IE... */ | ||
1824 | left -= 2 + priv->essid_len; | ||
1825 | if (left < 0) | ||
1826 | return 0; | ||
1827 | /* ... fill it in... */ | ||
1828 | *pos++ = WLAN_EID_SSID; | ||
1829 | *pos++ = priv->essid_len; | ||
1830 | memcpy(pos, priv->essid, priv->essid_len); | ||
1831 | pos += priv->essid_len; | ||
1832 | len += 2 + priv->essid_len; | ||
1833 | } | ||
1834 | |||
1835 | /* fill in supported rate */ | ||
1836 | /* ...next IE... */ | ||
1837 | left -= 2; | ||
1838 | if (left < 0) | ||
1839 | return 0; | ||
1840 | /* ... fill it in... */ | ||
1841 | *pos++ = WLAN_EID_SUPP_RATES; | ||
1842 | *pos = 0; | ||
1843 | ret_rates = priv->active_rate = priv->rates_mask; | ||
1844 | priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; | ||
1845 | |||
1846 | iwl_supported_rate_to_ie(pos, priv->active_rate, | ||
1847 | priv->active_rate_basic, left); | ||
1848 | len += 2 + *pos; | ||
1849 | pos += (*pos) + 1; | ||
1850 | ret_rates = ~ret_rates & priv->active_rate; | ||
1851 | |||
1852 | if (ret_rates == 0) | ||
1853 | goto fill_end; | ||
1854 | |||
1855 | /* fill in supported extended rate */ | ||
1856 | /* ...next IE... */ | ||
1857 | left -= 2; | ||
1858 | if (left < 0) | ||
1859 | return 0; | ||
1860 | /* ... fill it in... */ | ||
1861 | *pos++ = WLAN_EID_EXT_SUPP_RATES; | ||
1862 | *pos = 0; | ||
1863 | iwl_supported_rate_to_ie(pos, ret_rates, priv->active_rate_basic, left); | ||
1864 | if (*pos > 0) | ||
1865 | len += 2 + *pos; | ||
1866 | |||
1867 | fill_end: | ||
1868 | return (u16)len; | ||
1869 | } | ||
1870 | |||
1871 | /* | ||
1872 | * QoS support | ||
1873 | */ | ||
1874 | #ifdef CONFIG_IWLWIFI_QOS | ||
1875 | static int iwl_send_qos_params_command(struct iwl_priv *priv, | ||
1876 | struct iwl_qosparam_cmd *qos) | ||
1877 | { | ||
1878 | |||
1879 | return iwl_send_cmd_pdu(priv, REPLY_QOS_PARAM, | ||
1880 | sizeof(struct iwl_qosparam_cmd), qos); | ||
1881 | } | ||
1882 | |||
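     | /* Reset QoS to default EDCA parameters: AC[0] gets the base CW/AIFS | ||
     |  * values; when QoS is active, AC[1] gets a larger AIFS while AC[2] and | ||
     |  * AC[3] get smaller contention windows and TXOP limits, otherwise all | ||
     |  * four ACs share the base values. */ | ||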
1883 | static void iwl_reset_qos(struct iwl_priv *priv) | ||
1884 | { | ||
1885 | u16 cw_min = 15; | ||
1886 | u16 cw_max = 1023; | ||
1887 | u8 aifs = 2; | ||
1888 | u8 is_legacy = 0; | ||
1889 | unsigned long flags; | ||
1890 | int i; | ||
1891 | |||
1892 | spin_lock_irqsave(&priv->lock, flags); | ||
1893 | priv->qos_data.qos_active = 0; | ||
1894 | |||
1895 | if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) { | ||
1896 | if (priv->qos_data.qos_enable) | ||
1897 | priv->qos_data.qos_active = 1; | ||
1898 | if (!(priv->active_rate & 0xfff0)) { | ||
1899 | cw_min = 31; | ||
1900 | is_legacy = 1; | ||
1901 | } | ||
1902 | } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { | ||
1903 | if (priv->qos_data.qos_enable) | ||
1904 | priv->qos_data.qos_active = 1; | ||
1905 | } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) { | ||
1906 | cw_min = 31; | ||
1907 | is_legacy = 1; | ||
1908 | } | ||
1909 | |||
1910 | if (priv->qos_data.qos_active) | ||
1911 | aifs = 3; | ||
1912 | |||
1913 | priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min); | ||
1914 | priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max); | ||
1915 | priv->qos_data.def_qos_parm.ac[0].aifsn = aifs; | ||
1916 | priv->qos_data.def_qos_parm.ac[0].edca_txop = 0; | ||
1917 | priv->qos_data.def_qos_parm.ac[0].reserved1 = 0; | ||
1918 | |||
1919 | if (priv->qos_data.qos_active) { | ||
1920 | i = 1; | ||
1921 | priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min); | ||
1922 | priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max); | ||
1923 | priv->qos_data.def_qos_parm.ac[i].aifsn = 7; | ||
1924 | priv->qos_data.def_qos_parm.ac[i].edca_txop = 0; | ||
1925 | priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; | ||
1926 | |||
1927 | i = 2; | ||
1928 | priv->qos_data.def_qos_parm.ac[i].cw_min = | ||
1929 | cpu_to_le16((cw_min + 1) / 2 - 1); | ||
1930 | priv->qos_data.def_qos_parm.ac[i].cw_max = | ||
1931 | cpu_to_le16(cw_max); | ||
1932 | priv->qos_data.def_qos_parm.ac[i].aifsn = 2; | ||
1933 | if (is_legacy) | ||
1934 | priv->qos_data.def_qos_parm.ac[i].edca_txop = | ||
1935 | cpu_to_le16(6016); | ||
1936 | else | ||
1937 | priv->qos_data.def_qos_parm.ac[i].edca_txop = | ||
1938 | cpu_to_le16(3008); | ||
1939 | priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; | ||
1940 | |||
1941 | i = 3; | ||
1942 | priv->qos_data.def_qos_parm.ac[i].cw_min = | ||
1943 | cpu_to_le16((cw_min + 1) / 4 - 1); | ||
1944 | priv->qos_data.def_qos_parm.ac[i].cw_max = | ||
1945 | cpu_to_le16((cw_max + 1) / 2 - 1); | ||
1946 | priv->qos_data.def_qos_parm.ac[i].aifsn = 2; | ||
1947 | priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; | ||
1948 | if (is_legacy) | ||
1949 | priv->qos_data.def_qos_parm.ac[i].edca_txop = | ||
1950 | cpu_to_le16(3264); | ||
1951 | else | ||
1952 | priv->qos_data.def_qos_parm.ac[i].edca_txop = | ||
1953 | cpu_to_le16(1504); | ||
1954 | } else { | ||
1955 | for (i = 1; i < 4; i++) { | ||
1956 | priv->qos_data.def_qos_parm.ac[i].cw_min = | ||
1957 | cpu_to_le16(cw_min); | ||
1958 | priv->qos_data.def_qos_parm.ac[i].cw_max = | ||
1959 | cpu_to_le16(cw_max); | ||
1960 | priv->qos_data.def_qos_parm.ac[i].aifsn = aifs; | ||
1961 | priv->qos_data.def_qos_parm.ac[i].edca_txop = 0; | ||
1962 | priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; | ||
1963 | } | ||
1964 | } | ||
1965 | IWL_DEBUG_QOS("set QoS to default \n"); | ||
1966 | |||
1967 | spin_unlock_irqrestore(&priv->lock, flags); | ||
1968 | } | ||
1969 | |||
1970 | static void iwl_activate_qos(struct iwl_priv *priv, u8 force) | ||
1971 | { | ||
1972 | unsigned long flags; | ||
1973 | |||
1974 | if (priv == NULL) | ||
1975 | return; | ||
1976 | |||
1977 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
1978 | return; | ||
1979 | |||
1980 | if (!priv->qos_data.qos_enable) | ||
1981 | return; | ||
1982 | |||
1983 | spin_lock_irqsave(&priv->lock, flags); | ||
1984 | priv->qos_data.def_qos_parm.qos_flags = 0; | ||
1985 | |||
1986 | if (priv->qos_data.qos_cap.q_AP.queue_request && | ||
1987 | !priv->qos_data.qos_cap.q_AP.txop_request) | ||
1988 | priv->qos_data.def_qos_parm.qos_flags |= | ||
1989 | QOS_PARAM_FLG_TXOP_TYPE_MSK; | ||
1990 | |||
1991 | if (priv->qos_data.qos_active) | ||
1992 | priv->qos_data.def_qos_parm.qos_flags |= | ||
1993 | QOS_PARAM_FLG_UPDATE_EDCA_MSK; | ||
1994 | |||
1995 | spin_unlock_irqrestore(&priv->lock, flags); | ||
1996 | |||
1997 | if (force || iwl_is_associated(priv)) { | ||
1998 | IWL_DEBUG_QOS("send QoS cmd with Qos active %d \n", | ||
1999 | priv->qos_data.qos_active); | ||
2000 | |||
2001 | iwl_send_qos_params_command(priv, | ||
2002 | &(priv->qos_data.def_qos_parm)); | ||
2003 | } | ||
2004 | } | ||
2005 | |||
2006 | #endif /* CONFIG_IWLWIFI_QOS */ | ||
2007 | /* | ||
2008 | * Power management (not Tx power!) functions | ||
2009 | */ | ||
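     | /* Sleep timeouts below are given in "milliseconds" but are converted | ||
     |  * with a factor of 1024 (one TU in usec) rather than 1000. */ | ||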
2010 | #define MSEC_TO_USEC 1024 | ||
2011 | |||
2012 | #define NOSLP __constant_cpu_to_le32(0) | ||
2013 | #define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK | ||
2014 | #define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC) | ||
2015 | #define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \ | ||
2016 | __constant_cpu_to_le32(X1), \ | ||
2017 | __constant_cpu_to_le32(X2), \ | ||
2018 | __constant_cpu_to_le32(X3), \ | ||
2019 | __constant_cpu_to_le32(X4)} | ||
2020 | |||
2021 | |||
2022 | /* default power management (not Tx power) table values */ | ||
2023 | /* for DTIM periods 0 through 10 */ | ||
2024 | static struct iwl_power_vec_entry range_0[IWL_POWER_AC] = { | ||
2025 | {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0}, | ||
2026 | {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0}, | ||
2027 | {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0}, | ||
2028 | {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0}, | ||
2029 | {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1}, | ||
2030 | {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1} | ||
2031 | }; | ||
2032 | |||
2033 | /* for DTIM periods greater than 10 */ | ||
2034 | static struct iwl_power_vec_entry range_1[IWL_POWER_AC] = { | ||
2035 | {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0}, | ||
2036 | {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), | ||
2037 | SLP_VEC(1, 2, 3, 4, 0xFF)}, 0}, | ||
2038 | {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), | ||
2039 | SLP_VEC(2, 4, 6, 7, 0xFF)}, 0}, | ||
2040 | {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), | ||
2041 | SLP_VEC(2, 6, 9, 9, 0xFF)}, 0}, | ||
2042 | {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0}, | ||
2043 | {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), | ||
2044 | SLP_VEC(4, 7, 10, 10, 0xFF)}, 0} | ||
2045 | }; | ||
2046 | |||
2047 | int iwl_power_init_handle(struct iwl_priv *priv) | ||
2048 | { | ||
2049 | int rc = 0, i; | ||
2050 | struct iwl_power_mgr *pow_data; | ||
2051 | int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_AC; | ||
2052 | u16 pci_pm; | ||
2053 | |||
2054 | IWL_DEBUG_POWER("Initialize power \n"); | ||
2055 | |||
2056 | pow_data = &(priv->power_data); | ||
2057 | |||
2058 | memset(pow_data, 0, sizeof(*pow_data)); | ||
2059 | |||
2060 | pow_data->active_index = IWL_POWER_RANGE_0; | ||
2061 | pow_data->dtim_val = 0xffff; | ||
2062 | |||
2063 | memcpy(&pow_data->pwr_range_0[0], &range_0[0], size); | ||
2064 | memcpy(&pow_data->pwr_range_1[0], &range_1[0], size); | ||
2065 | |||
2066 | rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm); | ||
2067 | if (rc != 0) | ||
2068 | return 0; | ||
2069 | else { | ||
2070 | struct iwl_powertable_cmd *cmd; | ||
2071 | |||
2072 | IWL_DEBUG_POWER("adjust power command flags\n"); | ||
2073 | |||
2074 | for (i = 0; i < IWL_POWER_AC; i++) { | ||
2075 | cmd = &pow_data->pwr_range_0[i].cmd; | ||
2076 | |||
2077 | if (pci_pm & 0x1) | ||
2078 | cmd->flags &= ~IWL_POWER_PCI_PM_MSK; | ||
2079 | else | ||
2080 | cmd->flags |= IWL_POWER_PCI_PM_MSK; | ||
2081 | } | ||
2082 | } | ||
2083 | return rc; | ||
2084 | } | ||
2085 | |||
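     | /* Fill a power table command for the requested power level, clamping | ||
     |  * each sleep interval so the NIC wakes for the DTIM period (or a | ||
     |  * multiple of it when DTIM periods may be skipped). */ | ||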
2086 | static int iwl_update_power_cmd(struct iwl_priv *priv, | ||
2087 | struct iwl_powertable_cmd *cmd, u32 mode) | ||
2088 | { | ||
2089 | int rc = 0, i; | ||
2090 | u8 skip; | ||
2091 | u32 max_sleep = 0; | ||
2092 | struct iwl_power_vec_entry *range; | ||
2093 | u8 period = 0; | ||
2094 | struct iwl_power_mgr *pow_data; | ||
2095 | |||
2096 | if (mode > IWL_POWER_INDEX_5) { | ||
2097 | IWL_DEBUG_POWER("Error invalid power mode \n"); | ||
2098 | return -1; | ||
2099 | } | ||
2100 | pow_data = &(priv->power_data); | ||
2101 | |||
2102 | if (pow_data->active_index == IWL_POWER_RANGE_0) | ||
2103 | range = &pow_data->pwr_range_0[0]; | ||
2104 | else | ||
2105 | range = &pow_data->pwr_range_1[0]; | ||
2106 | |||
2107 | memcpy(cmd, &range[mode].cmd, sizeof(struct iwl_powertable_cmd)); | ||
2108 | |||
2109 | #ifdef IWL_MAC80211_DISABLE | ||
2110 | if (priv->assoc_network != NULL) { | ||
2111 | unsigned long flags; | ||
2112 | |||
2113 | period = priv->assoc_network->tim.tim_period; | ||
2114 | } | ||
2115 | #endif /*IWL_MAC80211_DISABLE */ | ||
2116 | skip = range[mode].no_dtim; | ||
2117 | |||
2118 | if (period == 0) { | ||
2119 | period = 1; | ||
2120 | skip = 0; | ||
2121 | } | ||
2122 | |||
2123 | if (skip == 0) { | ||
2124 | max_sleep = period; | ||
2125 | cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK; | ||
2126 | } else { | ||
2127 | __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]; | ||
2128 | max_sleep = (le32_to_cpu(slp_itrvl) / period) * period; | ||
2129 | cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK; | ||
2130 | } | ||
2131 | |||
2132 | for (i = 0; i < IWL_POWER_VEC_SIZE; i++) { | ||
2133 | if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep) | ||
2134 | cmd->sleep_interval[i] = cpu_to_le32(max_sleep); | ||
2135 | } | ||
2136 | |||
2137 | IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags); | ||
2138 | IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout)); | ||
2139 | IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout)); | ||
2140 | IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n", | ||
2141 | le32_to_cpu(cmd->sleep_interval[0]), | ||
2142 | le32_to_cpu(cmd->sleep_interval[1]), | ||
2143 | le32_to_cpu(cmd->sleep_interval[2]), | ||
2144 | le32_to_cpu(cmd->sleep_interval[3]), | ||
2145 | le32_to_cpu(cmd->sleep_interval[4])); | ||
2146 | |||
2147 | return rc; | ||
2148 | } | ||
2149 | |||
2150 | static int iwl_send_power_mode(struct iwl_priv *priv, u32 mode) | ||
2151 | { | ||
2152 | u32 final_mode = mode; | ||
2153 | int rc; | ||
2154 | struct iwl_powertable_cmd cmd; | ||
2155 | |||
2156 | /* If on battery, set to 3, | ||
2157 | * if plugged into AC power, set to CAM ("continuously aware mode"), | ||
2158 | * else user level */ | ||
2159 | switch (mode) { | ||
2160 | case IWL_POWER_BATTERY: | ||
2161 | final_mode = IWL_POWER_INDEX_3; | ||
2162 | break; | ||
2163 | case IWL_POWER_AC: | ||
2164 | final_mode = IWL_POWER_MODE_CAM; | ||
2165 | break; | ||
2166 | default: | ||
2167 | final_mode = mode; | ||
2168 | break; | ||
2169 | } | ||
2170 | |||
2171 | iwl_update_power_cmd(priv, &cmd, final_mode); | ||
2172 | |||
2173 | rc = iwl_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd); | ||
2174 | |||
2175 | if (final_mode == IWL_POWER_MODE_CAM) | ||
2176 | clear_bit(STATUS_POWER_PMI, &priv->status); | ||
2177 | else | ||
2178 | set_bit(STATUS_POWER_PMI, &priv->status); | ||
2179 | |||
2180 | return rc; | ||
2181 | } | ||
2182 | |||
2183 | int iwl_is_network_packet(struct iwl_priv *priv, struct ieee80211_hdr *header) | ||
2184 | { | ||
2185 | /* Filter incoming packets to determine if they are targeted toward | ||
2186 | * this network, discarding packets coming from ourselves */ | ||
2187 | switch (priv->iw_mode) { | ||
2188 | case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */ | ||
2189 | /* packets from our adapter are dropped (echo) */ | ||
2190 | if (!compare_ether_addr(header->addr2, priv->mac_addr)) | ||
2191 | return 0; | ||
2192 | /* {broad,multi}cast packets to our IBSS go through */ | ||
2193 | if (is_multicast_ether_addr(header->addr1)) | ||
2194 | return !compare_ether_addr(header->addr3, priv->bssid); | ||
2195 | /* packets to our adapter go through */ | ||
2196 | return !compare_ether_addr(header->addr1, priv->mac_addr); | ||
2197 | case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */ | ||
2198 | /* packets from our adapter are dropped (echo) */ | ||
2199 | if (!compare_ether_addr(header->addr3, priv->mac_addr)) | ||
2200 | return 0; | ||
2201 | /* {broad,multi}cast packets to our BSS go through */ | ||
2202 | if (is_multicast_ether_addr(header->addr1)) | ||
2203 | return !compare_ether_addr(header->addr2, priv->bssid); | ||
2204 | /* packets to our adapter go through */ | ||
2205 | return !compare_ether_addr(header->addr1, priv->mac_addr); | ||
2206 | } | ||
2207 | |||
2208 | return 1; | ||
2209 | } | ||
2210 | |||
2211 | #define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x | ||
2212 | |||
2213 | const char *iwl_get_tx_fail_reason(u32 status) | ||
2214 | { | ||
2215 | switch (status & TX_STATUS_MSK) { | ||
2216 | case TX_STATUS_SUCCESS: | ||
2217 | return "SUCCESS"; | ||
2218 | TX_STATUS_ENTRY(SHORT_LIMIT); | ||
2219 | TX_STATUS_ENTRY(LONG_LIMIT); | ||
2220 | TX_STATUS_ENTRY(FIFO_UNDERRUN); | ||
2221 | TX_STATUS_ENTRY(MGMNT_ABORT); | ||
2222 | TX_STATUS_ENTRY(NEXT_FRAG); | ||
2223 | TX_STATUS_ENTRY(LIFE_EXPIRE); | ||
2224 | TX_STATUS_ENTRY(DEST_PS); | ||
2225 | TX_STATUS_ENTRY(ABORTED); | ||
2226 | TX_STATUS_ENTRY(BT_RETRY); | ||
2227 | TX_STATUS_ENTRY(STA_INVALID); | ||
2228 | TX_STATUS_ENTRY(FRAG_DROPPED); | ||
2229 | TX_STATUS_ENTRY(TID_DISABLE); | ||
2230 | TX_STATUS_ENTRY(FRAME_FLUSHED); | ||
2231 | TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL); | ||
2232 | TX_STATUS_ENTRY(TX_LOCKED); | ||
2233 | TX_STATUS_ENTRY(NO_BEACON_ON_RADAR); | ||
2234 | } | ||
2235 | |||
2236 | return "UNKNOWN"; | ||
2237 | } | ||
2238 | |||
2239 | /** | ||
2240 | * iwl_scan_cancel - Cancel any currently executing HW scan | ||
2241 | * | ||
2242 | * NOTE: priv->mutex is not required before calling this function | ||
2243 | */ | ||
2244 | static int iwl_scan_cancel(struct iwl_priv *priv) | ||
2245 | { | ||
2246 | if (!test_bit(STATUS_SCAN_HW, &priv->status)) { | ||
2247 | clear_bit(STATUS_SCANNING, &priv->status); | ||
2248 | return 0; | ||
2249 | } | ||
2250 | |||
2251 | if (test_bit(STATUS_SCANNING, &priv->status)) { | ||
2252 | if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) { | ||
2253 | IWL_DEBUG_SCAN("Queuing scan abort.\n"); | ||
2254 | set_bit(STATUS_SCAN_ABORTING, &priv->status); | ||
2255 | queue_work(priv->workqueue, &priv->abort_scan); | ||
2256 | |||
2257 | } else | ||
2258 | IWL_DEBUG_SCAN("Scan abort already in progress.\n"); | ||
2259 | |||
2260 | return test_bit(STATUS_SCANNING, &priv->status); | ||
2261 | } | ||
2262 | |||
2263 | return 0; | ||
2264 | } | ||
2265 | |||
2266 | /** | ||
2267 | * iwl_scan_cancel_timeout - Cancel any currently executing HW scan | ||
2268 | * @ms: amount of time to wait (in milliseconds) for scan to abort | ||
2269 | * | ||
2270 | * NOTE: priv->mutex must be held before calling this function | ||
2271 | */ | ||
2272 | static int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms) | ||
2273 | { | ||
2274 | unsigned long now = jiffies; | ||
2275 | int ret; | ||
2276 | |||
2277 | ret = iwl_scan_cancel(priv); | ||
2278 | if (ret && ms) { | ||
2279 | mutex_unlock(&priv->mutex); | ||
2280 | while (!time_after(jiffies, now + msecs_to_jiffies(ms)) && | ||
2281 | test_bit(STATUS_SCANNING, &priv->status)) | ||
2282 | msleep(1); | ||
2283 | mutex_lock(&priv->mutex); | ||
2284 | |||
2285 | return test_bit(STATUS_SCANNING, &priv->status); | ||
2286 | } | ||
2287 | |||
2288 | return ret; | ||
2289 | } | ||
2290 | |||
2291 | static void iwl_sequence_reset(struct iwl_priv *priv) | ||
2292 | { | ||
2293 | /* Reset ieee stats */ | ||
2294 | |||
2295 | /* We don't reset the net_device_stats (ieee->stats) on | ||
2296 | * re-association */ | ||
2297 | |||
2298 | priv->last_seq_num = -1; | ||
2299 | priv->last_frag_num = -1; | ||
2300 | priv->last_packet_time = 0; | ||
2301 | |||
2302 | iwl_scan_cancel(priv); | ||
2303 | } | ||
2304 | |||
2305 | #define MAX_UCODE_BEACON_INTERVAL 1024 | ||
2306 | #define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA) | ||
2307 | |||
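     | /* The uCode cannot handle beacon intervals longer than | ||
     |  * MAX_UCODE_BEACON_INTERVAL, so divide the interval by the smallest | ||
     |  * integer factor that brings it back into range. */ | ||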
2308 | static __le16 iwl_adjust_beacon_interval(u16 beacon_val) | ||
2309 | { | ||
2310 | u16 new_val = 0; | ||
2311 | u16 beacon_factor = 0; | ||
2312 | |||
2313 | beacon_factor = | ||
2314 | (beacon_val + MAX_UCODE_BEACON_INTERVAL) | ||
2315 | / MAX_UCODE_BEACON_INTERVAL; | ||
2316 | new_val = beacon_val / beacon_factor; | ||
2317 | |||
2318 | return cpu_to_le16(new_val); | ||
2319 | } | ||
2320 | |||
2321 | static void iwl_setup_rxon_timing(struct iwl_priv *priv) | ||
2322 | { | ||
2323 | u64 interval_tm_unit; | ||
2324 | u64 tsf, result; | ||
2325 | unsigned long flags; | ||
2326 | struct ieee80211_conf *conf = NULL; | ||
2327 | u16 beacon_int = 0; | ||
2328 | |||
2329 | conf = ieee80211_get_hw_conf(priv->hw); | ||
2330 | |||
2331 | spin_lock_irqsave(&priv->lock, flags); | ||
2332 | priv->rxon_timing.timestamp.dw[1] = cpu_to_le32(priv->timestamp1); | ||
2333 | priv->rxon_timing.timestamp.dw[0] = cpu_to_le32(priv->timestamp0); | ||
2334 | |||
2335 | priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL; | ||
2336 | |||
2337 | tsf = priv->timestamp1; | ||
2338 | tsf = ((tsf << 32) | priv->timestamp0); | ||
2339 | |||
2340 | beacon_int = priv->beacon_int; | ||
2341 | spin_unlock_irqrestore(&priv->lock, flags); | ||
2342 | |||
2343 | if (priv->iw_mode == IEEE80211_IF_TYPE_STA) { | ||
2344 | if (beacon_int == 0) { | ||
2345 | priv->rxon_timing.beacon_interval = cpu_to_le16(100); | ||
2346 | priv->rxon_timing.beacon_init_val = cpu_to_le32(102400); | ||
2347 | } else { | ||
2348 | priv->rxon_timing.beacon_interval = | ||
2349 | cpu_to_le16(beacon_int); | ||
2350 | priv->rxon_timing.beacon_interval = | ||
2351 | iwl_adjust_beacon_interval( | ||
2352 | le16_to_cpu(priv->rxon_timing.beacon_interval)); | ||
2353 | } | ||
2354 | |||
2355 | priv->rxon_timing.atim_window = 0; | ||
2356 | } else { | ||
2357 | priv->rxon_timing.beacon_interval = | ||
2358 | iwl_adjust_beacon_interval(conf->beacon_int); | ||
2359 | /* TODO: we need to get atim_window from upper stack | ||
2360 | * for now we set to 0 */ | ||
2361 | priv->rxon_timing.atim_window = 0; | ||
2362 | } | ||
2363 | |||
2364 | interval_tm_unit = | ||
2365 | (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024); | ||
2366 | result = do_div(tsf, interval_tm_unit); | ||
2367 | priv->rxon_timing.beacon_init_val = | ||
2368 | cpu_to_le32((u32) ((u64) interval_tm_unit - result)); | ||
2369 | |||
2370 | IWL_DEBUG_ASSOC | ||
2371 | ("beacon interval %d beacon timer %d beacon tim %d\n", | ||
2372 | le16_to_cpu(priv->rxon_timing.beacon_interval), | ||
2373 | le32_to_cpu(priv->rxon_timing.beacon_init_val), | ||
2374 | le16_to_cpu(priv->rxon_timing.atim_window)); | ||
2375 | } | ||
2376 | |||
2377 | static int iwl_scan_initiate(struct iwl_priv *priv) | ||
2378 | { | ||
2379 | if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { | ||
2380 | IWL_ERROR("APs don't scan.\n"); | ||
2381 | return 0; | ||
2382 | } | ||
2383 | |||
2384 | if (!iwl_is_ready_rf(priv)) { | ||
2385 | IWL_DEBUG_SCAN("Aborting scan due to not ready.\n"); | ||
2386 | return -EIO; | ||
2387 | } | ||
2388 | |||
2389 | if (test_bit(STATUS_SCANNING, &priv->status)) { | ||
2390 | IWL_DEBUG_SCAN("Scan already in progress.\n"); | ||
2391 | return -EAGAIN; | ||
2392 | } | ||
2393 | |||
2394 | if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { | ||
2395 | IWL_DEBUG_SCAN("Scan request while abort pending. " | ||
2396 | "Queuing.\n"); | ||
2397 | return -EAGAIN; | ||
2398 | } | ||
2399 | |||
2400 | IWL_DEBUG_INFO("Starting scan...\n"); | ||
2401 | priv->scan_bands = 2; | ||
2402 | set_bit(STATUS_SCANNING, &priv->status); | ||
2403 | priv->scan_start = jiffies; | ||
2404 | priv->scan_pass_start = priv->scan_start; | ||
2405 | |||
2406 | queue_work(priv->workqueue, &priv->request_scan); | ||
2407 | |||
2408 | return 0; | ||
2409 | } | ||
2410 | |||
2411 | static int iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt) | ||
2412 | { | ||
2413 | struct iwl_rxon_cmd *rxon = &priv->staging_rxon; | ||
2414 | |||
2415 | if (hw_decrypt) | ||
2416 | rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; | ||
2417 | else | ||
2418 | rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; | ||
2419 | |||
2420 | return 0; | ||
2421 | } | ||
2422 | |||
2423 | static void iwl_set_flags_for_phymode(struct iwl_priv *priv, u8 phymode) | ||
2424 | { | ||
2425 | if (phymode == MODE_IEEE80211A) { | ||
2426 | priv->staging_rxon.flags &= | ||
2427 | ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK | ||
2428 | | RXON_FLG_CCK_MSK); | ||
2429 | priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; | ||
2430 | } else { | ||
2431 | /* Copied from iwl_bg_post_associate() */ | ||
2432 | if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) | ||
2433 | priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; | ||
2434 | else | ||
2435 | priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; | ||
2436 | |||
2437 | if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) | ||
2438 | priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; | ||
2439 | |||
2440 | priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK; | ||
2441 | priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK; | ||
2442 | priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK; | ||
2443 | } | ||
2444 | } | ||
2445 | |||
2446 | /* | ||
2447 | * initialize rxon structure with default values from eeprom | ||
2448 | */ | ||
2449 | static void iwl_connection_init_rx_config(struct iwl_priv *priv) | ||
2450 | { | ||
2451 | const struct iwl_channel_info *ch_info; | ||
2452 | |||
2453 | memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon)); | ||
2454 | |||
2455 | switch (priv->iw_mode) { | ||
2456 | case IEEE80211_IF_TYPE_AP: | ||
2457 | priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP; | ||
2458 | break; | ||
2459 | |||
2460 | case IEEE80211_IF_TYPE_STA: | ||
2461 | priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS; | ||
2462 | priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; | ||
2463 | break; | ||
2464 | |||
2465 | case IEEE80211_IF_TYPE_IBSS: | ||
2466 | priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS; | ||
2467 | priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK; | ||
2468 | priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK | | ||
2469 | RXON_FILTER_ACCEPT_GRP_MSK; | ||
2470 | break; | ||
2471 | |||
2472 | case IEEE80211_IF_TYPE_MNTR: | ||
2473 | priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER; | ||
2474 | priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK | | ||
2475 | RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK; | ||
2476 | break; | ||
2477 | } | ||
2478 | |||
2479 | #if 0 | ||
2480 | /* TODO: Figure out when short_preamble would be set and cache from | ||
2481 | * that */ | ||
2482 | if (!hw_to_local(priv->hw)->short_preamble) | ||
2483 | priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; | ||
2484 | else | ||
2485 | priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; | ||
2486 | #endif | ||
2487 | |||
2488 | ch_info = iwl_get_channel_info(priv, priv->phymode, | ||
2489 | le16_to_cpu(priv->staging_rxon.channel)); | ||
2490 | |||
2491 | if (!ch_info) | ||
2492 | ch_info = &priv->channel_info[0]; | ||
2493 | |||
2494 | /* | ||
2495 | * in some cases the A-band channels are all non-IBSS; | ||
2496 | * in that case force a B/G channel | ||
2497 | */ | ||
2498 | if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && | ||
2499 | !(is_channel_ibss(ch_info))) | ||
2500 | ch_info = &priv->channel_info[0]; | ||
2501 | |||
2502 | priv->staging_rxon.channel = cpu_to_le16(ch_info->channel); | ||
2503 | if (is_channel_a_band(ch_info)) | ||
2504 | priv->phymode = MODE_IEEE80211A; | ||
2505 | else | ||
2506 | priv->phymode = MODE_IEEE80211G; | ||
2507 | |||
2508 | iwl_set_flags_for_phymode(priv, priv->phymode); | ||
2509 | |||
2510 | priv->staging_rxon.ofdm_basic_rates = | ||
2511 | (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; | ||
2512 | priv->staging_rxon.cck_basic_rates = | ||
2513 | (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; | ||
2514 | } | ||
2515 | |||
2516 | static int iwl_set_mode(struct iwl_priv *priv, int mode) | ||
2517 | { | ||
2518 | if (!iwl_is_ready_rf(priv)) | ||
2519 | return -EAGAIN; | ||
2520 | |||
2521 | if (mode == IEEE80211_IF_TYPE_IBSS) { | ||
2522 | const struct iwl_channel_info *ch_info; | ||
2523 | |||
2524 | ch_info = iwl_get_channel_info(priv, | ||
2525 | priv->phymode, | ||
2526 | le16_to_cpu(priv->staging_rxon.channel)); | ||
2527 | |||
2528 | if (!ch_info || !is_channel_ibss(ch_info)) { | ||
2529 | IWL_ERROR("channel %d not IBSS channel\n", | ||
2530 | le16_to_cpu(priv->staging_rxon.channel)); | ||
2531 | return -EINVAL; | ||
2532 | } | ||
2533 | } | ||
2534 | |||
2535 | cancel_delayed_work(&priv->scan_check); | ||
2536 | if (iwl_scan_cancel_timeout(priv, 100)) { | ||
2537 | IWL_WARNING("Aborted scan still in progress after 100ms\n"); | ||
2538 | IWL_DEBUG_MAC80211("leaving - scan abort failed.\n"); | ||
2539 | return -EAGAIN; | ||
2540 | } | ||
2541 | |||
2542 | priv->iw_mode = mode; | ||
2543 | |||
2544 | iwl_connection_init_rx_config(priv); | ||
2545 | memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); | ||
2546 | |||
2547 | iwl_clear_stations_table(priv); | ||
2548 | |||
2549 | iwl_commit_rxon(priv); | ||
2550 | |||
2551 | return 0; | ||
2552 | } | ||
2553 | |||
2554 | static void iwl_build_tx_cmd_hwcrypto(struct iwl_priv *priv, | ||
2555 | struct ieee80211_tx_control *ctl, | ||
2556 | struct iwl_cmd *cmd, | ||
2557 | struct sk_buff *skb_frag, | ||
2558 | int last_frag) | ||
2559 | { | ||
2560 | struct iwl_hw_key *keyinfo = &priv->stations[ctl->key_idx].keyinfo; | ||
2561 | |||
2562 | switch (keyinfo->alg) { | ||
2563 | case ALG_CCMP: | ||
2564 | cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM; | ||
2565 | memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen); | ||
2566 | IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n"); | ||
2567 | break; | ||
2568 | |||
2569 | case ALG_TKIP: | ||
2570 | #if 0 | ||
2571 | cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP; | ||
2572 | |||
2573 | if (last_frag) | ||
2574 | memcpy(cmd->cmd.tx.tkip_mic.byte, skb_frag->tail - 8, | ||
2575 | 8); | ||
2576 | else | ||
2577 | memset(cmd->cmd.tx.tkip_mic.byte, 0, 8); | ||
2578 | #endif | ||
2579 | break; | ||
2580 | |||
2581 | case ALG_WEP: | ||
2582 | cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP | | ||
2583 | (ctl->key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; | ||
2584 | |||
2585 | if (keyinfo->keylen == 13) | ||
2586 | cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128; | ||
2587 | |||
2588 | memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen); | ||
2589 | |||
2590 | IWL_DEBUG_TX("Configuring packet for WEP encryption " | ||
2591 | "with key %d\n", ctl->key_idx); | ||
2592 | break; | ||
2593 | |||
2594 | case ALG_NONE: | ||
2595 | IWL_DEBUG_TX("Tx packet in the clear (encrypt requested).\n"); | ||
2596 | break; | ||
2597 | |||
2598 | default: | ||
2599 | printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg); | ||
2600 | break; | ||
2601 | } | ||
2602 | } | ||
2603 | |||
2604 | /* | ||
2605 | * handle build REPLY_TX command notification. | ||
2606 | */ | ||
2607 | static void iwl_build_tx_cmd_basic(struct iwl_priv *priv, | ||
2608 | struct iwl_cmd *cmd, | ||
2609 | struct ieee80211_tx_control *ctrl, | ||
2610 | struct ieee80211_hdr *hdr, | ||
2611 | int is_unicast, u8 std_id) | ||
2612 | { | ||
2613 | __le16 *qc; | ||
2614 | u16 fc = le16_to_cpu(hdr->frame_control); | ||
2615 | __le32 tx_flags = cmd->cmd.tx.tx_flags; | ||
2616 | |||
2617 | cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; | ||
2618 | if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) { | ||
2619 | tx_flags |= TX_CMD_FLG_ACK_MSK; | ||
2620 | if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) | ||
2621 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
2622 | if (ieee80211_is_probe_response(fc) && | ||
2623 | !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) | ||
2624 | tx_flags |= TX_CMD_FLG_TSF_MSK; | ||
2625 | } else { | ||
2626 | tx_flags &= (~TX_CMD_FLG_ACK_MSK); | ||
2627 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
2628 | } | ||
2629 | |||
2630 | cmd->cmd.tx.sta_id = std_id; | ||
2631 | if (ieee80211_get_morefrag(hdr)) | ||
2632 | tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; | ||
2633 | |||
2634 | qc = ieee80211_get_qos_ctrl(hdr); | ||
2635 | if (qc) { | ||
2636 | cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf); | ||
2637 | tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; | ||
2638 | } else | ||
2639 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
2640 | |||
2641 | if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) { | ||
2642 | tx_flags |= TX_CMD_FLG_RTS_MSK; | ||
2643 | tx_flags &= ~TX_CMD_FLG_CTS_MSK; | ||
2644 | } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) { | ||
2645 | tx_flags &= ~TX_CMD_FLG_RTS_MSK; | ||
2646 | tx_flags |= TX_CMD_FLG_CTS_MSK; | ||
2647 | } | ||
2648 | |||
2649 | if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK)) | ||
2650 | tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; | ||
2651 | |||
2652 | tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); | ||
2653 | if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) { | ||
2654 | if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ || | ||
2655 | (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ) | ||
2656 | cmd->cmd.tx.timeout.pm_frame_timeout = | ||
2657 | cpu_to_le16(3); | ||
2658 | else | ||
2659 | cmd->cmd.tx.timeout.pm_frame_timeout = | ||
2660 | cpu_to_le16(2); | ||
2661 | } else | ||
2662 | cmd->cmd.tx.timeout.pm_frame_timeout = 0; | ||
2663 | |||
2664 | cmd->cmd.tx.driver_txop = 0; | ||
2665 | cmd->cmd.tx.tx_flags = tx_flags; | ||
2666 | cmd->cmd.tx.next_frame_len = 0; | ||
2667 | } | ||
2668 | |||
2669 | static int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr) | ||
2670 | { | ||
2671 | int sta_id; | ||
2672 | u16 fc = le16_to_cpu(hdr->frame_control); | ||
2673 | |||
2674 | /* If this frame is broadcast or not data then use the broadcast | ||
2675 | * station id */ | ||
2676 | if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) || | ||
2677 | is_multicast_ether_addr(hdr->addr1)) | ||
2678 | return priv->hw_setting.bcast_sta_id; | ||
2679 | |||
2680 | switch (priv->iw_mode) { | ||
2681 | |||
2682 | /* If this frame is part of a BSS network (we're a station), then | ||
2683 | * we use the AP's station id */ | ||
2684 | case IEEE80211_IF_TYPE_STA: | ||
2685 | return IWL_AP_ID; | ||
2686 | |||
2687 | /* If we are an AP, then find the station, or use BCAST */ | ||
2688 | case IEEE80211_IF_TYPE_AP: | ||
2689 | sta_id = iwl_hw_find_station(priv, hdr->addr1); | ||
2690 | if (sta_id != IWL_INVALID_STATION) | ||
2691 | return sta_id; | ||
2692 | return priv->hw_setting.bcast_sta_id; | ||
2693 | |||
2694 | /* If this frame is part of an IBSS network, then we use the | ||
2695 | * target specific station id */ | ||
2696 | case IEEE80211_IF_TYPE_IBSS: | ||
2697 | sta_id = iwl_hw_find_station(priv, hdr->addr1); | ||
2698 | if (sta_id != IWL_INVALID_STATION) | ||
2699 | return sta_id; | ||
2700 | |||
2701 | sta_id = iwl_add_station(priv, hdr->addr1, 0, CMD_ASYNC); | ||
2702 | |||
2703 | if (sta_id != IWL_INVALID_STATION) | ||
2704 | return sta_id; | ||
2705 | |||
2706 | IWL_DEBUG_DROP("Station " MAC_FMT " not in station map. " | ||
2707 | "Defaulting to broadcast...\n", | ||
2708 | MAC_ARG(hdr->addr1)); | ||
2709 | iwl_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr)); | ||
2710 | return priv->hw_setting.bcast_sta_id; | ||
2711 | |||
2712 | default: | ||
2713 | IWL_WARNING("Unkown mode of operation: %d", priv->iw_mode); | ||
2714 | return priv->hw_setting.bcast_sta_id; | ||
2715 | } | ||
2716 | } | ||
2717 | |||
2718 | /* | ||
2719 | * start REPLY_TX command process | ||
2720 | */ | ||
2721 | static int iwl_tx_skb(struct iwl_priv *priv, | ||
2722 | struct sk_buff *skb, struct ieee80211_tx_control *ctl) | ||
2723 | { | ||
2724 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
2725 | struct iwl_tfd_frame *tfd; | ||
2726 | u32 *control_flags; | ||
2727 | int txq_id = ctl->queue; | ||
2728 | struct iwl_tx_queue *txq = NULL; | ||
2729 | struct iwl_queue *q = NULL; | ||
2730 | dma_addr_t phys_addr; | ||
2731 | dma_addr_t txcmd_phys; | ||
2732 | struct iwl_cmd *out_cmd = NULL; | ||
2733 | u16 len, idx, len_org; | ||
2734 | u8 id, hdr_len, unicast; | ||
2735 | u8 sta_id; | ||
2736 | u16 seq_number = 0; | ||
2737 | u16 fc; | ||
2738 | __le16 *qc; | ||
2739 | u8 wait_write_ptr = 0; | ||
2740 | unsigned long flags; | ||
2741 | int rc; | ||
2742 | |||
2743 | spin_lock_irqsave(&priv->lock, flags); | ||
2744 | if (iwl_is_rfkill(priv)) { | ||
2745 | IWL_DEBUG_DROP("Dropping - RF KILL\n"); | ||
2746 | goto drop_unlock; | ||
2747 | } | ||
2748 | |||
2749 | if (!priv->interface_id) { | ||
2750 | IWL_DEBUG_DROP("Dropping - !priv->interface_id\n"); | ||
2751 | goto drop_unlock; | ||
2752 | } | ||
2753 | |||
2754 | if ((ctl->tx_rate & 0xFF) == IWL_INVALID_RATE) { | ||
2755 | IWL_ERROR("ERROR: No TX rate available.\n"); | ||
2756 | goto drop_unlock; | ||
2757 | } | ||
2758 | |||
2759 | unicast = !is_multicast_ether_addr(hdr->addr1); | ||
2760 | id = 0; | ||
2761 | |||
2762 | fc = le16_to_cpu(hdr->frame_control); | ||
2763 | |||
2764 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
2765 | if (ieee80211_is_auth(fc)) | ||
2766 | IWL_DEBUG_TX("Sending AUTH frame\n"); | ||
2767 | else if (ieee80211_is_assoc_request(fc)) | ||
2768 | IWL_DEBUG_TX("Sending ASSOC frame\n"); | ||
2769 | else if (ieee80211_is_reassoc_request(fc)) | ||
2770 | IWL_DEBUG_TX("Sending REASSOC frame\n"); | ||
2771 | #endif | ||
2772 | |||
2773 | if (!iwl_is_associated(priv) && | ||
2774 | ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) { | ||
2775 | IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n"); | ||
2776 | goto drop_unlock; | ||
2777 | } | ||
2778 | |||
2779 | spin_unlock_irqrestore(&priv->lock, flags); | ||
2780 | |||
2781 | hdr_len = ieee80211_get_hdrlen(fc); | ||
2782 | sta_id = iwl_get_sta_id(priv, hdr); | ||
2783 | if (sta_id == IWL_INVALID_STATION) { | ||
2784 | IWL_DEBUG_DROP("Dropping - INVALID STATION: " MAC_FMT "\n", | ||
2785 | MAC_ARG(hdr->addr1)); | ||
2786 | goto drop; | ||
2787 | } | ||
2788 | |||
2789 | IWL_DEBUG_RATE("station Id %d\n", sta_id); | ||
2790 | |||
2791 | qc = ieee80211_get_qos_ctrl(hdr); | ||
2792 | if (qc) { | ||
2793 | u8 tid = (u8)(le16_to_cpu(*qc) & 0xf); | ||
2794 | seq_number = priv->stations[sta_id].tid[tid].seq_number & | ||
2795 | IEEE80211_SCTL_SEQ; | ||
2796 | hdr->seq_ctrl = cpu_to_le16(seq_number) | | ||
2797 | (hdr->seq_ctrl & | ||
2798 | __constant_cpu_to_le16(IEEE80211_SCTL_FRAG)); | ||
2799 | seq_number += 0x10; | ||
2800 | } | ||
2801 | txq = &priv->txq[txq_id]; | ||
2802 | q = &txq->q; | ||
2803 | |||
2804 | spin_lock_irqsave(&priv->lock, flags); | ||
2805 | |||
2806 | tfd = &txq->bd[q->first_empty]; | ||
2807 | memset(tfd, 0, sizeof(*tfd)); | ||
2808 | control_flags = (u32 *) tfd; | ||
2809 | idx = get_cmd_index(q, q->first_empty, 0); | ||
2810 | |||
2811 | memset(&(txq->txb[q->first_empty]), 0, sizeof(struct iwl_tx_info)); | ||
2812 | txq->txb[q->first_empty].skb[0] = skb; | ||
2813 | memcpy(&(txq->txb[q->first_empty].status.control), | ||
2814 | ctl, sizeof(struct ieee80211_tx_control)); | ||
2815 | out_cmd = &txq->cmd[idx]; | ||
2816 | memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); | ||
2817 | memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx)); | ||
2818 | out_cmd->hdr.cmd = REPLY_TX; | ||
2819 | out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | | ||
2820 | INDEX_TO_SEQ(q->first_empty))); | ||
2821 | /* copy frags header */ | ||
2822 | memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len); | ||
2823 | |||
2824 | /* hdr = (struct ieee80211_hdr *)out_cmd->cmd.tx.hdr; */ | ||
2825 | len = priv->hw_setting.tx_cmd_len + | ||
2826 | sizeof(struct iwl_cmd_header) + hdr_len; | ||
2827 | |||
2828 | len_org = len; | ||
2829 | len = (len + 3) & ~3; | ||
2830 | |||
2831 | if (len_org != len) | ||
2832 | len_org = 1; | ||
2833 | else | ||
2834 | len_org = 0; | ||
2835 | |||
2836 | txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx + | ||
2837 | offsetof(struct iwl_cmd, hdr); | ||
2838 | |||
2839 | iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len); | ||
2840 | |||
2841 | if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) | ||
2842 | iwl_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, 0); | ||
2843 | |||
2844 | /* 802.11 null functions have no payload... */ | ||
2845 | len = skb->len - hdr_len; | ||
2846 | if (len) { | ||
2847 | phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, | ||
2848 | len, PCI_DMA_TODEVICE); | ||
2849 | iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len); | ||
2850 | } | ||
2851 | |||
2852 | /* If there is no payload, then only one TFD is used */ | ||
2853 | if (!len) | ||
2854 | *control_flags = TFD_CTL_COUNT_SET(1); | ||
2855 | else | ||
2856 | *control_flags = TFD_CTL_COUNT_SET(2) | | ||
2857 | TFD_CTL_PAD_SET(U32_PAD(len)); | ||
2858 | |||
2859 | len = (u16)skb->len; | ||
2860 | out_cmd->cmd.tx.len = cpu_to_le16(len); | ||
2861 | |||
2862 | /* TODO need this for burst mode later on */ | ||
2863 | iwl_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id); | ||
2864 | |||
2865 | /* set is_hcca to 0; it probably will never be implemented */ | ||
2866 | iwl_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0); | ||
2867 | |||
2868 | out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; | ||
2869 | out_cmd->cmd.tx.tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; | ||
2870 | |||
2871 | if (!ieee80211_get_morefrag(hdr)) { | ||
2872 | txq->need_update = 1; | ||
2873 | if (qc) { | ||
2874 | u8 tid = (u8)(le16_to_cpu(*qc) & 0xf); | ||
2875 | priv->stations[sta_id].tid[tid].seq_number = seq_number; | ||
2876 | } | ||
2877 | } else { | ||
2878 | wait_write_ptr = 1; | ||
2879 | txq->need_update = 0; | ||
2880 | } | ||
2881 | |||
2882 | iwl_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload, | ||
2883 | sizeof(out_cmd->cmd.tx)); | ||
2884 | |||
2885 | iwl_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr, | ||
2886 | ieee80211_get_hdrlen(fc)); | ||
2887 | |||
2888 | q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd); | ||
2889 | rc = iwl_tx_queue_update_write_ptr(priv, txq); | ||
2890 | spin_unlock_irqrestore(&priv->lock, flags); | ||
2891 | |||
2892 | if (rc) | ||
2893 | return rc; | ||
2894 | |||
2895 | if ((iwl_queue_space(q) < q->high_mark) | ||
2896 | && priv->mac80211_registered) { | ||
2897 | if (wait_write_ptr) { | ||
2898 | spin_lock_irqsave(&priv->lock, flags); | ||
2899 | txq->need_update = 1; | ||
2900 | iwl_tx_queue_update_write_ptr(priv, txq); | ||
2901 | spin_unlock_irqrestore(&priv->lock, flags); | ||
2902 | } | ||
2903 | |||
2904 | ieee80211_stop_queue(priv->hw, ctl->queue); | ||
2905 | } | ||
2906 | |||
2907 | return 0; | ||
2908 | |||
2909 | drop_unlock: | ||
2910 | spin_unlock_irqrestore(&priv->lock, flags); | ||
2911 | drop: | ||
2912 | return -1; | ||
2913 | } | ||
2914 | |||
2915 | static void iwl_set_rate(struct iwl_priv *priv) | ||
2916 | { | ||
2917 | const struct ieee80211_hw_mode *hw = NULL; | ||
2918 | struct ieee80211_rate *rate; | ||
2919 | int i; | ||
2920 | |||
2921 | hw = iwl_get_hw_mode(priv, priv->phymode); | ||
2922 | |||
2923 | priv->active_rate = 0; | ||
2924 | priv->active_rate_basic = 0; | ||
2925 | |||
2926 | IWL_DEBUG_RATE("Setting rates for 802.11%c\n", | ||
2927 | hw->mode == MODE_IEEE80211A ? | ||
2928 | 'a' : ((hw->mode == MODE_IEEE80211B) ? 'b' : 'g')); | ||
2929 | |||
2930 | for (i = 0; i < hw->num_rates; i++) { | ||
2931 | rate = &(hw->rates[i]); | ||
2932 | if ((rate->val < IWL_RATE_COUNT) && | ||
2933 | (rate->flags & IEEE80211_RATE_SUPPORTED)) { | ||
2934 | IWL_DEBUG_RATE("Adding rate index %d (plcp %d)%s\n", | ||
2935 | rate->val, iwl_rates[rate->val].plcp, | ||
2936 | (rate->flags & IEEE80211_RATE_BASIC) ? | ||
2937 | "*" : ""); | ||
2938 | priv->active_rate |= (1 << rate->val); | ||
2939 | if (rate->flags & IEEE80211_RATE_BASIC) | ||
2940 | priv->active_rate_basic |= (1 << rate->val); | ||
2941 | } else | ||
2942 | IWL_DEBUG_RATE("Not adding rate %d (plcp %d)\n", | ||
2943 | rate->val, iwl_rates[rate->val].plcp); | ||
2944 | } | ||
2945 | |||
2946 | IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n", | ||
2947 | priv->active_rate, priv->active_rate_basic); | ||
2948 | |||
2949 | /* | ||
2950 | * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK) | ||
2951 | * otherwise set it to the default of all CCK rates and 6, 12, 24 for | ||
2952 | * OFDM | ||
2953 | */ | ||
2954 | if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK) | ||
2955 | priv->staging_rxon.cck_basic_rates = | ||
2956 | ((priv->active_rate_basic & | ||
2957 | IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF; | ||
2958 | else | ||
2959 | priv->staging_rxon.cck_basic_rates = | ||
2960 | (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; | ||
2961 | |||
2962 | if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK) | ||
2963 | priv->staging_rxon.ofdm_basic_rates = | ||
2964 | ((priv->active_rate_basic & | ||
2965 | (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >> | ||
2966 | IWL_FIRST_OFDM_RATE) & 0xFF; | ||
2967 | else | ||
2968 | priv->staging_rxon.ofdm_basic_rates = | ||
2969 | (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; | ||
2970 | } | ||
2971 | |||
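     | /* Software RF-kill: when disabling, cancel any scan and tell the uCode | ||
     |  * and card to power the radio down; when enabling, clear the SW kill | ||
     |  * bit and restart the interface unless the HW switch still blocks it. */ | ||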
2972 | static void iwl_radio_kill_sw(struct iwl_priv *priv, int disable_radio) | ||
2973 | { | ||
2974 | unsigned long flags; | ||
2975 | |||
2976 | if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status)) | ||
2977 | return; | ||
2978 | |||
2979 | IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n", | ||
2980 | disable_radio ? "OFF" : "ON"); | ||
2981 | |||
2982 | if (disable_radio) { | ||
2983 | iwl_scan_cancel(priv); | ||
2984 | /* FIXME: This is a workaround for AP */ | ||
2985 | if (priv->iw_mode != IEEE80211_IF_TYPE_AP) { | ||
2986 | spin_lock_irqsave(&priv->lock, flags); | ||
2987 | iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, | ||
2988 | CSR_UCODE_SW_BIT_RFKILL); | ||
2989 | spin_unlock_irqrestore(&priv->lock, flags); | ||
2990 | iwl_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0); | ||
2991 | set_bit(STATUS_RF_KILL_SW, &priv->status); | ||
2992 | } | ||
2993 | return; | ||
2994 | } | ||
2995 | |||
2996 | spin_lock_irqsave(&priv->lock, flags); | ||
2997 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
2998 | |||
2999 | clear_bit(STATUS_RF_KILL_SW, &priv->status); | ||
3000 | spin_unlock_irqrestore(&priv->lock, flags); | ||
3001 | |||
3002 | /* wake up ucode */ | ||
3003 | msleep(10); | ||
3004 | |||
3005 | spin_lock_irqsave(&priv->lock, flags); | ||
3006 | iwl_read32(priv, CSR_UCODE_DRV_GP1); | ||
3007 | if (!iwl_grab_restricted_access(priv)) | ||
3008 | iwl_release_restricted_access(priv); | ||
3009 | spin_unlock_irqrestore(&priv->lock, flags); | ||
3010 | |||
3011 | if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { | ||
3012 | IWL_DEBUG_RF_KILL("Can not turn radio back on - " | ||
3013 | "disabled by HW switch\n"); | ||
3014 | return; | ||
3015 | } | ||
3016 | |||
3017 | queue_work(priv->workqueue, &priv->restart); | ||
3018 | return; | ||
3019 | } | ||
3020 | |||
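     | /* Translate the uCode decryption result for a protected frame into | ||
     |  * mac80211 rx flags (RX_FLAG_DECRYPTED / RX_FLAG_MMIC_ERROR) when | ||
     |  * hardware decryption is enabled. */ | ||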
3021 | void iwl_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb, | ||
3022 | u32 decrypt_res, struct ieee80211_rx_status *stats) | ||
3023 | { | ||
3024 | u16 fc = | ||
3025 | le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control); | ||
3026 | |||
3027 | if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK) | ||
3028 | return; | ||
3029 | |||
3030 | if (!(fc & IEEE80211_FCTL_PROTECTED)) | ||
3031 | return; | ||
3032 | |||
3033 | IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res); | ||
3034 | switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { | ||
3035 | case RX_RES_STATUS_SEC_TYPE_TKIP: | ||
3036 | if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == | ||
3037 | RX_RES_STATUS_BAD_ICV_MIC) | ||
3038 | stats->flag |= RX_FLAG_MMIC_ERROR; | ||
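     | /* fall through - check the generic decrypt status below */ | ||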
3039 | case RX_RES_STATUS_SEC_TYPE_WEP: | ||
3040 | case RX_RES_STATUS_SEC_TYPE_CCMP: | ||
3041 | if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == | ||
3042 | RX_RES_STATUS_DECRYPT_OK) { | ||
3043 | IWL_DEBUG_RX("hw decrypt successfully!!!\n"); | ||
3044 | stats->flag |= RX_FLAG_DECRYPTED; | ||
3045 | } | ||
3046 | break; | ||
3047 | |||
3048 | default: | ||
3049 | break; | ||
3050 | } | ||
3051 | } | ||
3052 | |||
3053 | void iwl_handle_data_packet_monitor(struct iwl_priv *priv, | ||
3054 | struct iwl_rx_mem_buffer *rxb, | ||
3055 | void *data, short len, | ||
3056 | struct ieee80211_rx_status *stats, | ||
3057 | u16 phy_flags) | ||
3058 | { | ||
3059 | struct iwl_rt_rx_hdr *iwl_rt; | ||
3060 | |||
3061 | /* First cache any information we need before we overwrite | ||
3062 | * the information provided in the skb from the hardware */ | ||
3063 | s8 signal = stats->ssi; | ||
3064 | s8 noise = 0; | ||
3065 | int rate = stats->rate; | ||
3066 | u64 tsf = stats->mactime; | ||
3067 | __le16 phy_flags_hw = cpu_to_le16(phy_flags); | ||
3068 | |||
3069 | /* Drop the frame if it will not fit behind the radiotap header */ | ||
3070 | if (len > IWL_RX_BUF_SIZE - sizeof(*iwl_rt)) { | ||
3071 | IWL_DEBUG_DROP("Dropping too large packet in monitor\n"); | ||
3072 | return; | ||
3073 | } | ||
3074 | |||
3075 | /* copy the frame data to write after where the radiotap header goes */ | ||
3076 | iwl_rt = (void *)rxb->skb->data; | ||
3077 | memmove(iwl_rt->payload, data, len); | ||
3078 | |||
3079 | iwl_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; | ||
3080 | iwl_rt->rt_hdr.it_pad = 0; /* always good to zero */ | ||
3081 | |||
3082 | /* total header + data */ | ||
3083 | iwl_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*iwl_rt)); | ||
3084 | |||
3085 | /* Set the size of the skb to the size of the frame */ | ||
3086 | skb_put(rxb->skb, sizeof(*iwl_rt) + len); | ||
3087 | |||
3088 | /* Big bitfield of all the fields we provide in radiotap */ | ||
3089 | iwl_rt->rt_hdr.it_present = | ||
3090 | cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) | | ||
3091 | (1 << IEEE80211_RADIOTAP_FLAGS) | | ||
3092 | (1 << IEEE80211_RADIOTAP_RATE) | | ||
3093 | (1 << IEEE80211_RADIOTAP_CHANNEL) | | ||
3094 | (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | | ||
3095 | (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | | ||
3096 | (1 << IEEE80211_RADIOTAP_ANTENNA)); | ||
3097 | |||
3098 | /* Zero the flags, we'll add to them as we go */ | ||
3099 | iwl_rt->rt_flags = 0; | ||
3100 | |||
3101 | iwl_rt->rt_tsf = cpu_to_le64(tsf); | ||
3102 | |||
3103 | /* Convert to dBm */ | ||
3104 | iwl_rt->rt_dbmsignal = signal; | ||
3105 | iwl_rt->rt_dbmnoise = noise; | ||
3106 | |||
3107 | /* Convert the channel frequency and set the flags */ | ||
3108 | iwl_rt->rt_channelMHz = cpu_to_le16(stats->freq); | ||
3109 | if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK)) | ||
3110 | iwl_rt->rt_chbitmask = | ||
3111 | cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ)); | ||
3112 | else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK) | ||
3113 | iwl_rt->rt_chbitmask = | ||
3114 | cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ)); | ||
3115 | else /* 802.11g */ | ||
3116 | iwl_rt->rt_chbitmask = | ||
3117 | cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ)); | ||
3118 | |||
3119 | rate = iwl_rate_index_from_plcp(rate); | ||
3120 | if (rate == -1) | ||
3121 | iwl_rt->rt_rate = 0; | ||
3122 | else | ||
3123 | iwl_rt->rt_rate = iwl_rates[rate].ieee; | ||
3124 | |||
3125 | /* antenna number */ | ||
3126 | iwl_rt->rt_antenna = | ||
3127 | le16_to_cpu(phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4; | ||
3128 | |||
3129 | /* set the preamble flag if we have it */ | ||
3130 | if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) | ||
3131 | iwl_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; | ||
3132 | |||
3133 | IWL_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len); | ||
3134 | |||
3135 | stats->flag |= RX_FLAG_RADIOTAP; | ||
3136 | ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats); | ||
3137 | rxb->skb = NULL; | ||
3138 | } | ||
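/*
 * Note on the monitor-mode header built above: radiotap requires the data
 * fields to follow it_present in ascending bit order, i.e. TSFT (8 bytes,
 * 8-byte aligned), FLAGS (1), RATE (1), CHANNEL (2-byte frequency plus
 * 2-byte flags), DBM_ANTSIGNAL (1), DBM_ANTNOISE (1), ANTENNA (1).
 * struct iwl_rt_rx_hdr (declared in the driver headers) is presumably laid
 * out to match, with the frame payload following immediately after.
 */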
3139 | |||
3140 | |||
3141 | #define IWL_PACKET_RETRY_TIME HZ | ||
3142 | |||
3143 | int is_duplicate_packet(struct iwl_priv *priv, struct ieee80211_hdr *header) | ||
3144 | { | ||
3145 | u16 sc = le16_to_cpu(header->seq_ctrl); | ||
3146 | u16 seq = (sc & IEEE80211_SCTL_SEQ) >> 4; | ||
3147 | u16 frag = sc & IEEE80211_SCTL_FRAG; | ||
3148 | u16 *last_seq, *last_frag; | ||
3149 | unsigned long *last_time; | ||
3150 | |||
3151 | switch (priv->iw_mode) { | ||
3152 | case IEEE80211_IF_TYPE_IBSS:{ | ||
3153 | struct list_head *p; | ||
3154 | struct iwl_ibss_seq *entry = NULL; | ||
3155 | u8 *mac = header->addr2; | ||
3156 | int index = mac[5] & (IWL_IBSS_MAC_HASH_SIZE - 1); | ||
3157 | |||
3158 | __list_for_each(p, &priv->ibss_mac_hash[index]) { | ||
3159 | entry = | ||
3160 | list_entry(p, struct iwl_ibss_seq, list); | ||
3161 | if (!compare_ether_addr(entry->mac, mac)) | ||
3162 | break; | ||
3163 | } | ||
3164 | if (p == &priv->ibss_mac_hash[index]) { | ||
3165 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); | ||
3166 | if (!entry) { | ||
3167 | IWL_ERROR | ||
3168 | ("Cannot malloc new mac entry\n"); | ||
3169 | return 0; | ||
3170 | } | ||
3171 | memcpy(entry->mac, mac, ETH_ALEN); | ||
3172 | entry->seq_num = seq; | ||
3173 | entry->frag_num = frag; | ||
3174 | entry->packet_time = jiffies; | ||
3175 | list_add(&entry->list, | ||
3176 | &priv->ibss_mac_hash[index]); | ||
3177 | return 0; | ||
3178 | } | ||
3179 | last_seq = &entry->seq_num; | ||
3180 | last_frag = &entry->frag_num; | ||
3181 | last_time = &entry->packet_time; | ||
3182 | break; | ||
3183 | } | ||
3184 | case IEEE80211_IF_TYPE_STA: | ||
3185 | last_seq = &priv->last_seq_num; | ||
3186 | last_frag = &priv->last_frag_num; | ||
3187 | last_time = &priv->last_packet_time; | ||
3188 | break; | ||
3189 | default: | ||
3190 | return 0; | ||
3191 | } | ||
3192 | if ((*last_seq == seq) && | ||
3193 | time_after(*last_time + IWL_PACKET_RETRY_TIME, jiffies)) { | ||
3194 | if (*last_frag == frag) | ||
3195 | goto drop; | ||
3196 | if (*last_frag + 1 != frag) | ||
3197 | /* out-of-order fragment */ | ||
3198 | goto drop; | ||
3199 | } else | ||
3200 | *last_seq = seq; | ||
3201 | |||
3202 | *last_frag = frag; | ||
3203 | *last_time = jiffies; | ||
3204 | return 0; | ||
3205 | |||
3206 | drop: | ||
3207 | return 1; | ||
3208 | } | ||
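/*
 * Illustrative sketch (not compiled into the driver): how the sequence
 * control field decomposes in the duplicate check above.  The numeric
 * value below is just an example.
 */
#if 0
static void iwl_seq_ctrl_example(void)
{
        u16 sc = 0x01a3;                                /* example seq_ctrl */
        u16 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;       /* 0x1a = 26 */
        u16 frag = sc & IEEE80211_SCTL_FRAG;            /* 3 */

        /* A repeat of the same (seq, frag) pair within
         * IWL_PACKET_RETRY_TIME (one second) is dropped as a retry, and,
         * within the same sequence, any fragment other than
         * last_frag + 1 is dropped as out of order. */
}
#endif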
3209 | |||
3210 | #ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT | ||
3211 | |||
3212 | #include "iwl-spectrum.h" | ||
3213 | |||
3214 | #define BEACON_TIME_MASK_LOW 0x00FFFFFF | ||
3215 | #define BEACON_TIME_MASK_HIGH 0xFF000000 | ||
3216 | #define TIME_UNIT 1024 | ||
3217 | |||
3218 | /* | ||
3219 | * extended beacon time format | ||
3220 | * time in usec will be changed into a 32-bit value in 8:24 format | ||
3221 | * the high 1 byte is the beacon counts | ||
3222 | * the lower 3 bytes is the time in usec within one beacon interval | ||
3223 | */ | ||
3224 | |||
3225 | static u32 iwl_usecs_to_beacons(u32 usec, u32 beacon_interval) | ||
3226 | { | ||
3227 | u32 quot; | ||
3228 | u32 rem; | ||
3229 | u32 interval = beacon_interval * 1024; | ||
3230 | |||
3231 | if (!interval || !usec) | ||
3232 | return 0; | ||
3233 | |||
3234 | quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24); | ||
3235 | rem = (usec % interval) & BEACON_TIME_MASK_LOW; | ||
3236 | |||
3237 | return (quot << 24) + rem; | ||
3238 | } | ||
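/*
 * Illustrative sketch (not compiled into the driver): a worked example of
 * the 8:24 packing above, assuming a beacon interval of 100 TU.
 */
#if 0
static void iwl_usecs_to_beacons_example(void)
{
        /* 250000 usec with a 100 TU (102400 usec) interval:
         *   quot = 250000 / 102400 = 2 elapsed beacons (high byte)
         *   rem  = 250000 % 102400 = 45200 usec       (low 24 bits)
         * so the packed value is (2 << 24) + 45200. */
        BUG_ON(iwl_usecs_to_beacons(250000, 100) != ((2 << 24) + 45200));
}
#endif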
3239 | |||
3240 | /* base is usually what we get from the uCode with each received frame; | ||
3241 | * it is the same as the HW timer counter counting down | ||
3242 | */ | ||
3243 | |||
3244 | static __le32 iwl_add_beacon_time(u32 base, u32 addon, u32 beacon_interval) | ||
3245 | { | ||
3246 | u32 base_low = base & BEACON_TIME_MASK_LOW; | ||
3247 | u32 addon_low = addon & BEACON_TIME_MASK_LOW; | ||
3248 | u32 interval = beacon_interval * TIME_UNIT; | ||
3249 | u32 res = (base & BEACON_TIME_MASK_HIGH) + | ||
3250 | (addon & BEACON_TIME_MASK_HIGH); | ||
3251 | |||
3252 | if (base_low > addon_low) | ||
3253 | res += base_low - addon_low; | ||
3254 | else if (base_low < addon_low) { | ||
3255 | res += interval + base_low - addon_low; | ||
3256 | res += (1 << 24); | ||
3257 | } else | ||
3258 | res += (1 << 24); | ||
3259 | |||
3260 | return cpu_to_le32(res); | ||
3261 | } | ||
3262 | |||
3263 | static int iwl_get_measurement(struct iwl_priv *priv, | ||
3264 | struct ieee80211_measurement_params *params, | ||
3265 | u8 type) | ||
3266 | { | ||
3267 | struct iwl_spectrum_cmd spectrum; | ||
3268 | struct iwl_rx_packet *res; | ||
3269 | struct iwl_host_cmd cmd = { | ||
3270 | .id = REPLY_SPECTRUM_MEASUREMENT_CMD, | ||
3271 | .data = (void *)&spectrum, | ||
3272 | .meta.flags = CMD_WANT_SKB, | ||
3273 | }; | ||
3274 | u32 add_time = le64_to_cpu(params->start_time); | ||
3275 | int rc; | ||
3276 | int spectrum_resp_status; | ||
3277 | int duration = le16_to_cpu(params->duration); | ||
3278 | |||
3279 | if (iwl_is_associated(priv)) | ||
3280 | add_time = | ||
3281 | iwl_usecs_to_beacons( | ||
3282 | le64_to_cpu(params->start_time) - priv->last_tsf, | ||
3283 | le16_to_cpu(priv->rxon_timing.beacon_interval)); | ||
3284 | |||
3285 | memset(&spectrum, 0, sizeof(spectrum)); | ||
3286 | |||
3287 | spectrum.channel_count = cpu_to_le16(1); | ||
3288 | spectrum.flags = | ||
3289 | RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK; | ||
3290 | spectrum.filter_flags = MEASUREMENT_FILTER_FLAG; | ||
3291 | cmd.len = sizeof(spectrum); | ||
3292 | spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); | ||
3293 | |||
3294 | if (iwl_is_associated(priv)) | ||
3295 | spectrum.start_time = | ||
3296 | iwl_add_beacon_time(priv->last_beacon_time, | ||
3297 | add_time, | ||
3298 | le16_to_cpu(priv->rxon_timing.beacon_interval)); | ||
3299 | else | ||
3300 | spectrum.start_time = 0; | ||
3301 | |||
3302 | spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT); | ||
3303 | spectrum.channels[0].channel = params->channel; | ||
3304 | spectrum.channels[0].type = type; | ||
3305 | if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK) | ||
3306 | spectrum.flags |= RXON_FLG_BAND_24G_MSK | | ||
3307 | RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK; | ||
3308 | |||
3309 | rc = iwl_send_cmd_sync(priv, &cmd); | ||
3310 | if (rc) | ||
3311 | return rc; | ||
3312 | |||
3313 | res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; | ||
3314 | if (res->hdr.flags & IWL_CMD_FAILED_MSK) { | ||
3315 | IWL_ERROR("Bad return from REPLY_RX_ON_ASSOC command\n"); | ||
3316 | rc = -EIO; | ||
3317 | } | ||
3318 | |||
3319 | spectrum_resp_status = le16_to_cpu(res->u.spectrum.status); | ||
3320 | switch (spectrum_resp_status) { | ||
3321 | case 0: /* Command will be handled */ | ||
3322 | if (res->u.spectrum.id != 0xff) { | ||
3323 | IWL_DEBUG_INFO | ||
3324 | ("Replaced existing measurement: %d\n", | ||
3325 | res->u.spectrum.id); | ||
3326 | priv->measurement_status &= ~MEASUREMENT_READY; | ||
3327 | } | ||
3328 | priv->measurement_status |= MEASUREMENT_ACTIVE; | ||
3329 | rc = 0; | ||
3330 | break; | ||
3331 | |||
3332 | case 1: /* Command will not be handled */ | ||
3333 | rc = -EAGAIN; | ||
3334 | break; | ||
3335 | } | ||
3336 | |||
3337 | dev_kfree_skb_any(cmd.meta.u.skb); | ||
3338 | |||
3339 | return rc; | ||
3340 | } | ||
3341 | #endif | ||
3342 | |||
3343 | static void iwl_txstatus_to_ieee(struct iwl_priv *priv, | ||
3344 | struct iwl_tx_info *tx_sta) | ||
3345 | { | ||
3346 | |||
3347 | tx_sta->status.ack_signal = 0; | ||
3348 | tx_sta->status.excessive_retries = 0; | ||
3349 | tx_sta->status.queue_length = 0; | ||
3350 | tx_sta->status.queue_number = 0; | ||
3351 | |||
3352 | if (in_interrupt()) | ||
3353 | ieee80211_tx_status_irqsafe(priv->hw, | ||
3354 | tx_sta->skb[0], &(tx_sta->status)); | ||
3355 | else | ||
3356 | ieee80211_tx_status(priv->hw, | ||
3357 | tx_sta->skb[0], &(tx_sta->status)); | ||
3358 | |||
3359 | tx_sta->skb[0] = NULL; | ||
3360 | } | ||
3361 | |||
3362 | /** | ||
3363 | * iwl_tx_queue_reclaim - Reclaim Tx queue entries no longer used by NIC. | ||
3364 | * | ||
3365 | * When the FW advances the 'R' index, all entries between the old and | ||
3366 | * new 'R' index need to be reclaimed. As a result, some free space becomes | ||
3367 | * available. If there is enough of it (above the low mark), wake the Tx queue. | ||
3368 | */ | ||
3369 | int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) | ||
3370 | { | ||
3371 | struct iwl_tx_queue *txq = &priv->txq[txq_id]; | ||
3372 | struct iwl_queue *q = &txq->q; | ||
3373 | int nfreed = 0; | ||
3374 | |||
3375 | if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) { | ||
3376 | IWL_ERROR("Read index for DMA queue txq id (%d), index %d, " | ||
3377 | "is out of range [0-%d] %d %d.\n", txq_id, | ||
3378 | index, q->n_bd, q->first_empty, q->last_used); | ||
3379 | return 0; | ||
3380 | } | ||
3381 | |||
3382 | for (index = iwl_queue_inc_wrap(index, q->n_bd); | ||
3383 | q->last_used != index; | ||
3384 | q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd)) { | ||
3385 | if (txq_id != IWL_CMD_QUEUE_NUM) { | ||
3386 | iwl_txstatus_to_ieee(priv, | ||
3387 | &(txq->txb[txq->q.last_used])); | ||
3388 | iwl_hw_txq_free_tfd(priv, txq); | ||
3389 | } else if (nfreed > 1) { | ||
3390 | IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index, | ||
3391 | q->first_empty, q->last_used); | ||
3392 | queue_work(priv->workqueue, &priv->restart); | ||
3393 | } | ||
3394 | nfreed++; | ||
3395 | } | ||
3396 | |||
3397 | if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) && | ||
3398 | (txq_id != IWL_CMD_QUEUE_NUM) && | ||
3399 | priv->mac80211_registered) | ||
3400 | ieee80211_wake_queue(priv->hw, txq_id); | ||
3401 | |||
3402 | |||
3403 | return nfreed; | ||
3404 | } | ||
3405 | |||
3406 | static int iwl_is_tx_success(u32 status) | ||
3407 | { | ||
3408 | return (status & 0xFF) == 0x1; | ||
3409 | } | ||
3410 | |||
3411 | /****************************************************************************** | ||
3412 | * | ||
3413 | * Generic RX handler implementations | ||
3414 | * | ||
3415 | ******************************************************************************/ | ||
3416 | static void iwl_rx_reply_tx(struct iwl_priv *priv, | ||
3417 | struct iwl_rx_mem_buffer *rxb) | ||
3418 | { | ||
3419 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3420 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); | ||
3421 | int txq_id = SEQ_TO_QUEUE(sequence); | ||
3422 | int index = SEQ_TO_INDEX(sequence); | ||
3423 | struct iwl_tx_queue *txq = &priv->txq[txq_id]; | ||
3424 | struct ieee80211_tx_status *tx_status; | ||
3425 | struct iwl_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; | ||
3426 | u32 status = le32_to_cpu(tx_resp->status); | ||
3427 | |||
3428 | if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) { | ||
3429 | IWL_ERROR("Read index for DMA queue txq_id (%d) index %d " | ||
3430 | "is out of range [0-%d] %d %d\n", txq_id, | ||
3431 | index, txq->q.n_bd, txq->q.first_empty, | ||
3432 | txq->q.last_used); | ||
3433 | return; | ||
3434 | } | ||
3435 | |||
3436 | tx_status = &(txq->txb[txq->q.last_used].status); | ||
3437 | |||
3438 | tx_status->retry_count = tx_resp->failure_frame; | ||
3439 | tx_status->queue_number = status; | ||
3440 | tx_status->queue_length = tx_resp->bt_kill_count; | ||
3441 | tx_status->queue_length |= tx_resp->failure_rts; | ||
3442 | |||
3443 | tx_status->flags = | ||
3444 | iwl_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0; | ||
3445 | |||
3446 | tx_status->control.tx_rate = iwl_rate_index_from_plcp(tx_resp->rate); | ||
3447 | |||
3448 | IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", | ||
3449 | txq_id, iwl_get_tx_fail_reason(status), status, | ||
3450 | tx_resp->rate, tx_resp->failure_frame); | ||
3451 | |||
3452 | IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index); | ||
3453 | if (index != -1) | ||
3454 | iwl_tx_queue_reclaim(priv, txq_id, index); | ||
3455 | |||
3456 | if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) | ||
3457 | IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n"); | ||
3458 | } | ||
3459 | |||
3460 | |||
3461 | static void iwl_rx_reply_alive(struct iwl_priv *priv, | ||
3462 | struct iwl_rx_mem_buffer *rxb) | ||
3463 | { | ||
3464 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3465 | struct iwl_alive_resp *palive; | ||
3466 | struct delayed_work *pwork; | ||
3467 | |||
3468 | palive = &pkt->u.alive_frame; | ||
3469 | |||
3470 | IWL_DEBUG_INFO("Alive ucode status 0x%08X revision " | ||
3471 | "0x%01X 0x%01X\n", | ||
3472 | palive->is_valid, palive->ver_type, | ||
3473 | palive->ver_subtype); | ||
3474 | |||
3475 | if (palive->ver_subtype == INITIALIZE_SUBTYPE) { | ||
3476 | IWL_DEBUG_INFO("Initialization Alive received.\n"); | ||
3477 | memcpy(&priv->card_alive_init, | ||
3478 | &pkt->u.alive_frame, | ||
3479 | sizeof(struct iwl_init_alive_resp)); | ||
3480 | pwork = &priv->init_alive_start; | ||
3481 | } else { | ||
3482 | IWL_DEBUG_INFO("Runtime Alive received.\n"); | ||
3483 | memcpy(&priv->card_alive, &pkt->u.alive_frame, | ||
3484 | sizeof(struct iwl_alive_resp)); | ||
3485 | pwork = &priv->alive_start; | ||
3486 | iwl_disable_events(priv); | ||
3487 | } | ||
3488 | |||
3489 | /* We delay the ALIVE response by 5ms to | ||
3490 | * give the HW RF Kill time to activate... */ | ||
3491 | if (palive->is_valid == UCODE_VALID_OK) | ||
3492 | queue_delayed_work(priv->workqueue, pwork, | ||
3493 | msecs_to_jiffies(5)); | ||
3494 | else | ||
3495 | IWL_WARNING("uCode did not respond OK.\n"); | ||
3496 | } | ||
3497 | |||
3498 | static void iwl_rx_reply_add_sta(struct iwl_priv *priv, | ||
3499 | struct iwl_rx_mem_buffer *rxb) | ||
3500 | { | ||
3501 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3502 | |||
3503 | IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); | ||
3504 | return; | ||
3505 | } | ||
3506 | |||
3507 | static void iwl_rx_reply_error(struct iwl_priv *priv, | ||
3508 | struct iwl_rx_mem_buffer *rxb) | ||
3509 | { | ||
3510 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3511 | |||
3512 | IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) " | ||
3513 | "seq 0x%04X ser 0x%08X\n", | ||
3514 | le32_to_cpu(pkt->u.err_resp.error_type), | ||
3515 | get_cmd_string(pkt->u.err_resp.cmd_id), | ||
3516 | pkt->u.err_resp.cmd_id, | ||
3517 | le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), | ||
3518 | le32_to_cpu(pkt->u.err_resp.error_info)); | ||
3519 | } | ||
3520 | |||
3521 | #define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x | ||
3522 | |||
3523 | static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | ||
3524 | { | ||
3525 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3526 | struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon; | ||
3527 | struct iwl_csa_notification *csa = &(pkt->u.csa_notif); | ||
3528 | IWL_DEBUG_11H("CSA notif: channel %d, status %d\n", | ||
3529 | le16_to_cpu(csa->channel), le32_to_cpu(csa->status)); | ||
3530 | rxon->channel = csa->channel; | ||
3531 | priv->staging_rxon.channel = csa->channel; | ||
3532 | } | ||
3533 | |||
3534 | static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, | ||
3535 | struct iwl_rx_mem_buffer *rxb) | ||
3536 | { | ||
3537 | #ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT | ||
3538 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3539 | struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif); | ||
3540 | |||
3541 | if (!report->state) { | ||
3542 | IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO, | ||
3543 | "Spectrum Measure Notification: Start\n"); | ||
3544 | return; | ||
3545 | } | ||
3546 | |||
3547 | memcpy(&priv->measure_report, report, sizeof(*report)); | ||
3548 | priv->measurement_status |= MEASUREMENT_READY; | ||
3549 | #endif | ||
3550 | } | ||
3551 | |||
3552 | static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv, | ||
3553 | struct iwl_rx_mem_buffer *rxb) | ||
3554 | { | ||
3555 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
3556 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3557 | struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif); | ||
3558 | IWL_DEBUG_RX("sleep mode: %d, src: %d\n", | ||
3559 | sleep->pm_sleep_mode, sleep->pm_wakeup_src); | ||
3560 | #endif | ||
3561 | } | ||
3562 | |||
3563 | static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv, | ||
3564 | struct iwl_rx_mem_buffer *rxb) | ||
3565 | { | ||
3566 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3567 | IWL_DEBUG_RADIO("Dumping %d bytes of unhandled " | ||
3568 | "notification for %s:\n", | ||
3569 | le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd)); | ||
3570 | iwl_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len)); | ||
3571 | } | ||
3572 | |||
3573 | static void iwl_bg_beacon_update(struct work_struct *work) | ||
3574 | { | ||
3575 | struct iwl_priv *priv = | ||
3576 | container_of(work, struct iwl_priv, beacon_update); | ||
3577 | struct sk_buff *beacon; | ||
3578 | |||
3579 | /* Pull the updated AP beacon from mac80211; this will fail if not in AP mode */ | ||
3580 | beacon = ieee80211_beacon_get(priv->hw, priv->interface_id, NULL); | ||
3581 | |||
3582 | if (!beacon) { | ||
3583 | IWL_ERROR("update beacon failed\n"); | ||
3584 | return; | ||
3585 | } | ||
3586 | |||
3587 | mutex_lock(&priv->mutex); | ||
3588 | /* new beacon skb is allocated every time; dispose previous.*/ | ||
3589 | if (priv->ibss_beacon) | ||
3590 | dev_kfree_skb(priv->ibss_beacon); | ||
3591 | |||
3592 | priv->ibss_beacon = beacon; | ||
3593 | mutex_unlock(&priv->mutex); | ||
3594 | |||
3595 | iwl_send_beacon_cmd(priv); | ||
3596 | } | ||
3597 | |||
3598 | static void iwl_rx_beacon_notif(struct iwl_priv *priv, | ||
3599 | struct iwl_rx_mem_buffer *rxb) | ||
3600 | { | ||
3601 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
3602 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3603 | struct iwl_beacon_notif *beacon = &(pkt->u.beacon_status); | ||
3604 | u8 rate = beacon->beacon_notify_hdr.rate; | ||
3605 | |||
3606 | IWL_DEBUG_RX("beacon status %x retries %d iss %d " | ||
3607 | "tsf %d %d rate %d\n", | ||
3608 | le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK, | ||
3609 | beacon->beacon_notify_hdr.failure_frame, | ||
3610 | le32_to_cpu(beacon->ibss_mgr_status), | ||
3611 | le32_to_cpu(beacon->high_tsf), | ||
3612 | le32_to_cpu(beacon->low_tsf), rate); | ||
3613 | #endif | ||
3614 | |||
3615 | if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && | ||
3616 | (!test_bit(STATUS_EXIT_PENDING, &priv->status))) | ||
3617 | queue_work(priv->workqueue, &priv->beacon_update); | ||
3618 | } | ||
3619 | |||
3620 | /* Service response to REPLY_SCAN_CMD (0x80) */ | ||
3621 | static void iwl_rx_reply_scan(struct iwl_priv *priv, | ||
3622 | struct iwl_rx_mem_buffer *rxb) | ||
3623 | { | ||
3624 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
3625 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3626 | struct iwl_scanreq_notification *notif = | ||
3627 | (struct iwl_scanreq_notification *)pkt->u.raw; | ||
3628 | |||
3629 | IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status); | ||
3630 | #endif | ||
3631 | } | ||
3632 | |||
3633 | /* Service SCAN_START_NOTIFICATION (0x82) */ | ||
3634 | static void iwl_rx_scan_start_notif(struct iwl_priv *priv, | ||
3635 | struct iwl_rx_mem_buffer *rxb) | ||
3636 | { | ||
3637 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3638 | struct iwl_scanstart_notification *notif = | ||
3639 | (struct iwl_scanstart_notification *)pkt->u.raw; | ||
3640 | priv->scan_start_tsf = le32_to_cpu(notif->tsf_low); | ||
3641 | IWL_DEBUG_SCAN("Scan start: " | ||
3642 | "%d [802.11%s] " | ||
3643 | "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", | ||
3644 | notif->channel, | ||
3645 | notif->band ? "bg" : "a", | ||
3646 | notif->tsf_high, | ||
3647 | notif->tsf_low, notif->status, notif->beacon_timer); | ||
3648 | } | ||
3649 | |||
3650 | /* Service SCAN_RESULTS_NOTIFICATION (0x83) */ | ||
3651 | static void iwl_rx_scan_results_notif(struct iwl_priv *priv, | ||
3652 | struct iwl_rx_mem_buffer *rxb) | ||
3653 | { | ||
3654 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3655 | struct iwl_scanresults_notification *notif = | ||
3656 | (struct iwl_scanresults_notification *)pkt->u.raw; | ||
3657 | |||
3658 | IWL_DEBUG_SCAN("Scan ch.res: " | ||
3659 | "%d [802.11%s] " | ||
3660 | "(TSF: 0x%08X:%08X) - %d " | ||
3661 | "elapsed=%lu usec (%dms since last)\n", | ||
3662 | notif->channel, | ||
3663 | notif->band ? "bg" : "a", | ||
3664 | le32_to_cpu(notif->tsf_high), | ||
3665 | le32_to_cpu(notif->tsf_low), | ||
3666 | le32_to_cpu(notif->statistics[0]), | ||
3667 | le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf, | ||
3668 | jiffies_to_msecs(elapsed_jiffies | ||
3669 | (priv->last_scan_jiffies, jiffies))); | ||
3670 | |||
3671 | priv->last_scan_jiffies = jiffies; | ||
3672 | } | ||
3673 | |||
3674 | /* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ | ||
3675 | static void iwl_rx_scan_complete_notif(struct iwl_priv *priv, | ||
3676 | struct iwl_rx_mem_buffer *rxb) | ||
3677 | { | ||
3678 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3679 | struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw; | ||
3680 | |||
3681 | IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", | ||
3682 | scan_notif->scanned_channels, | ||
3683 | scan_notif->tsf_low, | ||
3684 | scan_notif->tsf_high, scan_notif->status); | ||
3685 | |||
3686 | /* The HW is no longer scanning */ | ||
3687 | clear_bit(STATUS_SCAN_HW, &priv->status); | ||
3688 | |||
3689 | /* The scan completion notification came in, so kill that timer... */ | ||
3690 | cancel_delayed_work(&priv->scan_check); | ||
3691 | |||
3692 | IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n", | ||
3693 | (priv->scan_bands == 2) ? "2.4" : "5.2", | ||
3694 | jiffies_to_msecs(elapsed_jiffies | ||
3695 | (priv->scan_pass_start, jiffies))); | ||
3696 | |||
3697 | /* Remove this scanned band from the count | ||
3698 | * of pending bands to scan */ | ||
3699 | priv->scan_bands--; | ||
3700 | |||
3701 | /* If a request to abort was given, or the scan did not succeed | ||
3702 | * then we reset the scan state machine and terminate, | ||
3703 | * re-queuing another scan if one has been requested */ | ||
3704 | if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { | ||
3705 | IWL_DEBUG_INFO("Aborted scan completed.\n"); | ||
3706 | clear_bit(STATUS_SCAN_ABORTING, &priv->status); | ||
3707 | } else { | ||
3708 | /* If there are more bands on this scan pass reschedule */ | ||
3709 | if (priv->scan_bands > 0) | ||
3710 | goto reschedule; | ||
3711 | } | ||
3712 | |||
3713 | priv->last_scan_jiffies = jiffies; | ||
3714 | IWL_DEBUG_INFO("Setting scan to off\n"); | ||
3715 | |||
3716 | clear_bit(STATUS_SCANNING, &priv->status); | ||
3717 | |||
3718 | IWL_DEBUG_INFO("Scan took %dms\n", | ||
3719 | jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies))); | ||
3720 | |||
3721 | queue_work(priv->workqueue, &priv->scan_completed); | ||
3722 | |||
3723 | return; | ||
3724 | |||
3725 | reschedule: | ||
3726 | priv->scan_pass_start = jiffies; | ||
3727 | queue_work(priv->workqueue, &priv->request_scan); | ||
3728 | } | ||
3729 | |||
3730 | /* Handle notification from uCode that card's power state is changing | ||
3731 | * due to software, hardware, or critical temperature RFKILL */ | ||
3732 | static void iwl_rx_card_state_notif(struct iwl_priv *priv, | ||
3733 | struct iwl_rx_mem_buffer *rxb) | ||
3734 | { | ||
3735 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3736 | u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); | ||
3737 | unsigned long status = priv->status; | ||
3738 | |||
3739 | IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n", | ||
3740 | (flags & HW_CARD_DISABLED) ? "Kill" : "On", | ||
3741 | (flags & SW_CARD_DISABLED) ? "Kill" : "On"); | ||
3742 | |||
3743 | iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, | ||
3744 | CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); | ||
3745 | |||
3746 | if (flags & HW_CARD_DISABLED) | ||
3747 | set_bit(STATUS_RF_KILL_HW, &priv->status); | ||
3748 | else | ||
3749 | clear_bit(STATUS_RF_KILL_HW, &priv->status); | ||
3750 | |||
3751 | |||
3752 | if (flags & SW_CARD_DISABLED) | ||
3753 | set_bit(STATUS_RF_KILL_SW, &priv->status); | ||
3754 | else | ||
3755 | clear_bit(STATUS_RF_KILL_SW, &priv->status); | ||
3756 | |||
3757 | iwl_scan_cancel(priv); | ||
3758 | |||
3759 | if ((test_bit(STATUS_RF_KILL_HW, &status) != | ||
3760 | test_bit(STATUS_RF_KILL_HW, &priv->status)) || | ||
3761 | (test_bit(STATUS_RF_KILL_SW, &status) != | ||
3762 | test_bit(STATUS_RF_KILL_SW, &priv->status))) | ||
3763 | queue_work(priv->workqueue, &priv->rf_kill); | ||
3764 | else | ||
3765 | wake_up_interruptible(&priv->wait_command_queue); | ||
3766 | } | ||
3767 | |||
3768 | /** | ||
3769 | * iwl_setup_rx_handlers - Initialize Rx handler callbacks | ||
3770 | * | ||
3771 | * Setup the RX handlers for each of the reply types sent from the uCode | ||
3772 | * to the host. | ||
3773 | * | ||
3774 | * This function chains into the hardware specific files for them to setup | ||
3775 | * any hardware specific handlers as well. | ||
3776 | */ | ||
3777 | static void iwl_setup_rx_handlers(struct iwl_priv *priv) | ||
3778 | { | ||
3779 | priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive; | ||
3780 | priv->rx_handlers[REPLY_ADD_STA] = iwl_rx_reply_add_sta; | ||
3781 | priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error; | ||
3782 | priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; | ||
3783 | priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = | ||
3784 | iwl_rx_spectrum_measure_notif; | ||
3785 | priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; | ||
3786 | priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = | ||
3787 | iwl_rx_pm_debug_statistics_notif; | ||
3788 | priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif; | ||
3789 | |||
3790 | /* NOTE: iwl_rx_statistics is different based on whether | ||
3791 | * the build is for the 3945 or the 4965. See the | ||
3792 | * corresponding implementation in iwl-XXXX.c | ||
3793 | * | ||
3794 | * The same handler is used for both the REPLY to a | ||
3795 | * discrete statistics request from the host as well as | ||
3796 | * for the periodic statistics notification from the uCode | ||
3797 | */ | ||
3798 | priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_hw_rx_statistics; | ||
3799 | priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_hw_rx_statistics; | ||
3800 | |||
3801 | priv->rx_handlers[REPLY_SCAN_CMD] = iwl_rx_reply_scan; | ||
3802 | priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl_rx_scan_start_notif; | ||
3803 | priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] = | ||
3804 | iwl_rx_scan_results_notif; | ||
3805 | priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] = | ||
3806 | iwl_rx_scan_complete_notif; | ||
3807 | priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif; | ||
3808 | priv->rx_handlers[REPLY_TX] = iwl_rx_reply_tx; | ||
3809 | |||
3810 | /* Setup hardware specific Rx handlers */ | ||
3811 | iwl_hw_rx_handler_setup(priv); | ||
3812 | } | ||
3813 | |||
3814 | /** | ||
3815 | * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them | ||
3816 | * @rxb: Rx buffer to reclaim | ||
3817 | * | ||
3818 | * If an Rx buffer has an async callback associated with it the callback | ||
3819 | * will be executed. The attached skb (if present) will only be freed | ||
3820 | * if the callback returns 1 | ||
3821 | */ | ||
3822 | static void iwl_tx_cmd_complete(struct iwl_priv *priv, | ||
3823 | struct iwl_rx_mem_buffer *rxb) | ||
3824 | { | ||
3825 | struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; | ||
3826 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); | ||
3827 | int txq_id = SEQ_TO_QUEUE(sequence); | ||
3828 | int index = SEQ_TO_INDEX(sequence); | ||
3829 | int huge = sequence & SEQ_HUGE_FRAME; | ||
3830 | int cmd_index; | ||
3831 | struct iwl_cmd *cmd; | ||
3832 | |||
3833 | /* If a Tx command is being handled and it isn't in the actual | ||
3834 | * command queue then a command routing bug has been introduced | ||
3835 | * in the queue management code. */ | ||
3836 | if (txq_id != IWL_CMD_QUEUE_NUM) | ||
3837 | IWL_ERROR("Error wrong command queue %d command id 0x%X\n", | ||
3838 | txq_id, pkt->hdr.cmd); | ||
3839 | BUG_ON(txq_id != IWL_CMD_QUEUE_NUM); | ||
3840 | |||
3841 | cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge); | ||
3842 | cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index]; | ||
3843 | |||
3844 | /* Input error checking is done when commands are added to queue. */ | ||
3845 | if (cmd->meta.flags & CMD_WANT_SKB) { | ||
3846 | cmd->meta.source->u.skb = rxb->skb; | ||
3847 | rxb->skb = NULL; | ||
3848 | } else if (cmd->meta.u.callback && | ||
3849 | !cmd->meta.u.callback(priv, cmd, rxb->skb)) | ||
3850 | rxb->skb = NULL; | ||
3851 | |||
3852 | iwl_tx_queue_reclaim(priv, txq_id, index); | ||
3853 | |||
3854 | if (!(cmd->meta.flags & CMD_ASYNC)) { | ||
3855 | clear_bit(STATUS_HCMD_ACTIVE, &priv->status); | ||
3856 | wake_up_interruptible(&priv->wait_command_queue); | ||
3857 | } | ||
3858 | } | ||
3859 | |||
3860 | /************************** RX-FUNCTIONS ****************************/ | ||
3861 | /* | ||
3862 | * Rx theory of operation | ||
3863 | * | ||
3864 | * The host allocates 32 DMA target addresses and passes the host address | ||
3865 | * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is | ||
3866 | * 0 to 31 | ||
3867 | * | ||
3868 | * Rx Queue Indexes | ||
3869 | * The host/firmware share two index registers for managing the Rx buffers. | ||
3870 | * | ||
3871 | * The READ index maps to the first position that the firmware may be writing | ||
3872 | * to -- the driver can read up to (but not including) this position and get | ||
3873 | * good data. | ||
3874 | * The READ index is managed by the firmware once the card is enabled. | ||
3875 | * | ||
3876 | * The WRITE index maps to the last position the driver has read from -- the | ||
3877 | * position preceding WRITE is the last slot the firmware can place a packet. | ||
3878 | * | ||
3879 | * The queue is empty (no good data) if WRITE = READ - 1, and is full if | ||
3880 | * WRITE = READ. | ||
3881 | * | ||
3882 | * During initialization the host sets up the READ queue position to the first | ||
3883 | * INDEX position, and WRITE to the last (READ - 1 wrapped) | ||
3884 | * | ||
3885 | * When the firmware places a packet in a buffer it will advance the READ index | ||
3886 | * and fire the RX interrupt. The driver can then query the READ index and | ||
3887 | * process as many packets as possible, moving the WRITE index forward as it | ||
3888 | * resets the Rx queue buffers with new memory. | ||
3889 | * | ||
3890 | * The management in the driver is as follows: | ||
3891 | * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When | ||
3892 | * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled | ||
3893 | * to replenish the iwl->rxq->rx_free. | ||
3894 | * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the | ||
3895 | * iwl->rxq is replenished and the READ INDEX is updated (updating the | ||
3896 | * 'processed' and 'read' driver indexes as well) | ||
3897 | * + A received packet is processed and handed to the kernel network stack, | ||
3898 | * detached from the iwl->rxq. The driver 'processed' index is updated. | ||
3899 | * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free | ||
3900 | * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ | ||
3901 | * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there | ||
3902 | * were enough free buffers and RX_STALLED is set it is cleared. | ||
3903 | * | ||
3904 | * | ||
3905 | * Driver sequence: | ||
3906 | * | ||
3907 | * iwl_rx_queue_alloc() Allocates rx_free | ||
3908 | * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls | ||
3909 | * iwl_rx_queue_restock | ||
3910 | * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx | ||
3911 | * queue, updates firmware pointers, and updates | ||
3912 | * the WRITE index. If insufficient rx_free buffers | ||
3913 | * are available, schedules iwl_rx_replenish | ||
3914 | * | ||
3915 | * -- enable interrupts -- | ||
3916 | * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the | ||
3917 | * READ INDEX, detaching the SKB from the pool. | ||
3918 | * Moves the packet buffer from queue to rx_used. | ||
3919 | * Calls iwl_rx_queue_restock to refill any empty | ||
3920 | * slots. | ||
3921 | * ... | ||
3922 | * | ||
3923 | */ | ||
3924 | |||
3925 | /** | ||
3926 | * iwl_rx_queue_space - Return number of free slots available in queue. | ||
3927 | */ | ||
3928 | static int iwl_rx_queue_space(const struct iwl_rx_queue *q) | ||
3929 | { | ||
3930 | int s = q->read - q->write; | ||
3931 | if (s <= 0) | ||
3932 | s += RX_QUEUE_SIZE; | ||
3933 | /* keep some buffer to not confuse full and empty queue */ | ||
3934 | s -= 2; | ||
3935 | if (s < 0) | ||
3936 | s = 0; | ||
3937 | return s; | ||
3938 | } | ||
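/*
 * Worked example of the arithmetic above (assuming the usual 256-entry
 * RX_QUEUE_SIZE): with read == write the driver has caught up with the
 * firmware and 254 slots may be restocked; with read == 10 and write == 40
 * the difference wraps to 10 - 40 + 256 = 226, leaving 224 usable slots
 * after the two reserved ones.
 */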
3939 | |||
3940 | /** | ||
3941 | * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue | ||
3942 | * | ||
3943 | * NOTE: This function has 3945 and 4965 specific code sections | ||
3944 | * but is declared in base due to the majority of the | ||
3945 | * implementation being the same (only a numeric constant is | ||
3946 | * different) | ||
3947 | * | ||
3948 | */ | ||
3949 | int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q) | ||
3950 | { | ||
3951 | u32 reg = 0; | ||
3952 | int rc = 0; | ||
3953 | unsigned long flags; | ||
3954 | |||
3955 | spin_lock_irqsave(&q->lock, flags); | ||
3956 | |||
3957 | if (q->need_update == 0) | ||
3958 | goto exit_unlock; | ||
3959 | |||
3960 | if (test_bit(STATUS_POWER_PMI, &priv->status)) { | ||
3961 | reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); | ||
3962 | |||
3963 | if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { | ||
3964 | iwl_set_bit(priv, CSR_GP_CNTRL, | ||
3965 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | ||
3966 | goto exit_unlock; | ||
3967 | } | ||
3968 | |||
3969 | rc = iwl_grab_restricted_access(priv); | ||
3970 | if (rc) | ||
3971 | goto exit_unlock; | ||
3972 | |||
3973 | iwl_write_restricted(priv, FH_RSCSR_CHNL0_WPTR, | ||
3974 | q->write & ~0x7); | ||
3975 | iwl_release_restricted_access(priv); | ||
3976 | } else | ||
3977 | iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7); | ||
3978 | |||
3979 | |||
3980 | q->need_update = 0; | ||
3981 | |||
3982 | exit_unlock: | ||
3983 | spin_unlock_irqrestore(&q->lock, flags); | ||
3984 | return rc; | ||
3985 | } | ||
3986 | |||
3987 | /** | ||
3988 | * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer pointer. | ||
3989 | * | ||
3990 | * NOTE: This function has 3945 and 4965 specific code paths in it. | ||
3991 | */ | ||
3992 | static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv, | ||
3993 | dma_addr_t dma_addr) | ||
3994 | { | ||
3995 | return cpu_to_le32((u32)dma_addr); | ||
3996 | } | ||
3997 | |||
3998 | /** | ||
3999 | * iwl_rx_queue_restock - refill RX queue from pre-allocated pool | ||
4000 | * | ||
4001 | * If there are slots in the RX queue that need to be restocked, | ||
4002 | * and we have free pre-allocated buffers, fill the ranks as much | ||
4003 | * as we can pulling from rx_free. | ||
4004 | * | ||
4005 | * This moves the 'write' index forward to catch up with 'processed', and | ||
4006 | * also updates the memory address in the firmware to reference the new | ||
4007 | * target buffer. | ||
4008 | */ | ||
4009 | int iwl_rx_queue_restock(struct iwl_priv *priv) | ||
4010 | { | ||
4011 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
4012 | struct list_head *element; | ||
4013 | struct iwl_rx_mem_buffer *rxb; | ||
4014 | unsigned long flags; | ||
4015 | int write, rc; | ||
4016 | |||
4017 | spin_lock_irqsave(&rxq->lock, flags); | ||
4018 | write = rxq->write & ~0x7; | ||
4019 | while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { | ||
4020 | element = rxq->rx_free.next; | ||
4021 | rxb = list_entry(element, struct iwl_rx_mem_buffer, list); | ||
4022 | list_del(element); | ||
4023 | rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr); | ||
4024 | rxq->queue[rxq->write] = rxb; | ||
4025 | rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; | ||
4026 | rxq->free_count--; | ||
4027 | } | ||
4028 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
4029 | /* If the pre-allocated buffer pool is dropping low, schedule to | ||
4030 | * refill it */ | ||
4031 | if (rxq->free_count <= RX_LOW_WATERMARK) | ||
4032 | queue_work(priv->workqueue, &priv->rx_replenish); | ||
4033 | |||
4034 | |||
4035 | /* If we've added more space for the firmware to place data, tell it */ | ||
4036 | if ((write != (rxq->write & ~0x7)) | ||
4037 | || (abs(rxq->write - rxq->read) > 7)) { | ||
4038 | spin_lock_irqsave(&rxq->lock, flags); | ||
4039 | rxq->need_update = 1; | ||
4040 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
4041 | rc = iwl_rx_queue_update_write_ptr(priv, rxq); | ||
4042 | if (rc) | ||
4043 | return rc; | ||
4044 | } | ||
4045 | |||
4046 | return 0; | ||
4047 | } | ||
4048 | |||
4049 | /** | ||
4050 | * iwl_rx_replenish - Move all used packets from rx_used to rx_free | ||
4051 | * | ||
4052 | * When moving to rx_free an SKB is allocated for the slot. | ||
4053 | * | ||
4054 | * Also restock the Rx queue via iwl_rx_queue_restock. | ||
4055 | * This is called as a scheduled work item (except during initialization) | ||
4056 | */ | ||
4057 | void iwl_rx_replenish(void *data) | ||
4058 | { | ||
4059 | struct iwl_priv *priv = data; | ||
4060 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
4061 | struct list_head *element; | ||
4062 | struct iwl_rx_mem_buffer *rxb; | ||
4063 | unsigned long flags; | ||
4064 | spin_lock_irqsave(&rxq->lock, flags); | ||
4065 | while (!list_empty(&rxq->rx_used)) { | ||
4066 | element = rxq->rx_used.next; | ||
4067 | rxb = list_entry(element, struct iwl_rx_mem_buffer, list); | ||
4068 | rxb->skb = | ||
4069 | alloc_skb(IWL_RX_BUF_SIZE, __GFP_NOWARN | GFP_ATOMIC); | ||
4070 | if (!rxb->skb) { | ||
4071 | if (net_ratelimit()) | ||
4072 | printk(KERN_CRIT DRV_NAME | ||
4073 | ": Can not allocate SKB buffers\n"); | ||
4074 | /* We don't reschedule replenish work here -- we will | ||
4075 | * call the restock method and if it still needs | ||
4076 | * more buffers it will schedule replenish */ | ||
4077 | break; | ||
4078 | } | ||
4079 | priv->alloc_rxb_skb++; | ||
4080 | list_del(element); | ||
4081 | rxb->dma_addr = | ||
4082 | pci_map_single(priv->pci_dev, rxb->skb->data, | ||
4083 | IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); | ||
4084 | list_add_tail(&rxb->list, &rxq->rx_free); | ||
4085 | rxq->free_count++; | ||
4086 | } | ||
4087 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
4088 | |||
4089 | spin_lock_irqsave(&priv->lock, flags); | ||
4090 | iwl_rx_queue_restock(priv); | ||
4091 | spin_unlock_irqrestore(&priv->lock, flags); | ||
4092 | } | ||
4093 | |||
4094 | /* Assumes that the skb field of the buffers in 'pool' is kept accurate. | ||
4095 | * If an SKB has been detached, the POOL needs to have its SKB set to NULL. | ||
4096 | * This free routine walks the list of POOL entries and, if an SKB is | ||
4097 | * non-NULL, it is unmapped and freed. | ||
4098 | */ | ||
4099 | void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | ||
4100 | { | ||
4101 | int i; | ||
4102 | for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { | ||
4103 | if (rxq->pool[i].skb != NULL) { | ||
4104 | pci_unmap_single(priv->pci_dev, | ||
4105 | rxq->pool[i].dma_addr, | ||
4106 | IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); | ||
4107 | dev_kfree_skb(rxq->pool[i].skb); | ||
4108 | } | ||
4109 | } | ||
4110 | |||
4111 | pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd, | ||
4112 | rxq->dma_addr); | ||
4113 | rxq->bd = NULL; | ||
4114 | } | ||
4115 | |||
4116 | int iwl_rx_queue_alloc(struct iwl_priv *priv) | ||
4117 | { | ||
4118 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
4119 | struct pci_dev *dev = priv->pci_dev; | ||
4120 | int i; | ||
4121 | |||
4122 | spin_lock_init(&rxq->lock); | ||
4123 | INIT_LIST_HEAD(&rxq->rx_free); | ||
4124 | INIT_LIST_HEAD(&rxq->rx_used); | ||
4125 | rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr); | ||
4126 | if (!rxq->bd) | ||
4127 | return -ENOMEM; | ||
4128 | /* Fill the rx_used queue with _all_ of the Rx buffers */ | ||
4129 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) | ||
4130 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | ||
4131 | /* Set us so that we have processed and used all buffers, but have | ||
4132 | * not restocked the Rx queue with fresh buffers */ | ||
4133 | rxq->read = rxq->write = 0; | ||
4134 | rxq->free_count = 0; | ||
4135 | rxq->need_update = 0; | ||
4136 | return 0; | ||
4137 | } | ||
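/*
 * The 4 * RX_QUEUE_SIZE allocation above holds the receive buffer
 * descriptor array: one 32-bit DMA address per queue slot, as produced by
 * iwl_dma_addr2rbd_ptr().
 */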
4138 | |||
4139 | void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | ||
4140 | { | ||
4141 | unsigned long flags; | ||
4142 | int i; | ||
4143 | spin_lock_irqsave(&rxq->lock, flags); | ||
4144 | INIT_LIST_HEAD(&rxq->rx_free); | ||
4145 | INIT_LIST_HEAD(&rxq->rx_used); | ||
4146 | /* Fill the rx_used queue with _all_ of the Rx buffers */ | ||
4147 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { | ||
4148 | /* In the reset function, these buffers may have been allocated | ||
4149 | * to an SKB, so we need to unmap and free potential storage */ | ||
4150 | if (rxq->pool[i].skb != NULL) { | ||
4151 | pci_unmap_single(priv->pci_dev, | ||
4152 | rxq->pool[i].dma_addr, | ||
4153 | IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); | ||
4154 | priv->alloc_rxb_skb--; | ||
4155 | dev_kfree_skb(rxq->pool[i].skb); | ||
4156 | rxq->pool[i].skb = NULL; | ||
4157 | } | ||
4158 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | ||
4159 | } | ||
4160 | |||
4161 | /* Set us so that we have processed and used all buffers, but have | ||
4162 | * not restocked the Rx queue with fresh buffers */ | ||
4163 | rxq->read = rxq->write = 0; | ||
4164 | rxq->free_count = 0; | ||
4165 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
4166 | } | ||
4167 | |||
4168 | /* Convert linear signal-to-noise ratio into dB */ | ||
4169 | static u8 ratio2dB[100] = { | ||
4170 | /* 0 1 2 3 4 5 6 7 8 9 */ | ||
4171 | 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */ | ||
4172 | 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */ | ||
4173 | 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */ | ||
4174 | 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */ | ||
4175 | 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */ | ||
4176 | 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */ | ||
4177 | 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */ | ||
4178 | 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */ | ||
4179 | 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */ | ||
4180 | 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */ | ||
4181 | }; | ||
4182 | |||
4183 | /* Calculates a relative dB value from a ratio of linear | ||
4184 | * (i.e. not dB) signal levels. | ||
4185 | * Conversion assumes that levels are voltages (20*log), not powers (10*log). */ | ||
4186 | int iwl_calc_db_from_ratio(int sig_ratio) | ||
4187 | { | ||
4188 | /* Anything above 1000:1 just report as 60 dB */ | ||
4189 | if (sig_ratio > 1000) | ||
4190 | return 60; | ||
4191 | |||
4192 | /* Above 100:1, divide by 10 and use table, | ||
4193 | * add 20 dB to make up for divide by 10 */ | ||
4194 | if (sig_ratio > 100) | ||
4195 | return (20 + (int)ratio2dB[sig_ratio/10]); | ||
4196 | |||
4197 | /* We shouldn't see this */ | ||
4198 | if (sig_ratio < 1) | ||
4199 | return 0; | ||
4200 | |||
4201 | /* Use table for ratios 1:1 - 99:1 */ | ||
4202 | return (int)ratio2dB[sig_ratio]; | ||
4203 | } | ||
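/*
 * Illustrative sketch (not compiled into the driver): worked examples of
 * the ratio-to-dB conversion above.
 */
#if 0
static void iwl_calc_db_from_ratio_example(void)
{
        BUG_ON(iwl_calc_db_from_ratio(40) != 32);    /* table lookup */
        BUG_ON(iwl_calc_db_from_ratio(250) != 48);   /* 20 + ratio2dB[25] */
        BUG_ON(iwl_calc_db_from_ratio(2000) != 60);  /* clamped at 60 dB */
}
#endif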
4204 | |||
4205 | #define PERFECT_RSSI (-20) /* dBm */ | ||
4206 | #define WORST_RSSI (-95) /* dBm */ | ||
4207 | #define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI) | ||
4208 | |||
4209 | /* Calculate an indication of rx signal quality (a percentage, not dBm!). | ||
4210 | * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info | ||
4211 | * about formulas used below. */ | ||
4212 | int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm) | ||
4213 | { | ||
4214 | int sig_qual; | ||
4215 | int degradation = PERFECT_RSSI - rssi_dbm; | ||
4216 | |||
4217 | /* If we get a noise measurement, use signal-to-noise ratio (SNR) | ||
4218 | * as indicator; formula is (signal dbm - noise dbm). | ||
4219 | * SNR at or above 40 is a great signal (100%). | ||
4220 | * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator. | ||
4221 | * Weakest usable signal is usually 10 - 15 dB SNR. */ | ||
4222 | if (noise_dbm) { | ||
4223 | if (rssi_dbm - noise_dbm >= 40) | ||
4224 | return 100; | ||
4225 | else if (rssi_dbm < noise_dbm) | ||
4226 | return 0; | ||
4227 | sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2; | ||
4228 | |||
4229 | /* Else use just the signal level. | ||
4230 | * This formula is a least squares fit of data points collected and | ||
4231 | * compared with a reference system that had a percentage (%) display | ||
4232 | * for signal quality. */ | ||
4233 | } else | ||
4234 | sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation * | ||
4235 | (15 * RSSI_RANGE + 62 * degradation)) / | ||
4236 | (RSSI_RANGE * RSSI_RANGE); | ||
4237 | |||
4238 | if (sig_qual > 100) | ||
4239 | sig_qual = 100; | ||
4240 | else if (sig_qual < 1) | ||
4241 | sig_qual = 0; | ||
4242 | |||
4243 | return sig_qual; | ||
4244 | } | ||
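/*
 * Illustrative sketch (not compiled into the driver): worked examples of
 * the quality indicator above.
 */
#if 0
static void iwl_calc_sig_qual_example(void)
{
        /* With a noise measurement: -60 dBm signal over -85 dBm noise is a
         * 25 dB SNR, reported as (25 * 5) / 2 = 62%. */
        BUG_ON(iwl_calc_sig_qual(-60, -85) != 62);
        /* Without noise: a PERFECT_RSSI (-20 dBm) signal has zero
         * degradation and reports 100%. */
        BUG_ON(iwl_calc_sig_qual(-20, 0) != 100);
}
#endif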
4245 | |||
4246 | /** | ||
4247 | * iwl_rx_handle - Main entry function for receiving responses from the uCode | ||
4248 | * | ||
4249 | * Uses the priv->rx_handlers callback function array to invoke | ||
4250 | * the appropriate handlers, including command responses, | ||
4251 | * frame-received notifications, and other notifications. | ||
4252 | */ | ||
4253 | static void iwl_rx_handle(struct iwl_priv *priv) | ||
4254 | { | ||
4255 | struct iwl_rx_mem_buffer *rxb; | ||
4256 | struct iwl_rx_packet *pkt; | ||
4257 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
4258 | u32 r, i; | ||
4259 | int reclaim; | ||
4260 | unsigned long flags; | ||
4261 | |||
4262 | r = iwl_hw_get_rx_read(priv); | ||
4263 | i = rxq->read; | ||
4264 | |||
4265 | /* Rx interrupt, but nothing sent from uCode */ | ||
4266 | if (i == r) | ||
4267 | IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i); | ||
4268 | |||
4269 | while (i != r) { | ||
4270 | rxb = rxq->queue[i]; | ||
4271 | |||
4272 | /* If an RXB doesn't have a queue slot associated with it | ||
4273 | * then a bug has been introduced in the queue refilling | ||
4274 | * routines -- catch it here */ | ||
4275 | BUG_ON(rxb == NULL); | ||
4276 | |||
4277 | rxq->queue[i] = NULL; | ||
4278 | |||
4279 | pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, | ||
4280 | IWL_RX_BUF_SIZE, | ||
4281 | PCI_DMA_FROMDEVICE); | ||
4282 | pkt = (struct iwl_rx_packet *)rxb->skb->data; | ||
4283 | |||
4284 | /* Reclaim a command buffer only if this packet is a response | ||
4285 | * to a (driver-originated) command. | ||
4286 | * If the packet (e.g. Rx frame) originated from uCode, | ||
4287 | * there is no command buffer to reclaim. | ||
4288 | * Ucode should set SEQ_RX_FRAME bit if ucode-originated, | ||
4289 | * but apparently a few don't get set; catch them here. */ | ||
4290 | reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && | ||
4291 | (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && | ||
4292 | (pkt->hdr.cmd != REPLY_TX); | ||
4293 | |||
4294 | /* Based on type of command response or notification, | ||
4295 | * handle those that need handling via function in | ||
4296 | * rx_handlers table. See iwl_setup_rx_handlers() */ | ||
4297 | if (priv->rx_handlers[pkt->hdr.cmd]) { | ||
4298 | IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR, | ||
4299 | "r = %d, i = %d, %s, 0x%02x\n", r, i, | ||
4300 | get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); | ||
4301 | priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); | ||
4302 | } else { | ||
4303 | /* No handling needed */ | ||
4304 | IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR, | ||
4305 | "r %d i %d No handler needed for %s, 0x%02x\n", | ||
4306 | r, i, get_cmd_string(pkt->hdr.cmd), | ||
4307 | pkt->hdr.cmd); | ||
4308 | } | ||
4309 | |||
4310 | if (reclaim) { | ||
4311 | /* Invoke any callbacks, transfer the skb to caller, | ||
4312 | * and fire off the (possibly) blocking iwl_send_cmd() | ||
4313 | * as we reclaim the driver command queue */ | ||
4314 | if (rxb && rxb->skb) | ||
4315 | iwl_tx_cmd_complete(priv, rxb); | ||
4316 | else | ||
4317 | IWL_WARNING("Claim null rxb?\n"); | ||
4318 | } | ||
4319 | |||
4320 | /* For now we just don't re-use anything. We can tweak this | ||
4321 | * later to try and re-use notification packets and SKBs that | ||
4322 | * fail to Rx correctly */ | ||
4323 | if (rxb->skb != NULL) { | ||
4324 | priv->alloc_rxb_skb--; | ||
4325 | dev_kfree_skb_any(rxb->skb); | ||
4326 | rxb->skb = NULL; | ||
4327 | } | ||
4328 | |||
4329 | pci_unmap_single(priv->pci_dev, rxb->dma_addr, | ||
4330 | IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); | ||
4331 | spin_lock_irqsave(&rxq->lock, flags); | ||
4332 | list_add_tail(&rxb->list, &priv->rxq.rx_used); | ||
4333 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
4334 | i = (i + 1) & RX_QUEUE_MASK; | ||
4335 | } | ||
4336 | |||
4337 | /* Backtrack one entry */ | ||
4338 | priv->rxq.read = i; | ||
4339 | iwl_rx_queue_restock(priv); | ||
4340 | } | ||
4341 | |||
4342 | int iwl_tx_queue_update_write_ptr(struct iwl_priv *priv, | ||
4343 | struct iwl_tx_queue *txq) | ||
4344 | { | ||
4345 | u32 reg = 0; | ||
4346 | int rc = 0; | ||
4347 | int txq_id = txq->q.id; | ||
4348 | |||
4349 | if (txq->need_update == 0) | ||
4350 | return rc; | ||
4351 | |||
4352 | /* if we're trying to save power */ | ||
4353 | if (test_bit(STATUS_POWER_PMI, &priv->status)) { | ||
4354 | /* wake up nic if it's powered down ... | ||
4355 | * uCode will wake up, and interrupt us again, so next | ||
4356 | * time we'll skip this part. */ | ||
4357 | reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); | ||
4358 | |||
4359 | if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { | ||
4360 | IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg); | ||
4361 | iwl_set_bit(priv, CSR_GP_CNTRL, | ||
4362 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | ||
4363 | return rc; | ||
4364 | } | ||
4365 | |||
4366 | /* restore this queue's parameters in nic hardware. */ | ||
4367 | rc = iwl_grab_restricted_access(priv); | ||
4368 | if (rc) | ||
4369 | return rc; | ||
4370 | iwl_write_restricted(priv, HBUS_TARG_WRPTR, | ||
4371 | txq->q.first_empty | (txq_id << 8)); | ||
4372 | iwl_release_restricted_access(priv); | ||
4373 | |||
4374 | /* else not in power-save mode, uCode will never sleep when we're | ||
4375 | * trying to tx (during RFKILL, we're not trying to tx). */ | ||
4376 | } else | ||
4377 | iwl_write32(priv, HBUS_TARG_WRPTR, | ||
4378 | txq->q.first_empty | (txq_id << 8)); | ||
4379 | |||
4380 | txq->need_update = 0; | ||
4381 | |||
4382 | return rc; | ||
4383 | } | ||
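/*
 * Note on the writes above: the value handed to HBUS_TARG_WRPTR packs the
 * new write index into the low byte and the Tx queue id into bits 8 and up
 * (first_empty | (txq_id << 8)), apparently so that a single register can
 * serve all queues.
 */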
4384 | |||
4385 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
4386 | static void iwl_print_rx_config_cmd(struct iwl_rxon_cmd *rxon) | ||
4387 | { | ||
4388 | IWL_DEBUG_RADIO("RX CONFIG:\n"); | ||
4389 | iwl_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); | ||
4390 | IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel)); | ||
4391 | IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags)); | ||
4392 | IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n", | ||
4393 | le32_to_cpu(rxon->filter_flags)); | ||
4394 | IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type); | ||
4395 | IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n", | ||
4396 | rxon->ofdm_basic_rates); | ||
4397 | IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates); | ||
4398 | IWL_DEBUG_RADIO("u8[6] node_addr: " MAC_FMT "\n", | ||
4399 | MAC_ARG(rxon->node_addr)); | ||
4400 | IWL_DEBUG_RADIO("u8[6] bssid_addr: " MAC_FMT "\n", | ||
4401 | MAC_ARG(rxon->bssid_addr)); | ||
4402 | IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); | ||
4403 | } | ||
4404 | #endif | ||
4405 | |||
4406 | static void iwl_enable_interrupts(struct iwl_priv *priv) | ||
4407 | { | ||
4408 | IWL_DEBUG_ISR("Enabling interrupts\n"); | ||
4409 | set_bit(STATUS_INT_ENABLED, &priv->status); | ||
4410 | iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK); | ||
4411 | } | ||
4412 | |||
4413 | static inline void iwl_disable_interrupts(struct iwl_priv *priv) | ||
4414 | { | ||
4415 | clear_bit(STATUS_INT_ENABLED, &priv->status); | ||
4416 | |||
4417 | /* disable interrupts from uCode/NIC to host */ | ||
4418 | iwl_write32(priv, CSR_INT_MASK, 0x00000000); | ||
4419 | |||
4420 | /* acknowledge/clear/reset any interrupts still pending | ||
4421 | * from uCode or flow handler (Rx/Tx DMA) */ | ||
4422 | iwl_write32(priv, CSR_INT, 0xffffffff); | ||
4423 | iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff); | ||
4424 | IWL_DEBUG_ISR("Disabled interrupts\n"); | ||
4425 | } | ||
4426 | |||
4427 | static const char *desc_lookup(int i) | ||
4428 | { | ||
4429 | switch (i) { | ||
4430 | case 1: | ||
4431 | return "FAIL"; | ||
4432 | case 2: | ||
4433 | return "BAD_PARAM"; | ||
4434 | case 3: | ||
4435 | return "BAD_CHECKSUM"; | ||
4436 | case 4: | ||
4437 | return "NMI_INTERRUPT"; | ||
4438 | case 5: | ||
4439 | return "SYSASSERT"; | ||
4440 | case 6: | ||
4441 | return "FATAL_ERROR"; | ||
4442 | } | ||
4443 | |||
4444 | return "UNKNOWN"; | ||
4445 | } | ||
4446 | |||
4447 | #define ERROR_START_OFFSET (1 * sizeof(u32)) | ||
4448 | #define ERROR_ELEM_SIZE (7 * sizeof(u32)) | ||
4449 | |||
4450 | static void iwl_dump_nic_error_log(struct iwl_priv *priv) | ||
4451 | { | ||
4452 | u32 i; | ||
4453 | u32 desc, time, count, base, data1; | ||
4454 | u32 blink1, blink2, ilink1, ilink2; | ||
4455 | int rc; | ||
4456 | |||
4457 | base = le32_to_cpu(priv->card_alive.error_event_table_ptr); | ||
4458 | |||
4459 | if (!iwl_hw_valid_rtc_data_addr(base)) { | ||
4460 | IWL_ERROR("Not valid error log pointer 0x%08X\n", base); | ||
4461 | return; | ||
4462 | } | ||
4463 | |||
4464 | rc = iwl_grab_restricted_access(priv); | ||
4465 | if (rc) { | ||
4466 | IWL_WARNING("Can not read from adapter at this time.\n"); | ||
4467 | return; | ||
4468 | } | ||
4469 | |||
4470 | count = iwl_read_restricted_mem(priv, base); | ||
4471 | |||
4472 | if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { | ||
4473 | IWL_ERROR("Start IWL Error Log Dump:\n"); | ||
4474 | IWL_ERROR("Status: 0x%08lX, Config: %08X count: %d\n", | ||
4475 | priv->status, priv->config, count); | ||
4476 | } | ||
4477 | |||
4478 | IWL_ERROR("Desc Time asrtPC blink2 " | ||
4479 | "ilink1 nmiPC Line\n"); | ||
4480 | for (i = ERROR_START_OFFSET; | ||
4481 | i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET; | ||
4482 | i += ERROR_ELEM_SIZE) { | ||
4483 | desc = iwl_read_restricted_mem(priv, base + i); | ||
4484 | time = | ||
4485 | iwl_read_restricted_mem(priv, base + i + 1 * sizeof(u32)); | ||
4486 | blink1 = | ||
4487 | iwl_read_restricted_mem(priv, base + i + 2 * sizeof(u32)); | ||
4488 | blink2 = | ||
4489 | iwl_read_restricted_mem(priv, base + i + 3 * sizeof(u32)); | ||
4490 | ilink1 = | ||
4491 | iwl_read_restricted_mem(priv, base + i + 4 * sizeof(u32)); | ||
4492 | ilink2 = | ||
4493 | iwl_read_restricted_mem(priv, base + i + 5 * sizeof(u32)); | ||
4494 | data1 = | ||
4495 | iwl_read_restricted_mem(priv, base + i + 6 * sizeof(u32)); | ||
4496 | |||
4497 | IWL_ERROR | ||
4498 | ("%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", | ||
4499 | desc_lookup(desc), desc, time, blink1, blink2, | ||
4500 | ilink1, ilink2, data1); | ||
4501 | } | ||
4502 | |||
4503 | iwl_release_restricted_access(priv); | ||
4504 | |||
4505 | } | ||
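/*
 * Illustrative sketch (not taken from the firmware headers): the reads
 * above treat the SRAM error log as one u32 record count (hence
 * ERROR_START_OFFSET) followed by records of seven consecutive u32s,
 * i.e. the equivalent of:
 */
#if 0
struct example_error_elem {	/* ERROR_ELEM_SIZE == 7 * sizeof(u32) */
	u32 desc;		/* error type, decoded by desc_lookup() */
	u32 time;		/* uCode timestamp */
	u32 blink1;
	u32 blink2;
	u32 ilink1;
	u32 ilink2;
	u32 data1;		/* error-specific data word */
};
#endif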
4506 | |||
4507 | #define EVENT_START_OFFSET (4 * sizeof(u32)) | ||
4508 | |||
4509 | /** | ||
4510 | * iwl_print_event_log - Dump error event log to syslog | ||
4511 | * | ||
4512 | * NOTE: Must be called with iwl_grab_restricted_access() already obtained! | ||
4513 | */ | ||
4514 | static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx, | ||
4515 | u32 num_events, u32 mode) | ||
4516 | { | ||
4517 | u32 i; | ||
4518 | u32 base; /* SRAM byte address of event log header */ | ||
4519 | u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ | ||
4520 | u32 ptr; /* SRAM byte address of log data */ | ||
4521 | u32 ev, time, data; /* event log data */ | ||
4522 | |||
4523 | if (num_events == 0) | ||
4524 | return; | ||
4525 | |||
4526 | base = le32_to_cpu(priv->card_alive.log_event_table_ptr); | ||
4527 | |||
4528 | if (mode == 0) | ||
4529 | event_size = 2 * sizeof(u32); | ||
4530 | else | ||
4531 | event_size = 3 * sizeof(u32); | ||
4532 | |||
4533 | ptr = base + EVENT_START_OFFSET + (start_idx * event_size); | ||
4534 | |||
4535 | /* "time" is actually "data" for mode 0 (no timestamp). | ||
4536 | * place event id # at far right for easier visual parsing. */ | ||
4537 | for (i = 0; i < num_events; i++) { | ||
4538 | ev = iwl_read_restricted_mem(priv, ptr); | ||
4539 | ptr += sizeof(u32); | ||
4540 | time = iwl_read_restricted_mem(priv, ptr); | ||
4541 | ptr += sizeof(u32); | ||
4542 | if (mode == 0) | ||
4543 | IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */ | ||
4544 | else { | ||
4545 | data = iwl_read_restricted_mem(priv, ptr); | ||
4546 | ptr += sizeof(u32); | ||
4547 | IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev); | ||
4548 | } | ||
4549 | } | ||
4550 | } | ||
4551 | |||
4552 | static void iwl_dump_nic_event_log(struct iwl_priv *priv) | ||
4553 | { | ||
4554 | int rc; | ||
4555 | u32 base; /* SRAM byte address of event log header */ | ||
4556 | u32 capacity; /* event log capacity in # entries */ | ||
4557 | u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ | ||
4558 | u32 num_wraps; /* # times uCode wrapped to top of log */ | ||
4559 | u32 next_entry; /* index of next entry to be written by uCode */ | ||
4560 | u32 size; /* # entries that we'll print */ | ||
4561 | |||
4562 | base = le32_to_cpu(priv->card_alive.log_event_table_ptr); | ||
4563 | if (!iwl_hw_valid_rtc_data_addr(base)) { | ||
4564 | IWL_ERROR("Invalid event log pointer 0x%08X\n", base); | ||
4565 | return; | ||
4566 | } | ||
4567 | |||
4568 | rc = iwl_grab_restricted_access(priv); | ||
4569 | if (rc) { | ||
4570 | IWL_WARNING("Can not read from adapter at this time.\n"); | ||
4571 | return; | ||
4572 | } | ||
4573 | |||
4574 | /* event log header */ | ||
4575 | capacity = iwl_read_restricted_mem(priv, base); | ||
4576 | mode = iwl_read_restricted_mem(priv, base + (1 * sizeof(u32))); | ||
4577 | num_wraps = iwl_read_restricted_mem(priv, base + (2 * sizeof(u32))); | ||
4578 | next_entry = iwl_read_restricted_mem(priv, base + (3 * sizeof(u32))); | ||
4579 | |||
4580 | size = num_wraps ? capacity : next_entry; | ||
4581 | |||
4582 | /* bail out if nothing in log */ | ||
4583 | if (size == 0) { | ||
4584 | IWL_ERROR("Start IPW Event Log Dump: nothing in log\n"); | ||
4585 | iwl_release_restricted_access(priv); | ||
4586 | return; | ||
4587 | } | ||
4588 | |||
4589 | IWL_ERROR("Start IPW Event Log Dump: display count %d, wraps %d\n", | ||
4590 | size, num_wraps); | ||
4591 | |||
4592 | /* if uCode has wrapped back to top of log, start at the oldest entry, | ||
4593 | * i.e. the next one that uCode would fill. */ | ||
4594 | if (num_wraps) | ||
4595 | iwl_print_event_log(priv, next_entry, | ||
4596 | capacity - next_entry, mode); | ||
4597 | |||
4598 | /* (then/else) start at top of log */ | ||
4599 | iwl_print_event_log(priv, 0, next_entry, mode); | ||
4600 | |||
4601 | iwl_release_restricted_access(priv); | ||
4602 | } | ||
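/*
 * Illustrative sketch (mirrors the reads above, not a firmware header):
 * the SRAM event log begins with four u32s of header -- which is why
 * EVENT_START_OFFSET is 4 * sizeof(u32) -- followed by fixed-size
 * entries whose width depends on "mode":
 */
#if 0
struct example_event_log_hdr {
	u32 capacity;		/* max # of entries the log can hold */
	u32 mode;		/* 0 = no timestamp, 1 = timestamp recorded */
	u32 num_wraps;		/* # of times uCode wrapped to the top */
	u32 next_entry;		/* index uCode will write next */
};
struct example_event_elem {	/* mode 1 entry */
	u32 event_id;
	u32 time;		/* omitted entirely in mode 0 entries */
	u32 data;
};
#endif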
4603 | |||
4604 | /** | ||
4605 | * iwl_irq_handle_error - called for HW or SW error interrupt from card | ||
4606 | */ | ||
4607 | static void iwl_irq_handle_error(struct iwl_priv *priv) | ||
4608 | { | ||
4609 | /* Set the FW error flag -- cleared on iwl_down */ | ||
4610 | set_bit(STATUS_FW_ERROR, &priv->status); | ||
4611 | |||
4612 | /* Cancel currently queued command. */ | ||
4613 | clear_bit(STATUS_HCMD_ACTIVE, &priv->status); | ||
4614 | |||
4615 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
4616 | if (iwl_debug_level & IWL_DL_FW_ERRORS) { | ||
4617 | iwl_dump_nic_error_log(priv); | ||
4618 | iwl_dump_nic_event_log(priv); | ||
4619 | iwl_print_rx_config_cmd(&priv->staging_rxon); | ||
4620 | } | ||
4621 | #endif | ||
4622 | |||
4623 | wake_up_interruptible(&priv->wait_command_queue); | ||
4624 | |||
4625 | /* Keep the restart process from trying to send host | ||
4626 | * commands by clearing the INIT status bit */ | ||
4627 | clear_bit(STATUS_READY, &priv->status); | ||
4628 | |||
4629 | if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) { | ||
4630 | IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS, | ||
4631 | "Restarting adapter due to uCode error.\n"); | ||
4632 | |||
4633 | if (iwl_is_associated(priv)) { | ||
4634 | memcpy(&priv->recovery_rxon, &priv->active_rxon, | ||
4635 | sizeof(priv->recovery_rxon)); | ||
4636 | priv->error_recovering = 1; | ||
4637 | } | ||
4638 | queue_work(priv->workqueue, &priv->restart); | ||
4639 | } | ||
4640 | } | ||
4641 | |||
4642 | static void iwl_error_recovery(struct iwl_priv *priv) | ||
4643 | { | ||
4644 | unsigned long flags; | ||
4645 | |||
4646 | memcpy(&priv->staging_rxon, &priv->recovery_rxon, | ||
4647 | sizeof(priv->staging_rxon)); | ||
4648 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
4649 | iwl_commit_rxon(priv); | ||
4650 | |||
4651 | iwl_rxon_add_station(priv, priv->bssid, 1); | ||
4652 | |||
4653 | spin_lock_irqsave(&priv->lock, flags); | ||
4654 | priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id); | ||
4655 | priv->error_recovering = 0; | ||
4656 | spin_unlock_irqrestore(&priv->lock, flags); | ||
4657 | } | ||
4658 | |||
4659 | static void iwl_irq_tasklet(struct iwl_priv *priv) | ||
4660 | { | ||
4661 | u32 inta, handled = 0; | ||
4662 | u32 inta_fh; | ||
4663 | unsigned long flags; | ||
4664 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
4665 | u32 inta_mask; | ||
4666 | #endif | ||
4667 | |||
4668 | spin_lock_irqsave(&priv->lock, flags); | ||
4669 | |||
4670 | /* Ack/clear/reset pending uCode interrupts. | ||
4671 | * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, | ||
4672 | * and will clear only when CSR_FH_INT_STATUS gets cleared. */ | ||
4673 | inta = iwl_read32(priv, CSR_INT); | ||
4674 | iwl_write32(priv, CSR_INT, inta); | ||
4675 | |||
4676 | /* Ack/clear/reset pending flow-handler (DMA) interrupts. | ||
4677 | * Any new interrupts that happen after this, either while we're | ||
4678 | * in this tasklet, or later, will show up in next ISR/tasklet. */ | ||
4679 | inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); | ||
4680 | iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); | ||
4681 | |||
4682 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
4683 | if (iwl_debug_level & IWL_DL_ISR) { | ||
4684 | inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */ | ||
4685 | IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", | ||
4686 | inta, inta_mask, inta_fh); | ||
4687 | } | ||
4688 | #endif | ||
4689 | |||
4690 | /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not | ||
4691 | * atomic, make sure that inta covers all the interrupts that | ||
4692 | * we've discovered, even if FH interrupt came in just after | ||
4693 | * reading CSR_INT. */ | ||
4694 | if (inta_fh & CSR_FH_INT_RX_MASK) | ||
4695 | inta |= CSR_INT_BIT_FH_RX; | ||
4696 | if (inta_fh & CSR_FH_INT_TX_MASK) | ||
4697 | inta |= CSR_INT_BIT_FH_TX; | ||
4698 | |||
4699 | /* Now service all interrupt bits discovered above. */ | ||
4700 | if (inta & CSR_INT_BIT_HW_ERR) { | ||
4701 | IWL_ERROR("Microcode HW error detected. Restarting.\n"); | ||
4702 | |||
4703 | /* Tell the device to stop sending interrupts */ | ||
4704 | iwl_disable_interrupts(priv); | ||
4705 | |||
4706 | iwl_irq_handle_error(priv); | ||
4707 | |||
4708 | handled |= CSR_INT_BIT_HW_ERR; | ||
4709 | |||
4710 | spin_unlock_irqrestore(&priv->lock, flags); | ||
4711 | |||
4712 | return; | ||
4713 | } | ||
4714 | |||
4715 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
4716 | if (iwl_debug_level & (IWL_DL_ISR)) { | ||
4717 | /* NIC fires this, but we don't use it, redundant with WAKEUP */ | ||
4718 | if (inta & CSR_INT_BIT_MAC_CLK_ACTV) | ||
4719 | IWL_DEBUG_ISR("Microcode started or stopped.\n"); | ||
4720 | |||
4721 | /* Alive notification via Rx interrupt will do the real work */ | ||
4722 | if (inta & CSR_INT_BIT_ALIVE) | ||
4723 | IWL_DEBUG_ISR("Alive interrupt\n"); | ||
4724 | } | ||
4725 | #endif | ||
4726 | /* Safely ignore these bits for debug checks below */ | ||
4727 | inta &= ~(CSR_INT_BIT_MAC_CLK_ACTV | CSR_INT_BIT_ALIVE); | ||
4728 | |||
4729 | /* HW RF KILL switch toggled (4965 only) */ | ||
4730 | if (inta & CSR_INT_BIT_RF_KILL) { | ||
4731 | int hw_rf_kill = 0; | ||
4732 | if (!(iwl_read32(priv, CSR_GP_CNTRL) & | ||
4733 | CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) | ||
4734 | hw_rf_kill = 1; | ||
4735 | |||
4736 | IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL | IWL_DL_ISR, | ||
4737 | "RF_KILL bit toggled to %s.\n", | ||
4738 | hw_rf_kill ? "disable radio":"enable radio"); | ||
4739 | |||
4740 | /* Queue restart only if RF_KILL switch was set to "kill" | ||
4741 | * when we loaded the driver, and it is now set to "enable". | ||
4742 | * After we're Alive, RF_KILL gets handled by | ||
4743 | * iwl_rx_card_state_notif() */ | ||
4744 | if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status)) | ||
4745 | queue_work(priv->workqueue, &priv->restart); | ||
4746 | |||
4747 | handled |= CSR_INT_BIT_RF_KILL; | ||
4748 | } | ||
4749 | |||
4750 | /* Chip got too hot and stopped itself (4965 only) */ | ||
4751 | if (inta & CSR_INT_BIT_CT_KILL) { | ||
4752 | IWL_ERROR("Microcode CT kill error detected.\n"); | ||
4753 | handled |= CSR_INT_BIT_CT_KILL; | ||
4754 | } | ||
4755 | |||
4756 | /* Error detected by uCode */ | ||
4757 | if (inta & CSR_INT_BIT_SW_ERR) { | ||
4758 | IWL_ERROR("Microcode SW error detected. Restarting 0x%X.\n", | ||
4759 | inta); | ||
4760 | iwl_irq_handle_error(priv); | ||
4761 | handled |= CSR_INT_BIT_SW_ERR; | ||
4762 | } | ||
4763 | |||
4764 | /* uCode wakes up after power-down sleep */ | ||
4765 | if (inta & CSR_INT_BIT_WAKEUP) { | ||
4766 | IWL_DEBUG_ISR("Wakeup interrupt\n"); | ||
4767 | iwl_rx_queue_update_write_ptr(priv, &priv->rxq); | ||
4768 | iwl_tx_queue_update_write_ptr(priv, &priv->txq[0]); | ||
4769 | iwl_tx_queue_update_write_ptr(priv, &priv->txq[1]); | ||
4770 | iwl_tx_queue_update_write_ptr(priv, &priv->txq[2]); | ||
4771 | iwl_tx_queue_update_write_ptr(priv, &priv->txq[3]); | ||
4772 | iwl_tx_queue_update_write_ptr(priv, &priv->txq[4]); | ||
4773 | iwl_tx_queue_update_write_ptr(priv, &priv->txq[5]); | ||
4774 | |||
4775 | handled |= CSR_INT_BIT_WAKEUP; | ||
4776 | } | ||
4777 | |||
4778 | /* All uCode command responses, including Tx command responses, | ||
4779 | * Rx "responses" (frame-received notification), and other | ||
4780 | * notifications from uCode come through here */ | ||
4781 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { | ||
4782 | iwl_rx_handle(priv); | ||
4783 | handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); | ||
4784 | } | ||
4785 | |||
4786 | if (inta & CSR_INT_BIT_FH_TX) { | ||
4787 | IWL_DEBUG_ISR("Tx interrupt\n"); | ||
4788 | |||
4789 | iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6)); | ||
4790 | if (!iwl_grab_restricted_access(priv)) { | ||
4791 | iwl_write_restricted(priv, | ||
4792 | FH_TCSR_CREDIT | ||
4793 | (ALM_FH_SRVC_CHNL), 0x0); | ||
4794 | iwl_release_restricted_access(priv); | ||
4795 | } | ||
4796 | handled |= CSR_INT_BIT_FH_TX; | ||
4797 | } | ||
4798 | |||
4799 | if (inta & ~handled) | ||
4800 | IWL_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled); | ||
4801 | |||
4802 | if (inta & ~CSR_INI_SET_MASK) { | ||
4803 | IWL_WARNING("Disabled INTA bits 0x%08x were pending\n", | ||
4804 | inta & ~CSR_INI_SET_MASK); | ||
4805 | IWL_WARNING(" with FH_INT = 0x%08x\n", inta_fh); | ||
4806 | } | ||
4807 | |||
4808 | /* Re-enable all interrupts */ | ||
4809 | iwl_enable_interrupts(priv); | ||
4810 | |||
4811 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
4812 | if (iwl_debug_level & (IWL_DL_ISR)) { | ||
4813 | inta = iwl_read32(priv, CSR_INT); | ||
4814 | inta_mask = iwl_read32(priv, CSR_INT_MASK); | ||
4815 | inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); | ||
4816 | IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " | ||
4817 | "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); | ||
4818 | } | ||
4819 | #endif | ||
4820 | spin_unlock_irqrestore(&priv->lock, flags); | ||
4821 | } | ||
4822 | |||
4823 | static irqreturn_t iwl_isr(int irq, void *data) | ||
4824 | { | ||
4825 | struct iwl_priv *priv = data; | ||
4826 | u32 inta, inta_mask; | ||
4827 | u32 inta_fh; | ||
4828 | if (!priv) | ||
4829 | return IRQ_NONE; | ||
4830 | |||
4831 | spin_lock(&priv->lock); | ||
4832 | |||
4833 | /* Disable (but don't clear!) interrupts here to avoid | ||
4834 | * back-to-back ISRs and sporadic interrupts from our NIC. | ||
4835 | * If we have something to service, the tasklet will re-enable ints. | ||
4836 | * If we *don't* have something, we'll re-enable before leaving here. */ | ||
4837 | inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */ | ||
4838 | iwl_write32(priv, CSR_INT_MASK, 0x00000000); | ||
4839 | |||
4840 | /* Discover which interrupts are active/pending */ | ||
4841 | inta = iwl_read32(priv, CSR_INT); | ||
4842 | inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); | ||
4843 | |||
4844 | /* Ignore interrupt if there's nothing in NIC to service. | ||
4845 | * This may be due to IRQ shared with another device, | ||
4846 | * or due to sporadic interrupts thrown from our NIC. */ | ||
4847 | if (!inta && !inta_fh) { | ||
4848 | IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n"); | ||
4849 | goto none; | ||
4850 | } | ||
4851 | |||
4852 | if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { | ||
4853 | /* Hardware disappeared */ | ||
4854 | IWL_WARNING("HARDWARE GONE?? INTA == 0x%080x\n", inta); | ||
4855 | goto none; | ||
4856 | } | ||
4857 | |||
4858 | IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", | ||
4859 | inta, inta_mask, inta_fh); | ||
4860 | |||
4861 | /* iwl_irq_tasklet() will service interrupts and re-enable them */ | ||
4862 | tasklet_schedule(&priv->irq_tasklet); | ||
4863 | spin_unlock(&priv->lock); | ||
4864 | |||
4865 | return IRQ_HANDLED; | ||
4866 | |||
4867 | none: | ||
4868 | /* re-enable interrupts here since we don't have anything to service. */ | ||
4869 | iwl_enable_interrupts(priv); | ||
4870 | spin_unlock(&priv->lock); | ||
4871 | return IRQ_NONE; | ||
4872 | } | ||
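/*
 * Illustrative sketch (assumed wiring, not copied from this driver): the
 * split above is the usual top-half/bottom-half pattern -- iwl_isr()
 * masks CSR_INT_MASK and schedules the tasklet, and iwl_irq_tasklet()
 * acks, services and re-enables.  Hooking them up would look roughly
 * like this; the actual registration lives elsewhere in this file and
 * may differ in flags and error handling.
 */
#if 0
static void example_setup_irq(struct iwl_priv *priv)
{
	tasklet_init(&priv->irq_tasklet,
		     (void (*)(unsigned long))iwl_irq_tasklet,
		     (unsigned long)priv);

	/* shared PCI interrupt; iwl_isr() returns IRQ_NONE when idle */
	if (request_irq(priv->pci_dev->irq, iwl_isr, IRQF_SHARED,
			DRV_NAME, priv))
		IWL_ERROR("Error allocating IRQ %d\n", priv->pci_dev->irq);
}
#endif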
4873 | |||
4874 | /************************** EEPROM BANDS **************************** | ||
4875 | * | ||
4876 | * The iwl_eeprom_band definitions below provide the mapping from the | ||
4877 | * EEPROM contents to the specific channel number supported for each | ||
4878 | * band. | ||
4879 | * | ||
4880 | * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3 | ||
4881 | * definition below maps to physical channel 42 in the 5.2GHz spectrum. | ||
4882 | * The specific geography and calibration information for that channel | ||
4883 | * is contained in the eeprom map itself. | ||
4884 | * | ||
4885 | * During init, we copy the eeprom information and channel map | ||
4886 | * information into priv->channel_info_24/52 and priv->channel_map_24/52 | ||
4887 | * | ||
4888 | * channel_map_24/52 provides the index in the channel_info array for a | ||
4889 | * given channel. We have to have two separate maps as there is channel | ||
4890 | * overlap between the 2.4GHz and 5.2GHz spectra, as seen in band_1 and | ||
4891 | * band_2. | ||
4892 | * | ||
4893 | * A value of 0xff stored in the channel_map indicates that the channel | ||
4894 | * is not supported by the hardware at all. | ||
4895 | * | ||
4896 | * A value of 0xfe in the channel_map indicates that the channel is not | ||
4897 | * valid for Tx with the current hardware. This means that | ||
4898 | * while the system can tune and receive on a given channel, it may not | ||
4899 | * be able to associate or transmit any frames on that | ||
4900 | * channel. There is no corresponding channel information for that | ||
4901 | * entry. | ||
4902 | * | ||
4903 | *********************************************************************/ | ||
4904 | |||
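/*
 * Illustrative sketch: a channel lookup honoring the 0xff/0xfe
 * conventions described above.  The channel_map_24/52 field names come
 * from that comment and are assumptions as far as this file shows; only
 * priv->channel_info is used directly elsewhere in this file.
 */
#if 0
static const struct iwl_channel_info *
example_channel_lookup(struct iwl_priv *priv, u8 channel, int is_52ghz)
{
	const u8 *map = is_52ghz ? priv->channel_map_52 : priv->channel_map_24;
	u8 idx = map[channel];

	if (idx == 0xff)	/* not supported by the hardware at all */
		return NULL;
	if (idx == 0xfe)	/* rx-only; no channel_info entry exists */
		return NULL;
	return &priv->channel_info[idx];
}
#endif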
4905 | /* 2.4 GHz */ | ||
4906 | static const u8 iwl_eeprom_band_1[14] = { | ||
4907 | 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 | ||
4908 | }; | ||
4909 | |||
4910 | /* 5.2 GHz bands */ | ||
4911 | static const u8 iwl_eeprom_band_2[] = { | ||
4912 | 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 | ||
4913 | }; | ||
4914 | |||
4915 | static const u8 iwl_eeprom_band_3[] = { /* 5205-5320MHz */ | ||
4916 | 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 | ||
4917 | }; | ||
4918 | |||
4919 | static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */ | ||
4920 | 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 | ||
4921 | }; | ||
4922 | |||
4923 | static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */ | ||
4924 | 145, 149, 153, 157, 161, 165 | ||
4925 | }; | ||
4926 | |||
4927 | static void iwl_init_band_reference(const struct iwl_priv *priv, int band, | ||
4928 | int *eeprom_ch_count, | ||
4929 | const struct iwl_eeprom_channel | ||
4930 | **eeprom_ch_info, | ||
4931 | const u8 **eeprom_ch_index) | ||
4932 | { | ||
4933 | switch (band) { | ||
4934 | case 1: /* 2.4GHz band */ | ||
4935 | *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1); | ||
4936 | *eeprom_ch_info = priv->eeprom.band_1_channels; | ||
4937 | *eeprom_ch_index = iwl_eeprom_band_1; | ||
4938 | break; | ||
4939 | case 2: /* 5.2GHz band */ | ||
4940 | *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2); | ||
4941 | *eeprom_ch_info = priv->eeprom.band_2_channels; | ||
4942 | *eeprom_ch_index = iwl_eeprom_band_2; | ||
4943 | break; | ||
4944 | case 3: /* 5.2GHz band */ | ||
4945 | *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3); | ||
4946 | *eeprom_ch_info = priv->eeprom.band_3_channels; | ||
4947 | *eeprom_ch_index = iwl_eeprom_band_3; | ||
4948 | break; | ||
4949 | case 4: /* 5.2GHz band */ | ||
4950 | *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4); | ||
4951 | *eeprom_ch_info = priv->eeprom.band_4_channels; | ||
4952 | *eeprom_ch_index = iwl_eeprom_band_4; | ||
4953 | break; | ||
4954 | case 5: /* 5.2GHz band */ | ||
4955 | *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5); | ||
4956 | *eeprom_ch_info = priv->eeprom.band_5_channels; | ||
4957 | *eeprom_ch_index = iwl_eeprom_band_5; | ||
4958 | break; | ||
4959 | default: | ||
4960 | BUG(); | ||
4961 | return; | ||
4962 | } | ||
4963 | } | ||
4964 | |||
4965 | const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv, | ||
4966 | int phymode, u16 channel) | ||
4967 | { | ||
4968 | int i; | ||
4969 | |||
4970 | switch (phymode) { | ||
4971 | case MODE_IEEE80211A: | ||
4972 | for (i = 14; i < priv->channel_count; i++) { | ||
4973 | if (priv->channel_info[i].channel == channel) | ||
4974 | return &priv->channel_info[i]; | ||
4975 | } | ||
4976 | break; | ||
4977 | |||
4978 | case MODE_IEEE80211B: | ||
4979 | case MODE_IEEE80211G: | ||
4980 | if (channel >= 1 && channel <= 14) | ||
4981 | return &priv->channel_info[channel - 1]; | ||
4982 | break; | ||
4983 | |||
4984 | } | ||
4985 | |||
4986 | return NULL; | ||
4987 | } | ||
4988 | |||
4989 | #define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \ | ||
4990 | ? # x " " : "") | ||
4991 | |||
4992 | static int iwl_init_channel_map(struct iwl_priv *priv) | ||
4993 | { | ||
4994 | int eeprom_ch_count = 0; | ||
4995 | const u8 *eeprom_ch_index = NULL; | ||
4996 | const struct iwl_eeprom_channel *eeprom_ch_info = NULL; | ||
4997 | int band, ch; | ||
4998 | struct iwl_channel_info *ch_info; | ||
4999 | |||
5000 | if (priv->channel_count) { | ||
5001 | IWL_DEBUG_INFO("Channel map already initialized.\n"); | ||
5002 | return 0; | ||
5003 | } | ||
5004 | |||
5005 | if (priv->eeprom.version < 0x2f) { | ||
5006 | IWL_WARNING("Unsupported EEPROM version: 0x%04X\n", | ||
5007 | priv->eeprom.version); | ||
5008 | return -EINVAL; | ||
5009 | } | ||
5010 | |||
5011 | IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n"); | ||
5012 | |||
5013 | priv->channel_count = | ||
5014 | ARRAY_SIZE(iwl_eeprom_band_1) + | ||
5015 | ARRAY_SIZE(iwl_eeprom_band_2) + | ||
5016 | ARRAY_SIZE(iwl_eeprom_band_3) + | ||
5017 | ARRAY_SIZE(iwl_eeprom_band_4) + | ||
5018 | ARRAY_SIZE(iwl_eeprom_band_5); | ||
5019 | |||
5020 | IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count); | ||
5021 | |||
5022 | priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) * | ||
5023 | priv->channel_count, GFP_KERNEL); | ||
5024 | if (!priv->channel_info) { | ||
5025 | IWL_ERROR("Could not allocate channel_info\n"); | ||
5026 | priv->channel_count = 0; | ||
5027 | return -ENOMEM; | ||
5028 | } | ||
5029 | |||
5030 | ch_info = priv->channel_info; | ||
5031 | |||
5032 | /* Loop through the 5 EEPROM bands adding them in order to the | ||
5033 | * channel map we maintain (which contains additional information beyond | ||
5034 | * what is in the EEPROM) */ | ||
5035 | for (band = 1; band <= 5; band++) { | ||
5036 | |||
5037 | iwl_init_band_reference(priv, band, &eeprom_ch_count, | ||
5038 | &eeprom_ch_info, &eeprom_ch_index); | ||
5039 | |||
5040 | /* Loop through each band adding each of the channels */ | ||
5041 | for (ch = 0; ch < eeprom_ch_count; ch++) { | ||
5042 | ch_info->channel = eeprom_ch_index[ch]; | ||
5043 | ch_info->phymode = (band == 1) ? MODE_IEEE80211B : | ||
5044 | MODE_IEEE80211A; | ||
5045 | |||
5046 | /* permanently store EEPROM's channel regulatory flags | ||
5047 | * and max power in channel info database. */ | ||
5048 | ch_info->eeprom = eeprom_ch_info[ch]; | ||
5049 | |||
5050 | /* Copy the run-time flags so they are there even on | ||
5051 | * invalid channels */ | ||
5052 | ch_info->flags = eeprom_ch_info[ch].flags; | ||
5053 | |||
5054 | if (!(is_channel_valid(ch_info))) { | ||
5055 | IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - " | ||
5056 | "No traffic\n", | ||
5057 | ch_info->channel, | ||
5058 | ch_info->flags, | ||
5059 | is_channel_a_band(ch_info) ? | ||
5060 | "5.2" : "2.4"); | ||
5061 | ch_info++; | ||
5062 | continue; | ||
5063 | } | ||
5064 | |||
5065 | /* Initialize regulatory-based run-time data */ | ||
5066 | ch_info->max_power_avg = ch_info->curr_txpow = | ||
5067 | eeprom_ch_info[ch].max_power_avg; | ||
5068 | ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; | ||
5069 | ch_info->min_power = 0; | ||
5070 | |||
5071 | IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x" | ||
5072 | " %ddBm): Ad-Hoc %ssupported\n", | ||
5073 | ch_info->channel, | ||
5074 | is_channel_a_band(ch_info) ? | ||
5075 | "5.2" : "2.4", | ||
5076 | CHECK_AND_PRINT(IBSS), | ||
5077 | CHECK_AND_PRINT(ACTIVE), | ||
5078 | CHECK_AND_PRINT(RADAR), | ||
5079 | CHECK_AND_PRINT(WIDE), | ||
5080 | CHECK_AND_PRINT(NARROW), | ||
5081 | CHECK_AND_PRINT(DFS), | ||
5082 | eeprom_ch_info[ch].flags, | ||
5083 | eeprom_ch_info[ch].max_power_avg, | ||
5084 | ((eeprom_ch_info[ch]. | ||
5085 | flags & EEPROM_CHANNEL_IBSS) | ||
5086 | && !(eeprom_ch_info[ch]. | ||
5087 | flags & EEPROM_CHANNEL_RADAR)) | ||
5088 | ? "" : "not "); | ||
5089 | |||
5090 | /* Set the user_txpower_limit to the highest power | ||
5091 | * supported by any channel */ | ||
5092 | if (eeprom_ch_info[ch].max_power_avg > | ||
5093 | priv->user_txpower_limit) | ||
5094 | priv->user_txpower_limit = | ||
5095 | eeprom_ch_info[ch].max_power_avg; | ||
5096 | |||
5097 | ch_info++; | ||
5098 | } | ||
5099 | } | ||
5100 | |||
5101 | if (iwl3945_txpower_set_from_eeprom(priv)) | ||
5102 | return -EIO; | ||
5103 | |||
5104 | return 0; | ||
5105 | } | ||
5106 | |||
5107 | /* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after | ||
5108 | * sending probe req. This should be set long enough to hear probe responses | ||
5109 | * from more than one AP. */ | ||
5110 | #define IWL_ACTIVE_DWELL_TIME_24 (20) /* all times in msec */ | ||
5111 | #define IWL_ACTIVE_DWELL_TIME_52 (10) | ||
5112 | |||
5113 | /* For faster active scanning, scan will move to the next channel if fewer than | ||
5114 | * PLCP_QUIET_THRESH packets are heard on this channel within | ||
5115 | * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell | ||
5116 | * time if it's a quiet channel (nothing responded to our probe, and there's | ||
5117 | * no other traffic). | ||
5118 | * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */ | ||
5119 | #define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */ | ||
5120 | #define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(5) /* msec */ | ||
5121 | |||
5122 | /* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel. | ||
5123 | * Must be set longer than active dwell time. | ||
5124 | * For the most reliable scan, set > AP beacon interval (typically 100msec). */ | ||
5125 | #define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */ | ||
5126 | #define IWL_PASSIVE_DWELL_TIME_52 (10) | ||
5127 | #define IWL_PASSIVE_DWELL_BASE (100) | ||
5128 | #define IWL_CHANNEL_TUNE_TIME 5 | ||
5129 | |||
5130 | static inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv, int phymode) | ||
5131 | { | ||
5132 | if (phymode == MODE_IEEE80211A) | ||
5133 | return IWL_ACTIVE_DWELL_TIME_52; | ||
5134 | else | ||
5135 | return IWL_ACTIVE_DWELL_TIME_24; | ||
5136 | } | ||
5137 | |||
5138 | static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, int phymode) | ||
5139 | { | ||
5140 | u16 active = iwl_get_active_dwell_time(priv, phymode); | ||
5141 | u16 passive = (phymode != MODE_IEEE80211A) ? | ||
5142 | IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 : | ||
5143 | IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52; | ||
5144 | |||
5145 | if (iwl_is_associated(priv)) { | ||
5146 | /* If we're associated, we clamp the maximum passive | ||
5147 | * dwell time to be 98% of the beacon interval (minus | ||
5148 | * 2 * channel tune time) */ | ||
5149 | passive = priv->beacon_int; | ||
5150 | if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive) | ||
5151 | passive = IWL_PASSIVE_DWELL_BASE; | ||
5152 | passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; | ||
5153 | } | ||
5154 | |||
5155 | if (passive <= active) | ||
5156 | passive = active + 1; | ||
5157 | |||
5158 | return passive; | ||
5159 | } | ||
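/*
 * Worked example of the clamp above (illustrative only): when associated
 * with a typical beacon interval of 100, the passive dwell becomes
 * 100 * 98 / 100 - 2 * IWL_CHANNEL_TUNE_TIME = 88 msec, still well above
 * the 10/20 msec active dwell, so the final "passive <= active" guard
 * only matters for very short beacon intervals.
 */
#if 0
static inline u16 example_passive_dwell_when_associated(void)
{
	u16 passive = IWL_PASSIVE_DWELL_BASE;			  /* 100 */
	return (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; /*  88 */
}
#endif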
5160 | |||
5161 | static int iwl_get_channels_for_scan(struct iwl_priv *priv, int phymode, | ||
5162 | u8 is_active, u8 direct_mask, | ||
5163 | struct iwl_scan_channel *scan_ch) | ||
5164 | { | ||
5165 | const struct ieee80211_channel *channels = NULL; | ||
5166 | const struct ieee80211_hw_mode *hw_mode; | ||
5167 | const struct iwl_channel_info *ch_info; | ||
5168 | u16 passive_dwell = 0; | ||
5169 | u16 active_dwell = 0; | ||
5170 | int added, i; | ||
5171 | |||
5172 | hw_mode = iwl_get_hw_mode(priv, phymode); | ||
5173 | if (!hw_mode) | ||
5174 | return 0; | ||
5175 | |||
5176 | channels = hw_mode->channels; | ||
5177 | |||
5178 | active_dwell = iwl_get_active_dwell_time(priv, phymode); | ||
5179 | passive_dwell = iwl_get_passive_dwell_time(priv, phymode); | ||
5180 | |||
5181 | for (i = 0, added = 0; i < hw_mode->num_channels; i++) { | ||
5182 | if (channels[i].chan == | ||
5183 | le16_to_cpu(priv->active_rxon.channel)) { | ||
5184 | if (iwl_is_associated(priv)) { | ||
5185 | IWL_DEBUG_SCAN | ||
5186 | ("Skipping current channel %d\n", | ||
5187 | le16_to_cpu(priv->active_rxon.channel)); | ||
5188 | continue; | ||
5189 | } | ||
5190 | } else if (priv->only_active_channel) | ||
5191 | continue; | ||
5192 | |||
5193 | scan_ch->channel = channels[i].chan; | ||
5194 | |||
5195 | ch_info = iwl_get_channel_info(priv, phymode, scan_ch->channel); | ||
5196 | if (!is_channel_valid(ch_info)) { | ||
5197 | IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n", | ||
5198 | scan_ch->channel); | ||
5199 | continue; | ||
5200 | } | ||
5201 | |||
5202 | if (!is_active || is_channel_passive(ch_info) || | ||
5203 | !(channels[i].flag & IEEE80211_CHAN_W_ACTIVE_SCAN)) | ||
5204 | scan_ch->type = 0; /* passive */ | ||
5205 | else | ||
5206 | scan_ch->type = 1; /* active */ | ||
5207 | |||
5208 | if (scan_ch->type & 1) | ||
5209 | scan_ch->type |= (direct_mask << 1); | ||
5210 | |||
5211 | if (is_channel_narrow(ch_info)) | ||
5212 | scan_ch->type |= (1 << 7); | ||
5213 | |||
5214 | scan_ch->active_dwell = cpu_to_le16(active_dwell); | ||
5215 | scan_ch->passive_dwell = cpu_to_le16(passive_dwell); | ||
5216 | |||
5217 | /* Set power levels to defaults */ | ||
5218 | scan_ch->tpc.dsp_atten = 110; | ||
5219 | /* scan_pwr_info->tpc.dsp_atten; */ | ||
5220 | |||
5221 | /*scan_pwr_info->tpc.tx_gain; */ | ||
5222 | if (phymode == MODE_IEEE80211A) | ||
5223 | scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; | ||
5224 | else { | ||
5225 | scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); | ||
5226 | /* NOTE: if we were doing 6Mb OFDM for scans we'd use | ||
5227 | * power level | ||
5228 | scan_ch->tpc.tx_gain = ((1<<5) | (2 << 3)) | 3; | ||
5229 | */ | ||
5230 | } | ||
5231 | |||
5232 | IWL_DEBUG_SCAN("Scanning %d [%s %d]\n", | ||
5233 | scan_ch->channel, | ||
5234 | (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE", | ||
5235 | (scan_ch->type & 1) ? | ||
5236 | active_dwell : passive_dwell); | ||
5237 | |||
5238 | scan_ch++; | ||
5239 | added++; | ||
5240 | } | ||
5241 | |||
5242 | IWL_DEBUG_SCAN("total channels to scan %d \n", added); | ||
5243 | return added; | ||
5244 | } | ||
5245 | |||
5246 | static void iwl_reset_channel_flag(struct iwl_priv *priv) | ||
5247 | { | ||
5248 | int i, j; | ||
5249 | for (i = 0; i < 3; i++) { | ||
5250 | struct ieee80211_hw_mode *hw_mode = (void *)&priv->modes[i]; | ||
5251 | for (j = 0; j < hw_mode->num_channels; j++) | ||
5252 | hw_mode->channels[j].flag = hw_mode->channels[j].val; | ||
5253 | } | ||
5254 | } | ||
5255 | |||
5256 | static void iwl_init_hw_rates(struct iwl_priv *priv, | ||
5257 | struct ieee80211_rate *rates) | ||
5258 | { | ||
5259 | int i; | ||
5260 | |||
5261 | for (i = 0; i < IWL_RATE_COUNT; i++) { | ||
5262 | rates[i].rate = iwl_rates[i].ieee * 5; | ||
5263 | rates[i].val = i; /* Rate scaling will work on indexes */ | ||
5264 | rates[i].val2 = i; | ||
5265 | rates[i].flags = IEEE80211_RATE_SUPPORTED; | ||
5266 | /* Only OFDM have the bits-per-symbol set */ | ||
5267 | if ((i <= IWL_LAST_OFDM_RATE) && (i >= IWL_FIRST_OFDM_RATE)) | ||
5268 | rates[i].flags |= IEEE80211_RATE_OFDM; | ||
5269 | else { | ||
5270 | /* | ||
5271 | * If CCK 1M then set rate flag to CCK else CCK_2 | ||
5272 | * which is CCK | PREAMBLE2 | ||
5273 | */ | ||
5274 | rates[i].flags |= (iwl_rates[i].plcp == 10) ? | ||
5275 | IEEE80211_RATE_CCK : IEEE80211_RATE_CCK_2; | ||
5276 | } | ||
5277 | |||
5278 | /* Set up which ones are basic rates... */ | ||
5279 | if (IWL_BASIC_RATES_MASK & (1 << i)) | ||
5280 | rates[i].flags |= IEEE80211_RATE_BASIC; | ||
5281 | } | ||
5282 | } | ||
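/*
 * Illustrative note on units (assuming iwl_rates[].ieee holds the usual
 * 802.11 rate codes in 500 kbps units): mac80211's ieee80211_rate.rate in
 * this API is in 100 kbps units, which is what the "* 5" above converts
 * to.  E.g. 54 Mbps OFDM:
 */
#if 0
static const int example_ieee_code = 108;		/* 500 kbps units  */
static const int example_mac80211_rate = 108 * 5;	/* 540 = 54.0 Mbps */
#endif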
5283 | |||
5284 | /** | ||
5285 | * iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom | ||
5286 | */ | ||
5287 | static int iwl_init_geos(struct iwl_priv *priv) | ||
5288 | { | ||
5289 | struct iwl_channel_info *ch; | ||
5290 | struct ieee80211_hw_mode *modes; | ||
5291 | struct ieee80211_channel *channels; | ||
5292 | struct ieee80211_channel *geo_ch; | ||
5293 | struct ieee80211_rate *rates; | ||
5294 | int i = 0; | ||
5295 | enum { | ||
5296 | A = 0, | ||
5297 | B = 1, | ||
5298 | G = 2, | ||
5299 | }; | ||
5300 | int mode_count = 3; | ||
5301 | |||
5302 | if (priv->modes) { | ||
5303 | IWL_DEBUG_INFO("Geography modes already initialized.\n"); | ||
5304 | set_bit(STATUS_GEO_CONFIGURED, &priv->status); | ||
5305 | return 0; | ||
5306 | } | ||
5307 | |||
5308 | modes = kzalloc(sizeof(struct ieee80211_hw_mode) * mode_count, | ||
5309 | GFP_KERNEL); | ||
5310 | if (!modes) | ||
5311 | return -ENOMEM; | ||
5312 | |||
5313 | channels = kzalloc(sizeof(struct ieee80211_channel) * | ||
5314 | priv->channel_count, GFP_KERNEL); | ||
5315 | if (!channels) { | ||
5316 | kfree(modes); | ||
5317 | return -ENOMEM; | ||
5318 | } | ||
5319 | |||
5320 | rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_MAX_RATES + 1)), | ||
5321 | GFP_KERNEL); | ||
5322 | if (!rates) { | ||
5323 | kfree(modes); | ||
5324 | kfree(channels); | ||
5325 | return -ENOMEM; | ||
5326 | } | ||
5327 | |||
5328 | /* 0 = 802.11a | ||
5329 | * 1 = 802.11b | ||
5330 | * 2 = 802.11g | ||
5331 | */ | ||
5332 | |||
5333 | /* 5.2GHz channels start after the 2.4GHz channels */ | ||
5334 | modes[A].mode = MODE_IEEE80211A; | ||
5335 | modes[A].channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)]; | ||
5336 | modes[A].rates = rates; | ||
5337 | modes[A].num_rates = 8; /* just OFDM */ | ||
5338 | modes[A].num_channels = 0; | ||
5339 | |||
5340 | modes[B].mode = MODE_IEEE80211B; | ||
5341 | modes[B].channels = channels; | ||
5342 | modes[B].rates = &rates[8]; | ||
5343 | modes[B].num_rates = 4; /* just CCK */ | ||
5344 | modes[B].num_channels = 0; | ||
5345 | |||
5346 | modes[G].mode = MODE_IEEE80211G; | ||
5347 | modes[G].channels = channels; | ||
5348 | modes[G].rates = rates; | ||
5349 | modes[G].num_rates = 12; /* OFDM & CCK */ | ||
5350 | modes[G].num_channels = 0; | ||
5351 | |||
5352 | priv->ieee_channels = channels; | ||
5353 | priv->ieee_rates = rates; | ||
5354 | |||
5355 | iwl_init_hw_rates(priv, rates); | ||
5356 | |||
5357 | for (i = 0, geo_ch = channels; i < priv->channel_count; i++) { | ||
5358 | ch = &priv->channel_info[i]; | ||
5359 | |||
5360 | if (!is_channel_valid(ch)) { | ||
5361 | IWL_DEBUG_INFO("Channel %d [%sGHz] is restricted -- " | ||
5362 | "skipping.\n", | ||
5363 | ch->channel, is_channel_a_band(ch) ? | ||
5364 | "5.2" : "2.4"); | ||
5365 | continue; | ||
5366 | } | ||
5367 | |||
5368 | if (is_channel_a_band(ch)) | ||
5369 | geo_ch = &modes[A].channels[modes[A].num_channels++]; | ||
5370 | else { | ||
5371 | geo_ch = &modes[B].channels[modes[B].num_channels++]; | ||
5372 | modes[G].num_channels++; | ||
5373 | } | ||
5374 | |||
5375 | geo_ch->freq = ieee80211chan2mhz(ch->channel); | ||
5376 | geo_ch->chan = ch->channel; | ||
5377 | geo_ch->power_level = ch->max_power_avg; | ||
5378 | geo_ch->antenna_max = 0xff; | ||
5379 | |||
5380 | if (is_channel_valid(ch)) { | ||
5381 | geo_ch->flag = IEEE80211_CHAN_W_SCAN; | ||
5382 | if (ch->flags & EEPROM_CHANNEL_IBSS) | ||
5383 | geo_ch->flag |= IEEE80211_CHAN_W_IBSS; | ||
5384 | |||
5385 | if (ch->flags & EEPROM_CHANNEL_ACTIVE) | ||
5386 | geo_ch->flag |= IEEE80211_CHAN_W_ACTIVE_SCAN; | ||
5387 | |||
5388 | if (ch->flags & EEPROM_CHANNEL_RADAR) | ||
5389 | geo_ch->flag |= IEEE80211_CHAN_W_RADAR_DETECT; | ||
5390 | |||
5391 | if (ch->max_power_avg > priv->max_channel_txpower_limit) | ||
5392 | priv->max_channel_txpower_limit = | ||
5393 | ch->max_power_avg; | ||
5394 | } | ||
5395 | |||
5396 | geo_ch->val = geo_ch->flag; | ||
5397 | } | ||
5398 | |||
5399 | if ((modes[A].num_channels == 0) && priv->is_abg) { | ||
5400 | printk(KERN_INFO DRV_NAME | ||
5401 | ": Incorrectly detected BG card as ABG. Please send " | ||
5402 | "your PCI ID 0x%04X:0x%04X to maintainer.\n", | ||
5403 | priv->pci_dev->device, priv->pci_dev->subsystem_device); | ||
5404 | priv->is_abg = 0; | ||
5405 | } | ||
5406 | |||
5407 | printk(KERN_INFO DRV_NAME | ||
5408 | ": Tunable channels: %d 802.11bg, %d 802.11a channels\n", | ||
5409 | modes[G].num_channels, modes[A].num_channels); | ||
5410 | |||
5411 | /* | ||
5412 | * NOTE: We register these in order of preference -- the | ||
5413 | * stack doesn't currently (as of 7.0.6 / Apr 24 '07) pick | ||
5414 | * a phymode based on rates or AP capabilities, but seems to | ||
5415 | * configure it purely on whether the channel being configured | ||
5416 | * is supported by a mode -- and the first match is taken | ||
5417 | */ | ||
5418 | |||
5419 | if (modes[G].num_channels) | ||
5420 | ieee80211_register_hwmode(priv->hw, &modes[G]); | ||
5421 | if (modes[B].num_channels) | ||
5422 | ieee80211_register_hwmode(priv->hw, &modes[B]); | ||
5423 | if (modes[A].num_channels) | ||
5424 | ieee80211_register_hwmode(priv->hw, &modes[A]); | ||
5425 | |||
5426 | priv->modes = modes; | ||
5427 | set_bit(STATUS_GEO_CONFIGURED, &priv->status); | ||
5428 | |||
5429 | return 0; | ||
5430 | } | ||
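/*
 * Illustrative summary of the buffer sharing set up above (no additional
 * driver code): the single channels[] and rates[] allocations are carved
 * up between the three modes roughly as follows.
 *
 *   modes[B].channels = modes[G].channels = &channels[0]
 *                       (2.4 GHz entries, at most ARRAY_SIZE(band_1) = 14)
 *   modes[A].channels = &channels[14]      (5.2 GHz entries follow)
 *
 *   modes[A].rates = &rates[0]   8 entries  (OFDM only, per the comments)
 *   modes[B].rates = &rates[8]   4 entries  (CCK only)
 *   modes[G].rates = &rates[0]  12 entries  (CCK + OFDM)
 *
 * B and G share channel storage, so every 2.4 GHz channel added for B in
 * the loop above also bumps modes[G].num_channels.
 */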
5431 | |||
5432 | /****************************************************************************** | ||
5433 | * | ||
5434 | * uCode download functions | ||
5435 | * | ||
5436 | ******************************************************************************/ | ||
5437 | |||
5438 | static void iwl_dealloc_ucode_pci(struct iwl_priv *priv) | ||
5439 | { | ||
5440 | if (priv->ucode_code.v_addr != NULL) { | ||
5441 | pci_free_consistent(priv->pci_dev, | ||
5442 | priv->ucode_code.len, | ||
5443 | priv->ucode_code.v_addr, | ||
5444 | priv->ucode_code.p_addr); | ||
5445 | priv->ucode_code.v_addr = NULL; | ||
5446 | } | ||
5447 | if (priv->ucode_data.v_addr != NULL) { | ||
5448 | pci_free_consistent(priv->pci_dev, | ||
5449 | priv->ucode_data.len, | ||
5450 | priv->ucode_data.v_addr, | ||
5451 | priv->ucode_data.p_addr); | ||
5452 | priv->ucode_data.v_addr = NULL; | ||
5453 | } | ||
5454 | if (priv->ucode_data_backup.v_addr != NULL) { | ||
5455 | pci_free_consistent(priv->pci_dev, | ||
5456 | priv->ucode_data_backup.len, | ||
5457 | priv->ucode_data_backup.v_addr, | ||
5458 | priv->ucode_data_backup.p_addr); | ||
5459 | priv->ucode_data_backup.v_addr = NULL; | ||
5460 | } | ||
5461 | if (priv->ucode_init.v_addr != NULL) { | ||
5462 | pci_free_consistent(priv->pci_dev, | ||
5463 | priv->ucode_init.len, | ||
5464 | priv->ucode_init.v_addr, | ||
5465 | priv->ucode_init.p_addr); | ||
5466 | priv->ucode_init.v_addr = NULL; | ||
5467 | } | ||
5468 | if (priv->ucode_init_data.v_addr != NULL) { | ||
5469 | pci_free_consistent(priv->pci_dev, | ||
5470 | priv->ucode_init_data.len, | ||
5471 | priv->ucode_init_data.v_addr, | ||
5472 | priv->ucode_init_data.p_addr); | ||
5473 | priv->ucode_init_data.v_addr = NULL; | ||
5474 | } | ||
5475 | if (priv->ucode_boot.v_addr != NULL) { | ||
5476 | pci_free_consistent(priv->pci_dev, | ||
5477 | priv->ucode_boot.len, | ||
5478 | priv->ucode_boot.v_addr, | ||
5479 | priv->ucode_boot.p_addr); | ||
5480 | priv->ucode_boot.v_addr = NULL; | ||
5481 | } | ||
5482 | } | ||
5483 | |||
5484 | /** | ||
5485 | * iwl_verify_inst_full - verify runtime uCode image in card vs. host, | ||
5486 | * looking at all data. | ||
5487 | */ | ||
5488 | static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 * image, u32 len) | ||
5489 | { | ||
5490 | u32 val; | ||
5491 | u32 save_len = len; | ||
5492 | int rc = 0; | ||
5493 | u32 errcnt; | ||
5494 | |||
5495 | IWL_DEBUG_INFO("ucode inst image size is %u\n", len); | ||
5496 | |||
5497 | rc = iwl_grab_restricted_access(priv); | ||
5498 | if (rc) | ||
5499 | return rc; | ||
5500 | |||
5501 | iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND); | ||
5502 | |||
5503 | errcnt = 0; | ||
5504 | for (; len > 0; len -= sizeof(u32), image++) { | ||
5505 | /* read data comes through single port, auto-incr addr */ | ||
5506 | /* NOTE: Use the debugless read so we don't flood kernel log | ||
5507 | * if IWL_DL_IO is set */ | ||
5508 | val = _iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT); | ||
5509 | if (val != le32_to_cpu(*image)) { | ||
5510 | IWL_ERROR("uCode INST section is invalid at " | ||
5511 | "offset 0x%x, is 0x%x, s/b 0x%x\n", | ||
5512 | save_len - len, val, le32_to_cpu(*image)); | ||
5513 | rc = -EIO; | ||
5514 | errcnt++; | ||
5515 | if (errcnt >= 20) | ||
5516 | break; | ||
5517 | } | ||
5518 | } | ||
5519 | |||
5520 | iwl_release_restricted_access(priv); | ||
5521 | |||
5522 | if (!errcnt) | ||
5523 | IWL_DEBUG_INFO | ||
5524 | ("ucode image in INSTRUCTION memory is good\n"); | ||
5525 | |||
5526 | return rc; | ||
5527 | } | ||
5528 | |||
5529 | |||
5530 | /** | ||
5531 | * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host, | ||
5532 | * using sample data 100 bytes apart. If these sample points are good, | ||
5533 | * it's a pretty good bet that everything between them is good, too. | ||
5534 | */ | ||
5535 | static int iwl_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len) | ||
5536 | { | ||
5537 | u32 val; | ||
5538 | int rc = 0; | ||
5539 | u32 errcnt = 0; | ||
5540 | u32 i; | ||
5541 | |||
5542 | IWL_DEBUG_INFO("ucode inst image size is %u\n", len); | ||
5543 | |||
5544 | rc = iwl_grab_restricted_access(priv); | ||
5545 | if (rc) | ||
5546 | return rc; | ||
5547 | |||
5548 | for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { | ||
5549 | /* read data comes through single port, auto-incr addr */ | ||
5550 | /* NOTE: Use the debugless read so we don't flood kernel log | ||
5551 | * if IWL_DL_IO is set */ | ||
5552 | iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR, | ||
5553 | i + RTC_INST_LOWER_BOUND); | ||
5554 | val = _iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT); | ||
5555 | if (val != le32_to_cpu(*image)) { | ||
5556 | #if 0 /* Enable this if you want to see details */ | ||
5557 | IWL_ERROR("uCode INST section is invalid at " | ||
5558 | "offset 0x%x, is 0x%x, s/b 0x%x\n", | ||
5559 | i, val, *image); | ||
5560 | #endif | ||
5561 | rc = -EIO; | ||
5562 | errcnt++; | ||
5563 | if (errcnt >= 3) | ||
5564 | break; | ||
5565 | } | ||
5566 | } | ||
5567 | |||
5568 | iwl_release_restricted_access(priv); | ||
5569 | |||
5570 | return rc; | ||
5571 | } | ||
5572 | |||
5573 | |||
5574 | /** | ||
5575 | * iwl_verify_ucode - determine which instruction image is in SRAM, | ||
5576 | * and verify its contents | ||
5577 | */ | ||
5578 | static int iwl_verify_ucode(struct iwl_priv *priv) | ||
5579 | { | ||
5580 | __le32 *image; | ||
5581 | u32 len; | ||
5582 | int rc = 0; | ||
5583 | |||
5584 | /* Try bootstrap */ | ||
5585 | image = (__le32 *)priv->ucode_boot.v_addr; | ||
5586 | len = priv->ucode_boot.len; | ||
5587 | rc = iwl_verify_inst_sparse(priv, image, len); | ||
5588 | if (rc == 0) { | ||
5589 | IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n"); | ||
5590 | return 0; | ||
5591 | } | ||
5592 | |||
5593 | /* Try initialize */ | ||
5594 | image = (__le32 *)priv->ucode_init.v_addr; | ||
5595 | len = priv->ucode_init.len; | ||
5596 | rc = iwl_verify_inst_sparse(priv, image, len); | ||
5597 | if (rc == 0) { | ||
5598 | IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n"); | ||
5599 | return 0; | ||
5600 | } | ||
5601 | |||
5602 | /* Try runtime/protocol */ | ||
5603 | image = (__le32 *)priv->ucode_code.v_addr; | ||
5604 | len = priv->ucode_code.len; | ||
5605 | rc = iwl_verify_inst_sparse(priv, image, len); | ||
5606 | if (rc == 0) { | ||
5607 | IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n"); | ||
5608 | return 0; | ||
5609 | } | ||
5610 | |||
5611 | IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); | ||
5612 | |||
5613 | /* Show first several data entries in instruction SRAM. | ||
5614 | * Selection of bootstrap image is arbitrary. */ | ||
5615 | image = (__le32 *)priv->ucode_boot.v_addr; | ||
5616 | len = priv->ucode_boot.len; | ||
5617 | rc = iwl_verify_inst_full(priv, image, len); | ||
5618 | |||
5619 | return rc; | ||
5620 | } | ||
5621 | |||
5622 | |||
5623 | /* check contents of special bootstrap uCode SRAM */ | ||
5624 | static int iwl_verify_bsm(struct iwl_priv *priv) | ||
5625 | { | ||
5626 | __le32 *image = priv->ucode_boot.v_addr; | ||
5627 | u32 len = priv->ucode_boot.len; | ||
5628 | u32 reg; | ||
5629 | u32 val; | ||
5630 | |||
5631 | IWL_DEBUG_INFO("Begin verify bsm\n"); | ||
5632 | |||
5633 | /* verify BSM SRAM contents */ | ||
5634 | val = iwl_read_restricted_reg(priv, BSM_WR_DWCOUNT_REG); | ||
5635 | for (reg = BSM_SRAM_LOWER_BOUND; | ||
5636 | reg < BSM_SRAM_LOWER_BOUND + len; | ||
5637 | reg += sizeof(u32), image++) { | ||
5638 | val = iwl_read_restricted_reg(priv, reg); | ||
5639 | if (val != le32_to_cpu(*image)) { | ||
5640 | IWL_ERROR("BSM uCode verification failed at " | ||
5641 | "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", | ||
5642 | BSM_SRAM_LOWER_BOUND, | ||
5643 | reg - BSM_SRAM_LOWER_BOUND, len, | ||
5644 | val, le32_to_cpu(*image)); | ||
5645 | return -EIO; | ||
5646 | } | ||
5647 | } | ||
5648 | |||
5649 | IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n"); | ||
5650 | |||
5651 | return 0; | ||
5652 | } | ||
5653 | |||
5654 | /** | ||
5655 | * iwl_load_bsm - Load bootstrap instructions | ||
5656 | * | ||
5657 | * BSM operation: | ||
5658 | * | ||
5659 | * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program | ||
5660 | * in special SRAM that does not power down during RFKILL. When powering back | ||
5661 | * up after power-saving sleeps (or during initial uCode load), the BSM loads | ||
5662 | * the bootstrap program into the on-board processor, and starts it. | ||
5663 | * | ||
5664 | * The bootstrap program loads (via DMA) instructions and data for a new | ||
5665 | * program from host DRAM locations indicated by the host driver in the | ||
5666 | * BSM_DRAM_* registers. Once the new program is loaded, it starts | ||
5667 | * automatically. | ||
5668 | * | ||
5669 | * When initializing the NIC, the host driver points the BSM to the | ||
5670 | * "initialize" uCode image. This uCode sets up some internal data, then | ||
5671 | * notifies host via "initialize alive" that it is complete. | ||
5672 | * | ||
5673 | * The host then replaces the BSM_DRAM_* pointer values to point to the | ||
5674 | * normal runtime uCode instructions and a backup uCode data cache buffer | ||
5675 | * (filled initially with starting data values for the on-board processor), | ||
5676 | * then triggers the "initialize" uCode to load and launch the runtime uCode, | ||
5677 | * which begins normal operation. | ||
5678 | * | ||
5679 | * When doing a power-save shutdown, runtime uCode saves data SRAM into | ||
5680 | * the backup data cache in DRAM before SRAM is powered down. | ||
5681 | * | ||
5682 | * When powering back up, the BSM loads the bootstrap program. This reloads | ||
5683 | * the runtime uCode instructions and the backup data cache into SRAM, | ||
5684 | * and re-launches the runtime uCode from where it left off. | ||
5685 | */ | ||
5686 | static int iwl_load_bsm(struct iwl_priv *priv) | ||
5687 | { | ||
5688 | __le32 *image = priv->ucode_boot.v_addr; | ||
5689 | u32 len = priv->ucode_boot.len; | ||
5690 | dma_addr_t pinst; | ||
5691 | dma_addr_t pdata; | ||
5692 | u32 inst_len; | ||
5693 | u32 data_len; | ||
5694 | int rc; | ||
5695 | int i; | ||
5696 | u32 done; | ||
5697 | u32 reg_offset; | ||
5698 | |||
5699 | IWL_DEBUG_INFO("Begin load bsm\n"); | ||
5700 | |||
5701 | /* make sure bootstrap program is no larger than BSM's SRAM size */ | ||
5702 | if (len > IWL_MAX_BSM_SIZE) | ||
5703 | return -EINVAL; | ||
5704 | |||
5705 | /* Tell bootstrap uCode where to find the "Initialize" uCode | ||
5706 | * in host DRAM ... bits 31:0 for 3945, bits 35:4 for 4965. | ||
5707 | * NOTE: iwl_initialize_alive_start() will replace these values, | ||
5708 | * after the "initialize" uCode has run, to point to | ||
5709 | * runtime/protocol instructions and backup data cache. */ | ||
5710 | pinst = priv->ucode_init.p_addr; | ||
5711 | pdata = priv->ucode_init_data.p_addr; | ||
5712 | inst_len = priv->ucode_init.len; | ||
5713 | data_len = priv->ucode_init_data.len; | ||
5714 | |||
5715 | rc = iwl_grab_restricted_access(priv); | ||
5716 | if (rc) | ||
5717 | return rc; | ||
5718 | |||
5719 | iwl_write_restricted_reg(priv, BSM_DRAM_INST_PTR_REG, pinst); | ||
5720 | iwl_write_restricted_reg(priv, BSM_DRAM_DATA_PTR_REG, pdata); | ||
5721 | iwl_write_restricted_reg(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); | ||
5722 | iwl_write_restricted_reg(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); | ||
5723 | |||
5724 | /* Fill BSM memory with bootstrap instructions */ | ||
5725 | for (reg_offset = BSM_SRAM_LOWER_BOUND; | ||
5726 | reg_offset < BSM_SRAM_LOWER_BOUND + len; | ||
5727 | reg_offset += sizeof(u32), image++) | ||
5728 | _iwl_write_restricted_reg(priv, reg_offset, | ||
5729 | le32_to_cpu(*image)); | ||
5730 | |||
5731 | rc = iwl_verify_bsm(priv); | ||
5732 | if (rc) { | ||
5733 | iwl_release_restricted_access(priv); | ||
5734 | return rc; | ||
5735 | } | ||
5736 | |||
5737 | /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ | ||
5738 | iwl_write_restricted_reg(priv, BSM_WR_MEM_SRC_REG, 0x0); | ||
5739 | iwl_write_restricted_reg(priv, BSM_WR_MEM_DST_REG, | ||
5740 | RTC_INST_LOWER_BOUND); | ||
5741 | iwl_write_restricted_reg(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); | ||
5742 | |||
5743 | /* Load bootstrap code into instruction SRAM now, | ||
5744 | * to prepare to load "initialize" uCode */ | ||
5745 | iwl_write_restricted_reg(priv, BSM_WR_CTRL_REG, | ||
5746 | BSM_WR_CTRL_REG_BIT_START); | ||
5747 | |||
5748 | /* Wait for load of bootstrap uCode to finish */ | ||
5749 | for (i = 0; i < 100; i++) { | ||
5750 | done = iwl_read_restricted_reg(priv, BSM_WR_CTRL_REG); | ||
5751 | if (!(done & BSM_WR_CTRL_REG_BIT_START)) | ||
5752 | break; | ||
5753 | udelay(10); | ||
5754 | } | ||
5755 | if (i < 100) | ||
5756 | IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i); | ||
5757 | else { | ||
5758 | IWL_ERROR("BSM write did not complete!\n"); | ||
5759 | return -EIO; | ||
5760 | } | ||
5761 | |||
5762 | /* Enable future boot loads whenever power management unit triggers it | ||
5763 | * (e.g. when powering back up after power-save shutdown) */ | ||
5764 | iwl_write_restricted_reg(priv, BSM_WR_CTRL_REG, | ||
5765 | BSM_WR_CTRL_REG_BIT_START_EN); | ||
5766 | |||
5767 | iwl_release_restricted_access(priv); | ||
5768 | |||
5769 | return 0; | ||
5770 | } | ||
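/*
 * Illustrative sketch (not the driver's actual routine): per the comment
 * block above, once the "initialize" uCode reports alive the host
 * re-points the BSM_DRAM_* registers at the runtime image and the backup
 * data cache, so that later BSM-triggered reloads (e.g. waking from
 * power-save) restore the runtime uCode.  Roughly:
 */
#if 0
static int example_point_bsm_at_runtime_ucode(struct iwl_priv *priv)
{
	int rc = iwl_grab_restricted_access(priv);
	if (rc)
		return rc;

	iwl_write_restricted_reg(priv, BSM_DRAM_INST_PTR_REG,
				 priv->ucode_code.p_addr);
	iwl_write_restricted_reg(priv, BSM_DRAM_DATA_PTR_REG,
				 priv->ucode_data_backup.p_addr);
	iwl_write_restricted_reg(priv, BSM_DRAM_INST_BYTECOUNT_REG,
				 priv->ucode_code.len);
	iwl_write_restricted_reg(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
				 priv->ucode_data.len);

	iwl_release_restricted_access(priv);
	return 0;
}
#endif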
5771 | |||
5772 | static void iwl_nic_start(struct iwl_priv *priv) | ||
5773 | { | ||
5774 | /* Remove all resets to allow NIC to operate */ | ||
5775 | iwl_write32(priv, CSR_RESET, 0); | ||
5776 | } | ||
5777 | |||
5778 | /** | ||
5779 | * iwl_read_ucode - Read uCode images from disk file. | ||
5780 | * | ||
5781 | * Copy into buffers for card to fetch via bus-mastering | ||
5782 | */ | ||
5783 | static int iwl_read_ucode(struct iwl_priv *priv) | ||
5784 | { | ||
5785 | struct iwl_ucode *ucode; | ||
5786 | int rc = 0; | ||
5787 | const struct firmware *ucode_raw; | ||
5788 | /* firmware file name contains uCode/driver compatibility version */ | ||
5789 | const char *name = "iwlwifi-3945" IWL3945_UCODE_API ".ucode"; | ||
5790 | u8 *src; | ||
5791 | size_t len; | ||
5792 | u32 ver, inst_size, data_size, init_size, init_data_size, boot_size; | ||
5793 | |||
5794 | /* Ask kernel firmware_class module to get the boot firmware off disk. | ||
5795 | * request_firmware() is synchronous, file is in memory on return. */ | ||
5796 | rc = request_firmware(&ucode_raw, name, &priv->pci_dev->dev); | ||
5797 | if (rc < 0) { | ||
5798 | IWL_ERROR("%s firmware file req failed: Reason %d\n", name, rc); | ||
5799 | goto error; | ||
5800 | } | ||
5801 | |||
5802 | IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n", | ||
5803 | name, ucode_raw->size); | ||
5804 | |||
5805 | /* Make sure that we got at least our header! */ | ||
5806 | if (ucode_raw->size < sizeof(*ucode)) { | ||
5807 | IWL_ERROR("File size way too small!\n"); | ||
5808 | rc = -EINVAL; | ||
5809 | goto err_release; | ||
5810 | } | ||
5811 | |||
5812 | /* Data from ucode file: header followed by uCode images */ | ||
5813 | ucode = (void *)ucode_raw->data; | ||
5814 | |||
5815 | ver = le32_to_cpu(ucode->ver); | ||
5816 | inst_size = le32_to_cpu(ucode->inst_size); | ||
5817 | data_size = le32_to_cpu(ucode->data_size); | ||
5818 | init_size = le32_to_cpu(ucode->init_size); | ||
5819 | init_data_size = le32_to_cpu(ucode->init_data_size); | ||
5820 | boot_size = le32_to_cpu(ucode->boot_size); | ||
5821 | |||
5822 | IWL_DEBUG_INFO("f/w package hdr ucode version = 0x%x\n", ver); | ||
5823 | IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n", | ||
5824 | inst_size); | ||
5825 | IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n", | ||
5826 | data_size); | ||
5827 | IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n", | ||
5828 | init_size); | ||
5829 | IWL_DEBUG_INFO("f/w package hdr init data size = %u\n", | ||
5830 | init_data_size); | ||
5831 | IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n", | ||
5832 | boot_size); | ||
5833 | |||
5834 | /* Verify size of file vs. image size info in file's header */ | ||
5835 | if (ucode_raw->size < sizeof(*ucode) + | ||
5836 | inst_size + data_size + init_size + | ||
5837 | init_data_size + boot_size) { | ||
5838 | |||
5839 | IWL_DEBUG_INFO("uCode file size %d too small\n", | ||
5840 | (int)ucode_raw->size); | ||
5841 | rc = -EINVAL; | ||
5842 | goto err_release; | ||
5843 | } | ||
5844 | |||
5845 | /* Verify that uCode images will fit in card's SRAM */ | ||
5846 | if (inst_size > IWL_MAX_INST_SIZE) { | ||
5847 | IWL_DEBUG_INFO("uCode instr len %d too large to fit in card\n", | ||
5848 | (int)inst_size); | ||
5849 | rc = -EINVAL; | ||
5850 | goto err_release; | ||
5851 | } | ||
5852 | |||
5853 | if (data_size > IWL_MAX_DATA_SIZE) { | ||
5854 | IWL_DEBUG_INFO("uCode data len %d too large to fit in card\n", | ||
5855 | (int)data_size); | ||
5856 | rc = -EINVAL; | ||
5857 | goto err_release; | ||
5858 | } | ||
5859 | if (init_size > IWL_MAX_INST_SIZE) { | ||
5860 | IWL_DEBUG_INFO | ||
5861 | ("uCode init instr len %d too large to fit in card\n", | ||
5862 | (int)init_size); | ||
5863 | rc = -EINVAL; | ||
5864 | goto err_release; | ||
5865 | } | ||
5866 | if (init_data_size > IWL_MAX_DATA_SIZE) { | ||
5867 | IWL_DEBUG_INFO | ||
5868 | ("uCode init data len %d too large to fit in card\n", | ||
5869 | (int)init_data_size); | ||
5870 | rc = -EINVAL; | ||
5871 | goto err_release; | ||
5872 | } | ||
5873 | if (boot_size > IWL_MAX_BSM_SIZE) { | ||
5874 | IWL_DEBUG_INFO | ||
5875 | ("uCode boot instr len %d too large to fit in bsm\n", | ||
5876 | (int)boot_size); | ||
5877 | rc = -EINVAL; | ||
5878 | goto err_release; | ||
5879 | } | ||
5880 | |||
5881 | /* Allocate ucode buffers for card's bus-master loading ... */ | ||
5882 | |||
5883 | /* Runtime instructions and 2 copies of data: | ||
5884 | * 1) unmodified from disk | ||
5885 | * 2) backup cache for save/restore during power-downs */ | ||
5886 | priv->ucode_code.len = inst_size; | ||
5887 | priv->ucode_code.v_addr = | ||
5888 | pci_alloc_consistent(priv->pci_dev, | ||
5889 | priv->ucode_code.len, | ||
5890 | &(priv->ucode_code.p_addr)); | ||
5891 | |||
5892 | priv->ucode_data.len = data_size; | ||
5893 | priv->ucode_data.v_addr = | ||
5894 | pci_alloc_consistent(priv->pci_dev, | ||
5895 | priv->ucode_data.len, | ||
5896 | &(priv->ucode_data.p_addr)); | ||
5897 | |||
5898 | priv->ucode_data_backup.len = data_size; | ||
5899 | priv->ucode_data_backup.v_addr = | ||
5900 | pci_alloc_consistent(priv->pci_dev, | ||
5901 | priv->ucode_data_backup.len, | ||
5902 | &(priv->ucode_data_backup.p_addr)); | ||
5903 | |||
5904 | |||
5905 | /* Initialization instructions and data */ | ||
5906 | priv->ucode_init.len = init_size; | ||
5907 | priv->ucode_init.v_addr = | ||
5908 | pci_alloc_consistent(priv->pci_dev, | ||
5909 | priv->ucode_init.len, | ||
5910 | &(priv->ucode_init.p_addr)); | ||
5911 | |||
5912 | priv->ucode_init_data.len = init_data_size; | ||
5913 | priv->ucode_init_data.v_addr = | ||
5914 | pci_alloc_consistent(priv->pci_dev, | ||
5915 | priv->ucode_init_data.len, | ||
5916 | &(priv->ucode_init_data.p_addr)); | ||
5917 | |||
5918 | /* Bootstrap (instructions only, no data) */ | ||
5919 | priv->ucode_boot.len = boot_size; | ||
5920 | priv->ucode_boot.v_addr = | ||
5921 | pci_alloc_consistent(priv->pci_dev, | ||
5922 | priv->ucode_boot.len, | ||
5923 | &(priv->ucode_boot.p_addr)); | ||
5924 | |||
5925 | if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || | ||
5926 | !priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr || | ||
5927 | !priv->ucode_boot.v_addr || !priv->ucode_data_backup.v_addr) | ||
5928 | goto err_pci_alloc; | ||
5929 | |||
5930 | /* Copy images into buffers for card's bus-master reads ... */ | ||
5931 | |||
5932 | /* Runtime instructions (first block of data in file) */ | ||
5933 | src = &ucode->data[0]; | ||
5934 | len = priv->ucode_code.len; | ||
5935 | IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %d\n", | ||
5936 | (int)len); | ||
5937 | memcpy(priv->ucode_code.v_addr, src, len); | ||
5938 | IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", | ||
5939 | priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); | ||
5940 | |||
5941 | /* Runtime data (2nd block) | ||
5942 | * NOTE: Copy into backup buffer will be done in iwl_up() */ | ||
5943 | src = &ucode->data[inst_size]; | ||
5944 | len = priv->ucode_data.len; | ||
5945 | IWL_DEBUG_INFO("Copying (but not loading) uCode data len %d\n", | ||
5946 | (int)len); | ||
5947 | memcpy(priv->ucode_data.v_addr, src, len); | ||
5948 | memcpy(priv->ucode_data_backup.v_addr, src, len); | ||
5949 | |||
5950 | /* Initialization instructions (3rd block) */ | ||
5951 | if (init_size) { | ||
5952 | src = &ucode->data[inst_size + data_size]; | ||
5953 | len = priv->ucode_init.len; | ||
5954 | IWL_DEBUG_INFO("Copying (but not loading) init instr len %d\n", | ||
5955 | (int)len); | ||
5956 | memcpy(priv->ucode_init.v_addr, src, len); | ||
5957 | } | ||
5958 | |||
5959 | /* Initialization data (4th block) */ | ||
5960 | if (init_data_size) { | ||
5961 | src = &ucode->data[inst_size + data_size + init_size]; | ||
5962 | len = priv->ucode_init_data.len; | ||
5963 | IWL_DEBUG_INFO("Copying (but not loading) init data len %d\n", | ||
5964 | (int)len); | ||
5965 | memcpy(priv->ucode_init_data.v_addr, src, len); | ||
5966 | } | ||
5967 | |||
5968 | /* Bootstrap instructions (5th block) */ | ||
5969 | src = &ucode->data[inst_size + data_size + init_size + init_data_size]; | ||
5970 | len = priv->ucode_boot.len; | ||
5971 | IWL_DEBUG_INFO("Copying (but not loading) boot instr len %d\n", | ||
5972 | (int)len); | ||
5973 | memcpy(priv->ucode_boot.v_addr, src, len); | ||
5974 | |||
5975 | /* We have our copies now, allow the OS to release its copy */ | ||
5976 | release_firmware(ucode_raw); | ||
5977 | return 0; | ||
5978 | |||
5979 | err_pci_alloc: | ||
5980 | IWL_ERROR("failed to allocate pci memory\n"); | ||
5981 | rc = -ENOMEM; | ||
5982 | iwl_dealloc_ucode_pci(priv); | ||
5983 | |||
5984 | err_release: | ||
5985 | release_firmware(ucode_raw); | ||
5986 | |||
5987 | error: | ||
5988 | return rc; | ||
5989 | } | ||
5990 | |||
5991 | |||
5992 | /** | ||
5993 | * iwl_set_ucode_ptrs - Set uCode address location | ||
5994 | * | ||
5995 | * Tell initialization uCode where to find runtime uCode. | ||
5996 | * | ||
5997 | * BSM registers initially contain pointers to initialization uCode. | ||
5998 | * We need to replace them to load runtime uCode inst and data, | ||
5999 | * and to save runtime data when powering down. | ||
6000 | */ | ||
6001 | static int iwl_set_ucode_ptrs(struct iwl_priv *priv) | ||
6002 | { | ||
6003 | dma_addr_t pinst; | ||
6004 | dma_addr_t pdata; | ||
6005 | int rc = 0; | ||
6006 | unsigned long flags; | ||
6007 | |||
6008 | /* bits 31:0 for 3945 */ | ||
6009 | pinst = priv->ucode_code.p_addr; | ||
6010 | pdata = priv->ucode_data_backup.p_addr; | ||
6011 | |||
6012 | spin_lock_irqsave(&priv->lock, flags); | ||
6013 | rc = iwl_grab_restricted_access(priv); | ||
6014 | if (rc) { | ||
6015 | spin_unlock_irqrestore(&priv->lock, flags); | ||
6016 | return rc; | ||
6017 | } | ||
6018 | |||
6019 | /* Tell bootstrap uCode where to find image to load */ | ||
6020 | iwl_write_restricted_reg(priv, BSM_DRAM_INST_PTR_REG, pinst); | ||
6021 | iwl_write_restricted_reg(priv, BSM_DRAM_DATA_PTR_REG, pdata); | ||
6022 | iwl_write_restricted_reg(priv, BSM_DRAM_DATA_BYTECOUNT_REG, | ||
6023 | priv->ucode_data.len); | ||
6024 | |||
6025 | /* Inst bytecount must be last to set up, bit 31 signals uCode | ||
6026 | * that all new ptr/size info is in place */ | ||
6027 | iwl_write_restricted_reg(priv, BSM_DRAM_INST_BYTECOUNT_REG, | ||
6028 | priv->ucode_code.len | BSM_DRAM_INST_LOAD); | ||
6029 | |||
6030 | iwl_release_restricted_access(priv); | ||
6031 | |||
6032 | spin_unlock_irqrestore(&priv->lock, flags); | ||
6033 | |||
6034 | IWL_DEBUG_INFO("Runtime uCode pointers are set.\n"); | ||
6035 | |||
6036 | return rc; | ||
6037 | } | ||
6038 | |||
6039 | /** | ||
6040 | * iwl_init_alive_start - Called after REPLY_ALIVE notification received | ||
6041 | * | ||
6042 | * Called after REPLY_ALIVE notification received from "initialize" uCode. | ||
6043 | * | ||
6044 | * The 4965 "initialize" ALIVE reply contains calibration data for: | ||
6045 | * Voltage, temperature, and MIMO tx gain correction, now stored in priv | ||
6046 | * (3945 does not contain this data). | ||
6047 | * | ||
6048 | * Tell "initialize" uCode to go ahead and load the runtime uCode. | ||
6049 | */ | ||
6050 | static void iwl_init_alive_start(struct iwl_priv *priv) | ||
6051 | { | ||
6052 | /* Check alive response for "valid" sign from uCode */ | ||
6053 | if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { | ||
6054 | /* We had an error bringing up the hardware, so take it | ||
6055 | * all the way back down so we can try again */ | ||
6056 | IWL_DEBUG_INFO("Initialize Alive failed.\n"); | ||
6057 | goto restart; | ||
6058 | } | ||
6059 | |||
6060 | /* Bootstrap uCode has loaded initialize uCode ... verify inst image. | ||
6061 | * This is a paranoid check, because we would not have gotten the | ||
6062 | * "initialize" alive if code weren't properly loaded. */ | ||
6063 | if (iwl_verify_ucode(priv)) { | ||
6064 | /* Runtime instruction load was bad; | ||
6065 | * take it all the way back down so we can try again */ | ||
6066 | IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n"); | ||
6067 | goto restart; | ||
6068 | } | ||
6069 | |||
6070 | /* Send pointers to protocol/runtime uCode image ... init code will | ||
6071 | * load and launch runtime uCode, which will send us another "Alive" | ||
6072 | * notification. */ | ||
6073 | IWL_DEBUG_INFO("Initialization Alive received.\n"); | ||
6074 | if (iwl_set_ucode_ptrs(priv)) { | ||
6075 | /* Runtime instruction load won't happen; | ||
6076 | * take it all the way back down so we can try again */ | ||
6077 | IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n"); | ||
6078 | goto restart; | ||
6079 | } | ||
6080 | return; | ||
6081 | |||
6082 | restart: | ||
6083 | queue_work(priv->workqueue, &priv->restart); | ||
6084 | } | ||
6085 | |||
6086 | |||
6087 | /** | ||
6088 | * iwl_alive_start - called after REPLY_ALIVE notification received | ||
6089 | * from protocol/runtime uCode (initialization uCode's | ||
6090 | * Alive gets handled by iwl_init_alive_start()). | ||
6091 | */ | ||
6092 | static void iwl_alive_start(struct iwl_priv *priv) | ||
6093 | { | ||
6094 | int rc = 0; | ||
6095 | int thermal_spin = 0; | ||
6096 | u32 rfkill; | ||
6097 | |||
6098 | IWL_DEBUG_INFO("Runtime Alive received.\n"); | ||
6099 | |||
6100 | if (priv->card_alive.is_valid != UCODE_VALID_OK) { | ||
6101 | /* We had an error bringing up the hardware, so take it | ||
6102 | * all the way back down so we can try again */ | ||
6103 | IWL_DEBUG_INFO("Alive failed.\n"); | ||
6104 | goto restart; | ||
6105 | } | ||
6106 | |||
6107 | /* Initialize uCode has loaded Runtime uCode ... verify inst image. | ||
6108 | * This is a paranoid check, because we would not have gotten the | ||
6109 | * "runtime" alive if code weren't properly loaded. */ | ||
6110 | if (iwl_verify_ucode(priv)) { | ||
6111 | /* Runtime instruction load was bad; | ||
6112 | * take it all the way back down so we can try again */ | ||
6113 | IWL_DEBUG_INFO("Bad runtime uCode load.\n"); | ||
6114 | goto restart; | ||
6115 | } | ||
6116 | |||
6117 | iwl_clear_stations_table(priv); | ||
6118 | |||
6119 | rc = iwl_grab_restricted_access(priv); | ||
6120 | if (rc) { | ||
6121 | IWL_WARNING("Can not read rfkill status from adapter\n"); | ||
6122 | return; | ||
6123 | } | ||
6124 | |||
6125 | rfkill = iwl_read_restricted_reg(priv, APMG_RFKILL_REG); | ||
6126 | IWL_DEBUG_INFO("RFKILL status: 0x%x\n", rfkill); | ||
6127 | iwl_release_restricted_access(priv); | ||
6128 | |||
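/* Per the handling below, bit 0 of APMG_RFKILL_REG set means the radio
 * is enabled (no HW RF kill); clear means the HW kill switch is active. */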
6129 | if (rfkill & 0x1) { | ||
6130 | clear_bit(STATUS_RF_KILL_HW, &priv->status); | ||
6131 | /* if rfkill is not on, then wait for thermal | ||
6132 | * sensor in adapter to kick in */ | ||
6133 | while (iwl_hw_get_temperature(priv) == 0) { | ||
6134 | thermal_spin++; | ||
6135 | udelay(10); | ||
6136 | } | ||
6137 | |||
6138 | if (thermal_spin) | ||
6139 | IWL_DEBUG_INFO("Thermal calibration took %dus\n", | ||
6140 | thermal_spin * 10); | ||
6141 | } else | ||
6142 | set_bit(STATUS_RF_KILL_HW, &priv->status); | ||
6143 | |||
6144 | /* After the ALIVE response, we can process host commands */ | ||
6145 | set_bit(STATUS_ALIVE, &priv->status); | ||
6146 | |||
6147 | /* Clear out the uCode error bit if it is set */ | ||
6148 | clear_bit(STATUS_FW_ERROR, &priv->status); | ||
6149 | |||
6150 | rc = iwl_init_channel_map(priv); | ||
6151 | if (rc) { | ||
6152 | IWL_ERROR("initializing regulatory failed: %d\n", rc); | ||
6153 | return; | ||
6154 | } | ||
6155 | |||
6156 | iwl_init_geos(priv); | ||
6157 | |||
6158 | if (iwl_is_rfkill(priv)) | ||
6159 | return; | ||
6160 | |||
6161 | if (!priv->mac80211_registered) { | ||
6162 | /* Unlock so any user space entry points can call back into | ||
6163 | * the driver without a deadlock... */ | ||
6164 | mutex_unlock(&priv->mutex); | ||
6165 | iwl_rate_control_register(priv->hw); | ||
6166 | rc = ieee80211_register_hw(priv->hw); | ||
6167 | priv->hw->conf.beacon_int = 100; | ||
6168 | mutex_lock(&priv->mutex); | ||
6169 | |||
6170 | if (rc) { | ||
6171 | IWL_ERROR("Failed to register network " | ||
6172 | "device (error %d)\n", rc); | ||
6173 | return; | ||
6174 | } | ||
6175 | |||
6176 | priv->mac80211_registered = 1; | ||
6177 | |||
6178 | iwl_reset_channel_flag(priv); | ||
6179 | } else | ||
6180 | ieee80211_start_queues(priv->hw); | ||
6181 | |||
6182 | priv->active_rate = priv->rates_mask; | ||
6183 | priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; | ||
6184 | |||
6185 | iwl_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode)); | ||
6186 | |||
6187 | if (iwl_is_associated(priv)) { | ||
6188 | struct iwl_rxon_cmd *active_rxon = | ||
6189 | (struct iwl_rxon_cmd *)(&priv->active_rxon); | ||
6190 | |||
6191 | memcpy(&priv->staging_rxon, &priv->active_rxon, | ||
6192 | sizeof(priv->staging_rxon)); | ||
6193 | active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
6194 | } else { | ||
6195 | /* Initialize our rx_config data */ | ||
6196 | iwl_connection_init_rx_config(priv); | ||
6197 | memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); | ||
6198 | } | ||
6199 | |||
6200 | /* Configure BT coexistence */ | ||
6201 | iwl_send_bt_config(priv); | ||
6202 | |||
6203 | /* Configure the adapter for unassociated operation */ | ||
6204 | iwl_commit_rxon(priv); | ||
6205 | |||
6206 | /* At this point, the NIC is initialized and operational */ | ||
6207 | priv->notif_missed_beacons = 0; | ||
6208 | set_bit(STATUS_READY, &priv->status); | ||
6209 | |||
6210 | iwl3945_reg_txpower_periodic(priv); | ||
6211 | |||
6212 | IWL_DEBUG_INFO("ALIVE processing complete.\n"); | ||
6213 | |||
6214 | if (priv->error_recovering) | ||
6215 | iwl_error_recovery(priv); | ||
6216 | |||
6217 | return; | ||
6218 | |||
6219 | restart: | ||
6220 | queue_work(priv->workqueue, &priv->restart); | ||
6221 | } | ||
6222 | |||
6223 | static void iwl_cancel_deferred_work(struct iwl_priv *priv); | ||
6224 | |||
6225 | static void __iwl_down(struct iwl_priv *priv) | ||
6226 | { | ||
6227 | unsigned long flags; | ||
6228 | int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); | ||
6229 | struct ieee80211_conf *conf = NULL; | ||
6230 | |||
6231 | IWL_DEBUG_INFO(DRV_NAME " is going down\n"); | ||
6232 | |||
6233 | conf = ieee80211_get_hw_conf(priv->hw); | ||
6234 | |||
6235 | if (!exit_pending) | ||
6236 | set_bit(STATUS_EXIT_PENDING, &priv->status); | ||
6237 | |||
6238 | iwl_clear_stations_table(priv); | ||
6239 | |||
6240 | /* Unblock any waiting calls */ | ||
6241 | wake_up_interruptible_all(&priv->wait_command_queue); | ||
6242 | |||
6243 | iwl_cancel_deferred_work(priv); | ||
6244 | |||
6245 | /* Wipe out the EXIT_PENDING status bit if we are not actually | ||
6246 | * exiting the module */ | ||
6247 | if (!exit_pending) | ||
6248 | clear_bit(STATUS_EXIT_PENDING, &priv->status); | ||
6249 | |||
6250 | /* stop and reset the on-board processor */ | ||
6251 | iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); | ||
6252 | |||
6253 | /* tell the device to stop sending interrupts */ | ||
6254 | iwl_disable_interrupts(priv); | ||
6255 | |||
6256 | if (priv->mac80211_registered) | ||
6257 | ieee80211_stop_queues(priv->hw); | ||
6258 | |||
6259 | /* If we have not previously called iwl_init() then | ||
6260 | * clear all bits but the RF Kill and SUSPEND bits and return */ | ||
6261 | if (!iwl_is_init(priv)) { | ||
6262 | priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << | ||
6263 | STATUS_RF_KILL_HW | | ||
6264 | test_bit(STATUS_RF_KILL_SW, &priv->status) << | ||
6265 | STATUS_RF_KILL_SW | | ||
6266 | test_bit(STATUS_IN_SUSPEND, &priv->status) << | ||
6267 | STATUS_IN_SUSPEND; | ||
6268 | goto exit; | ||
6269 | } | ||
6270 | |||
6271 | /* ...otherwise clear out all the status bits but the RF Kill and | ||
6272 | * SUSPEND bits and continue taking the NIC down. */ | ||
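/* test_bit() evaluates to 0 or 1, so shifting it left by the bit index
 * rebuilds a mask containing only the status bits we want to keep. */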
6273 | priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << | ||
6274 | STATUS_RF_KILL_HW | | ||
6275 | test_bit(STATUS_RF_KILL_SW, &priv->status) << | ||
6276 | STATUS_RF_KILL_SW | | ||
6277 | test_bit(STATUS_IN_SUSPEND, &priv->status) << | ||
6278 | STATUS_IN_SUSPEND | | ||
6279 | test_bit(STATUS_FW_ERROR, &priv->status) << | ||
6280 | STATUS_FW_ERROR; | ||
6281 | |||
6282 | spin_lock_irqsave(&priv->lock, flags); | ||
6283 | iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | ||
6284 | spin_unlock_irqrestore(&priv->lock, flags); | ||
6285 | |||
6286 | iwl_hw_txq_ctx_stop(priv); | ||
6287 | iwl_hw_rxq_stop(priv); | ||
6288 | |||
6289 | spin_lock_irqsave(&priv->lock, flags); | ||
6290 | if (!iwl_grab_restricted_access(priv)) { | ||
6291 | iwl_write_restricted_reg(priv, APMG_CLK_DIS_REG, | ||
6292 | APMG_CLK_VAL_DMA_CLK_RQT); | ||
6293 | iwl_release_restricted_access(priv); | ||
6294 | } | ||
6295 | spin_unlock_irqrestore(&priv->lock, flags); | ||
6296 | |||
6297 | udelay(5); | ||
6298 | |||
6299 | iwl_hw_nic_stop_master(priv); | ||
6300 | iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); | ||
6301 | iwl_hw_nic_reset(priv); | ||
6302 | |||
6303 | exit: | ||
6304 | memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); | ||
6305 | |||
6306 | if (priv->ibss_beacon) | ||
6307 | dev_kfree_skb(priv->ibss_beacon); | ||
6308 | priv->ibss_beacon = NULL; | ||
6309 | |||
6310 | /* clear out any free frames */ | ||
6311 | iwl_clear_free_frames(priv); | ||
6312 | } | ||
6313 | |||
6314 | static void iwl_down(struct iwl_priv *priv) | ||
6315 | { | ||
6316 | mutex_lock(&priv->mutex); | ||
6317 | __iwl_down(priv); | ||
6318 | mutex_unlock(&priv->mutex); | ||
6319 | } | ||
6320 | |||
6321 | #define MAX_HW_RESTARTS 5 | ||
6322 | |||
6323 | static int __iwl_up(struct iwl_priv *priv) | ||
6324 | { | ||
6325 | int rc, i; | ||
6326 | |||
6327 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { | ||
6328 | IWL_WARNING("Exit pending; will not bring the NIC up\n"); | ||
6329 | return -EIO; | ||
6330 | } | ||
6331 | |||
6332 | if (test_bit(STATUS_RF_KILL_SW, &priv->status)) { | ||
6333 | IWL_WARNING("Radio disabled by SW RF kill (module " | ||
6334 | "parameter)\n"); | ||
6335 | return 0; | ||
6336 | } | ||
6337 | |||
6338 | iwl_write32(priv, CSR_INT, 0xFFFFFFFF); | ||
6339 | |||
6340 | rc = iwl_hw_nic_init(priv); | ||
6341 | if (rc) { | ||
6342 | IWL_ERROR("Unable to init nic\n"); | ||
6343 | return rc; | ||
6344 | } | ||
6345 | |||
6346 | /* make sure rfkill handshake bits are cleared */ | ||
6347 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
6348 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, | ||
6349 | CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); | ||
6350 | |||
6351 | /* clear (again), then enable host interrupts */ | ||
6352 | iwl_write32(priv, CSR_INT, 0xFFFFFFFF); | ||
6353 | iwl_enable_interrupts(priv); | ||
6354 | |||
6355 | /* really make sure rfkill handshake bits are cleared */ | ||
6356 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
6357 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
6358 | |||
6359 | /* Copy original ucode data image from disk into backup cache. | ||
6360 | * This will be used to initialize the on-board processor's | ||
6361 | * data SRAM for a clean start when the runtime program first loads. */ | ||
6362 | memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, | ||
6363 | priv->ucode_data.len); | ||
6364 | |||
6365 | for (i = 0; i < MAX_HW_RESTARTS; i++) { | ||
6366 | |||
6367 | iwl_clear_stations_table(priv); | ||
6368 | |||
6369 | /* load bootstrap state machine, | ||
6370 | * load bootstrap program into processor's memory, | ||
6371 | * prepare to load the "initialize" uCode */ | ||
6372 | rc = iwl_load_bsm(priv); | ||
6373 | |||
6374 | if (rc) { | ||
6375 | IWL_ERROR("Unable to set up bootstrap uCode: %d\n", rc); | ||
6376 | continue; | ||
6377 | } | ||
6378 | |||
6379 | /* start card; "initialize" will load runtime ucode */ | ||
6380 | iwl_nic_start(priv); | ||
6381 | |||
6382 | /* MAC Address location in EEPROM same for 3945/4965 */ | ||
6383 | get_eeprom_mac(priv, priv->mac_addr); | ||
6384 | IWL_DEBUG_INFO("MAC address: " MAC_FMT "\n", | ||
6385 | MAC_ARG(priv->mac_addr)); | ||
6386 | |||
6387 | SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr); | ||
6388 | |||
6389 | IWL_DEBUG_INFO(DRV_NAME " is coming up\n"); | ||
6390 | |||
6391 | return 0; | ||
6392 | } | ||
6393 | |||
6394 | set_bit(STATUS_EXIT_PENDING, &priv->status); | ||
6395 | __iwl_down(priv); | ||
6396 | |||
6397 | /* tried to restart and config the device for as long as our | ||
6398 | * patience could withstand */ | ||
6399 | IWL_ERROR("Unable to initialize device after %d attempts.\n", i); | ||
6400 | return -EIO; | ||
6401 | } | ||
6402 | |||
6403 | |||
6404 | /***************************************************************************** | ||
6405 | * | ||
6406 | * Workqueue callbacks | ||
6407 | * | ||
6408 | *****************************************************************************/ | ||
6409 | |||
6410 | static void iwl_bg_init_alive_start(struct work_struct *data) | ||
6411 | { | ||
6412 | struct iwl_priv *priv = | ||
6413 | container_of(data, struct iwl_priv, init_alive_start.work); | ||
6414 | |||
6415 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
6416 | return; | ||
6417 | |||
6418 | mutex_lock(&priv->mutex); | ||
6419 | iwl_init_alive_start(priv); | ||
6420 | mutex_unlock(&priv->mutex); | ||
6421 | } | ||
6422 | |||
6423 | static void iwl_bg_alive_start(struct work_struct *data) | ||
6424 | { | ||
6425 | struct iwl_priv *priv = | ||
6426 | container_of(data, struct iwl_priv, alive_start.work); | ||
6427 | |||
6428 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
6429 | return; | ||
6430 | |||
6431 | mutex_lock(&priv->mutex); | ||
6432 | iwl_alive_start(priv); | ||
6433 | mutex_unlock(&priv->mutex); | ||
6434 | } | ||
6435 | |||
6436 | static void iwl_bg_rf_kill(struct work_struct *work) | ||
6437 | { | ||
6438 | struct iwl_priv *priv = container_of(work, struct iwl_priv, rf_kill); | ||
6439 | |||
6440 | wake_up_interruptible(&priv->wait_command_queue); | ||
6441 | |||
6442 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
6443 | return; | ||
6444 | |||
6445 | mutex_lock(&priv->mutex); | ||
6446 | |||
6447 | if (!iwl_is_rfkill(priv)) { | ||
6448 | IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL, | ||
6449 | "HW and/or SW RF Kill no longer active, restarting " | ||
6450 | "device\n"); | ||
6451 | if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
6452 | queue_work(priv->workqueue, &priv->restart); | ||
6453 | } else { | ||
6454 | |||
6455 | if (!test_bit(STATUS_RF_KILL_HW, &priv->status)) | ||
6456 | IWL_DEBUG_RF_KILL("Can not turn radio back on - " | ||
6457 | "disabled by SW switch\n"); | ||
6458 | else | ||
6459 | IWL_WARNING("Radio Frequency Kill Switch is On:\n" | ||
6460 | "Kill switch must be turned off for " | ||
6461 | "wireless networking to work.\n"); | ||
6462 | } | ||
6463 | mutex_unlock(&priv->mutex); | ||
6464 | } | ||
6465 | |||
6466 | #define IWL_SCAN_CHECK_WATCHDOG (7 * HZ) | ||
6467 | |||
6468 | static void iwl_bg_scan_check(struct work_struct *data) | ||
6469 | { | ||
6470 | struct iwl_priv *priv = | ||
6471 | container_of(data, struct iwl_priv, scan_check.work); | ||
6472 | |||
6473 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
6474 | return; | ||
6475 | |||
6476 | mutex_lock(&priv->mutex); | ||
6477 | if (test_bit(STATUS_SCANNING, &priv->status) || | ||
6478 | test_bit(STATUS_SCAN_ABORTING, &priv->status)) { | ||
6479 | IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, | ||
6480 | "Scan completion watchdog resetting adapter (%dms)\n", | ||
6481 | jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG)); | ||
6482 | if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
6483 | queue_work(priv->workqueue, &priv->restart); | ||
6484 | } | ||
6485 | mutex_unlock(&priv->mutex); | ||
6486 | } | ||
6487 | |||
6488 | static void iwl_bg_request_scan(struct work_struct *data) | ||
6489 | { | ||
6490 | struct iwl_priv *priv = | ||
6491 | container_of(data, struct iwl_priv, request_scan); | ||
6492 | struct iwl_host_cmd cmd = { | ||
6493 | .id = REPLY_SCAN_CMD, | ||
6494 | .len = sizeof(struct iwl_scan_cmd), | ||
6495 | .meta.flags = CMD_SIZE_HUGE, | ||
6496 | }; | ||
6497 | int rc = 0; | ||
6498 | struct iwl_scan_cmd *scan; | ||
6499 | struct ieee80211_conf *conf = NULL; | ||
6500 | u8 direct_mask; | ||
6501 | int phymode; | ||
6502 | |||
6503 | conf = ieee80211_get_hw_conf(priv->hw); | ||
6504 | |||
6505 | mutex_lock(&priv->mutex); | ||
6506 | |||
6507 | if (!iwl_is_ready(priv)) { | ||
6508 | IWL_WARNING("request scan called when driver not ready.\n"); | ||
6509 | goto done; | ||
6510 | } | ||
6511 | |||
6512 | /* Make sure the scan wasn't cancelled before this queued work | ||
6513 | * was given the chance to run... */ | ||
6514 | if (!test_bit(STATUS_SCANNING, &priv->status)) | ||
6515 | goto done; | ||
6516 | |||
6517 | /* This should never be called or scheduled if there is currently | ||
6518 | * a scan active in the hardware. */ | ||
6519 | if (test_bit(STATUS_SCAN_HW, &priv->status)) { | ||
6520 | IWL_DEBUG_INFO("Multiple concurrent scan requests. " | ||
6521 | "Ignoring second request.\n"); | ||
6522 | rc = -EIO; | ||
6523 | goto done; | ||
6524 | } | ||
6525 | |||
6526 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { | ||
6527 | IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n"); | ||
6528 | goto done; | ||
6529 | } | ||
6530 | |||
6531 | if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { | ||
6532 | IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n"); | ||
6533 | goto done; | ||
6534 | } | ||
6535 | |||
6536 | if (iwl_is_rfkill(priv)) { | ||
6537 | IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n"); | ||
6538 | goto done; | ||
6539 | } | ||
6540 | |||
6541 | if (!test_bit(STATUS_READY, &priv->status)) { | ||
6542 | IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n"); | ||
6543 | goto done; | ||
6544 | } | ||
6545 | |||
6546 | if (!priv->scan_bands) { | ||
6547 | IWL_DEBUG_HC("Aborting scan due to no requested bands\n"); | ||
6548 | goto done; | ||
6549 | } | ||
6550 | |||
6551 | if (!priv->scan) { | ||
6552 | priv->scan = kmalloc(sizeof(struct iwl_scan_cmd) + | ||
6553 | IWL_MAX_SCAN_SIZE, GFP_KERNEL); | ||
6554 | if (!priv->scan) { | ||
6555 | rc = -ENOMEM; | ||
6556 | goto done; | ||
6557 | } | ||
6558 | } | ||
6559 | scan = priv->scan; | ||
6560 | memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE); | ||
6561 | |||
6562 | scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; | ||
6563 | scan->quiet_time = IWL_ACTIVE_QUIET_TIME; | ||
6564 | |||
6565 | if (iwl_is_associated(priv)) { | ||
6566 | u16 interval = 0; | ||
6567 | u32 extra; | ||
6568 | u32 suspend_time = 100; | ||
6569 | u32 scan_suspend_time = 100; | ||
6570 | unsigned long flags; | ||
6571 | |||
6572 | IWL_DEBUG_INFO("Scanning while associated...\n"); | ||
6573 | |||
6574 | spin_lock_irqsave(&priv->lock, flags); | ||
6575 | interval = priv->beacon_int; | ||
6576 | spin_unlock_irqrestore(&priv->lock, flags); | ||
6577 | |||
6578 | scan->suspend_time = 0; | ||
6579 | scan->max_out_time = cpu_to_le32(600 * 1024); | ||
6580 | if (!interval) | ||
6581 | interval = suspend_time; | ||
6582 | /* | ||
6583 | * suspend time format: | ||
6584 | * 0-19: beacon interval in usec (time before exec.) | ||
6585 | * 20-23: 0 | ||
6586 | * 24-31: number of beacons (suspend between channels) | ||
6587 | */ | ||
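/* Illustrative example (hypothetical values): suspend_time = 100 and a
 * beacon interval of 40 give extra = (100 / 40) << 24 = 2 << 24, and the
 * remainder (100 % 40) * 1024 = 20480 usec fills bits 0-19, so
 * scan_suspend_time = 0x02005000. */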
6588 | |||
6589 | extra = (suspend_time / interval) << 24; | ||
6590 | scan_suspend_time = 0xFF0FFFFF & | ||
6591 | (extra | ((suspend_time % interval) * 1024)); | ||
6592 | |||
6593 | scan->suspend_time = cpu_to_le32(scan_suspend_time); | ||
6594 | IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n", | ||
6595 | scan_suspend_time, interval); | ||
6596 | } | ||
6597 | |||
6598 | /* We should add the ability for user to lock to PASSIVE ONLY */ | ||
6599 | if (priv->one_direct_scan) { | ||
6600 | IWL_DEBUG_SCAN | ||
6601 | ("Kicking off one direct scan for '%s'\n", | ||
6602 | iwl_escape_essid(priv->direct_ssid, | ||
6603 | priv->direct_ssid_len)); | ||
6604 | scan->direct_scan[0].id = WLAN_EID_SSID; | ||
6605 | scan->direct_scan[0].len = priv->direct_ssid_len; | ||
6606 | memcpy(scan->direct_scan[0].ssid, | ||
6607 | priv->direct_ssid, priv->direct_ssid_len); | ||
6608 | direct_mask = 1; | ||
6609 | } else if (!iwl_is_associated(priv)) { | ||
6610 | scan->direct_scan[0].id = WLAN_EID_SSID; | ||
6611 | scan->direct_scan[0].len = priv->essid_len; | ||
6612 | memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len); | ||
6613 | direct_mask = 1; | ||
6614 | } else | ||
6615 | direct_mask = 0; | ||
6616 | |||
6617 | /* We don't build a direct scan probe request; the uCode will do | ||
6618 | * that based on the direct_mask added to each channel entry */ | ||
6619 | scan->tx_cmd.len = cpu_to_le16( | ||
6620 | iwl_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data, | ||
6621 | IWL_MAX_SCAN_SIZE - sizeof(scan), 0)); | ||
6622 | scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; | ||
6623 | scan->tx_cmd.sta_id = priv->hw_setting.bcast_sta_id; | ||
6624 | scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; | ||
6625 | |||
6626 | /* flags + rate selection */ | ||
6627 | |||
6628 | switch (priv->scan_bands) { | ||
6629 | case 2: | ||
6630 | scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; | ||
6631 | scan->tx_cmd.rate = IWL_RATE_1M_PLCP; | ||
6632 | scan->good_CRC_th = 0; | ||
6633 | phymode = MODE_IEEE80211G; | ||
6634 | break; | ||
6635 | |||
6636 | case 1: | ||
6637 | scan->tx_cmd.rate = IWL_RATE_6M_PLCP; | ||
6638 | scan->good_CRC_th = IWL_GOOD_CRC_TH; | ||
6639 | phymode = MODE_IEEE80211A; | ||
6640 | break; | ||
6641 | |||
6642 | default: | ||
6643 | IWL_WARNING("Invalid scan band count\n"); | ||
6644 | goto done; | ||
6645 | } | ||
6646 | |||
6647 | /* select Rx antennas */ | ||
6648 | scan->flags |= iwl3945_get_antenna_flags(priv); | ||
6649 | |||
6650 | if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) | ||
6651 | scan->filter_flags = RXON_FILTER_PROMISC_MSK; | ||
6652 | |||
6653 | if (direct_mask) | ||
6654 | IWL_DEBUG_SCAN | ||
6655 | ("Initiating direct scan for %s.\n", | ||
6656 | iwl_escape_essid(priv->essid, priv->essid_len)); | ||
6657 | else | ||
6658 | IWL_DEBUG_SCAN("Initiating indirect scan.\n"); | ||
6659 | |||
6660 | scan->channel_count = | ||
6661 | iwl_get_channels_for_scan( | ||
6662 | priv, phymode, 1, /* active */ | ||
6663 | direct_mask, | ||
6664 | (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); | ||
6665 | |||
6666 | cmd.len += le16_to_cpu(scan->tx_cmd.len) + | ||
6667 | scan->channel_count * sizeof(struct iwl_scan_channel); | ||
6668 | cmd.data = scan; | ||
6669 | scan->len = cpu_to_le16(cmd.len); | ||
6670 | |||
6671 | set_bit(STATUS_SCAN_HW, &priv->status); | ||
6672 | rc = iwl_send_cmd_sync(priv, &cmd); | ||
6673 | if (rc) | ||
6674 | goto done; | ||
6675 | |||
6676 | queue_delayed_work(priv->workqueue, &priv->scan_check, | ||
6677 | IWL_SCAN_CHECK_WATCHDOG); | ||
6678 | |||
6679 | mutex_unlock(&priv->mutex); | ||
6680 | return; | ||
6681 | |||
6682 | done: | ||
6683 | /* inform mac80211 that the scan was aborted */ | ||
6684 | queue_work(priv->workqueue, &priv->scan_completed); | ||
6685 | mutex_unlock(&priv->mutex); | ||
6686 | } | ||
6687 | |||
6688 | static void iwl_bg_up(struct work_struct *data) | ||
6689 | { | ||
6690 | struct iwl_priv *priv = container_of(data, struct iwl_priv, up); | ||
6691 | |||
6692 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
6693 | return; | ||
6694 | |||
6695 | mutex_lock(&priv->mutex); | ||
6696 | __iwl_up(priv); | ||
6697 | mutex_unlock(&priv->mutex); | ||
6698 | } | ||
6699 | |||
6700 | static void iwl_bg_restart(struct work_struct *data) | ||
6701 | { | ||
6702 | struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); | ||
6703 | |||
6704 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
6705 | return; | ||
6706 | |||
6707 | iwl_down(priv); | ||
6708 | queue_work(priv->workqueue, &priv->up); | ||
6709 | } | ||
6710 | |||
6711 | static void iwl_bg_rx_replenish(struct work_struct *data) | ||
6712 | { | ||
6713 | struct iwl_priv *priv = | ||
6714 | container_of(data, struct iwl_priv, rx_replenish); | ||
6715 | |||
6716 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
6717 | return; | ||
6718 | |||
6719 | mutex_lock(&priv->mutex); | ||
6720 | iwl_rx_replenish(priv); | ||
6721 | mutex_unlock(&priv->mutex); | ||
6722 | } | ||
6723 | |||
6724 | static void iwl_bg_post_associate(struct work_struct *data) | ||
6725 | { | ||
6726 | struct iwl_priv *priv = container_of(data, struct iwl_priv, | ||
6727 | post_associate.work); | ||
6728 | |||
6729 | int rc = 0; | ||
6730 | struct ieee80211_conf *conf = NULL; | ||
6731 | |||
6732 | if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { | ||
6733 | IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__); | ||
6734 | return; | ||
6735 | } | ||
6736 | |||
6737 | |||
6738 | IWL_DEBUG_ASSOC("Associated as %d to: " MAC_FMT "\n", | ||
6739 | priv->assoc_id, MAC_ARG(priv->active_rxon.bssid_addr)); | ||
6740 | |||
6741 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
6742 | return; | ||
6743 | |||
6744 | mutex_lock(&priv->mutex); | ||
6745 | |||
6746 | conf = ieee80211_get_hw_conf(priv->hw); | ||
6747 | |||
6748 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
6749 | iwl_commit_rxon(priv); | ||
6750 | |||
6751 | memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd)); | ||
6752 | iwl_setup_rxon_timing(priv); | ||
6753 | rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, | ||
6754 | sizeof(priv->rxon_timing), &priv->rxon_timing); | ||
6755 | if (rc) | ||
6756 | IWL_WARNING("REPLY_RXON_TIMING failed - " | ||
6757 | "Attempting to continue.\n"); | ||
6758 | |||
6759 | priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; | ||
6760 | |||
6761 | priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); | ||
6762 | |||
6763 | IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n", | ||
6764 | priv->assoc_id, priv->beacon_int); | ||
6765 | |||
6766 | if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) | ||
6767 | priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; | ||
6768 | else | ||
6769 | priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; | ||
6770 | |||
6771 | if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { | ||
6772 | if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) | ||
6773 | priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; | ||
6774 | else | ||
6775 | priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; | ||
6776 | |||
6777 | if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) | ||
6778 | priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; | ||
6779 | |||
6780 | } | ||
6781 | |||
6782 | iwl_commit_rxon(priv); | ||
6783 | |||
6784 | switch (priv->iw_mode) { | ||
6785 | case IEEE80211_IF_TYPE_STA: | ||
6786 | iwl_rate_scale_init(priv->hw, IWL_AP_ID); | ||
6787 | break; | ||
6788 | |||
6789 | case IEEE80211_IF_TYPE_IBSS: | ||
6790 | |||
6791 | /* clear out the station table */ | ||
6792 | iwl_clear_stations_table(priv); | ||
6793 | |||
6794 | iwl_rxon_add_station(priv, BROADCAST_ADDR, 0); | ||
6795 | iwl_rxon_add_station(priv, priv->bssid, 0); | ||
6796 | iwl3945_sync_sta(priv, IWL_STA_ID, | ||
6797 | (priv->phymode == MODE_IEEE80211A) ? | ||
6798 | IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP, | ||
6799 | CMD_ASYNC); | ||
6800 | iwl_rate_scale_init(priv->hw, IWL_STA_ID); | ||
6801 | iwl_send_beacon_cmd(priv); | ||
6802 | |||
6803 | break; | ||
6804 | |||
6805 | default: | ||
6806 | IWL_ERROR("%s Should not be called in %d mode\n", | ||
6807 | __FUNCTION__, priv->iw_mode); | ||
6808 | break; | ||
6809 | } | ||
6810 | |||
6811 | iwl_sequence_reset(priv); | ||
6812 | |||
6813 | #ifdef CONFIG_IWLWIFI_QOS | ||
6814 | iwl_activate_qos(priv, 0); | ||
6815 | #endif /* CONFIG_IWLWIFI_QOS */ | ||
6816 | mutex_unlock(&priv->mutex); | ||
6817 | } | ||
6818 | |||
6819 | static void iwl_bg_abort_scan(struct work_struct *work) | ||
6820 | { | ||
6821 | struct iwl_priv *priv = container_of(work, struct iwl_priv, | ||
6822 | abort_scan); | ||
6823 | |||
6824 | if (!iwl_is_ready(priv)) | ||
6825 | return; | ||
6826 | |||
6827 | mutex_lock(&priv->mutex); | ||
6828 | |||
6829 | set_bit(STATUS_SCAN_ABORTING, &priv->status); | ||
6830 | iwl_send_scan_abort(priv); | ||
6831 | |||
6832 | mutex_unlock(&priv->mutex); | ||
6833 | } | ||
6834 | |||
6835 | static void iwl_bg_scan_completed(struct work_struct *work) | ||
6836 | { | ||
6837 | struct iwl_priv *priv = | ||
6838 | container_of(work, struct iwl_priv, scan_completed); | ||
6839 | |||
6840 | IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete scan\n"); | ||
6841 | |||
6842 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
6843 | return; | ||
6844 | |||
6845 | ieee80211_scan_completed(priv->hw); | ||
6846 | |||
6847 | /* Since setting the TXPOWER may have been deferred while | ||
6848 | * performing the scan, fire one off */ | ||
6849 | mutex_lock(&priv->mutex); | ||
6850 | iwl_hw_reg_send_txpower(priv); | ||
6851 | mutex_unlock(&priv->mutex); | ||
6852 | } | ||
6853 | |||
6854 | /***************************************************************************** | ||
6855 | * | ||
6856 | * mac80211 entry point functions | ||
6857 | * | ||
6858 | *****************************************************************************/ | ||
6859 | |||
6860 | static int iwl_mac_open(struct ieee80211_hw *hw) | ||
6861 | { | ||
6862 | struct iwl_priv *priv = hw->priv; | ||
6863 | |||
6864 | IWL_DEBUG_MAC80211("enter\n"); | ||
6865 | |||
6866 | /* we should be verifying the device is ready to be opened */ | ||
6867 | mutex_lock(&priv->mutex); | ||
6868 | |||
6869 | priv->is_open = 1; | ||
6870 | |||
6871 | if (!iwl_is_rfkill(priv)) | ||
6872 | ieee80211_start_queues(priv->hw); | ||
6873 | |||
6874 | mutex_unlock(&priv->mutex); | ||
6875 | IWL_DEBUG_MAC80211("leave\n"); | ||
6876 | return 0; | ||
6877 | } | ||
6878 | |||
6879 | static int iwl_mac_stop(struct ieee80211_hw *hw) | ||
6880 | { | ||
6881 | struct iwl_priv *priv = hw->priv; | ||
6882 | |||
6883 | IWL_DEBUG_MAC80211("enter\n"); | ||
6884 | priv->is_open = 0; | ||
6885 | /*netif_stop_queue(dev); */ | ||
6886 | flush_workqueue(priv->workqueue); | ||
6887 | IWL_DEBUG_MAC80211("leave\n"); | ||
6888 | |||
6889 | return 0; | ||
6890 | } | ||
6891 | |||
6892 | static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, | ||
6893 | struct ieee80211_tx_control *ctl) | ||
6894 | { | ||
6895 | struct iwl_priv *priv = hw->priv; | ||
6896 | |||
6897 | IWL_DEBUG_MAC80211("enter\n"); | ||
6898 | |||
6899 | if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { | ||
6900 | IWL_DEBUG_MAC80211("leave - monitor\n"); | ||
6901 | return -1; | ||
6902 | } | ||
6903 | |||
6904 | IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, | ||
6905 | ctl->tx_rate); | ||
6906 | |||
6907 | if (iwl_tx_skb(priv, skb, ctl)) | ||
6908 | dev_kfree_skb_any(skb); | ||
6909 | |||
6910 | IWL_DEBUG_MAC80211("leave\n"); | ||
6911 | return 0; | ||
6912 | } | ||
6913 | |||
6914 | static int iwl_mac_add_interface(struct ieee80211_hw *hw, | ||
6915 | struct ieee80211_if_init_conf *conf) | ||
6916 | { | ||
6917 | struct iwl_priv *priv = hw->priv; | ||
6918 | unsigned long flags; | ||
6919 | |||
6920 | IWL_DEBUG_MAC80211("enter: id %d, type %d\n", conf->if_id, conf->type); | ||
6921 | if (conf->mac_addr) | ||
6922 | IWL_DEBUG_MAC80211("enter: MAC " MAC_FMT "\n", | ||
6923 | MAC_ARG(conf->mac_addr)); | ||
6924 | |||
6925 | if (priv->interface_id) { | ||
6926 | IWL_DEBUG_MAC80211("leave - interface_id != 0\n"); | ||
6927 | return 0; | ||
6928 | } | ||
6929 | |||
6930 | spin_lock_irqsave(&priv->lock, flags); | ||
6931 | priv->interface_id = conf->if_id; | ||
6932 | |||
6933 | spin_unlock_irqrestore(&priv->lock, flags); | ||
6934 | |||
6935 | mutex_lock(&priv->mutex); | ||
6936 | iwl_set_mode(priv, conf->type); | ||
6937 | |||
6938 | IWL_DEBUG_MAC80211("leave\n"); | ||
6939 | mutex_unlock(&priv->mutex); | ||
6940 | |||
6941 | return 0; | ||
6942 | } | ||
6943 | |||
6944 | /** | ||
6945 | * iwl_mac_config - mac80211 config callback | ||
6946 | * | ||
6947 | * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to | ||
6948 | * be set inappropriately and the driver currently sets the hardware up to | ||
6949 | * use it whenever needed. | ||
6950 | */ | ||
6951 | static int iwl_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) | ||
6952 | { | ||
6953 | struct iwl_priv *priv = hw->priv; | ||
6954 | const struct iwl_channel_info *ch_info; | ||
6955 | unsigned long flags; | ||
6956 | |||
6957 | mutex_lock(&priv->mutex); | ||
6958 | IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel); | ||
6959 | |||
6960 | if (!iwl_is_ready(priv)) { | ||
6961 | IWL_DEBUG_MAC80211("leave - not ready\n"); | ||
6962 | mutex_unlock(&priv->mutex); | ||
6963 | return -EIO; | ||
6964 | } | ||
6965 | |||
6966 | /* TODO: Figure out how to get ieee80211_local->sta_scanning w/ only | ||
6967 | * what is exposed through include/ declarations */ | ||
6968 | if (unlikely(!iwl_param_disable_hw_scan && | ||
6969 | test_bit(STATUS_SCANNING, &priv->status))) { | ||
6970 | IWL_DEBUG_MAC80211("leave - scanning\n"); | ||
6971 | mutex_unlock(&priv->mutex); | ||
6972 | return 0; | ||
6973 | } | ||
6974 | |||
6975 | spin_lock_irqsave(&priv->lock, flags); | ||
6976 | |||
6977 | ch_info = iwl_get_channel_info(priv, conf->phymode, conf->channel); | ||
6978 | if (!is_channel_valid(ch_info)) { | ||
6979 | IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this SKU.\n", | ||
6980 | conf->channel, conf->phymode); | ||
6981 | IWL_DEBUG_MAC80211("leave - invalid channel\n"); | ||
6982 | spin_unlock_irqrestore(&priv->lock, flags); | ||
6983 | mutex_unlock(&priv->mutex); | ||
6984 | return -EINVAL; | ||
6985 | } | ||
6986 | |||
6987 | iwl_set_rxon_channel(priv, conf->phymode, conf->channel); | ||
6988 | |||
6989 | iwl_set_flags_for_phymode(priv, conf->phymode); | ||
6990 | |||
6991 | /* The list of supported rates and rate mask can be different | ||
6992 | * for each phymode; since the phymode may have changed, reset | ||
6993 | * the rate mask to what mac80211 lists */ | ||
6994 | iwl_set_rate(priv); | ||
6995 | |||
6996 | spin_unlock_irqrestore(&priv->lock, flags); | ||
6997 | |||
6998 | #ifdef IEEE80211_CONF_CHANNEL_SWITCH | ||
6999 | if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) { | ||
7000 | iwl_hw_channel_switch(priv, conf->channel); | ||
7001 | mutex_unlock(&priv->mutex); | ||
7002 | return 0; | ||
7003 | } | ||
7004 | #endif | ||
7005 | |||
7006 | iwl_radio_kill_sw(priv, !conf->radio_enabled); | ||
7007 | |||
7008 | if (!conf->radio_enabled) { | ||
7009 | IWL_DEBUG_MAC80211("leave - radio disabled\n"); | ||
7010 | mutex_unlock(&priv->mutex); | ||
7011 | return 0; | ||
7012 | } | ||
7013 | |||
7014 | if (iwl_is_rfkill(priv)) { | ||
7015 | IWL_DEBUG_MAC80211("leave - RF kill\n"); | ||
7016 | mutex_unlock(&priv->mutex); | ||
7017 | return -EIO; | ||
7018 | } | ||
7019 | |||
7020 | iwl_set_rate(priv); | ||
7021 | |||
7022 | if (memcmp(&priv->active_rxon, | ||
7023 | &priv->staging_rxon, sizeof(priv->staging_rxon))) | ||
7024 | iwl_commit_rxon(priv); | ||
7025 | else | ||
7026 | IWL_DEBUG_INFO("Not re-sending same RXON configuration.\n"); | ||
7027 | |||
7028 | IWL_DEBUG_MAC80211("leave\n"); | ||
7029 | |||
7030 | mutex_unlock(&priv->mutex); | ||
7031 | |||
7032 | return 0; | ||
7033 | } | ||
7034 | |||
7035 | static void iwl_config_ap(struct iwl_priv *priv) | ||
7036 | { | ||
7037 | int rc = 0; | ||
7038 | |||
7039 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
7040 | return; | ||
7041 | |||
7042 | /* The following should be done only at AP bring up */ | ||
7043 | if ((priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) == 0) { | ||
7044 | |||
7045 | /* RXON - unassoc (to set timing command) */ | ||
7046 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
7047 | iwl_commit_rxon(priv); | ||
7048 | |||
7049 | /* RXON Timing */ | ||
7050 | memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd)); | ||
7051 | iwl_setup_rxon_timing(priv); | ||
7052 | rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, | ||
7053 | sizeof(priv->rxon_timing), &priv->rxon_timing); | ||
7054 | if (rc) | ||
7055 | IWL_WARNING("REPLY_RXON_TIMING failed - " | ||
7056 | "Attempting to continue.\n"); | ||
7057 | |||
7058 | /* FIXME: what should be the assoc_id for AP? */ | ||
7059 | priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); | ||
7060 | if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) | ||
7061 | priv->staging_rxon.flags |= | ||
7062 | RXON_FLG_SHORT_PREAMBLE_MSK; | ||
7063 | else | ||
7064 | priv->staging_rxon.flags &= | ||
7065 | ~RXON_FLG_SHORT_PREAMBLE_MSK; | ||
7066 | |||
7067 | if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { | ||
7068 | if (priv->assoc_capability & | ||
7069 | WLAN_CAPABILITY_SHORT_SLOT_TIME) | ||
7070 | priv->staging_rxon.flags |= | ||
7071 | RXON_FLG_SHORT_SLOT_MSK; | ||
7072 | else | ||
7073 | priv->staging_rxon.flags &= | ||
7074 | ~RXON_FLG_SHORT_SLOT_MSK; | ||
7075 | |||
7076 | if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) | ||
7077 | priv->staging_rxon.flags &= | ||
7078 | ~RXON_FLG_SHORT_SLOT_MSK; | ||
7079 | } | ||
7080 | /* restore RXON assoc */ | ||
7081 | priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; | ||
7082 | iwl_commit_rxon(priv); | ||
7083 | iwl_rxon_add_station(priv, BROADCAST_ADDR, 0); | ||
7084 | iwl_send_beacon_cmd(priv); | ||
7085 | } else | ||
7086 | iwl_send_beacon_cmd(priv); | ||
7087 | |||
7088 | /* FIXME - we need to add code here to detect a totally new | ||
7089 | * configuration, reset the AP, unassoc, rxon timing, assoc, | ||
7090 | * clear sta table, add BCAST sta... */ | ||
7091 | } | ||
7092 | |||
7093 | static int iwl_mac_config_interface(struct ieee80211_hw *hw, int if_id, | ||
7094 | struct ieee80211_if_conf *conf) | ||
7095 | { | ||
7096 | struct iwl_priv *priv = hw->priv; | ||
7097 | unsigned long flags; | ||
7098 | int rc; | ||
7099 | |||
7100 | if (conf == NULL) | ||
7101 | return -EIO; | ||
7102 | |||
7103 | if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && | ||
7104 | (!conf->beacon || !conf->ssid_len)) { | ||
7105 | IWL_DEBUG_MAC80211 | ||
7106 | ("Leaving in AP mode because HostAPD is not ready.\n"); | ||
7107 | return 0; | ||
7108 | } | ||
7109 | |||
7110 | mutex_lock(&priv->mutex); | ||
7111 | |||
7112 | IWL_DEBUG_MAC80211("enter: interface id %d\n", if_id); | ||
7113 | if (conf->bssid) | ||
7114 | IWL_DEBUG_MAC80211("bssid: " MAC_FMT "\n", | ||
7115 | MAC_ARG(conf->bssid)); | ||
7116 | |||
7117 | if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) && | ||
7118 | !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) { | ||
7119 | IWL_DEBUG_MAC80211("leave - scanning\n"); | ||
7120 | mutex_unlock(&priv->mutex); | ||
7121 | return 0; | ||
7122 | } | ||
7123 | |||
7124 | if (priv->interface_id != if_id) { | ||
7125 | IWL_DEBUG_MAC80211("leave - interface_id != if_id\n"); | ||
7126 | mutex_unlock(&priv->mutex); | ||
7127 | return 0; | ||
7128 | } | ||
7129 | |||
7130 | if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { | ||
7131 | if (!conf->bssid) { | ||
7132 | conf->bssid = priv->mac_addr; | ||
7133 | memcpy(priv->bssid, priv->mac_addr, ETH_ALEN); | ||
7134 | IWL_DEBUG_MAC80211("bssid was set to: " MAC_FMT "\n", | ||
7135 | MAC_ARG(conf->bssid)); | ||
7136 | } | ||
7137 | if (priv->ibss_beacon) | ||
7138 | dev_kfree_skb(priv->ibss_beacon); | ||
7139 | |||
7140 | priv->ibss_beacon = conf->beacon; | ||
7141 | } | ||
7142 | |||
7143 | if (conf->bssid && !is_zero_ether_addr(conf->bssid) && | ||
7144 | !is_multicast_ether_addr(conf->bssid)) { | ||
7145 | /* If there is currently a HW scan going on in the background | ||
7146 | * then we need to cancel it else the RXON below will fail. */ | ||
7147 | if (iwl_scan_cancel_timeout(priv, 100)) { | ||
7148 | IWL_WARNING("Aborted scan still in progress " | ||
7149 | "after 100ms\n"); | ||
7150 | IWL_DEBUG_MAC80211("leaving - scan abort failed.\n"); | ||
7151 | mutex_unlock(&priv->mutex); | ||
7152 | return -EAGAIN; | ||
7153 | } | ||
7154 | memcpy(priv->staging_rxon.bssid_addr, conf->bssid, ETH_ALEN); | ||
7155 | |||
7156 | /* TODO: Audit driver for usage of these members and see | ||
7157 | * if mac80211 deprecates them (priv->bssid looks like it | ||
7158 | * shouldn't be there, but I haven't scanned the IBSS code | ||
7159 | * to verify) - jpk */ | ||
7160 | memcpy(priv->bssid, conf->bssid, ETH_ALEN); | ||
7161 | |||
7162 | if (priv->iw_mode == IEEE80211_IF_TYPE_AP) | ||
7163 | iwl_config_ap(priv); | ||
7164 | else { | ||
7165 | priv->staging_rxon.filter_flags |= | ||
7166 | RXON_FILTER_ASSOC_MSK; | ||
7167 | rc = iwl_commit_rxon(priv); | ||
7168 | if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc) | ||
7169 | iwl_rxon_add_station( | ||
7170 | priv, priv->active_rxon.bssid_addr, 1); | ||
7171 | } | ||
7172 | |||
7173 | } else { | ||
7174 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
7175 | iwl_commit_rxon(priv); | ||
7176 | } | ||
7177 | |||
7178 | spin_lock_irqsave(&priv->lock, flags); | ||
7179 | if (!conf->ssid_len) | ||
7180 | memset(priv->essid, 0, IW_ESSID_MAX_SIZE); | ||
7181 | else | ||
7182 | memcpy(priv->essid, conf->ssid, conf->ssid_len); | ||
7183 | |||
7184 | priv->essid_len = conf->ssid_len; | ||
7185 | spin_unlock_irqrestore(&priv->lock, flags); | ||
7186 | |||
7187 | IWL_DEBUG_MAC80211("leave\n"); | ||
7188 | mutex_unlock(&priv->mutex); | ||
7189 | |||
7190 | return 0; | ||
7191 | } | ||
7192 | |||
7193 | static void iwl_mac_remove_interface(struct ieee80211_hw *hw, | ||
7194 | struct ieee80211_if_init_conf *conf) | ||
7195 | { | ||
7196 | struct iwl_priv *priv = hw->priv; | ||
7197 | |||
7198 | IWL_DEBUG_MAC80211("enter\n"); | ||
7199 | |||
7200 | mutex_lock(&priv->mutex); | ||
7201 | if (priv->interface_id == conf->if_id) { | ||
7202 | priv->interface_id = 0; | ||
7203 | memset(priv->bssid, 0, ETH_ALEN); | ||
7204 | memset(priv->essid, 0, IW_ESSID_MAX_SIZE); | ||
7205 | priv->essid_len = 0; | ||
7206 | } | ||
7207 | mutex_unlock(&priv->mutex); | ||
7208 | |||
7209 | IWL_DEBUG_MAC80211("leave\n"); | ||
7210 | |||
7211 | } | ||
7212 | |||
7213 | #define IWL_DELAY_NEXT_SCAN (HZ*2) | ||
7214 | static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) | ||
7215 | { | ||
7216 | int rc = 0; | ||
7217 | unsigned long flags; | ||
7218 | struct iwl_priv *priv = hw->priv; | ||
7219 | |||
7220 | IWL_DEBUG_MAC80211("enter\n"); | ||
7221 | |||
7222 | spin_lock_irqsave(&priv->lock, flags); | ||
7223 | |||
7224 | if (!iwl_is_ready_rf(priv)) { | ||
7225 | rc = -EIO; | ||
7226 | IWL_DEBUG_MAC80211("leave - not ready or exit pending\n"); | ||
7227 | goto out_unlock; | ||
7228 | } | ||
7229 | |||
7230 | if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { /* APs don't scan */ | ||
7231 | rc = -EIO; | ||
7232 | IWL_ERROR("ERROR: APs don't scan\n"); | ||
7233 | goto out_unlock; | ||
7234 | } | ||
7235 | |||
7236 | /* if we just finished scan ask for delay */ | ||
7237 | if (priv->last_scan_jiffies && | ||
7238 | time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN, | ||
7239 | jiffies)) { | ||
7240 | rc = -EAGAIN; | ||
7241 | goto out_unlock; | ||
7242 | } | ||
7243 | if (len) { | ||
7244 | IWL_DEBUG_SCAN("direct scan for " | ||
7245 | "%s [%d]\n", | ||
7246 | iwl_escape_essid(ssid, len), (int)len); | ||
7247 | |||
7248 | priv->one_direct_scan = 1; | ||
7249 | priv->direct_ssid_len = (u8) | ||
7250 | min((u8) len, (u8) IW_ESSID_MAX_SIZE); | ||
7251 | memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len); | ||
7252 | } | ||
7253 | |||
7254 | rc = iwl_scan_initiate(priv); | ||
7255 | |||
7256 | IWL_DEBUG_MAC80211("leave\n"); | ||
7257 | |||
7258 | out_unlock: | ||
7259 | spin_unlock_irqrestore(&priv->lock, flags); | ||
7260 | |||
7261 | return rc; | ||
7262 | } | ||
7263 | |||
7264 | static int iwl_mac_set_key(struct ieee80211_hw *hw, set_key_cmd cmd, | ||
7265 | const u8 *local_addr, const u8 *addr, | ||
7266 | struct ieee80211_key_conf *key) | ||
7267 | { | ||
7268 | struct iwl_priv *priv = hw->priv; | ||
7269 | int rc = 0; | ||
7270 | u8 sta_id; | ||
7271 | |||
7272 | IWL_DEBUG_MAC80211("enter\n"); | ||
7273 | |||
7274 | if (!iwl_param_hwcrypto) { | ||
7275 | IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n"); | ||
7276 | return -EOPNOTSUPP; | ||
7277 | } | ||
7278 | |||
7279 | if (is_zero_ether_addr(addr)) | ||
7280 | /* only support pairwise keys */ | ||
7281 | return -EOPNOTSUPP; | ||
7282 | |||
7283 | sta_id = iwl_hw_find_station(priv, addr); | ||
7284 | if (sta_id == IWL_INVALID_STATION) { | ||
7285 | IWL_DEBUG_MAC80211("leave - " MAC_FMT " not in station map.\n", | ||
7286 | MAC_ARG(addr)); | ||
7287 | return -EINVAL; | ||
7288 | } | ||
7289 | |||
7290 | mutex_lock(&priv->mutex); | ||
7291 | |||
7292 | switch (cmd) { | ||
7293 | case SET_KEY: | ||
7294 | rc = iwl_update_sta_key_info(priv, key, sta_id); | ||
7295 | if (!rc) { | ||
7296 | iwl_set_rxon_hwcrypto(priv, 1); | ||
7297 | iwl_commit_rxon(priv); | ||
7298 | key->hw_key_idx = sta_id; | ||
7299 | IWL_DEBUG_MAC80211("set_key success, using hwcrypto\n"); | ||
7300 | key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; | ||
7301 | } | ||
7302 | break; | ||
7303 | case DISABLE_KEY: | ||
7304 | rc = iwl_clear_sta_key_info(priv, sta_id); | ||
7305 | if (!rc) { | ||
7306 | iwl_set_rxon_hwcrypto(priv, 0); | ||
7307 | iwl_commit_rxon(priv); | ||
7308 | IWL_DEBUG_MAC80211("disable hwcrypto key\n"); | ||
7309 | } | ||
7310 | break; | ||
7311 | default: | ||
7312 | rc = -EINVAL; | ||
7313 | } | ||
7314 | |||
7315 | IWL_DEBUG_MAC80211("leave\n"); | ||
7316 | mutex_unlock(&priv->mutex); | ||
7317 | |||
7318 | return rc; | ||
7319 | } | ||
7320 | |||
7321 | static int iwl_mac_conf_tx(struct ieee80211_hw *hw, int queue, | ||
7322 | const struct ieee80211_tx_queue_params *params) | ||
7323 | { | ||
7324 | struct iwl_priv *priv = hw->priv; | ||
7325 | #ifdef CONFIG_IWLWIFI_QOS | ||
7326 | unsigned long flags; | ||
7327 | int q; | ||
7328 | #endif /* CONFIG_IWLWIFI_QOS */ | ||
7329 | |||
7330 | IWL_DEBUG_MAC80211("enter\n"); | ||
7331 | |||
7332 | if (!iwl_is_ready_rf(priv)) { | ||
7333 | IWL_DEBUG_MAC80211("leave - RF not ready\n"); | ||
7334 | return -EIO; | ||
7335 | } | ||
7336 | |||
7337 | if (queue >= AC_NUM) { | ||
7338 | IWL_DEBUG_MAC80211("leave - queue >= AC_NUM %d\n", queue); | ||
7339 | return 0; | ||
7340 | } | ||
7341 | |||
7342 | #ifdef CONFIG_IWLWIFI_QOS | ||
7343 | if (!priv->qos_data.qos_enable) { | ||
7344 | priv->qos_data.qos_active = 0; | ||
7345 | IWL_DEBUG_MAC80211("leave - qos not enabled\n"); | ||
7346 | return 0; | ||
7347 | } | ||
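/* mac80211 passes queue 0 as the highest-priority AC, while the uCode
 * QOS parameter table is ordered the other way around, hence the
 * index reversal. */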
7348 | q = AC_NUM - 1 - queue; | ||
7349 | |||
7350 | spin_lock_irqsave(&priv->lock, flags); | ||
7351 | |||
7352 | priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min); | ||
7353 | priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max); | ||
7354 | priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; | ||
7355 | priv->qos_data.def_qos_parm.ac[q].edca_txop = | ||
7356 | cpu_to_le16((params->burst_time * 100)); | ||
7357 | |||
7358 | priv->qos_data.def_qos_parm.ac[q].reserved1 = 0; | ||
7359 | priv->qos_data.qos_active = 1; | ||
7360 | |||
7361 | spin_unlock_irqrestore(&priv->lock, flags); | ||
7362 | |||
7363 | mutex_lock(&priv->mutex); | ||
7364 | if (priv->iw_mode == IEEE80211_IF_TYPE_AP) | ||
7365 | iwl_activate_qos(priv, 1); | ||
7366 | else if (priv->assoc_id && iwl_is_associated(priv)) | ||
7367 | iwl_activate_qos(priv, 0); | ||
7368 | |||
7369 | mutex_unlock(&priv->mutex); | ||
7370 | |||
7371 | #endif /* CONFIG_IWLWIFI_QOS */ | ||
7372 | |||
7373 | IWL_DEBUG_MAC80211("leave\n"); | ||
7374 | return 0; | ||
7375 | } | ||
7376 | |||
7377 | static int iwl_mac_get_tx_stats(struct ieee80211_hw *hw, | ||
7378 | struct ieee80211_tx_queue_stats *stats) | ||
7379 | { | ||
7380 | struct iwl_priv *priv = hw->priv; | ||
7381 | int i, avail; | ||
7382 | struct iwl_tx_queue *txq; | ||
7383 | struct iwl_queue *q; | ||
7384 | unsigned long flags; | ||
7385 | |||
7386 | IWL_DEBUG_MAC80211("enter\n"); | ||
7387 | |||
7388 | if (!iwl_is_ready_rf(priv)) { | ||
7389 | IWL_DEBUG_MAC80211("leave - RF not ready\n"); | ||
7390 | return -EIO; | ||
7391 | } | ||
7392 | |||
7393 | spin_lock_irqsave(&priv->lock, flags); | ||
7394 | |||
7395 | for (i = 0; i < AC_NUM; i++) { | ||
7396 | txq = &priv->txq[i]; | ||
7397 | q = &txq->q; | ||
7398 | avail = iwl_queue_space(q); | ||
7399 | |||
7400 | stats->data[i].len = q->n_window - avail; | ||
7401 | stats->data[i].limit = q->n_window - q->high_mark; | ||
7402 | stats->data[i].count = q->n_window; | ||
7403 | |||
7404 | } | ||
7405 | spin_unlock_irqrestore(&priv->lock, flags); | ||
7406 | |||
7407 | IWL_DEBUG_MAC80211("leave\n"); | ||
7408 | |||
7409 | return 0; | ||
7410 | } | ||
7411 | |||
7412 | static int iwl_mac_get_stats(struct ieee80211_hw *hw, | ||
7413 | struct ieee80211_low_level_stats *stats) | ||
7414 | { | ||
7415 | IWL_DEBUG_MAC80211("enter\n"); | ||
7416 | IWL_DEBUG_MAC80211("leave\n"); | ||
7417 | |||
7418 | return 0; | ||
7419 | } | ||
7420 | |||
7421 | static u64 iwl_mac_get_tsf(struct ieee80211_hw *hw) | ||
7422 | { | ||
7423 | IWL_DEBUG_MAC80211("enter\n"); | ||
7424 | IWL_DEBUG_MAC80211("leave\n"); | ||
7425 | |||
7426 | return 0; | ||
7427 | } | ||
7428 | |||
7429 | static void iwl_mac_reset_tsf(struct ieee80211_hw *hw) | ||
7430 | { | ||
7431 | struct iwl_priv *priv = hw->priv; | ||
7432 | unsigned long flags; | ||
7433 | |||
7434 | mutex_lock(&priv->mutex); | ||
7435 | IWL_DEBUG_MAC80211("enter\n"); | ||
7436 | |||
7437 | #ifdef CONFIG_IWLWIFI_QOS | ||
7438 | iwl_reset_qos(priv); | ||
7439 | #endif | ||
7440 | cancel_delayed_work(&priv->post_associate); | ||
7441 | |||
7442 | spin_lock_irqsave(&priv->lock, flags); | ||
7443 | priv->assoc_id = 0; | ||
7444 | priv->assoc_capability = 0; | ||
7445 | priv->call_post_assoc_from_beacon = 0; | ||
7446 | |||
7447 | 	/* new association -- get rid of the old ibss beacon skb */ | ||
7448 | if (priv->ibss_beacon) | ||
7449 | dev_kfree_skb(priv->ibss_beacon); | ||
7450 | |||
7451 | priv->ibss_beacon = NULL; | ||
7452 | |||
7453 | priv->beacon_int = priv->hw->conf.beacon_int; | ||
7454 | priv->timestamp1 = 0; | ||
7455 | priv->timestamp0 = 0; | ||
7456 | 	if (priv->iw_mode == IEEE80211_IF_TYPE_STA) | ||
7457 | priv->beacon_int = 0; | ||
7458 | |||
7459 | spin_unlock_irqrestore(&priv->lock, flags); | ||
7460 | |||
7461 | /* Per mac80211.h: This is only used in IBSS mode... */ | ||
7462 | if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { | ||
7463 | IWL_DEBUG_MAC80211("leave - not in IBSS\n"); | ||
7464 | mutex_unlock(&priv->mutex); | ||
7465 | return; | ||
7466 | } | ||
7467 | |||
7468 | if (!iwl_is_ready_rf(priv)) { | ||
7469 | IWL_DEBUG_MAC80211("leave - not ready\n"); | ||
7470 | mutex_unlock(&priv->mutex); | ||
7471 | return; | ||
7472 | } | ||
7473 | |||
7474 | priv->only_active_channel = 0; | ||
7475 | |||
7476 | iwl_set_rate(priv); | ||
7477 | |||
7478 | mutex_unlock(&priv->mutex); | ||
7479 | |||
7480 | IWL_DEBUG_MAC80211("leave\n"); | ||
7481 | |||
7482 | } | ||
7483 | |||
7484 | static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, | ||
7485 | struct ieee80211_tx_control *control) | ||
7486 | { | ||
7487 | struct iwl_priv *priv = hw->priv; | ||
7488 | unsigned long flags; | ||
7489 | |||
7490 | mutex_lock(&priv->mutex); | ||
7491 | IWL_DEBUG_MAC80211("enter\n"); | ||
7492 | |||
7493 | if (!iwl_is_ready_rf(priv)) { | ||
7494 | IWL_DEBUG_MAC80211("leave - RF not ready\n"); | ||
7495 | mutex_unlock(&priv->mutex); | ||
7496 | return -EIO; | ||
7497 | } | ||
7498 | |||
7499 | if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { | ||
7500 | IWL_DEBUG_MAC80211("leave - not IBSS\n"); | ||
7501 | mutex_unlock(&priv->mutex); | ||
7502 | return -EIO; | ||
7503 | } | ||
7504 | |||
7505 | spin_lock_irqsave(&priv->lock, flags); | ||
7506 | |||
7507 | if (priv->ibss_beacon) | ||
7508 | dev_kfree_skb(priv->ibss_beacon); | ||
7509 | |||
7510 | priv->ibss_beacon = skb; | ||
7511 | |||
7512 | priv->assoc_id = 0; | ||
7513 | |||
7514 | IWL_DEBUG_MAC80211("leave\n"); | ||
7515 | spin_unlock_irqrestore(&priv->lock, flags); | ||
7516 | |||
7517 | #ifdef CONFIG_IWLWIFI_QOS | ||
7518 | iwl_reset_qos(priv); | ||
7519 | #endif | ||
7520 | |||
7521 | queue_work(priv->workqueue, &priv->post_associate.work); | ||
7522 | |||
7523 | mutex_unlock(&priv->mutex); | ||
7524 | |||
7525 | return 0; | ||
7526 | } | ||
7527 | |||
7528 | /***************************************************************************** | ||
7529 | * | ||
7530 | * sysfs attributes | ||
7531 | * | ||
7532 | *****************************************************************************/ | ||
7533 | |||
7534 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
7535 | |||
7536 | /* | ||
7537 | * The following adds a new attribute to the sysfs representation | ||
7538 | * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/) | ||
7539 | * used for controlling the debug level. | ||
7540 | * | ||
7541 | * See the level definitions in iwl for details. | ||
7542 | */ | ||
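/*
 * Illustrative usage from userspace (an added sketch, not part of the
 * original source): assuming the driver directory described above, the
 * attribute can be read and written with, for example,
 *
 *	cat /sys/bus/pci/drivers/iwl/debug_level
 *	echo 0x43fff > /sys/bus/pci/drivers/iwl/debug_level
 *
 * The value is parsed with simple_strtoul() below, so both hex (0x...)
 * and decimal forms are accepted; 0x43fff is only an example mask.
 */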
7543 | |||
7544 | static ssize_t show_debug_level(struct device_driver *d, char *buf) | ||
7545 | { | ||
7546 | return sprintf(buf, "0x%08X\n", iwl_debug_level); | ||
7547 | } | ||
7548 | static ssize_t store_debug_level(struct device_driver *d, | ||
7549 | const char *buf, size_t count) | ||
7550 | { | ||
7551 | char *p = (char *)buf; | ||
7552 | u32 val; | ||
7553 | |||
7554 | val = simple_strtoul(p, &p, 0); | ||
7555 | if (p == buf) | ||
7556 | printk(KERN_INFO DRV_NAME | ||
7557 | ": %s is not in hex or decimal form.\n", buf); | ||
7558 | else | ||
7559 | iwl_debug_level = val; | ||
7560 | |||
7561 | return strnlen(buf, count); | ||
7562 | } | ||
7563 | |||
7564 | static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, | ||
7565 | show_debug_level, store_debug_level); | ||
7566 | |||
7567 | #endif /* CONFIG_IWLWIFI_DEBUG */ | ||
7568 | |||
7569 | static ssize_t show_rf_kill(struct device *d, | ||
7570 | struct device_attribute *attr, char *buf) | ||
7571 | { | ||
7572 | /* | ||
7573 | * 0 - RF kill not enabled | ||
7574 | * 1 - SW based RF kill active (sysfs) | ||
7575 | * 2 - HW based RF kill active | ||
7576 | * 3 - Both HW and SW based RF kill active | ||
7577 | */ | ||
7578 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
7579 | int val = (test_bit(STATUS_RF_KILL_SW, &priv->status) ? 0x1 : 0x0) | | ||
7580 | (test_bit(STATUS_RF_KILL_HW, &priv->status) ? 0x2 : 0x0); | ||
7581 | |||
7582 | return sprintf(buf, "%i\n", val); | ||
7583 | } | ||
7584 | |||
7585 | static ssize_t store_rf_kill(struct device *d, | ||
7586 | struct device_attribute *attr, | ||
7587 | const char *buf, size_t count) | ||
7588 | { | ||
7589 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
7590 | |||
7591 | mutex_lock(&priv->mutex); | ||
7592 | iwl_radio_kill_sw(priv, buf[0] == '1'); | ||
7593 | mutex_unlock(&priv->mutex); | ||
7594 | |||
7595 | return count; | ||
7596 | } | ||
7597 | |||
7598 | static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill); | ||
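/*
 * Usage sketch (an added note, not part of the original source): reading
 * rf_kill from the device's sysfs directory reports the 0-3 encoding
 * documented in show_rf_kill(); writing a string whose first character is
 * '1' asserts software RF kill via iwl_radio_kill_sw(), anything else
 * releases it, e.g.
 *
 *	echo 1 > rf_kill	# assert SW RF kill
 *	echo 0 > rf_kill	# release SW RF kill
 */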
7599 | |||
7600 | static ssize_t show_temperature(struct device *d, | ||
7601 | struct device_attribute *attr, char *buf) | ||
7602 | { | ||
7603 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
7604 | |||
7605 | if (!iwl_is_alive(priv)) | ||
7606 | return -EAGAIN; | ||
7607 | |||
7608 | return sprintf(buf, "%d\n", iwl_hw_get_temperature(priv)); | ||
7609 | } | ||
7610 | |||
7611 | static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL); | ||
7612 | |||
7613 | static ssize_t show_rs_window(struct device *d, | ||
7614 | struct device_attribute *attr, | ||
7615 | char *buf) | ||
7616 | { | ||
7617 | struct iwl_priv *priv = d->driver_data; | ||
7618 | return iwl_fill_rs_info(priv->hw, buf, IWL_AP_ID); | ||
7619 | } | ||
7620 | static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL); | ||
7621 | |||
7622 | static ssize_t show_tx_power(struct device *d, | ||
7623 | struct device_attribute *attr, char *buf) | ||
7624 | { | ||
7625 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
7626 | return sprintf(buf, "%d\n", priv->user_txpower_limit); | ||
7627 | } | ||
7628 | |||
7629 | static ssize_t store_tx_power(struct device *d, | ||
7630 | struct device_attribute *attr, | ||
7631 | const char *buf, size_t count) | ||
7632 | { | ||
7633 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
7634 | char *p = (char *)buf; | ||
7635 | u32 val; | ||
7636 | |||
7637 | val = simple_strtoul(p, &p, 10); | ||
7638 | if (p == buf) | ||
7639 | printk(KERN_INFO DRV_NAME | ||
7640 | ": %s is not in decimal form.\n", buf); | ||
7641 | else | ||
7642 | iwl_hw_reg_set_txpower(priv, val); | ||
7643 | |||
7644 | return count; | ||
7645 | } | ||
7646 | |||
7647 | static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power); | ||
7648 | |||
7649 | static ssize_t show_flags(struct device *d, | ||
7650 | struct device_attribute *attr, char *buf) | ||
7651 | { | ||
7652 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
7653 | |||
7654 | 	return sprintf(buf, "0x%04X\n", le32_to_cpu(priv->active_rxon.flags)); | ||
7655 | } | ||
7656 | |||
7657 | static ssize_t store_flags(struct device *d, | ||
7658 | struct device_attribute *attr, | ||
7659 | const char *buf, size_t count) | ||
7660 | { | ||
7661 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
7662 | u32 flags = simple_strtoul(buf, NULL, 0); | ||
7663 | |||
7664 | mutex_lock(&priv->mutex); | ||
7665 | if (le32_to_cpu(priv->staging_rxon.flags) != flags) { | ||
7666 | /* Cancel any currently running scans... */ | ||
7667 | if (iwl_scan_cancel_timeout(priv, 100)) | ||
7668 | IWL_WARNING("Could not cancel scan.\n"); | ||
7669 | else { | ||
7670 | IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n", | ||
7671 | flags); | ||
7672 | priv->staging_rxon.flags = cpu_to_le32(flags); | ||
7673 | iwl_commit_rxon(priv); | ||
7674 | } | ||
7675 | } | ||
7676 | mutex_unlock(&priv->mutex); | ||
7677 | |||
7678 | return count; | ||
7679 | } | ||
7680 | |||
7681 | static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags); | ||
7682 | |||
7683 | static ssize_t show_filter_flags(struct device *d, | ||
7684 | struct device_attribute *attr, char *buf) | ||
7685 | { | ||
7686 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
7687 | |||
7688 | return sprintf(buf, "0x%04X\n", | ||
7689 | le32_to_cpu(priv->active_rxon.filter_flags)); | ||
7690 | } | ||
7691 | |||
7692 | static ssize_t store_filter_flags(struct device *d, | ||
7693 | struct device_attribute *attr, | ||
7694 | const char *buf, size_t count) | ||
7695 | { | ||
7696 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
7697 | u32 filter_flags = simple_strtoul(buf, NULL, 0); | ||
7698 | |||
7699 | mutex_lock(&priv->mutex); | ||
7700 | if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) { | ||
7701 | /* Cancel any currently running scans... */ | ||
7702 | if (iwl_scan_cancel_timeout(priv, 100)) | ||
7703 | IWL_WARNING("Could not cancel scan.\n"); | ||
7704 | else { | ||
7705 | IWL_DEBUG_INFO("Committing rxon.filter_flags = " | ||
7706 | "0x%04X\n", filter_flags); | ||
7707 | priv->staging_rxon.filter_flags = | ||
7708 | cpu_to_le32(filter_flags); | ||
7709 | iwl_commit_rxon(priv); | ||
7710 | } | ||
7711 | } | ||
7712 | mutex_unlock(&priv->mutex); | ||
7713 | |||
7714 | return count; | ||
7715 | } | ||
7716 | |||
7717 | static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, | ||
7718 | store_filter_flags); | ||
7719 | |||
7720 | static ssize_t show_tune(struct device *d, | ||
7721 | struct device_attribute *attr, char *buf) | ||
7722 | { | ||
7723 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
7724 | |||
7725 | return sprintf(buf, "0x%04X\n", | ||
7726 | (priv->phymode << 8) | | ||
7727 | le16_to_cpu(priv->active_rxon.channel)); | ||
7728 | } | ||
7729 | |||
7730 | static void iwl_set_flags_for_phymode(struct iwl_priv *priv, u8 phymode); | ||
7731 | |||
7732 | static ssize_t store_tune(struct device *d, | ||
7733 | struct device_attribute *attr, | ||
7734 | const char *buf, size_t count) | ||
7735 | { | ||
7736 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
7737 | char *p = (char *)buf; | ||
7738 | u16 tune = simple_strtoul(p, &p, 0); | ||
7739 | u8 phymode = (tune >> 8) & 0xff; | ||
7740 | u16 channel = tune & 0xff; | ||
7741 | |||
7742 | IWL_DEBUG_INFO("Tune request to:%d channel:%d\n", phymode, channel); | ||
7743 | |||
7744 | mutex_lock(&priv->mutex); | ||
7745 | if ((le16_to_cpu(priv->staging_rxon.channel) != channel) || | ||
7746 | (priv->phymode != phymode)) { | ||
7747 | const struct iwl_channel_info *ch_info; | ||
7748 | |||
7749 | ch_info = iwl_get_channel_info(priv, phymode, channel); | ||
7750 | if (!ch_info) { | ||
7751 | IWL_WARNING("Requested invalid phymode/channel " | ||
7752 | "combination: %d %d\n", phymode, channel); | ||
7753 | mutex_unlock(&priv->mutex); | ||
7754 | return -EINVAL; | ||
7755 | } | ||
7756 | |||
7757 | /* Cancel any currently running scans... */ | ||
7758 | if (iwl_scan_cancel_timeout(priv, 100)) | ||
7759 | IWL_WARNING("Could not cancel scan.\n"); | ||
7760 | else { | ||
7761 | IWL_DEBUG_INFO("Committing phymode and " | ||
7762 | "rxon.channel = %d %d\n", | ||
7763 | phymode, channel); | ||
7764 | |||
7765 | iwl_set_rxon_channel(priv, phymode, channel); | ||
7766 | iwl_set_flags_for_phymode(priv, phymode); | ||
7767 | |||
7768 | iwl_set_rate(priv); | ||
7769 | iwl_commit_rxon(priv); | ||
7770 | } | ||
7771 | } | ||
7772 | mutex_unlock(&priv->mutex); | ||
7773 | |||
7774 | return count; | ||
7775 | } | ||
7776 | |||
7777 | static DEVICE_ATTR(tune, S_IWUSR | S_IRUGO, show_tune, store_tune); | ||
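/*
 * Usage sketch (an added note, not part of the original source):
 * store_tune() above packs the phymode into the high byte and the channel
 * number into the low byte of a single value, so writing e.g. 0x0206
 * requests phymode 2 on channel 6.  The phymode numbering follows the
 * MODE_IEEE80211* values used elsewhere in this file; 0x0206 is only an
 * illustration, not a recommended setting.
 */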
7778 | |||
7779 | #ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT | ||
7780 | |||
7781 | static ssize_t show_measurement(struct device *d, | ||
7782 | struct device_attribute *attr, char *buf) | ||
7783 | { | ||
7784 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
7785 | struct iwl_spectrum_notification measure_report; | ||
7786 | u32 size = sizeof(measure_report), len = 0, ofs = 0; | ||
7787 | 	u8 *data = (u8 *)&measure_report; | ||
7788 | unsigned long flags; | ||
7789 | |||
7790 | spin_lock_irqsave(&priv->lock, flags); | ||
7791 | if (!(priv->measurement_status & MEASUREMENT_READY)) { | ||
7792 | spin_unlock_irqrestore(&priv->lock, flags); | ||
7793 | return 0; | ||
7794 | } | ||
7795 | memcpy(&measure_report, &priv->measure_report, size); | ||
7796 | priv->measurement_status = 0; | ||
7797 | spin_unlock_irqrestore(&priv->lock, flags); | ||
7798 | |||
7799 | while (size && (PAGE_SIZE - len)) { | ||
7800 | hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, | ||
7801 | PAGE_SIZE - len, 1); | ||
7802 | len = strlen(buf); | ||
7803 | if (PAGE_SIZE - len) | ||
7804 | buf[len++] = '\n'; | ||
7805 | |||
7806 | ofs += 16; | ||
7807 | size -= min(size, 16U); | ||
7808 | } | ||
7809 | |||
7810 | return len; | ||
7811 | } | ||
7812 | |||
7813 | static ssize_t store_measurement(struct device *d, | ||
7814 | struct device_attribute *attr, | ||
7815 | const char *buf, size_t count) | ||
7816 | { | ||
7817 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
7818 | struct ieee80211_measurement_params params = { | ||
7819 | .channel = le16_to_cpu(priv->active_rxon.channel), | ||
7820 | .start_time = cpu_to_le64(priv->last_tsf), | ||
7821 | .duration = cpu_to_le16(1), | ||
7822 | }; | ||
7823 | u8 type = IWL_MEASURE_BASIC; | ||
7824 | u8 buffer[32]; | ||
7825 | u8 channel; | ||
7826 | |||
7827 | if (count) { | ||
7828 | char *p = buffer; | ||
7829 | 		strlcpy(buffer, buf, min(sizeof(buffer), count + 1)); | ||
7830 | channel = simple_strtoul(p, NULL, 0); | ||
7831 | if (channel) | ||
7832 | params.channel = channel; | ||
7833 | |||
7834 | p = buffer; | ||
7835 | while (*p && *p != ' ') | ||
7836 | p++; | ||
7837 | if (*p) | ||
7838 | type = simple_strtoul(p + 1, NULL, 0); | ||
7839 | } | ||
7840 | |||
7841 | IWL_DEBUG_INFO("Invoking measurement of type %d on " | ||
7842 | "channel %d (for '%s')\n", type, params.channel, buf); | ||
7843 | iwl_get_measurement(priv, ¶ms, type); | ||
7844 | |||
7845 | return count; | ||
7846 | } | ||
7847 | |||
7848 | static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, | ||
7849 | show_measurement, store_measurement); | ||
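/*
 * Usage sketch (an added note, not part of the original source):
 * store_measurement() above accepts "<channel> [<type>]", both parsed with
 * simple_strtoul(), so e.g.
 *
 *	echo 6 > measurement
 *
 * requests a measurement of the default type (IWL_MEASURE_BASIC) on
 * channel 6.  Once the MEASUREMENT_READY bit is set in
 * priv->measurement_status, reading the attribute returns a hex dump of
 * the stored report.  The channel number here is purely illustrative.
 */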
7850 | #endif /* CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT */ | ||
7851 | |||
7852 | static ssize_t show_rate(struct device *d, | ||
7853 | struct device_attribute *attr, char *buf) | ||
7854 | { | ||
7855 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
7856 | unsigned long flags; | ||
7857 | int i; | ||
7858 | |||
7859 | spin_lock_irqsave(&priv->sta_lock, flags); | ||
7860 | if (priv->iw_mode == IEEE80211_IF_TYPE_STA) | ||
7861 | i = priv->stations[IWL_AP_ID].current_rate.s.rate; | ||
7862 | else | ||
7863 | i = priv->stations[IWL_STA_ID].current_rate.s.rate; | ||
7864 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
7865 | |||
7866 | i = iwl_rate_index_from_plcp(i); | ||
7867 | if (i == -1) | ||
7868 | return sprintf(buf, "0\n"); | ||
7869 | |||
7870 | return sprintf(buf, "%d%s\n", | ||
7871 | (iwl_rates[i].ieee >> 1), | ||
7872 | (iwl_rates[i].ieee & 0x1) ? ".5" : ""); | ||
7873 | } | ||
7874 | |||
7875 | static DEVICE_ATTR(rate, S_IRUSR, show_rate, NULL); | ||
7876 | |||
7877 | static ssize_t store_retry_rate(struct device *d, | ||
7878 | struct device_attribute *attr, | ||
7879 | const char *buf, size_t count) | ||
7880 | { | ||
7881 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
7882 | |||
7883 | priv->retry_rate = simple_strtoul(buf, NULL, 0); | ||
7884 | if (priv->retry_rate <= 0) | ||
7885 | priv->retry_rate = 1; | ||
7886 | |||
7887 | return count; | ||
7888 | } | ||
7889 | |||
7890 | static ssize_t show_retry_rate(struct device *d, | ||
7891 | struct device_attribute *attr, char *buf) | ||
7892 | { | ||
7893 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
7894 | 	return sprintf(buf, "%d\n", priv->retry_rate); | ||
7895 | } | ||
7896 | |||
7897 | static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate, | ||
7898 | store_retry_rate); | ||
7899 | |||
7900 | static ssize_t store_power_level(struct device *d, | ||
7901 | struct device_attribute *attr, | ||
7902 | const char *buf, size_t count) | ||
7903 | { | ||
7904 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
7905 | int rc; | ||
7906 | int mode; | ||
7907 | |||
7908 | mode = simple_strtoul(buf, NULL, 0); | ||
7909 | mutex_lock(&priv->mutex); | ||
7910 | |||
7911 | if (!iwl_is_ready(priv)) { | ||
7912 | rc = -EAGAIN; | ||
7913 | goto out; | ||
7914 | } | ||
7915 | |||
7916 | if ((mode < 1) || (mode > IWL_POWER_LIMIT) || (mode == IWL_POWER_AC)) | ||
7917 | mode = IWL_POWER_AC; | ||
7918 | else | ||
7919 | mode |= IWL_POWER_ENABLED; | ||
7920 | |||
7921 | if (mode != priv->power_mode) { | ||
7922 | rc = iwl_send_power_mode(priv, IWL_POWER_LEVEL(mode)); | ||
7923 | if (rc) { | ||
7924 | IWL_DEBUG_MAC80211("failed setting power mode.\n"); | ||
7925 | goto out; | ||
7926 | } | ||
7927 | priv->power_mode = mode; | ||
7928 | } | ||
7929 | |||
7930 | rc = count; | ||
7931 | |||
7932 | out: | ||
7933 | mutex_unlock(&priv->mutex); | ||
7934 | return rc; | ||
7935 | } | ||
7936 | |||
7937 | #define MAX_WX_STRING 80 | ||
7938 | |||
7939 | /* Values are in microseconds */ | ||
7940 | static const s32 timeout_duration[] = { | ||
7941 | 350000, | ||
7942 | 250000, | ||
7943 | 75000, | ||
7944 | 37000, | ||
7945 | 25000, | ||
7946 | }; | ||
7947 | static const s32 period_duration[] = { | ||
7948 | 400000, | ||
7949 | 700000, | ||
7950 | 1000000, | ||
7951 | 1000000, | ||
7952 | 1000000 | ||
7953 | }; | ||
7954 | |||
7955 | static ssize_t show_power_level(struct device *d, | ||
7956 | struct device_attribute *attr, char *buf) | ||
7957 | { | ||
7958 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
7959 | int level = IWL_POWER_LEVEL(priv->power_mode); | ||
7960 | char *p = buf; | ||
7961 | |||
7962 | p += sprintf(p, "%d ", level); | ||
7963 | switch (level) { | ||
7964 | case IWL_POWER_MODE_CAM: | ||
7965 | case IWL_POWER_AC: | ||
7966 | p += sprintf(p, "(AC)"); | ||
7967 | break; | ||
7968 | case IWL_POWER_BATTERY: | ||
7969 | p += sprintf(p, "(BATTERY)"); | ||
7970 | break; | ||
7971 | default: | ||
7972 | p += sprintf(p, | ||
7973 | "(Timeout %dms, Period %dms)", | ||
7974 | timeout_duration[level - 1] / 1000, | ||
7975 | period_duration[level - 1] / 1000); | ||
7976 | } | ||
7977 | |||
7978 | if (!(priv->power_mode & IWL_POWER_ENABLED)) | ||
7979 | p += sprintf(p, " OFF\n"); | ||
7980 | else | ||
7981 | p += sprintf(p, " \n"); | ||
7982 | |||
7983 | return (p - buf + 1); | ||
7984 | |||
7985 | } | ||
7986 | |||
7987 | static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level, | ||
7988 | store_power_level); | ||
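/*
 * Usage sketch (an added note, not part of the original source): writing a
 * small integer selects a power-save level (1..IWL_POWER_LIMIT, with
 * out-of-range values falling back to IWL_POWER_AC), and reading the
 * attribute back reports the level plus "(AC)", "(BATTERY)" or the
 * timeout/period pair from the tables above, e.g.
 *
 *	echo 3 > power_level
 *	cat power_level
 *
 * The level shown here is only an example.
 */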
7989 | |||
7990 | static ssize_t show_channels(struct device *d, | ||
7991 | struct device_attribute *attr, char *buf) | ||
7992 | { | ||
7993 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
7994 | int len = 0, i; | ||
7995 | struct ieee80211_channel *channels = NULL; | ||
7996 | const struct ieee80211_hw_mode *hw_mode = NULL; | ||
7997 | int count = 0; | ||
7998 | |||
7999 | if (!iwl_is_ready(priv)) | ||
8000 | return -EAGAIN; | ||
8001 | |||
8002 | hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211G); | ||
8003 | if (!hw_mode) | ||
8004 | hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211B); | ||
8005 | if (hw_mode) { | ||
8006 | channels = hw_mode->channels; | ||
8007 | count = hw_mode->num_channels; | ||
8008 | } | ||
8009 | |||
8010 | len += | ||
8011 | sprintf(&buf[len], | ||
8012 | "Displaying %d channels in 2.4GHz band " | ||
8013 | "(802.11bg):\n", count); | ||
8014 | |||
8015 | for (i = 0; i < count; i++) | ||
8016 | len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n", | ||
8017 | channels[i].chan, | ||
8018 | channels[i].power_level, | ||
8019 | channels[i]. | ||
8020 | flag & IEEE80211_CHAN_W_RADAR_DETECT ? | ||
8021 | " (IEEE 802.11h required)" : "", | ||
8022 | (!(channels[i].flag & IEEE80211_CHAN_W_IBSS) | ||
8023 | || (channels[i]. | ||
8024 | flag & | ||
8025 | IEEE80211_CHAN_W_RADAR_DETECT)) ? "" : | ||
8026 | ", IBSS", | ||
8027 | channels[i]. | ||
8028 | flag & IEEE80211_CHAN_W_ACTIVE_SCAN ? | ||
8029 | "active/passive" : "passive only"); | ||
8030 | |||
8031 | hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211A); | ||
8032 | if (hw_mode) { | ||
8033 | channels = hw_mode->channels; | ||
8034 | count = hw_mode->num_channels; | ||
8035 | } else { | ||
8036 | channels = NULL; | ||
8037 | count = 0; | ||
8038 | } | ||
8039 | |||
8040 | len += sprintf(&buf[len], "Displaying %d channels in 5.2GHz band " | ||
8041 | "(802.11a):\n", count); | ||
8042 | |||
8043 | for (i = 0; i < count; i++) | ||
8044 | len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n", | ||
8045 | channels[i].chan, | ||
8046 | channels[i].power_level, | ||
8047 | channels[i]. | ||
8048 | flag & IEEE80211_CHAN_W_RADAR_DETECT ? | ||
8049 | " (IEEE 802.11h required)" : "", | ||
8050 | (!(channels[i].flag & IEEE80211_CHAN_W_IBSS) | ||
8051 | || (channels[i]. | ||
8052 | flag & | ||
8053 | IEEE80211_CHAN_W_RADAR_DETECT)) ? "" : | ||
8054 | ", IBSS", | ||
8055 | channels[i]. | ||
8056 | flag & IEEE80211_CHAN_W_ACTIVE_SCAN ? | ||
8057 | "active/passive" : "passive only"); | ||
8058 | |||
8059 | return len; | ||
8060 | } | ||
8061 | |||
8062 | static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL); | ||
8063 | |||
8064 | static ssize_t show_statistics(struct device *d, | ||
8065 | struct device_attribute *attr, char *buf) | ||
8066 | { | ||
8067 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
8068 | u32 size = sizeof(struct iwl_notif_statistics); | ||
8069 | u32 len = 0, ofs = 0; | ||
8070 | 	u8 *data = (u8 *)&priv->statistics; | ||
8071 | int rc = 0; | ||
8072 | |||
8073 | if (!iwl_is_alive(priv)) | ||
8074 | return -EAGAIN; | ||
8075 | |||
8076 | mutex_lock(&priv->mutex); | ||
8077 | rc = iwl_send_statistics_request(priv); | ||
8078 | mutex_unlock(&priv->mutex); | ||
8079 | |||
8080 | if (rc) { | ||
8081 | len = sprintf(buf, | ||
8082 | "Error sending statistics request: 0x%08X\n", rc); | ||
8083 | return len; | ||
8084 | } | ||
8085 | |||
8086 | while (size && (PAGE_SIZE - len)) { | ||
8087 | hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, | ||
8088 | PAGE_SIZE - len, 1); | ||
8089 | len = strlen(buf); | ||
8090 | if (PAGE_SIZE - len) | ||
8091 | buf[len++] = '\n'; | ||
8092 | |||
8093 | ofs += 16; | ||
8094 | size -= min(size, 16U); | ||
8095 | } | ||
8096 | |||
8097 | return len; | ||
8098 | } | ||
8099 | |||
8100 | static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL); | ||
8101 | |||
8102 | static ssize_t show_antenna(struct device *d, | ||
8103 | struct device_attribute *attr, char *buf) | ||
8104 | { | ||
8105 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
8106 | |||
8107 | if (!iwl_is_alive(priv)) | ||
8108 | return -EAGAIN; | ||
8109 | |||
8110 | return sprintf(buf, "%d\n", priv->antenna); | ||
8111 | } | ||
8112 | |||
8113 | static ssize_t store_antenna(struct device *d, | ||
8114 | struct device_attribute *attr, | ||
8115 | const char *buf, size_t count) | ||
8116 | { | ||
8117 | int ant; | ||
8118 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
8119 | |||
8120 | if (count == 0) | ||
8121 | return 0; | ||
8122 | |||
8123 | if (sscanf(buf, "%1i", &ant) != 1) { | ||
8124 | IWL_DEBUG_INFO("not in hex or decimal form.\n"); | ||
8125 | return count; | ||
8126 | } | ||
8127 | |||
8128 | if ((ant >= 0) && (ant <= 2)) { | ||
8129 | IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant); | ||
8130 | priv->antenna = (enum iwl_antenna)ant; | ||
8131 | } else | ||
8132 | IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant); | ||
8133 | |||
8134 | |||
8135 | return count; | ||
8136 | } | ||
8137 | |||
8138 | static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna); | ||
8139 | |||
8140 | static ssize_t show_status(struct device *d, | ||
8141 | struct device_attribute *attr, char *buf) | ||
8142 | { | ||
8143 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
8144 | if (!iwl_is_alive(priv)) | ||
8145 | return -EAGAIN; | ||
8146 | return sprintf(buf, "0x%08x\n", (int)priv->status); | ||
8147 | } | ||
8148 | |||
8149 | static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); | ||
8150 | |||
8151 | static ssize_t dump_error_log(struct device *d, | ||
8152 | struct device_attribute *attr, | ||
8153 | const char *buf, size_t count) | ||
8154 | { | ||
8155 | char *p = (char *)buf; | ||
8156 | |||
8157 | if (p[0] == '1') | ||
8158 | iwl_dump_nic_error_log((struct iwl_priv *)d->driver_data); | ||
8159 | |||
8160 | return strnlen(buf, count); | ||
8161 | } | ||
8162 | |||
8163 | static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log); | ||
8164 | |||
8165 | static ssize_t dump_event_log(struct device *d, | ||
8166 | struct device_attribute *attr, | ||
8167 | const char *buf, size_t count) | ||
8168 | { | ||
8169 | char *p = (char *)buf; | ||
8170 | |||
8171 | if (p[0] == '1') | ||
8172 | iwl_dump_nic_event_log((struct iwl_priv *)d->driver_data); | ||
8173 | |||
8174 | return strnlen(buf, count); | ||
8175 | } | ||
8176 | |||
8177 | static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log); | ||
8178 | |||
8179 | /***************************************************************************** | ||
8180 | * | ||
8181 | * driver setup and teardown | ||
8182 | * | ||
8183 | *****************************************************************************/ | ||
8184 | |||
8185 | static void iwl_setup_deferred_work(struct iwl_priv *priv) | ||
8186 | { | ||
8187 | priv->workqueue = create_workqueue(DRV_NAME); | ||
8188 | |||
8189 | init_waitqueue_head(&priv->wait_command_queue); | ||
8190 | |||
8191 | INIT_WORK(&priv->up, iwl_bg_up); | ||
8192 | INIT_WORK(&priv->restart, iwl_bg_restart); | ||
8193 | INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish); | ||
8194 | INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); | ||
8195 | INIT_WORK(&priv->request_scan, iwl_bg_request_scan); | ||
8196 | INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); | ||
8197 | INIT_WORK(&priv->rf_kill, iwl_bg_rf_kill); | ||
8198 | INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update); | ||
8199 | INIT_DELAYED_WORK(&priv->post_associate, iwl_bg_post_associate); | ||
8200 | INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start); | ||
8201 | INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start); | ||
8202 | INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); | ||
8203 | |||
8204 | iwl_hw_setup_deferred_work(priv); | ||
8205 | |||
8206 | tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) | ||
8207 | iwl_irq_tasklet, (unsigned long)priv); | ||
8208 | } | ||
8209 | |||
8210 | static void iwl_cancel_deferred_work(struct iwl_priv *priv) | ||
8211 | { | ||
8212 | iwl_hw_cancel_deferred_work(priv); | ||
8213 | |||
8214 | cancel_delayed_work(&priv->scan_check); | ||
8215 | cancel_delayed_work(&priv->alive_start); | ||
8216 | cancel_delayed_work(&priv->post_associate); | ||
8217 | cancel_work_sync(&priv->beacon_update); | ||
8218 | } | ||
8219 | |||
8220 | static struct attribute *iwl_sysfs_entries[] = { | ||
8221 | &dev_attr_antenna.attr, | ||
8222 | &dev_attr_channels.attr, | ||
8223 | &dev_attr_dump_errors.attr, | ||
8224 | &dev_attr_dump_events.attr, | ||
8225 | &dev_attr_flags.attr, | ||
8226 | &dev_attr_filter_flags.attr, | ||
8227 | #ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT | ||
8228 | &dev_attr_measurement.attr, | ||
8229 | #endif | ||
8230 | &dev_attr_power_level.attr, | ||
8231 | &dev_attr_rate.attr, | ||
8232 | &dev_attr_retry_rate.attr, | ||
8233 | &dev_attr_rf_kill.attr, | ||
8234 | &dev_attr_rs_window.attr, | ||
8235 | &dev_attr_statistics.attr, | ||
8236 | &dev_attr_status.attr, | ||
8237 | &dev_attr_temperature.attr, | ||
8238 | &dev_attr_tune.attr, | ||
8239 | &dev_attr_tx_power.attr, | ||
8240 | |||
8241 | NULL | ||
8242 | }; | ||
8243 | |||
8244 | static struct attribute_group iwl_attribute_group = { | ||
8245 | .name = NULL, /* put in device directory */ | ||
8246 | .attrs = iwl_sysfs_entries, | ||
8247 | }; | ||
8248 | |||
8249 | static struct ieee80211_ops iwl_hw_ops = { | ||
8250 | .tx = iwl_mac_tx, | ||
8251 | .open = iwl_mac_open, | ||
8252 | .stop = iwl_mac_stop, | ||
8253 | .add_interface = iwl_mac_add_interface, | ||
8254 | .remove_interface = iwl_mac_remove_interface, | ||
8255 | .config = iwl_mac_config, | ||
8256 | .config_interface = iwl_mac_config_interface, | ||
8257 | .set_key = iwl_mac_set_key, | ||
8258 | .get_stats = iwl_mac_get_stats, | ||
8259 | .get_tx_stats = iwl_mac_get_tx_stats, | ||
8260 | .conf_tx = iwl_mac_conf_tx, | ||
8261 | .get_tsf = iwl_mac_get_tsf, | ||
8262 | .reset_tsf = iwl_mac_reset_tsf, | ||
8263 | .beacon_update = iwl_mac_beacon_update, | ||
8264 | .hw_scan = iwl_mac_hw_scan | ||
8265 | }; | ||
8266 | |||
8267 | static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
8268 | { | ||
8269 | int err = 0; | ||
8270 | u32 pci_id; | ||
8271 | struct iwl_priv *priv; | ||
8272 | struct ieee80211_hw *hw; | ||
8273 | int i; | ||
8274 | |||
8275 | if (iwl_param_disable_hw_scan) { | ||
8276 | IWL_DEBUG_INFO("Disabling hw_scan\n"); | ||
8277 | iwl_hw_ops.hw_scan = NULL; | ||
8278 | } | ||
8279 | |||
8280 | if ((iwl_param_queues_num > IWL_MAX_NUM_QUEUES) || | ||
8281 | (iwl_param_queues_num < IWL_MIN_NUM_QUEUES)) { | ||
8282 | IWL_ERROR("invalid queues_num, should be between %d and %d\n", | ||
8283 | IWL_MIN_NUM_QUEUES, IWL_MAX_NUM_QUEUES); | ||
8284 | err = -EINVAL; | ||
8285 | goto out; | ||
8286 | } | ||
8287 | |||
8288 | /* mac80211 allocates memory for this device instance, including | ||
8289 | * space for this driver's private structure */ | ||
8290 | hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwl_hw_ops); | ||
8291 | if (hw == NULL) { | ||
8292 | IWL_ERROR("Can not allocate network device\n"); | ||
8293 | err = -ENOMEM; | ||
8294 | goto out; | ||
8295 | } | ||
8296 | SET_IEEE80211_DEV(hw, &pdev->dev); | ||
8297 | |||
8298 | IWL_DEBUG_INFO("*** LOAD DRIVER ***\n"); | ||
8299 | priv = hw->priv; | ||
8300 | priv->hw = hw; | ||
8301 | |||
8302 | priv->pci_dev = pdev; | ||
8303 | priv->antenna = (enum iwl_antenna)iwl_param_antenna; | ||
8304 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
8305 | iwl_debug_level = iwl_param_debug; | ||
8306 | atomic_set(&priv->restrict_refcnt, 0); | ||
8307 | #endif | ||
8308 | priv->retry_rate = 1; | ||
8309 | |||
8310 | priv->ibss_beacon = NULL; | ||
8311 | |||
8312 | /* Tell mac80211 and its clients (e.g. Wireless Extensions) | ||
8313 | * the range of signal quality values that we'll provide. | ||
8314 | * Negative values for level/noise indicate that we'll provide dBm. | ||
8315 | * For WE, at least, non-0 values here *enable* display of values | ||
8316 | * in app (iwconfig). */ | ||
8317 | hw->max_rssi = -20; /* signal level, negative indicates dBm */ | ||
8318 | hw->max_noise = -20; /* noise level, negative indicates dBm */ | ||
8319 | hw->max_signal = 100; /* link quality indication (%) */ | ||
8320 | |||
8321 | /* Tell mac80211 our Tx characteristics */ | ||
8322 | hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE; | ||
8323 | |||
8324 | hw->queues = 4; | ||
8325 | |||
8326 | spin_lock_init(&priv->lock); | ||
8327 | spin_lock_init(&priv->power_data.lock); | ||
8328 | spin_lock_init(&priv->sta_lock); | ||
8329 | spin_lock_init(&priv->hcmd_lock); | ||
8330 | |||
8331 | for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) | ||
8332 | INIT_LIST_HEAD(&priv->ibss_mac_hash[i]); | ||
8333 | |||
8334 | INIT_LIST_HEAD(&priv->free_frames); | ||
8335 | |||
8336 | mutex_init(&priv->mutex); | ||
8337 | if (pci_enable_device(pdev)) { | ||
8338 | err = -ENODEV; | ||
8339 | goto out_ieee80211_free_hw; | ||
8340 | } | ||
8341 | |||
8342 | pci_set_master(pdev); | ||
8343 | |||
8344 | iwl_clear_stations_table(priv); | ||
8345 | |||
8346 | priv->data_retry_limit = -1; | ||
8347 | priv->ieee_channels = NULL; | ||
8348 | priv->ieee_rates = NULL; | ||
8349 | priv->phymode = -1; | ||
8350 | |||
8351 | err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | ||
8352 | if (!err) | ||
8353 | err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | ||
8354 | if (err) { | ||
8355 | printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n"); | ||
8356 | goto out_pci_disable_device; | ||
8357 | } | ||
8358 | |||
8359 | pci_set_drvdata(pdev, priv); | ||
8360 | err = pci_request_regions(pdev, DRV_NAME); | ||
8361 | if (err) | ||
8362 | goto out_pci_disable_device; | ||
8363 | /* We disable the RETRY_TIMEOUT register (0x41) to keep | ||
8364 | * PCI Tx retries from interfering with C3 CPU state */ | ||
8365 | pci_write_config_byte(pdev, 0x41, 0x00); | ||
8366 | priv->hw_base = pci_iomap(pdev, 0, 0); | ||
8367 | if (!priv->hw_base) { | ||
8368 | err = -ENODEV; | ||
8369 | goto out_pci_release_regions; | ||
8370 | } | ||
8371 | |||
8372 | IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n", | ||
8373 | (unsigned long long) pci_resource_len(pdev, 0)); | ||
8374 | IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base); | ||
8375 | |||
8376 | /* Initialize module parameter values here */ | ||
8377 | |||
8378 | if (iwl_param_disable) { | ||
8379 | set_bit(STATUS_RF_KILL_SW, &priv->status); | ||
8380 | IWL_DEBUG_INFO("Radio disabled.\n"); | ||
8381 | } | ||
8382 | |||
8383 | priv->iw_mode = IEEE80211_IF_TYPE_STA; | ||
8384 | |||
8385 | pci_id = | ||
8386 | (priv->pci_dev->device << 16) | priv->pci_dev->subsystem_device; | ||
8387 | |||
8388 | switch (pci_id) { | ||
8389 | case 0x42221005: /* 0x4222 0x8086 0x1005 is BG SKU */ | ||
8390 | case 0x42221034: /* 0x4222 0x8086 0x1034 is BG SKU */ | ||
8391 | case 0x42271014: /* 0x4227 0x8086 0x1014 is BG SKU */ | ||
8392 | case 0x42221044: /* 0x4222 0x8086 0x1044 is BG SKU */ | ||
8393 | priv->is_abg = 0; | ||
8394 | break; | ||
8395 | |||
8396 | /* | ||
8397 | 	 * The rest are assumed to be ABG SKUs -- if this is | ||
8398 | 	 * not the case, the card will get the wrong 'Detected' | ||
8399 | 	 * line in the kernel log; however, the code that | ||
8400 | 	 * initializes the GEO table will detect no A-band | ||
8401 | 	 * channels and clear the is_abg flag. | ||
8402 | */ | ||
8403 | default: | ||
8404 | priv->is_abg = 1; | ||
8405 | break; | ||
8406 | } | ||
8407 | |||
8408 | printk(KERN_INFO DRV_NAME | ||
8409 | ": Detected Intel PRO/Wireless 3945%sBG Network Connection\n", | ||
8410 | priv->is_abg ? "A" : ""); | ||
8411 | |||
8412 | /* Device-specific setup */ | ||
8413 | if (iwl_hw_set_hw_setting(priv)) { | ||
8414 | IWL_ERROR("failed to set hw settings\n"); | ||
8416 | goto out_iounmap; | ||
8417 | } | ||
8418 | |||
8419 | #ifdef CONFIG_IWLWIFI_QOS | ||
8420 | if (iwl_param_qos_enable) | ||
8421 | priv->qos_data.qos_enable = 1; | ||
8422 | |||
8423 | iwl_reset_qos(priv); | ||
8424 | |||
8425 | priv->qos_data.qos_active = 0; | ||
8426 | priv->qos_data.qos_cap.val = 0; | ||
8427 | #endif /* CONFIG_IWLWIFI_QOS */ | ||
8428 | |||
8429 | iwl_set_rxon_channel(priv, MODE_IEEE80211G, 6); | ||
8430 | iwl_setup_deferred_work(priv); | ||
8431 | iwl_setup_rx_handlers(priv); | ||
8432 | |||
8433 | priv->rates_mask = IWL_RATES_MASK; | ||
8434 | /* If power management is turned on, default to AC mode */ | ||
8435 | priv->power_mode = IWL_POWER_AC; | ||
8436 | priv->user_txpower_limit = IWL_DEFAULT_TX_POWER; | ||
8437 | |||
8438 | pci_enable_msi(pdev); | ||
8439 | |||
8440 | err = request_irq(pdev->irq, iwl_isr, IRQF_SHARED, DRV_NAME, priv); | ||
8441 | if (err) { | ||
8442 | IWL_ERROR("Error allocating IRQ %d\n", pdev->irq); | ||
8443 | goto out_disable_msi; | ||
8444 | } | ||
8445 | |||
8446 | mutex_lock(&priv->mutex); | ||
8447 | |||
8448 | err = sysfs_create_group(&pdev->dev.kobj, &iwl_attribute_group); | ||
8449 | if (err) { | ||
8450 | IWL_ERROR("failed to create sysfs device attributes\n"); | ||
8451 | mutex_unlock(&priv->mutex); | ||
8452 | goto out_release_irq; | ||
8453 | } | ||
8454 | |||
8455 | /* fetch ucode file from disk, alloc and copy to bus-master buffers ... | ||
8456 | * ucode filename and max sizes are card-specific. */ | ||
8457 | err = iwl_read_ucode(priv); | ||
8458 | if (err) { | ||
8459 | IWL_ERROR("Could not read microcode: %d\n", err); | ||
8460 | mutex_unlock(&priv->mutex); | ||
8461 | goto out_pci_alloc; | ||
8462 | } | ||
8463 | |||
8464 | mutex_unlock(&priv->mutex); | ||
8465 | |||
8466 | 	IWL_DEBUG_INFO("Queueing UP work.\n"); | ||
8467 | |||
8468 | queue_work(priv->workqueue, &priv->up); | ||
8469 | |||
8470 | return 0; | ||
8471 | |||
8472 | out_pci_alloc: | ||
8473 | iwl_dealloc_ucode_pci(priv); | ||
8474 | |||
8475 | sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); | ||
8476 | |||
8477 | out_release_irq: | ||
8478 | free_irq(pdev->irq, priv); | ||
8479 | |||
8480 | out_disable_msi: | ||
8481 | pci_disable_msi(pdev); | ||
8482 | destroy_workqueue(priv->workqueue); | ||
8483 | priv->workqueue = NULL; | ||
8484 | iwl_unset_hw_setting(priv); | ||
8485 | |||
8486 | out_iounmap: | ||
8487 | pci_iounmap(pdev, priv->hw_base); | ||
8488 | out_pci_release_regions: | ||
8489 | pci_release_regions(pdev); | ||
8490 | out_pci_disable_device: | ||
8491 | pci_disable_device(pdev); | ||
8492 | pci_set_drvdata(pdev, NULL); | ||
8493 | out_ieee80211_free_hw: | ||
8494 | ieee80211_free_hw(priv->hw); | ||
8495 | out: | ||
8496 | return err; | ||
8497 | } | ||
8498 | |||
8499 | static void iwl_pci_remove(struct pci_dev *pdev) | ||
8500 | { | ||
8501 | struct iwl_priv *priv = pci_get_drvdata(pdev); | ||
8502 | struct list_head *p, *q; | ||
8503 | int i; | ||
8504 | |||
8505 | if (!priv) | ||
8506 | return; | ||
8507 | |||
8508 | IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n"); | ||
8509 | |||
8510 | mutex_lock(&priv->mutex); | ||
8511 | set_bit(STATUS_EXIT_PENDING, &priv->status); | ||
8512 | __iwl_down(priv); | ||
8513 | mutex_unlock(&priv->mutex); | ||
8514 | |||
8515 | /* Free MAC hash list for ADHOC */ | ||
8516 | for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) { | ||
8517 | list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) { | ||
8518 | list_del(p); | ||
8519 | kfree(list_entry(p, struct iwl_ibss_seq, list)); | ||
8520 | } | ||
8521 | } | ||
8522 | |||
8523 | sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); | ||
8524 | |||
8525 | iwl_dealloc_ucode_pci(priv); | ||
8526 | |||
8527 | if (priv->rxq.bd) | ||
8528 | iwl_rx_queue_free(priv, &priv->rxq); | ||
8529 | iwl_hw_txq_ctx_free(priv); | ||
8530 | |||
8531 | iwl_unset_hw_setting(priv); | ||
8532 | iwl_clear_stations_table(priv); | ||
8533 | |||
8534 | if (priv->mac80211_registered) { | ||
8535 | ieee80211_unregister_hw(priv->hw); | ||
8536 | iwl_rate_control_unregister(priv->hw); | ||
8537 | } | ||
8538 | |||
8539 | /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes | ||
8540 | * priv->workqueue... so we can't take down the workqueue | ||
8541 | * until now... */ | ||
8542 | destroy_workqueue(priv->workqueue); | ||
8543 | priv->workqueue = NULL; | ||
8544 | |||
8545 | free_irq(pdev->irq, priv); | ||
8546 | pci_disable_msi(pdev); | ||
8547 | pci_iounmap(pdev, priv->hw_base); | ||
8548 | pci_release_regions(pdev); | ||
8549 | pci_disable_device(pdev); | ||
8550 | pci_set_drvdata(pdev, NULL); | ||
8551 | |||
8552 | kfree(priv->channel_info); | ||
8553 | |||
8554 | kfree(priv->ieee_channels); | ||
8555 | kfree(priv->ieee_rates); | ||
8556 | |||
8557 | if (priv->ibss_beacon) | ||
8558 | dev_kfree_skb(priv->ibss_beacon); | ||
8559 | |||
8560 | ieee80211_free_hw(priv->hw); | ||
8561 | } | ||
8562 | |||
8563 | #ifdef CONFIG_PM | ||
8564 | |||
8565 | static int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state) | ||
8566 | { | ||
8567 | struct iwl_priv *priv = pci_get_drvdata(pdev); | ||
8568 | |||
8569 | mutex_lock(&priv->mutex); | ||
8570 | |||
8571 | set_bit(STATUS_IN_SUSPEND, &priv->status); | ||
8572 | |||
8573 | /* Take down the device; powers it off, etc. */ | ||
8574 | __iwl_down(priv); | ||
8575 | |||
8576 | if (priv->mac80211_registered) | ||
8577 | ieee80211_stop_queues(priv->hw); | ||
8578 | |||
8579 | pci_save_state(pdev); | ||
8580 | pci_disable_device(pdev); | ||
8581 | pci_set_power_state(pdev, PCI_D3hot); | ||
8582 | |||
8583 | mutex_unlock(&priv->mutex); | ||
8584 | |||
8585 | return 0; | ||
8586 | } | ||
8587 | |||
8588 | static void iwl_resume(struct iwl_priv *priv) | ||
8589 | { | ||
8590 | unsigned long flags; | ||
8591 | |||
8592 | 	/* The following is a temporary workaround due to the | ||
8593 | * suspend / resume not fully initializing the NIC correctly. | ||
8594 | * Without all of the following, resume will not attempt to take | ||
8595 | * down the NIC (it shouldn't really need to) and will just try | ||
8596 | * and bring the NIC back up. However that fails during the | ||
8597 | * ucode verification process. This then causes iwl_down to be | ||
8598 | * called *after* iwl_hw_nic_init() has succeeded -- which | ||
8599 | * then lets the next init sequence succeed. So, we've | ||
8600 | * replicated all of that NIC init code here... */ | ||
8601 | |||
8602 | iwl_write32(priv, CSR_INT, 0xFFFFFFFF); | ||
8603 | |||
8604 | iwl_hw_nic_init(priv); | ||
8605 | |||
8606 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
8607 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, | ||
8608 | CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); | ||
8609 | iwl_write32(priv, CSR_INT, 0xFFFFFFFF); | ||
8610 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
8611 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
8612 | |||
8613 | /* tell the device to stop sending interrupts */ | ||
8614 | iwl_disable_interrupts(priv); | ||
8615 | |||
8616 | spin_lock_irqsave(&priv->lock, flags); | ||
8617 | iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | ||
8618 | |||
8619 | if (!iwl_grab_restricted_access(priv)) { | ||
8620 | iwl_write_restricted_reg(priv, APMG_CLK_DIS_REG, | ||
8621 | APMG_CLK_VAL_DMA_CLK_RQT); | ||
8622 | iwl_release_restricted_access(priv); | ||
8623 | } | ||
8624 | spin_unlock_irqrestore(&priv->lock, flags); | ||
8625 | |||
8626 | udelay(5); | ||
8627 | |||
8628 | iwl_hw_nic_reset(priv); | ||
8629 | |||
8630 | /* Bring the device back up */ | ||
8631 | clear_bit(STATUS_IN_SUSPEND, &priv->status); | ||
8632 | queue_work(priv->workqueue, &priv->up); | ||
8633 | } | ||
8634 | |||
8635 | static int iwl_pci_resume(struct pci_dev *pdev) | ||
8636 | { | ||
8637 | struct iwl_priv *priv = pci_get_drvdata(pdev); | ||
8638 | int err; | ||
8639 | |||
8640 | 	printk(KERN_INFO DRV_NAME ": Coming out of suspend...\n"); | ||
8641 | |||
8642 | mutex_lock(&priv->mutex); | ||
8643 | |||
8644 | pci_set_power_state(pdev, PCI_D0); | ||
8645 | err = pci_enable_device(pdev); | ||
8646 | pci_restore_state(pdev); | ||
8647 | |||
8648 | /* | ||
8649 | * Suspend/Resume resets the PCI configuration space, so we have to | ||
8650 | * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries | ||
8651 | * from interfering with C3 CPU state. pci_restore_state won't help | ||
8652 | * here since it only restores the first 64 bytes pci config header. | ||
8653 | */ | ||
8654 | pci_write_config_byte(pdev, 0x41, 0x00); | ||
8655 | |||
8656 | iwl_resume(priv); | ||
8657 | mutex_unlock(&priv->mutex); | ||
8658 | |||
8659 | return 0; | ||
8660 | } | ||
8661 | |||
8662 | #endif /* CONFIG_PM */ | ||
8663 | |||
8664 | /***************************************************************************** | ||
8665 | * | ||
8666 | * driver and module entry point | ||
8667 | * | ||
8668 | *****************************************************************************/ | ||
8669 | |||
8670 | static struct pci_driver iwl_driver = { | ||
8671 | .name = DRV_NAME, | ||
8672 | .id_table = iwl_hw_card_ids, | ||
8673 | .probe = iwl_pci_probe, | ||
8674 | .remove = __devexit_p(iwl_pci_remove), | ||
8675 | #ifdef CONFIG_PM | ||
8676 | .suspend = iwl_pci_suspend, | ||
8677 | .resume = iwl_pci_resume, | ||
8678 | #endif | ||
8679 | }; | ||
8680 | |||
8681 | static int __init iwl_init(void) | ||
8682 | { | ||
8683 | |||
8684 | int ret; | ||
8685 | printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); | ||
8686 | printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); | ||
8687 | ret = pci_register_driver(&iwl_driver); | ||
8688 | if (ret) { | ||
8689 | IWL_ERROR("Unable to initialize PCI module\n"); | ||
8690 | return ret; | ||
8691 | } | ||
8692 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
8693 | ret = driver_create_file(&iwl_driver.driver, &driver_attr_debug_level); | ||
8694 | if (ret) { | ||
8695 | IWL_ERROR("Unable to create driver sysfs file\n"); | ||
8696 | pci_unregister_driver(&iwl_driver); | ||
8697 | return ret; | ||
8698 | } | ||
8699 | #endif | ||
8700 | |||
8701 | return ret; | ||
8702 | } | ||
8703 | |||
8704 | static void __exit iwl_exit(void) | ||
8705 | { | ||
8706 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
8707 | driver_remove_file(&iwl_driver.driver, &driver_attr_debug_level); | ||
8708 | #endif | ||
8709 | pci_unregister_driver(&iwl_driver); | ||
8710 | } | ||
8711 | |||
8712 | module_param_named(antenna, iwl_param_antenna, int, 0444); | ||
8713 | MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); | ||
8714 | module_param_named(disable, iwl_param_disable, int, 0444); | ||
8715 | MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])"); | ||
8716 | module_param_named(hwcrypto, iwl_param_hwcrypto, int, 0444); | ||
8717 | MODULE_PARM_DESC(hwcrypto, | ||
8718 | 		 "use hardware crypto engine (default 0 [software])"); | ||
8719 | module_param_named(debug, iwl_param_debug, int, 0444); | ||
8720 | MODULE_PARM_DESC(debug, "debug output mask"); | ||
8721 | module_param_named(disable_hw_scan, iwl_param_disable_hw_scan, int, 0444); | ||
8722 | MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)"); | ||
8723 | |||
8724 | module_param_named(queues_num, iwl_param_queues_num, int, 0444); | ||
8725 | MODULE_PARM_DESC(queues_num, "number of hw queues."); | ||
8726 | |||
8727 | /* QoS */ | ||
8728 | module_param_named(qos_enable, iwl_param_qos_enable, int, 0444); | ||
8729 | MODULE_PARM_DESC(qos_enable, "enable all QoS functionality"); | ||
8730 | |||
8731 | module_exit(iwl_exit); | ||
8732 | module_init(iwl_init); | ||