author    | Zhu Yi <yi.zhu@intel.com> | 2007-09-25 20:54:57 -0400
committer | David S. Miller <davem@sunset.davemloft.net> | 2007-10-10 19:51:39 -0400
commit    | b481de9ca074528fe8c429604e2777db8b89806a (patch)
tree      | cf226646d73c56af843e8a656a296905ad6df179 /drivers/net/wireless/iwlwifi/iwl4965-base.c
parent    | 75388acd0cd827dc1498043daa7d1c760902cd67 (diff)
[IWLWIFI]: add iwlwifi wireless drivers
This patch adds the mac80211 based wireless drivers for the Intel
PRO/Wireless 3945ABG/BG Network Connection and Intel Wireless WiFi
Link AGN (4965) adapters.
[ Move driver into its own directory -DaveM ]
Signed-off-by: Zhu Yi <yi.zhu@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl4965-base.c')
-rw-r--r-- | drivers/net/wireless/iwlwifi/iwl4965-base.c | 9323 |
1 file changed, 9323 insertions, 0 deletions
diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl4965-base.c
new file mode 100644
index 000000000000..b79dabc8c01c
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl4965-base.c
@@ -0,0 +1,9323 @@
1 | /****************************************************************************** | ||
2 | * | ||
3 | * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. | ||
4 | * | ||
5 | * Portions of this file are derived from the ipw3945 project, as well | ||
6 | * as portions of the ieee80211 subsystem header files. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of version 2 of the GNU General Public License as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | ||
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
15 | * more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License along with | ||
18 | * this program; if not, write to the Free Software Foundation, Inc., | ||
19 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA | ||
20 | * | ||
21 | * The full GNU General Public License is included in this distribution in the | ||
22 | * file called LICENSE. | ||
23 | * | ||
24 | * Contact Information: | ||
25 | * James P. Ketrenos <ipw2100-admin@linux.intel.com> | ||
26 | * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 | ||
27 | * | ||
28 | *****************************************************************************/ | ||
29 | |||
30 | /* | ||
31 | * NOTE: This file (iwl-base.c) is used to build multiple hardware targets | ||
32 | * by defining IWL to either 3945 or 4965. The Makefile used when building | ||
33 | * the base targets will create base-3945.o and base-4965.o | ||
34 | * | ||
35 | * The eventual goal is to move as many of the #if IWL / #endif blocks out of | ||
36 | * this file and into the hardware specific implementation files (iwl-XXXX.c) | ||
37 | * and leave only the common (non #ifdef sprinkled) code in this file | ||
38 | */ | ||
39 | |||
40 | #include <linux/kernel.h> | ||
41 | #include <linux/module.h> | ||
42 | #include <linux/version.h> | ||
43 | #include <linux/init.h> | ||
44 | #include <linux/pci.h> | ||
45 | #include <linux/dma-mapping.h> | ||
46 | #include <linux/delay.h> | ||
47 | #include <linux/skbuff.h> | ||
48 | #include <linux/netdevice.h> | ||
49 | #include <linux/wireless.h> | ||
50 | #include <linux/firmware.h> | ||
51 | #include <linux/skbuff.h> | ||
52 | #include <linux/netdevice.h> | ||
53 | #include <linux/etherdevice.h> | ||
54 | #include <linux/if_arp.h> | ||
55 | |||
56 | #include <net/ieee80211_radiotap.h> | ||
57 | #include <net/mac80211.h> | ||
58 | |||
59 | #include <asm/div64.h> | ||
60 | |||
61 | #include "iwlwifi.h" | ||
62 | #include "iwl-4965.h" | ||
63 | #include "iwl-helpers.h" | ||
64 | |||
65 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
66 | u32 iwl_debug_level; | ||
67 | #endif | ||
68 | |||
69 | /****************************************************************************** | ||
70 | * | ||
71 | * module boilerplate | ||
72 | * | ||
73 | ******************************************************************************/ | ||
74 | |||
75 | /* module parameters */ | ||
76 | int iwl_param_disable_hw_scan; | ||
77 | int iwl_param_debug; | ||
78 | int iwl_param_disable; /* def: enable radio */ | ||
79 | int iwl_param_antenna; /* def: 0 = both antennas (use diversity) */ | ||
80 | int iwl_param_hwcrypto; /* def: using software encryption */ | ||
81 | int iwl_param_qos_enable = 1; | ||
82 | int iwl_param_queues_num = IWL_MAX_NUM_QUEUES; | ||
83 | |||
84 | /* | ||
85 | * module name, copyright, version, etc. | ||
86 | * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk | ||
87 | */ | ||
88 | |||
89 | #define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link 4965AGN driver for Linux" | ||
90 | |||
91 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
92 | #define VD "d" | ||
93 | #else | ||
94 | #define VD | ||
95 | #endif | ||
96 | |||
97 | #ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT | ||
98 | #define VS "s" | ||
99 | #else | ||
100 | #define VS | ||
101 | #endif | ||
102 | |||
103 | #define IWLWIFI_VERSION "0.1.15k" VD VS | ||
104 | #define DRV_COPYRIGHT "Copyright(c) 2003-2007 Intel Corporation" | ||
105 | #define DRV_VERSION IWLWIFI_VERSION | ||
106 | |||
107 | /* Change the firmware file name, using "-" and an incrementing number, | ||
108 | * *only* when the uCode interface or architecture changes so that it | ||
109 | * is no longer compatible with earlier drivers. | ||
110 | * This number also appears in the << 8 position of the 1st dword of the uCode file */ | ||
111 | #define IWL4965_UCODE_API "-1" | ||
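/*
 * Note (assumption: the firmware name prefix is defined elsewhere in the
 * driver, not in this hunk): this API string is appended to the firmware
 * base name, so the image requested at runtime is presumably named
 * iwlwifi-4965-1.ucode; bumping the suffix forces a matching firmware
 * update whenever the uCode interface changes incompatibly.
 */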
112 | |||
113 | MODULE_DESCRIPTION(DRV_DESCRIPTION); | ||
114 | MODULE_VERSION(DRV_VERSION); | ||
115 | MODULE_AUTHOR(DRV_COPYRIGHT); | ||
116 | MODULE_LICENSE("GPL"); | ||
117 | |||
118 | __le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr) | ||
119 | { | ||
120 | u16 fc = le16_to_cpu(hdr->frame_control); | ||
121 | int hdr_len = ieee80211_get_hdrlen(fc); | ||
122 | |||
123 | if ((fc & 0x00cc) == (IEEE80211_STYPE_QOS_DATA | IEEE80211_FTYPE_DATA)) | ||
124 | return (__le16 *) ((u8 *) hdr + hdr_len - QOS_CONTROL_LEN); | ||
125 | return NULL; | ||
126 | } | ||
127 | |||
128 | static const struct ieee80211_hw_mode *iwl_get_hw_mode( | ||
129 | struct iwl_priv *priv, int mode) | ||
130 | { | ||
131 | int i; | ||
132 | |||
133 | for (i = 0; i < 3; i++) | ||
134 | if (priv->modes[i].mode == mode) | ||
135 | return &priv->modes[i]; | ||
136 | |||
137 | return NULL; | ||
138 | } | ||
139 | |||
140 | static int iwl_is_empty_essid(const char *essid, int essid_len) | ||
141 | { | ||
142 | /* Single white space is for Linksys APs */ | ||
143 | if (essid_len == 1 && essid[0] == ' ') | ||
144 | return 1; | ||
145 | |||
146 | /* Otherwise, if the entire essid is 0, we assume it is hidden */ | ||
147 | while (essid_len) { | ||
148 | essid_len--; | ||
149 | if (essid[essid_len] != '\0') | ||
150 | return 0; | ||
151 | } | ||
152 | |||
153 | return 1; | ||
154 | } | ||
155 | |||
156 | static const char *iwl_escape_essid(const char *essid, u8 essid_len) | ||
157 | { | ||
158 | static char escaped[IW_ESSID_MAX_SIZE * 2 + 1]; | ||
159 | const char *s = essid; | ||
160 | char *d = escaped; | ||
161 | |||
162 | if (iwl_is_empty_essid(essid, essid_len)) { | ||
163 | memcpy(escaped, "<hidden>", sizeof("<hidden>")); | ||
164 | return escaped; | ||
165 | } | ||
166 | |||
167 | essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE); | ||
168 | while (essid_len--) { | ||
169 | if (*s == '\0') { | ||
170 | *d++ = '\\'; | ||
171 | *d++ = '0'; | ||
172 | s++; | ||
173 | } else | ||
174 | *d++ = *s++; | ||
175 | } | ||
176 | *d = '\0'; | ||
177 | return escaped; | ||
178 | } | ||
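/*
 * Example (byte values chosen here for illustration only): an ESSID of the
 * six bytes { 'l', 'n', 'x', '\0', 'a', 'p' } is returned as the string
 * "lnx\0ap", i.e. the embedded NUL is printed as the two characters '\'
 * and '0'.  A single-space ESSID or an all-zero ESSID is reported as
 * "<hidden>".
 */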
179 | |||
180 | static void iwl_print_hex_dump(int level, void *p, u32 len) | ||
181 | { | ||
182 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
183 | if (!(iwl_debug_level & level)) | ||
184 | return; | ||
185 | |||
186 | print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1, | ||
187 | p, len, 1); | ||
188 | #endif | ||
189 | } | ||
190 | |||
191 | /*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** | ||
192 | * DMA services | ||
193 | * | ||
194 | * Theory of operation | ||
195 | * | ||
196 | * A queue is a circular buffer with 'Read' and 'Write' pointers. | ||
197 | * Two empty entries are always kept in the buffer to protect against overflow. | ||
198 | * | ||
199 | * For the Tx queue, there are low mark and high mark limits. If, after queuing | ||
200 | * a packet for Tx, the free space drops below the low mark, the Tx queue is | ||
201 | * stopped. When packets are reclaimed (on the 'tx done' IRQ), the Tx queue is | ||
202 | * resumed once the free space rises above the high mark. | ||
203 | * | ||
204 | * The IPW operates with six queues, one receive queue in the device's | ||
205 | * sram, one transmit queue for sending commands to the device firmware, | ||
206 | * and four transmit queues for data. | ||
207 | ***************************************************/ | ||
208 | |||
209 | static int iwl_queue_space(const struct iwl_queue *q) | ||
210 | { | ||
211 | int s = q->last_used - q->first_empty; | ||
212 | |||
213 | if (q->last_used > q->first_empty) | ||
214 | s -= q->n_bd; | ||
215 | |||
216 | if (s <= 0) | ||
217 | s += q->n_window; | ||
218 | /* keep some reserve to not confuse empty and full situations */ | ||
219 | s -= 2; | ||
220 | if (s < 0) | ||
221 | s = 0; | ||
222 | return s; | ||
223 | } | ||
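/*
 * Worked example (numbers are illustrative only): with n_bd = 256,
 * n_window = 64, last_used = 4 and first_empty = 10, six entries are in
 * flight.  s starts at 4 - 10 = -6, is not adjusted by n_bd since
 * last_used <= first_empty, becomes -6 + 64 = 58 after adding n_window,
 * and 56 after subtracting the 2-entry reserve -- i.e. 56 of the 64
 * window slots are still available.
 */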
224 | |||
225 | /* XXX: n_bd must be power-of-two size */ | ||
226 | static inline int iwl_queue_inc_wrap(int index, int n_bd) | ||
227 | { | ||
228 | return ++index & (n_bd - 1); | ||
229 | } | ||
230 | |||
231 | /* XXX: n_bd must be power-of-two size */ | ||
232 | static inline int iwl_queue_dec_wrap(int index, int n_bd) | ||
233 | { | ||
234 | return --index & (n_bd - 1); | ||
235 | } | ||
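/*
 * Because n_bd is required to be a power of two, these helpers can wrap
 * with a mask instead of a modulo; for example,
 * iwl_queue_inc_wrap(255, 256) == 0 and iwl_queue_dec_wrap(0, 256) == 255.
 */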
236 | |||
237 | static inline int x2_queue_used(const struct iwl_queue *q, int i) | ||
238 | { | ||
239 | return q->first_empty > q->last_used ? | ||
240 | (i >= q->last_used && i < q->first_empty) : | ||
241 | !(i < q->last_used && i >= q->first_empty); | ||
242 | } | ||
243 | |||
244 | static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge) | ||
245 | { | ||
246 | if (is_huge) | ||
247 | return q->n_window; | ||
248 | |||
249 | return index & (q->n_window - 1); | ||
250 | } | ||
251 | |||
252 | static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q, | ||
253 | int count, int slots_num, u32 id) | ||
254 | { | ||
255 | q->n_bd = count; | ||
256 | q->n_window = slots_num; | ||
257 | q->id = id; | ||
258 | |||
259 | /* count must be power-of-two size, otherwise iwl_queue_inc_wrap | ||
260 | * and iwl_queue_dec_wrap are broken. */ | ||
261 | BUG_ON(!is_power_of_2(count)); | ||
262 | |||
263 | /* slots_num must be power-of-two size, otherwise | ||
264 | * get_cmd_index is broken. */ | ||
265 | BUG_ON(!is_power_of_2(slots_num)); | ||
266 | |||
267 | q->low_mark = q->n_window / 4; | ||
268 | if (q->low_mark < 4) | ||
269 | q->low_mark = 4; | ||
270 | |||
271 | q->high_mark = q->n_window / 8; | ||
272 | if (q->high_mark < 2) | ||
273 | q->high_mark = 2; | ||
274 | |||
275 | q->first_empty = q->last_used = 0; | ||
276 | |||
277 | return 0; | ||
278 | } | ||
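/*
 * Sketch of the resulting watermarks, assuming a data queue with
 * slots_num = 64 (illustrative value): low_mark = 64 / 4 = 16 and
 * high_mark = 64 / 8 = 8; for a small queue with slots_num = 8 the
 * clamps kick in and give low_mark = 4, high_mark = 2.
 */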
279 | |||
280 | static int iwl_tx_queue_alloc(struct iwl_priv *priv, | ||
281 | struct iwl_tx_queue *txq, u32 id) | ||
282 | { | ||
283 | struct pci_dev *dev = priv->pci_dev; | ||
284 | |||
285 | if (id != IWL_CMD_QUEUE_NUM) { | ||
286 | txq->txb = kmalloc(sizeof(txq->txb[0]) * | ||
287 | TFD_QUEUE_SIZE_MAX, GFP_KERNEL); | ||
288 | if (!txq->txb) { | ||
289 | IWL_ERROR("kmalloc for auxilary BD " | ||
290 | "structures failed\n"); | ||
291 | goto error; | ||
292 | } | ||
293 | } else | ||
294 | txq->txb = NULL; | ||
295 | |||
296 | txq->bd = pci_alloc_consistent(dev, | ||
297 | sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX, | ||
298 | &txq->q.dma_addr); | ||
299 | |||
300 | if (!txq->bd) { | ||
301 | IWL_ERROR("pci_alloc_consistent(%zd) failed\n", | ||
302 | sizeof(txq->bd[0]) * TFD_QUEUE_SIZE_MAX); | ||
303 | goto error; | ||
304 | } | ||
305 | txq->q.id = id; | ||
306 | |||
307 | return 0; | ||
308 | |||
309 | error: | ||
310 | if (txq->txb) { | ||
311 | kfree(txq->txb); | ||
312 | txq->txb = NULL; | ||
313 | } | ||
314 | |||
315 | return -ENOMEM; | ||
316 | } | ||
317 | |||
318 | int iwl_tx_queue_init(struct iwl_priv *priv, | ||
319 | struct iwl_tx_queue *txq, int slots_num, u32 txq_id) | ||
320 | { | ||
321 | struct pci_dev *dev = priv->pci_dev; | ||
322 | int len; | ||
323 | int rc = 0; | ||
324 | |||
325 | /* Allocate command space plus one big command for scan, since the scan | ||
326 | * command is very large and the system will never have two scans in | ||
327 | * flight at the same time */ | ||
328 | len = sizeof(struct iwl_cmd) * slots_num; | ||
329 | if (txq_id == IWL_CMD_QUEUE_NUM) | ||
330 | len += IWL_MAX_SCAN_SIZE; | ||
331 | txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd); | ||
332 | if (!txq->cmd) | ||
333 | return -ENOMEM; | ||
334 | |||
335 | rc = iwl_tx_queue_alloc(priv, txq, txq_id); | ||
336 | if (rc) { | ||
337 | pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd); | ||
338 | |||
339 | return -ENOMEM; | ||
340 | } | ||
341 | txq->need_update = 0; | ||
342 | |||
343 | /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise | ||
344 | * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ | ||
345 | BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); | ||
346 | iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id); | ||
347 | |||
348 | iwl_hw_tx_queue_init(priv, txq); | ||
349 | |||
350 | return 0; | ||
351 | } | ||
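/*
 * Layout sketch of the command buffer allocated above (an assumption drawn
 * from the code in this file, not from separate documentation): the buffer
 * holds slots_num regular struct iwl_cmd entries back to back; for the
 * command queue an extra IWL_MAX_SCAN_SIZE bytes follow them, and
 * get_cmd_index() returns n_window for a CMD_SIZE_HUGE command so that the
 * (single) scan command is written into that tail region, just past entry
 * slots_num - 1.
 */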
352 | |||
353 | /** | ||
354 | * iwl_tx_queue_free - Deallocate DMA queue. | ||
355 | * @txq: Transmit queue to deallocate. | ||
356 | * | ||
357 | * Empty queue by removing and destroying all BD's. | ||
358 | * Free all buffers. txq itself is not freed. | ||
359 | * | ||
360 | */ | ||
361 | void iwl_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq) | ||
362 | { | ||
363 | struct iwl_queue *q = &txq->q; | ||
364 | struct pci_dev *dev = priv->pci_dev; | ||
365 | int len; | ||
366 | |||
367 | if (q->n_bd == 0) | ||
368 | return; | ||
369 | |||
370 | /* first, empty all BD's */ | ||
371 | for (; q->first_empty != q->last_used; | ||
372 | q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd)) | ||
373 | iwl_hw_txq_free_tfd(priv, txq); | ||
374 | |||
375 | len = sizeof(struct iwl_cmd) * q->n_window; | ||
376 | if (q->id == IWL_CMD_QUEUE_NUM) | ||
377 | len += IWL_MAX_SCAN_SIZE; | ||
378 | |||
379 | pci_free_consistent(dev, len, txq->cmd, txq->dma_addr_cmd); | ||
380 | |||
381 | /* free buffers belonging to queue itself */ | ||
382 | if (txq->q.n_bd) | ||
383 | pci_free_consistent(dev, sizeof(struct iwl_tfd_frame) * | ||
384 | txq->q.n_bd, txq->bd, txq->q.dma_addr); | ||
385 | |||
386 | if (txq->txb) { | ||
387 | kfree(txq->txb); | ||
388 | txq->txb = NULL; | ||
389 | } | ||
390 | |||
391 | /* 0 fill whole structure */ | ||
392 | memset(txq, 0, sizeof(*txq)); | ||
393 | } | ||
394 | |||
395 | const u8 BROADCAST_ADDR[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; | ||
396 | |||
397 | /*************** STATION TABLE MANAGEMENT **** | ||
398 | * | ||
399 | * NOTE: This needs to be overhauled to better synchronize between | ||
400 | * how the iwl-4965.c is using iwl_hw_find_station vs. iwl-3945.c | ||
401 | * | ||
402 | * mac80211 should also be examined to determine if sta_info is duplicating | ||
403 | * the functionality provided here | ||
404 | */ | ||
405 | |||
406 | /**************************************************************/ | ||
407 | |||
408 | static u8 iwl_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap) | ||
409 | { | ||
410 | int index = IWL_INVALID_STATION; | ||
411 | int i; | ||
412 | unsigned long flags; | ||
413 | |||
414 | spin_lock_irqsave(&priv->sta_lock, flags); | ||
415 | |||
416 | if (is_ap) | ||
417 | index = IWL_AP_ID; | ||
418 | else if (is_broadcast_ether_addr(addr)) | ||
419 | index = priv->hw_setting.bcast_sta_id; | ||
420 | else | ||
421 | for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) | ||
422 | if (priv->stations[i].used && | ||
423 | !compare_ether_addr(priv->stations[i].sta.sta.addr, | ||
424 | addr)) { | ||
425 | index = i; | ||
426 | break; | ||
427 | } | ||
428 | |||
429 | if (unlikely(index == IWL_INVALID_STATION)) | ||
430 | goto out; | ||
431 | |||
432 | if (priv->stations[index].used) { | ||
433 | priv->stations[index].used = 0; | ||
434 | priv->num_stations--; | ||
435 | } | ||
436 | |||
437 | BUG_ON(priv->num_stations < 0); | ||
438 | |||
439 | out: | ||
440 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
441 | return 0; | ||
442 | } | ||
443 | |||
444 | static void iwl_clear_stations_table(struct iwl_priv *priv) | ||
445 | { | ||
446 | unsigned long flags; | ||
447 | |||
448 | spin_lock_irqsave(&priv->sta_lock, flags); | ||
449 | |||
450 | priv->num_stations = 0; | ||
451 | memset(priv->stations, 0, sizeof(priv->stations)); | ||
452 | |||
453 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
454 | } | ||
455 | |||
456 | u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap, u8 flags) | ||
457 | { | ||
458 | int i; | ||
459 | int index = IWL_INVALID_STATION; | ||
460 | struct iwl_station_entry *station; | ||
461 | unsigned long flags_spin; | ||
462 | |||
463 | spin_lock_irqsave(&priv->sta_lock, flags_spin); | ||
464 | if (is_ap) | ||
465 | index = IWL_AP_ID; | ||
466 | else if (is_broadcast_ether_addr(addr)) | ||
467 | index = priv->hw_setting.bcast_sta_id; | ||
468 | else | ||
469 | for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) { | ||
470 | if (!compare_ether_addr(priv->stations[i].sta.sta.addr, | ||
471 | addr)) { | ||
472 | index = i; | ||
473 | break; | ||
474 | } | ||
475 | |||
476 | if (!priv->stations[i].used && | ||
477 | index == IWL_INVALID_STATION) | ||
478 | index = i; | ||
479 | } | ||
480 | |||
481 | |||
482 | /* These two conditions have the same outcome, but keep them separate | ||
483 | since they have different meanings */ | ||
484 | if (unlikely(index == IWL_INVALID_STATION)) { | ||
485 | spin_unlock_irqrestore(&priv->sta_lock, flags_spin); | ||
486 | return index; | ||
487 | } | ||
488 | |||
489 | if (priv->stations[index].used && | ||
490 | !compare_ether_addr(priv->stations[index].sta.sta.addr, addr)) { | ||
491 | spin_unlock_irqrestore(&priv->sta_lock, flags_spin); | ||
492 | return index; | ||
493 | } | ||
494 | |||
495 | |||
496 | IWL_DEBUG_ASSOC("Add STA ID %d: " MAC_FMT "\n", index, MAC_ARG(addr)); | ||
497 | station = &priv->stations[index]; | ||
498 | station->used = 1; | ||
499 | priv->num_stations++; | ||
500 | |||
501 | memset(&station->sta, 0, sizeof(struct iwl_addsta_cmd)); | ||
502 | memcpy(station->sta.sta.addr, addr, ETH_ALEN); | ||
503 | station->sta.mode = 0; | ||
504 | station->sta.sta.sta_id = index; | ||
505 | station->sta.station_flags = 0; | ||
506 | |||
507 | #ifdef CONFIG_IWLWIFI_HT | ||
508 | /* BCAST station and IBSS stations do not work in HT mode */ | ||
509 | if (index != priv->hw_setting.bcast_sta_id && | ||
510 | priv->iw_mode != IEEE80211_IF_TYPE_IBSS) | ||
511 | iwl4965_set_ht_add_station(priv, index); | ||
512 | #endif /*CONFIG_IWLWIFI_HT*/ | ||
513 | |||
514 | spin_unlock_irqrestore(&priv->sta_lock, flags_spin); | ||
515 | iwl_send_add_station(priv, &station->sta, flags); | ||
516 | return index; | ||
517 | |||
518 | } | ||
519 | |||
520 | /*************** DRIVER STATUS FUNCTIONS *****/ | ||
521 | |||
522 | static inline int iwl_is_ready(struct iwl_priv *priv) | ||
523 | { | ||
524 | /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are | ||
525 | * set but EXIT_PENDING is not */ | ||
526 | return test_bit(STATUS_READY, &priv->status) && | ||
527 | test_bit(STATUS_GEO_CONFIGURED, &priv->status) && | ||
528 | !test_bit(STATUS_EXIT_PENDING, &priv->status); | ||
529 | } | ||
530 | |||
531 | static inline int iwl_is_alive(struct iwl_priv *priv) | ||
532 | { | ||
533 | return test_bit(STATUS_ALIVE, &priv->status); | ||
534 | } | ||
535 | |||
536 | static inline int iwl_is_init(struct iwl_priv *priv) | ||
537 | { | ||
538 | return test_bit(STATUS_INIT, &priv->status); | ||
539 | } | ||
540 | |||
541 | static inline int iwl_is_rfkill(struct iwl_priv *priv) | ||
542 | { | ||
543 | return test_bit(STATUS_RF_KILL_HW, &priv->status) || | ||
544 | test_bit(STATUS_RF_KILL_SW, &priv->status); | ||
545 | } | ||
546 | |||
547 | static inline int iwl_is_ready_rf(struct iwl_priv *priv) | ||
548 | { | ||
549 | |||
550 | if (iwl_is_rfkill(priv)) | ||
551 | return 0; | ||
552 | |||
553 | return iwl_is_ready(priv); | ||
554 | } | ||
555 | |||
556 | /*************** HOST COMMAND QUEUE FUNCTIONS *****/ | ||
557 | |||
558 | #define IWL_CMD(x) case x : return #x | ||
559 | |||
560 | static const char *get_cmd_string(u8 cmd) | ||
561 | { | ||
562 | switch (cmd) { | ||
563 | IWL_CMD(REPLY_ALIVE); | ||
564 | IWL_CMD(REPLY_ERROR); | ||
565 | IWL_CMD(REPLY_RXON); | ||
566 | IWL_CMD(REPLY_RXON_ASSOC); | ||
567 | IWL_CMD(REPLY_QOS_PARAM); | ||
568 | IWL_CMD(REPLY_RXON_TIMING); | ||
569 | IWL_CMD(REPLY_ADD_STA); | ||
570 | IWL_CMD(REPLY_REMOVE_STA); | ||
571 | IWL_CMD(REPLY_REMOVE_ALL_STA); | ||
572 | IWL_CMD(REPLY_TX); | ||
573 | IWL_CMD(REPLY_RATE_SCALE); | ||
574 | IWL_CMD(REPLY_LEDS_CMD); | ||
575 | IWL_CMD(REPLY_TX_LINK_QUALITY_CMD); | ||
576 | IWL_CMD(RADAR_NOTIFICATION); | ||
577 | IWL_CMD(REPLY_QUIET_CMD); | ||
578 | IWL_CMD(REPLY_CHANNEL_SWITCH); | ||
579 | IWL_CMD(CHANNEL_SWITCH_NOTIFICATION); | ||
580 | IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD); | ||
581 | IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION); | ||
582 | IWL_CMD(POWER_TABLE_CMD); | ||
583 | IWL_CMD(PM_SLEEP_NOTIFICATION); | ||
584 | IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC); | ||
585 | IWL_CMD(REPLY_SCAN_CMD); | ||
586 | IWL_CMD(REPLY_SCAN_ABORT_CMD); | ||
587 | IWL_CMD(SCAN_START_NOTIFICATION); | ||
588 | IWL_CMD(SCAN_RESULTS_NOTIFICATION); | ||
589 | IWL_CMD(SCAN_COMPLETE_NOTIFICATION); | ||
590 | IWL_CMD(BEACON_NOTIFICATION); | ||
591 | IWL_CMD(REPLY_TX_BEACON); | ||
592 | IWL_CMD(WHO_IS_AWAKE_NOTIFICATION); | ||
593 | IWL_CMD(QUIET_NOTIFICATION); | ||
594 | IWL_CMD(REPLY_TX_PWR_TABLE_CMD); | ||
595 | IWL_CMD(MEASURE_ABORT_NOTIFICATION); | ||
596 | IWL_CMD(REPLY_BT_CONFIG); | ||
597 | IWL_CMD(REPLY_STATISTICS_CMD); | ||
598 | IWL_CMD(STATISTICS_NOTIFICATION); | ||
599 | IWL_CMD(REPLY_CARD_STATE_CMD); | ||
600 | IWL_CMD(CARD_STATE_NOTIFICATION); | ||
601 | IWL_CMD(MISSED_BEACONS_NOTIFICATION); | ||
602 | IWL_CMD(REPLY_CT_KILL_CONFIG_CMD); | ||
603 | IWL_CMD(SENSITIVITY_CMD); | ||
604 | IWL_CMD(REPLY_PHY_CALIBRATION_CMD); | ||
605 | IWL_CMD(REPLY_RX_PHY_CMD); | ||
606 | IWL_CMD(REPLY_RX_MPDU_CMD); | ||
607 | IWL_CMD(REPLY_4965_RX); | ||
608 | IWL_CMD(REPLY_COMPRESSED_BA); | ||
609 | default: | ||
610 | return "UNKNOWN"; | ||
611 | |||
612 | } | ||
613 | } | ||
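/*
 * The IWL_CMD() macro above relies on preprocessor stringification; for
 * example, IWL_CMD(REPLY_ALIVE); expands to
 *     case REPLY_ALIVE : return "REPLY_ALIVE";
 * so each case both matches the opcode and returns its name.
 */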
614 | |||
615 | #define HOST_COMPLETE_TIMEOUT (HZ / 2) | ||
616 | |||
617 | /** | ||
618 | * iwl_enqueue_hcmd - enqueue a uCode command | ||
619 | * @priv: pointer to device private data | ||
620 | * @cmd: a pointer to the uCode command structure | ||
621 | * | ||
622 | * The function returns a value < 0 to indicate that the operation | ||
623 | * failed. On success, it returns the index of the command in the | ||
624 | * command queue. | ||
625 | */ | ||
626 | static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | ||
627 | { | ||
628 | struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; | ||
629 | struct iwl_queue *q = &txq->q; | ||
630 | struct iwl_tfd_frame *tfd; | ||
631 | u32 *control_flags; | ||
632 | struct iwl_cmd *out_cmd; | ||
633 | u32 idx; | ||
634 | u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr)); | ||
635 | dma_addr_t phys_addr; | ||
636 | int ret; | ||
637 | unsigned long flags; | ||
638 | |||
639 | /* If any of the command structures ends up being larger than | ||
640 | * TFD_MAX_PAYLOAD_SIZE and is sent as a 'small' command, then | ||
641 | * we will need to increase the size of the TFD entries */ | ||
642 | BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && | ||
643 | !(cmd->meta.flags & CMD_SIZE_HUGE)); | ||
644 | |||
645 | if (iwl_queue_space(q) < ((cmd->meta.flags & CMD_ASYNC) ? 2 : 1)) { | ||
646 | IWL_ERROR("No space for Tx\n"); | ||
647 | return -ENOSPC; | ||
648 | } | ||
649 | |||
650 | spin_lock_irqsave(&priv->hcmd_lock, flags); | ||
651 | |||
652 | tfd = &txq->bd[q->first_empty]; | ||
653 | memset(tfd, 0, sizeof(*tfd)); | ||
654 | |||
655 | control_flags = (u32 *) tfd; | ||
656 | |||
657 | idx = get_cmd_index(q, q->first_empty, cmd->meta.flags & CMD_SIZE_HUGE); | ||
658 | out_cmd = &txq->cmd[idx]; | ||
659 | |||
660 | out_cmd->hdr.cmd = cmd->id; | ||
661 | memcpy(&out_cmd->meta, &cmd->meta, sizeof(cmd->meta)); | ||
662 | memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len); | ||
663 | |||
664 | /* At this point, the out_cmd now has all of the incoming cmd | ||
665 | * information */ | ||
666 | |||
667 | out_cmd->hdr.flags = 0; | ||
668 | out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(IWL_CMD_QUEUE_NUM) | | ||
669 | INDEX_TO_SEQ(q->first_empty)); | ||
670 | if (out_cmd->meta.flags & CMD_SIZE_HUGE) | ||
671 | out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME); | ||
672 | |||
673 | phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx + | ||
674 | offsetof(struct iwl_cmd, hdr); | ||
675 | iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size); | ||
676 | |||
677 | IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, " | ||
678 | "%d bytes at %d[%d]:%d\n", | ||
679 | get_cmd_string(out_cmd->hdr.cmd), | ||
680 | out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), | ||
681 | fix_size, q->first_empty, idx, IWL_CMD_QUEUE_NUM); | ||
682 | |||
683 | txq->need_update = 1; | ||
684 | ret = iwl4965_tx_queue_update_wr_ptr(priv, txq, 0); | ||
685 | q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd); | ||
686 | iwl_tx_queue_update_write_ptr(priv, txq); | ||
687 | |||
688 | spin_unlock_irqrestore(&priv->hcmd_lock, flags); | ||
689 | return ret ? ret : idx; | ||
690 | } | ||
691 | |||
692 | int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | ||
693 | { | ||
694 | int ret; | ||
695 | |||
696 | BUG_ON(!(cmd->meta.flags & CMD_ASYNC)); | ||
697 | |||
698 | /* An asynchronous command can not expect an SKB to be set. */ | ||
699 | BUG_ON(cmd->meta.flags & CMD_WANT_SKB); | ||
700 | |||
701 | /* An asynchronous command MUST have a callback. */ | ||
702 | BUG_ON(!cmd->meta.u.callback); | ||
703 | |||
704 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
705 | return -EBUSY; | ||
706 | |||
707 | ret = iwl_enqueue_hcmd(priv, cmd); | ||
708 | if (ret < 0) { | ||
709 | IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n", | ||
710 | get_cmd_string(cmd->id), ret); | ||
711 | return ret; | ||
712 | } | ||
713 | return 0; | ||
714 | } | ||
715 | |||
716 | int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | ||
717 | { | ||
718 | int cmd_idx; | ||
719 | int ret; | ||
720 | static atomic_t entry = ATOMIC_INIT(0); /* reentrance protection */ | ||
721 | |||
722 | BUG_ON(cmd->meta.flags & CMD_ASYNC); | ||
723 | |||
724 | /* A synchronous command can not have a callback set. */ | ||
725 | BUG_ON(cmd->meta.u.callback != NULL); | ||
726 | |||
727 | if (atomic_xchg(&entry, 1)) { | ||
728 | IWL_ERROR("Error sending %s: Already sending a host command\n", | ||
729 | get_cmd_string(cmd->id)); | ||
730 | return -EBUSY; | ||
731 | } | ||
732 | |||
733 | set_bit(STATUS_HCMD_ACTIVE, &priv->status); | ||
734 | |||
735 | if (cmd->meta.flags & CMD_WANT_SKB) | ||
736 | cmd->meta.source = &cmd->meta; | ||
737 | |||
738 | cmd_idx = iwl_enqueue_hcmd(priv, cmd); | ||
739 | if (cmd_idx < 0) { | ||
740 | ret = cmd_idx; | ||
741 | IWL_ERROR("Error sending %s: iwl_enqueue_hcmd failed: %d\n", | ||
742 | get_cmd_string(cmd->id), ret); | ||
743 | goto out; | ||
744 | } | ||
745 | |||
746 | ret = wait_event_interruptible_timeout(priv->wait_command_queue, | ||
747 | !test_bit(STATUS_HCMD_ACTIVE, &priv->status), | ||
748 | HOST_COMPLETE_TIMEOUT); | ||
749 | if (!ret) { | ||
750 | if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) { | ||
751 | IWL_ERROR("Error sending %s: time out after %dms.\n", | ||
752 | get_cmd_string(cmd->id), | ||
753 | jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); | ||
754 | |||
755 | clear_bit(STATUS_HCMD_ACTIVE, &priv->status); | ||
756 | ret = -ETIMEDOUT; | ||
757 | goto cancel; | ||
758 | } | ||
759 | } | ||
760 | |||
761 | if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { | ||
762 | IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n", | ||
763 | get_cmd_string(cmd->id)); | ||
764 | ret = -ECANCELED; | ||
765 | goto fail; | ||
766 | } | ||
767 | if (test_bit(STATUS_FW_ERROR, &priv->status)) { | ||
768 | IWL_DEBUG_INFO("Command %s failed: FW Error\n", | ||
769 | get_cmd_string(cmd->id)); | ||
770 | ret = -EIO; | ||
771 | goto fail; | ||
772 | } | ||
773 | if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) { | ||
774 | IWL_ERROR("Error: Response NULL in '%s'\n", | ||
775 | get_cmd_string(cmd->id)); | ||
776 | ret = -EIO; | ||
777 | goto out; | ||
778 | } | ||
779 | |||
780 | ret = 0; | ||
781 | goto out; | ||
782 | |||
783 | cancel: | ||
784 | if (cmd->meta.flags & CMD_WANT_SKB) { | ||
785 | struct iwl_cmd *qcmd; | ||
786 | |||
787 | /* Cancel the CMD_WANT_SKB flag for the cmd in the | ||
788 | * TX cmd queue. Otherwise, if the cmd comes | ||
789 | * in later, it could set an invalid | ||
790 | * address (cmd->meta.source). */ | ||
791 | qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx]; | ||
792 | qcmd->meta.flags &= ~CMD_WANT_SKB; | ||
793 | } | ||
794 | fail: | ||
795 | if (cmd->meta.u.skb) { | ||
796 | dev_kfree_skb_any(cmd->meta.u.skb); | ||
797 | cmd->meta.u.skb = NULL; | ||
798 | } | ||
799 | out: | ||
800 | atomic_set(&entry, 0); | ||
801 | return ret; | ||
802 | } | ||
803 | |||
804 | int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | ||
805 | { | ||
806 | /* A command can not be asynchronous AND expect an SKB to be set. */ | ||
807 | BUG_ON((cmd->meta.flags & CMD_ASYNC) && | ||
808 | (cmd->meta.flags & CMD_WANT_SKB)); | ||
809 | |||
810 | if (cmd->meta.flags & CMD_ASYNC) | ||
811 | return iwl_send_cmd_async(priv, cmd); | ||
812 | |||
813 | return iwl_send_cmd_sync(priv, cmd); | ||
814 | } | ||
815 | |||
816 | int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data) | ||
817 | { | ||
818 | struct iwl_host_cmd cmd = { | ||
819 | .id = id, | ||
820 | .len = len, | ||
821 | .data = data, | ||
822 | }; | ||
823 | |||
824 | return iwl_send_cmd_sync(priv, &cmd); | ||
825 | } | ||
826 | |||
827 | static int __must_check iwl_send_cmd_u32(struct iwl_priv *priv, u8 id, u32 val) | ||
828 | { | ||
829 | struct iwl_host_cmd cmd = { | ||
830 | .id = id, | ||
831 | .len = sizeof(val), | ||
832 | .data = &val, | ||
833 | }; | ||
834 | |||
835 | return iwl_send_cmd_sync(priv, &cmd); | ||
836 | } | ||
837 | |||
838 | int iwl_send_statistics_request(struct iwl_priv *priv) | ||
839 | { | ||
840 | return iwl_send_cmd_u32(priv, REPLY_STATISTICS_CMD, 0); | ||
841 | } | ||
842 | |||
843 | /** | ||
844 | * iwl_rxon_add_station - add station into station table. | ||
845 | * | ||
846 | * There is only one AP station, with id = IWL_AP_ID. | ||
847 | * NOTE: the mutex must be held before calling this function. | ||
848 | */ | ||
849 | static int iwl_rxon_add_station(struct iwl_priv *priv, | ||
850 | const u8 *addr, int is_ap) | ||
851 | { | ||
852 | u8 rc; | ||
853 | |||
854 | /* Remove this station if it happens to already exist */ | ||
855 | iwl_remove_station(priv, addr, is_ap); | ||
856 | |||
857 | rc = iwl_add_station(priv, addr, is_ap, 0); | ||
858 | |||
859 | iwl4965_add_station(priv, addr, is_ap); | ||
860 | |||
861 | return rc; | ||
862 | } | ||
863 | |||
864 | /** | ||
865 | * iwl_set_rxon_channel - Set the phymode and channel values in staging RXON | ||
866 | * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz | ||
867 | * @channel: Any channel valid for the requested phymode | ||
868 | *
869 | * In addition to setting the staging RXON, priv->phymode is also set. | ||
870 | * | ||
871 | * NOTE: Does not commit to the hardware; it sets appropriate bit fields | ||
872 | * in the staging RXON flag structure based on the phymode | ||
873 | */ | ||
874 | static int iwl_set_rxon_channel(struct iwl_priv *priv, u8 phymode, u16 channel) | ||
875 | { | ||
876 | if (!iwl_get_channel_info(priv, phymode, channel)) { | ||
877 | IWL_DEBUG_INFO("Could not set channel to %d [%d]\n", | ||
878 | channel, phymode); | ||
879 | return -EINVAL; | ||
880 | } | ||
881 | |||
882 | if ((le16_to_cpu(priv->staging_rxon.channel) == channel) && | ||
883 | (priv->phymode == phymode)) | ||
884 | return 0; | ||
885 | |||
886 | priv->staging_rxon.channel = cpu_to_le16(channel); | ||
887 | if (phymode == MODE_IEEE80211A) | ||
888 | priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK; | ||
889 | else | ||
890 | priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK; | ||
891 | |||
892 | priv->phymode = phymode; | ||
893 | |||
894 | IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, phymode); | ||
895 | |||
896 | return 0; | ||
897 | } | ||
898 | |||
899 | /** | ||
900 | * iwl_check_rxon_cmd - validate RXON structure is valid | ||
901 | * | ||
902 | * NOTE: This is really only useful during development and can eventually | ||
903 | * be #ifdef'd out once the driver is stable and folks aren't actively | ||
904 | * making changes | ||
905 | */ | ||
906 | static int iwl_check_rxon_cmd(struct iwl_rxon_cmd *rxon) | ||
907 | { | ||
908 | int error = 0; | ||
909 | int counter = 1; | ||
910 | |||
911 | if (rxon->flags & RXON_FLG_BAND_24G_MSK) { | ||
912 | error |= le32_to_cpu(rxon->flags & | ||
913 | (RXON_FLG_TGJ_NARROW_BAND_MSK | | ||
914 | RXON_FLG_RADAR_DETECT_MSK)); | ||
915 | if (error) | ||
916 | IWL_WARNING("check 24G fields %d | %d\n", | ||
917 | counter++, error); | ||
918 | } else { | ||
919 | error |= (rxon->flags & RXON_FLG_SHORT_SLOT_MSK) ? | ||
920 | 0 : le32_to_cpu(RXON_FLG_SHORT_SLOT_MSK); | ||
921 | if (error) | ||
922 | IWL_WARNING("check 52 fields %d | %d\n", | ||
923 | counter++, error); | ||
924 | error |= le32_to_cpu(rxon->flags & RXON_FLG_CCK_MSK); | ||
925 | if (error) | ||
926 | IWL_WARNING("check 52 CCK %d | %d\n", | ||
927 | counter++, error); | ||
928 | } | ||
929 | error |= (rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1; | ||
930 | if (error) | ||
931 | IWL_WARNING("check mac addr %d | %d\n", counter++, error); | ||
932 | |||
933 | /* make sure basic rates 6Mbps and 1Mbps are supported */ | ||
934 | error |= (((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0) && | ||
935 | ((rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0)); | ||
936 | if (error) | ||
937 | IWL_WARNING("check basic rate %d | %d\n", counter++, error); | ||
938 | |||
939 | error |= (le16_to_cpu(rxon->assoc_id) > 2007); | ||
940 | if (error) | ||
941 | IWL_WARNING("check assoc id %d | %d\n", counter++, error); | ||
942 | |||
943 | error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) | ||
944 | == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)); | ||
945 | if (error) | ||
946 | IWL_WARNING("check CCK and short slot %d | %d\n", | ||
947 | counter++, error); | ||
948 | |||
949 | error |= ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) | ||
950 | == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)); | ||
951 | if (error) | ||
952 | IWL_WARNING("check CCK & auto detect %d | %d\n", | ||
953 | counter++, error); | ||
954 | |||
955 | error |= ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK | | ||
956 | RXON_FLG_TGG_PROTECT_MSK)) == RXON_FLG_TGG_PROTECT_MSK); | ||
957 | if (error) | ||
958 | IWL_WARNING("check TGG and auto detect %d | %d\n", | ||
959 | counter++, error); | ||
960 | |||
961 | if (error) | ||
962 | IWL_WARNING("Tuning to channel %d\n", | ||
963 | le16_to_cpu(rxon->channel)); | ||
964 | |||
965 | if (error) { | ||
966 | IWL_ERROR("Not a valid iwl_rxon_assoc_cmd field values\n"); | ||
967 | return -1; | ||
968 | } | ||
969 | return 0; | ||
970 | } | ||
971 | |||
972 | /** | ||
973 | * iwl_full_rxon_required - determine if RXON_ASSOC can be used in RXON commit | ||
974 | * @priv: staging_rxon is compared to active_rxon | ||
975 | * | ||
976 | * If the RXON structure is changing sufficiently to require a new | ||
977 | * tune, or to clear and reset the RXON_FILTER_ASSOC_MSK, then return 1 | ||
978 | * to indicate that a new tune is required. | ||
979 | */ | ||
980 | static int iwl_full_rxon_required(struct iwl_priv *priv) | ||
981 | { | ||
982 | |||
983 | /* These items are only settable from the full RXON command */ | ||
984 | if (!(priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) || | ||
985 | compare_ether_addr(priv->staging_rxon.bssid_addr, | ||
986 | priv->active_rxon.bssid_addr) || | ||
987 | compare_ether_addr(priv->staging_rxon.node_addr, | ||
988 | priv->active_rxon.node_addr) || | ||
989 | compare_ether_addr(priv->staging_rxon.wlap_bssid_addr, | ||
990 | priv->active_rxon.wlap_bssid_addr) || | ||
991 | (priv->staging_rxon.dev_type != priv->active_rxon.dev_type) || | ||
992 | (priv->staging_rxon.channel != priv->active_rxon.channel) || | ||
993 | (priv->staging_rxon.air_propagation != | ||
994 | priv->active_rxon.air_propagation) || | ||
995 | (priv->staging_rxon.ofdm_ht_single_stream_basic_rates != | ||
996 | priv->active_rxon.ofdm_ht_single_stream_basic_rates) || | ||
997 | (priv->staging_rxon.ofdm_ht_dual_stream_basic_rates != | ||
998 | priv->active_rxon.ofdm_ht_dual_stream_basic_rates) || | ||
999 | (priv->staging_rxon.rx_chain != priv->active_rxon.rx_chain) || | ||
1000 | (priv->staging_rxon.assoc_id != priv->active_rxon.assoc_id)) | ||
1001 | return 1; | ||
1002 | |||
1003 | /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can | ||
1004 | * be updated with the RXON_ASSOC command -- however only some | ||
1005 | * flag transitions are allowed using RXON_ASSOC */ | ||
1006 | |||
1007 | /* Check if we are not switching bands */ | ||
1008 | if ((priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) != | ||
1009 | (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)) | ||
1010 | return 1; | ||
1011 | |||
1012 | /* Check if we are switching association toggle */ | ||
1013 | if ((priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) != | ||
1014 | (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) | ||
1015 | return 1; | ||
1016 | |||
1017 | return 0; | ||
1018 | } | ||
1019 | |||
1020 | static int iwl_send_rxon_assoc(struct iwl_priv *priv) | ||
1021 | { | ||
1022 | int rc = 0; | ||
1023 | struct iwl_rx_packet *res = NULL; | ||
1024 | struct iwl_rxon_assoc_cmd rxon_assoc; | ||
1025 | struct iwl_host_cmd cmd = { | ||
1026 | .id = REPLY_RXON_ASSOC, | ||
1027 | .len = sizeof(rxon_assoc), | ||
1028 | .meta.flags = CMD_WANT_SKB, | ||
1029 | .data = &rxon_assoc, | ||
1030 | }; | ||
1031 | const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon; | ||
1032 | const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon; | ||
1033 | |||
1034 | if ((rxon1->flags == rxon2->flags) && | ||
1035 | (rxon1->filter_flags == rxon2->filter_flags) && | ||
1036 | (rxon1->cck_basic_rates == rxon2->cck_basic_rates) && | ||
1037 | (rxon1->ofdm_ht_single_stream_basic_rates == | ||
1038 | rxon2->ofdm_ht_single_stream_basic_rates) && | ||
1039 | (rxon1->ofdm_ht_dual_stream_basic_rates == | ||
1040 | rxon2->ofdm_ht_dual_stream_basic_rates) && | ||
1041 | (rxon1->rx_chain == rxon2->rx_chain) && | ||
1042 | (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) { | ||
1043 | IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n"); | ||
1044 | return 0; | ||
1045 | } | ||
1046 | |||
1047 | rxon_assoc.flags = priv->staging_rxon.flags; | ||
1048 | rxon_assoc.filter_flags = priv->staging_rxon.filter_flags; | ||
1049 | rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates; | ||
1050 | rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates; | ||
1051 | rxon_assoc.reserved = 0; | ||
1052 | rxon_assoc.ofdm_ht_single_stream_basic_rates = | ||
1053 | priv->staging_rxon.ofdm_ht_single_stream_basic_rates; | ||
1054 | rxon_assoc.ofdm_ht_dual_stream_basic_rates = | ||
1055 | priv->staging_rxon.ofdm_ht_dual_stream_basic_rates; | ||
1056 | rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain; | ||
1057 | |||
1058 | rc = iwl_send_cmd_sync(priv, &cmd); | ||
1059 | if (rc) | ||
1060 | return rc; | ||
1061 | |||
1062 | res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; | ||
1063 | if (res->hdr.flags & IWL_CMD_FAILED_MSK) { | ||
1064 | IWL_ERROR("Bad return from REPLY_RXON_ASSOC command\n"); | ||
1065 | rc = -EIO; | ||
1066 | } | ||
1067 | |||
1068 | priv->alloc_rxb_skb--; | ||
1069 | dev_kfree_skb_any(cmd.meta.u.skb); | ||
1070 | |||
1071 | return rc; | ||
1072 | } | ||
1073 | |||
1074 | /** | ||
1075 | * iwl_commit_rxon - commit staging_rxon to hardware | ||
1076 | * | ||
1077 | * The RXON command in staging_rxon is committed to the hardware and | ||
1078 | * the active_rxon structure is updated with the new data. This | ||
1079 | * function correctly transitions out of the RXON_ASSOC_MSK state if | ||
1080 | * a HW tune is required based on the RXON structure changes. | ||
1081 | */ | ||
1082 | static int iwl_commit_rxon(struct iwl_priv *priv) | ||
1083 | { | ||
1084 | /* cast away the const for active_rxon in this function */ | ||
1085 | struct iwl_rxon_cmd *active_rxon = (void *)&priv->active_rxon; | ||
1086 | int rc = 0; | ||
1087 | |||
1088 | if (!iwl_is_alive(priv)) | ||
1089 | return -1; | ||
1090 | |||
1091 | /* always get timestamp with Rx frame */ | ||
1092 | priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK; | ||
1093 | |||
1094 | rc = iwl_check_rxon_cmd(&priv->staging_rxon); | ||
1095 | if (rc) { | ||
1096 | IWL_ERROR("Invalid RXON configuration. Not committing.\n"); | ||
1097 | return -EINVAL; | ||
1098 | } | ||
1099 | |||
1100 | /* If we don't need to send a full RXON, we can use | ||
1101 | * iwl_rxon_assoc_cmd which is used to reconfigure filter | ||
1102 | * and other flags for the current radio configuration. */ | ||
1103 | if (!iwl_full_rxon_required(priv)) { | ||
1104 | rc = iwl_send_rxon_assoc(priv); | ||
1105 | if (rc) { | ||
1106 | IWL_ERROR("Error setting RXON_ASSOC " | ||
1107 | "configuration (%d).\n", rc); | ||
1108 | return rc; | ||
1109 | } | ||
1110 | |||
1111 | memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); | ||
1112 | |||
1113 | return 0; | ||
1114 | } | ||
1115 | |||
1116 | /* station table will be cleared */ | ||
1117 | priv->assoc_station_added = 0; | ||
1118 | |||
1119 | #ifdef CONFIG_IWLWIFI_SENSITIVITY | ||
1120 | priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT; | ||
1121 | if (!priv->error_recovering) | ||
1122 | priv->start_calib = 0; | ||
1123 | |||
1124 | iwl4965_init_sensitivity(priv, CMD_ASYNC, 1); | ||
1125 | #endif /* CONFIG_IWLWIFI_SENSITIVITY */ | ||
1126 | |||
1127 | /* If we are currently associated and the new config requires | ||
1128 | * an RXON_ASSOC and the new config wants the associated mask enabled, | ||
1129 | * we must clear the associated bit from the active configuration | ||
1130 | * before we apply the new config */ | ||
1131 | if (iwl_is_associated(priv) && | ||
1132 | (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) { | ||
1133 | IWL_DEBUG_INFO("Toggling associated bit on current RXON\n"); | ||
1134 | active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
1135 | |||
1136 | rc = iwl_send_cmd_pdu(priv, REPLY_RXON, | ||
1137 | sizeof(struct iwl_rxon_cmd), | ||
1138 | &priv->active_rxon); | ||
1139 | |||
1140 | /* If the mask clearing failed then we set | ||
1141 | * active_rxon back to what it was previously */ | ||
1142 | if (rc) { | ||
1143 | active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK; | ||
1144 | IWL_ERROR("Error clearing ASSOC_MSK on current " | ||
1145 | "configuration (%d).\n", rc); | ||
1146 | return rc; | ||
1147 | } | ||
1148 | |||
1149 | /* The RXON bit toggling will have cleared out the | ||
1150 | * station table in the uCode, so blank it in the driver | ||
1151 | * as well */ | ||
1152 | iwl_clear_stations_table(priv); | ||
1153 | } else if (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) { | ||
1154 | /* When switching from non-associated to associated, the | ||
1155 | * uCode clears out the station table; so clear it in the | ||
1156 | * driver as well */ | ||
1157 | iwl_clear_stations_table(priv); | ||
1158 | } | ||
1159 | |||
1160 | IWL_DEBUG_INFO("Sending RXON\n" | ||
1161 | "* with%s RXON_FILTER_ASSOC_MSK\n" | ||
1162 | "* channel = %d\n" | ||
1163 | "* bssid = " MAC_FMT "\n", | ||
1164 | ((priv->staging_rxon.filter_flags & | ||
1165 | RXON_FILTER_ASSOC_MSK) ? "" : "out"), | ||
1166 | le16_to_cpu(priv->staging_rxon.channel), | ||
1167 | MAC_ARG(priv->staging_rxon.bssid_addr)); | ||
1168 | |||
1169 | /* Apply the new configuration */ | ||
1170 | rc = iwl_send_cmd_pdu(priv, REPLY_RXON, | ||
1171 | sizeof(struct iwl_rxon_cmd), &priv->staging_rxon); | ||
1172 | if (rc) { | ||
1173 | IWL_ERROR("Error setting new configuration (%d).\n", rc); | ||
1174 | return rc; | ||
1175 | } | ||
1176 | |||
1177 | #ifdef CONFIG_IWLWIFI_SENSITIVITY | ||
1178 | if (!priv->error_recovering) | ||
1179 | priv->start_calib = 0; | ||
1180 | |||
1181 | priv->sensitivity_data.state = IWL_SENS_CALIB_NEED_REINIT; | ||
1182 | iwl4965_init_sensitivity(priv, CMD_ASYNC, 1); | ||
1183 | #endif /* CONFIG_IWLWIFI_SENSITIVITY */ | ||
1184 | |||
1185 | memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); | ||
1186 | |||
1187 | /* If we issue a new RXON command which requires a tune, then we must | ||
1188 | * send a new TXPOWER command or we won't be able to Tx any frames */ | ||
1189 | rc = iwl_hw_reg_send_txpower(priv); | ||
1190 | if (rc) { | ||
1191 | IWL_ERROR("Error setting Tx power (%d).\n", rc); | ||
1192 | return rc; | ||
1193 | } | ||
1194 | |||
1195 | /* Add the broadcast address so we can send broadcast frames */ | ||
1196 | if (iwl_rxon_add_station(priv, BROADCAST_ADDR, 0) == | ||
1197 | IWL_INVALID_STATION) { | ||
1198 | IWL_ERROR("Error adding BROADCAST address for transmit.\n"); | ||
1199 | return -EIO; | ||
1200 | } | ||
1201 | |||
1202 | /* If we have set the ASSOC_MSK and we are in BSS mode then | ||
1203 | * add the IWL_AP_ID to the station rate table */ | ||
1204 | if (iwl_is_associated(priv) && | ||
1205 | (priv->iw_mode == IEEE80211_IF_TYPE_STA)) { | ||
1206 | if (iwl_rxon_add_station(priv, priv->active_rxon.bssid_addr, 1) | ||
1207 | == IWL_INVALID_STATION) { | ||
1208 | IWL_ERROR("Error adding AP address for transmit.\n"); | ||
1209 | return -EIO; | ||
1210 | } | ||
1211 | priv->assoc_station_added = 1; | ||
1212 | } | ||
1213 | |||
1214 | return 0; | ||
1215 | } | ||
1216 | |||
1217 | static int iwl_send_bt_config(struct iwl_priv *priv) | ||
1218 | { | ||
1219 | struct iwl_bt_cmd bt_cmd = { | ||
1220 | .flags = 3, | ||
1221 | .lead_time = 0xAA, | ||
1222 | .max_kill = 1, | ||
1223 | .kill_ack_mask = 0, | ||
1224 | .kill_cts_mask = 0, | ||
1225 | }; | ||
1226 | |||
1227 | return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, | ||
1228 | sizeof(struct iwl_bt_cmd), &bt_cmd); | ||
1229 | } | ||
1230 | |||
1231 | static int iwl_send_scan_abort(struct iwl_priv *priv) | ||
1232 | { | ||
1233 | int rc = 0; | ||
1234 | struct iwl_rx_packet *res; | ||
1235 | struct iwl_host_cmd cmd = { | ||
1236 | .id = REPLY_SCAN_ABORT_CMD, | ||
1237 | .meta.flags = CMD_WANT_SKB, | ||
1238 | }; | ||
1239 | |||
1240 | /* If there isn't a scan actively going on in the hardware | ||
1241 | * then we are in between scan bands and not actually | ||
1242 | * actively scanning, so don't send the abort command */ | ||
1243 | if (!test_bit(STATUS_SCAN_HW, &priv->status)) { | ||
1244 | clear_bit(STATUS_SCAN_ABORTING, &priv->status); | ||
1245 | return 0; | ||
1246 | } | ||
1247 | |||
1248 | rc = iwl_send_cmd_sync(priv, &cmd); | ||
1249 | if (rc) { | ||
1250 | clear_bit(STATUS_SCAN_ABORTING, &priv->status); | ||
1251 | return rc; | ||
1252 | } | ||
1253 | |||
1254 | res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; | ||
1255 | if (res->u.status != CAN_ABORT_STATUS) { | ||
1256 | /* The scan abort will return 1 for success or | ||
1257 | * 2 for "failure". A failure condition can be | ||
1258 | * due to simply not being in an active scan which | ||
1259 | * can occur if we send the scan abort before the | ||
1260 | * microcode has notified us that a scan is | ||
1261 | * completed. */ | ||
1262 | IWL_DEBUG_INFO("SCAN_ABORT returned %d.\n", res->u.status); | ||
1263 | clear_bit(STATUS_SCAN_ABORTING, &priv->status); | ||
1264 | clear_bit(STATUS_SCAN_HW, &priv->status); | ||
1265 | } | ||
1266 | |||
1267 | dev_kfree_skb_any(cmd.meta.u.skb); | ||
1268 | |||
1269 | return rc; | ||
1270 | } | ||
1271 | |||
1272 | static int iwl_card_state_sync_callback(struct iwl_priv *priv, | ||
1273 | struct iwl_cmd *cmd, | ||
1274 | struct sk_buff *skb) | ||
1275 | { | ||
1276 | return 1; | ||
1277 | } | ||
1278 | |||
1279 | /* | ||
1280 | * CARD_STATE_CMD | ||
1281 | * | ||
1282 | * Use: Sets the internal card state to enable, disable, or halt | ||
1283 | * | ||
1284 | * When in the 'enable' state the card operates as normal. | ||
1285 | * When in the 'disable' state, the card enters into a low power mode. | ||
1286 | * When in the 'halt' state, the card is shut down and must be fully | ||
1287 | * restarted to come back on. | ||
1288 | */ | ||
1289 | static int iwl_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag) | ||
1290 | { | ||
1291 | struct iwl_host_cmd cmd = { | ||
1292 | .id = REPLY_CARD_STATE_CMD, | ||
1293 | .len = sizeof(u32), | ||
1294 | .data = &flags, | ||
1295 | .meta.flags = meta_flag, | ||
1296 | }; | ||
1297 | |||
1298 | if (meta_flag & CMD_ASYNC) | ||
1299 | cmd.meta.u.callback = iwl_card_state_sync_callback; | ||
1300 | |||
1301 | return iwl_send_cmd(priv, &cmd); | ||
1302 | } | ||
1303 | |||
1304 | static int iwl_add_sta_sync_callback(struct iwl_priv *priv, | ||
1305 | struct iwl_cmd *cmd, struct sk_buff *skb) | ||
1306 | { | ||
1307 | struct iwl_rx_packet *res = NULL; | ||
1308 | |||
1309 | if (!skb) { | ||
1310 | IWL_ERROR("Error: Response NULL in REPLY_ADD_STA.\n"); | ||
1311 | return 1; | ||
1312 | } | ||
1313 | |||
1314 | res = (struct iwl_rx_packet *)skb->data; | ||
1315 | if (res->hdr.flags & IWL_CMD_FAILED_MSK) { | ||
1316 | IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n", | ||
1317 | res->hdr.flags); | ||
1318 | return 1; | ||
1319 | } | ||
1320 | |||
1321 | switch (res->u.add_sta.status) { | ||
1322 | case ADD_STA_SUCCESS_MSK: | ||
1323 | break; | ||
1324 | default: | ||
1325 | break; | ||
1326 | } | ||
1327 | |||
1328 | /* We didn't cache the SKB; let the caller free it */ | ||
1329 | return 1; | ||
1330 | } | ||
1331 | |||
1332 | int iwl_send_add_station(struct iwl_priv *priv, | ||
1333 | struct iwl_addsta_cmd *sta, u8 flags) | ||
1334 | { | ||
1335 | struct iwl_rx_packet *res = NULL; | ||
1336 | int rc = 0; | ||
1337 | struct iwl_host_cmd cmd = { | ||
1338 | .id = REPLY_ADD_STA, | ||
1339 | .len = sizeof(struct iwl_addsta_cmd), | ||
1340 | .meta.flags = flags, | ||
1341 | .data = sta, | ||
1342 | }; | ||
1343 | |||
1344 | if (flags & CMD_ASYNC) | ||
1345 | cmd.meta.u.callback = iwl_add_sta_sync_callback; | ||
1346 | else | ||
1347 | cmd.meta.flags |= CMD_WANT_SKB; | ||
1348 | |||
1349 | rc = iwl_send_cmd(priv, &cmd); | ||
1350 | |||
1351 | if (rc || (flags & CMD_ASYNC)) | ||
1352 | return rc; | ||
1353 | |||
1354 | res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; | ||
1355 | if (res->hdr.flags & IWL_CMD_FAILED_MSK) { | ||
1356 | IWL_ERROR("Bad return from REPLY_ADD_STA (0x%08X)\n", | ||
1357 | res->hdr.flags); | ||
1358 | rc = -EIO; | ||
1359 | } | ||
1360 | |||
1361 | if (rc == 0) { | ||
1362 | switch (res->u.add_sta.status) { | ||
1363 | case ADD_STA_SUCCESS_MSK: | ||
1364 | IWL_DEBUG_INFO("REPLY_ADD_STA PASSED\n"); | ||
1365 | break; | ||
1366 | default: | ||
1367 | rc = -EIO; | ||
1368 | IWL_WARNING("REPLY_ADD_STA failed\n"); | ||
1369 | break; | ||
1370 | } | ||
1371 | } | ||
1372 | |||
1373 | priv->alloc_rxb_skb--; | ||
1374 | dev_kfree_skb_any(cmd.meta.u.skb); | ||
1375 | |||
1376 | return rc; | ||
1377 | } | ||
1378 | |||
1379 | static int iwl_update_sta_key_info(struct iwl_priv *priv, | ||
1380 | struct ieee80211_key_conf *keyconf, | ||
1381 | u8 sta_id) | ||
1382 | { | ||
1383 | unsigned long flags; | ||
1384 | __le16 key_flags = 0; | ||
1385 | |||
1386 | switch (keyconf->alg) { | ||
1387 | case ALG_CCMP: | ||
1388 | key_flags |= STA_KEY_FLG_CCMP; | ||
1389 | key_flags |= cpu_to_le16( | ||
1390 | keyconf->keyidx << STA_KEY_FLG_KEYID_POS); | ||
1391 | key_flags &= ~STA_KEY_FLG_INVALID; | ||
1392 | break; | ||
1393 | case ALG_TKIP: | ||
1394 | case ALG_WEP: | ||
1395 | return -EINVAL; | ||
1396 | default: | ||
1397 | return -EINVAL; | ||
1398 | } | ||
1399 | spin_lock_irqsave(&priv->sta_lock, flags); | ||
1400 | priv->stations[sta_id].keyinfo.alg = keyconf->alg; | ||
1401 | priv->stations[sta_id].keyinfo.keylen = keyconf->keylen; | ||
1402 | memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, | ||
1403 | keyconf->keylen); | ||
1404 | |||
1405 | memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, | ||
1406 | keyconf->keylen); | ||
1407 | priv->stations[sta_id].sta.key.key_flags = key_flags; | ||
1408 | priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; | ||
1409 | priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; | ||
1410 | |||
1411 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
1412 | |||
1413 | IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n"); | ||
1414 | iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0); | ||
1415 | return 0; | ||
1416 | } | ||
1417 | |||
1418 | static int iwl_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id) | ||
1419 | { | ||
1420 | unsigned long flags; | ||
1421 | |||
1422 | spin_lock_irqsave(&priv->sta_lock, flags); | ||
1423 | memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key)); | ||
1424 | memset(&priv->stations[sta_id].sta.key, 0, sizeof(struct iwl_keyinfo)); | ||
1425 | priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC; | ||
1426 | priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK; | ||
1427 | priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; | ||
1428 | spin_unlock_irqrestore(&priv->sta_lock, flags); | ||
1429 | |||
1430 | IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n"); | ||
1431 | iwl_send_add_station(priv, &priv->stations[sta_id].sta, 0); | ||
1432 | return 0; | ||
1433 | } | ||
1434 | |||
1435 | static void iwl_clear_free_frames(struct iwl_priv *priv) | ||
1436 | { | ||
1437 | struct list_head *element; | ||
1438 | |||
1439 | IWL_DEBUG_INFO("%d frames on pre-allocated heap on clear.\n", | ||
1440 | priv->frames_count); | ||
1441 | |||
1442 | while (!list_empty(&priv->free_frames)) { | ||
1443 | element = priv->free_frames.next; | ||
1444 | list_del(element); | ||
1445 | kfree(list_entry(element, struct iwl_frame, list)); | ||
1446 | priv->frames_count--; | ||
1447 | } | ||
1448 | |||
1449 | if (priv->frames_count) { | ||
1450 | IWL_WARNING("%d frames still in use. Did we lose one?\n", | ||
1451 | priv->frames_count); | ||
1452 | priv->frames_count = 0; | ||
1453 | } | ||
1454 | } | ||
1455 | |||
1456 | static struct iwl_frame *iwl_get_free_frame(struct iwl_priv *priv) | ||
1457 | { | ||
1458 | struct iwl_frame *frame; | ||
1459 | struct list_head *element; | ||
1460 | if (list_empty(&priv->free_frames)) { | ||
1461 | frame = kzalloc(sizeof(*frame), GFP_KERNEL); | ||
1462 | if (!frame) { | ||
1463 | IWL_ERROR("Could not allocate frame!\n"); | ||
1464 | return NULL; | ||
1465 | } | ||
1466 | |||
1467 | priv->frames_count++; | ||
1468 | return frame; | ||
1469 | } | ||
1470 | |||
1471 | element = priv->free_frames.next; | ||
1472 | list_del(element); | ||
1473 | return list_entry(element, struct iwl_frame, list); | ||
1474 | } | ||
1475 | |||
1476 | static void iwl_free_frame(struct iwl_priv *priv, struct iwl_frame *frame) | ||
1477 | { | ||
1478 | memset(frame, 0, sizeof(*frame)); | ||
1479 | list_add(&frame->list, &priv->free_frames); | ||
1480 | } | ||
1481 | |||
1482 | unsigned int iwl_fill_beacon_frame(struct iwl_priv *priv, | ||
1483 | struct ieee80211_hdr *hdr, | ||
1484 | const u8 *dest, int left) | ||
1485 | { | ||
1486 | |||
1487 | if (!iwl_is_associated(priv) || !priv->ibss_beacon || | ||
1488 | ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) && | ||
1489 | (priv->iw_mode != IEEE80211_IF_TYPE_AP))) | ||
1490 | return 0; | ||
1491 | |||
1492 | if (priv->ibss_beacon->len > left) | ||
1493 | return 0; | ||
1494 | |||
1495 | memcpy(hdr, priv->ibss_beacon->data, priv->ibss_beacon->len); | ||
1496 | |||
1497 | return priv->ibss_beacon->len; | ||
1498 | } | ||
1499 | |||
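| /** | ||
| * iwl_rate_index_from_plcp - map a PLCP rate value to an iwl_rates[] index | ||
| * | ||
| * HT (MCS) values are folded onto the OFDM part of the table (MIMO rates | ||
| * are offset back first, and the unsupported 9M slot is skipped); legacy | ||
| * values are looked up directly in iwl_rates.  Returns -1 if no match. | ||
| */ | ||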
1500 | int iwl_rate_index_from_plcp(int plcp) | ||
1501 | { | ||
1502 | int i = 0; | ||
1503 | |||
1504 | if (plcp & RATE_MCS_HT_MSK) { | ||
1505 | i = (plcp & 0xff); | ||
1506 | |||
1507 | if (i >= IWL_RATE_MIMO_6M_PLCP) | ||
1508 | i = i - IWL_RATE_MIMO_6M_PLCP; | ||
1509 | |||
1510 | i += IWL_FIRST_OFDM_RATE; | ||
1511 | /* skip 9M, which is not supported in HT */ | ||
1512 | if (i >= IWL_RATE_9M_INDEX) | ||
1513 | i += 1; | ||
1514 | if ((i >= IWL_FIRST_OFDM_RATE) && | ||
1515 | (i <= IWL_LAST_OFDM_RATE)) | ||
1516 | return i; | ||
1517 | } else { | ||
1518 | for (i = 0; i < ARRAY_SIZE(iwl_rates); i++) | ||
1519 | if (iwl_rates[i].plcp == (plcp & 0xFF)) | ||
1520 | return i; | ||
1521 | } | ||
1522 | return -1; | ||
1523 | } | ||
1524 | |||
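| /** | ||
| * iwl_rate_get_lowest_plcp - follow the next_ieee chain starting at 1M and | ||
| * return the PLCP value of the first (lowest) rate set in rate_mask, or | ||
| * IWL_RATE_INVALID if the mask selects none of them | ||
| */ | ||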
1525 | static u8 iwl_rate_get_lowest_plcp(int rate_mask) | ||
1526 | { | ||
1527 | u8 i; | ||
1528 | |||
1529 | for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID; | ||
1530 | i = iwl_rates[i].next_ieee) { | ||
1531 | if (rate_mask & (1 << i)) | ||
1532 | return iwl_rates[i].plcp; | ||
1533 | } | ||
1534 | |||
1535 | return IWL_RATE_INVALID; | ||
1536 | } | ||
1537 | |||
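| /** | ||
| * iwl_send_beacon_cmd - build a beacon frame at the lowest basic rate for | ||
| * the current band (OFDM-only on 5GHz, CCK allowed on 2.4GHz) and send it | ||
| * to the uCode as a REPLY_TX_BEACON command | ||
| */ | ||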
1538 | static int iwl_send_beacon_cmd(struct iwl_priv *priv) | ||
1539 | { | ||
1540 | struct iwl_frame *frame; | ||
1541 | unsigned int frame_size; | ||
1542 | int rc; | ||
1543 | u8 rate; | ||
1544 | |||
1545 | frame = iwl_get_free_frame(priv); | ||
1546 | |||
1547 | if (!frame) { | ||
1548 | IWL_ERROR("Could not obtain free frame buffer for beacon " | ||
1549 | "command.\n"); | ||
1550 | return -ENOMEM; | ||
1551 | } | ||
1552 | |||
1553 | if (!(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)) { | ||
1554 | rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic & | ||
1555 | 0xFF0); | ||
1556 | if (rate == IWL_INVALID_RATE) | ||
1557 | rate = IWL_RATE_6M_PLCP; | ||
1558 | } else { | ||
1559 | rate = iwl_rate_get_lowest_plcp(priv->active_rate_basic & 0xF); | ||
1560 | if (rate == IWL_INVALID_RATE) | ||
1561 | rate = IWL_RATE_1M_PLCP; | ||
1562 | } | ||
1563 | |||
1564 | frame_size = iwl_hw_get_beacon_cmd(priv, frame, rate); | ||
1565 | |||
1566 | rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, | ||
1567 | &frame->u.cmd[0]); | ||
1568 | |||
1569 | iwl_free_frame(priv, frame); | ||
1570 | |||
1571 | return rc; | ||
1572 | } | ||
1573 | |||
1574 | /****************************************************************************** | ||
1575 | * | ||
1576 | * EEPROM related functions | ||
1577 | * | ||
1578 | ******************************************************************************/ | ||
1579 | |||
1580 | static void get_eeprom_mac(struct iwl_priv *priv, u8 *mac) | ||
1581 | { | ||
1582 | memcpy(mac, priv->eeprom.mac_address, 6); | ||
1583 | } | ||
1584 | |||
1585 | /** | ||
1586 | * iwl_eeprom_init - read EEPROM contents | ||
1587 | * | ||
1588 | * Load the EEPROM from adapter into priv->eeprom | ||
1589 | * | ||
1590 | * NOTE: This routine uses the non-debug IO access functions. | ||
1591 | */ | ||
1592 | int iwl_eeprom_init(struct iwl_priv *priv) | ||
1593 | { | ||
1594 | u16 *e = (u16 *)&priv->eeprom; | ||
1595 | u32 gp = iwl_read32(priv, CSR_EEPROM_GP); | ||
1596 | u32 r; | ||
1597 | int sz = sizeof(priv->eeprom); | ||
1598 | int rc; | ||
1599 | int i; | ||
1600 | u16 addr; | ||
1601 | |||
1602 | /* The EEPROM structure has several padding buffers within it, | ||
1603 | * and adding new EEPROM maps is subject to programmer errors | ||
1604 | * that may be very difficult to identify without explicitly | ||
1605 | * checking the resulting size of the eeprom map. */ | ||
1606 | BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE); | ||
1607 | |||
1608 | if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) { | ||
1609 | IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp); | ||
1610 | return -ENOENT; | ||
1611 | } | ||
1612 | |||
1613 | rc = iwl_eeprom_aqcuire_semaphore(priv); | ||
1614 | if (rc < 0) { | ||
1615 | IWL_ERROR("Failed to acquire EEPROM semaphore.\n"); | ||
1616 | return -ENOENT; | ||
1617 | } | ||
1618 | |||
1619 | /* eeprom is an array of 16-bit values */ | ||
1620 | for (addr = 0; addr < sz; addr += sizeof(u16)) { | ||
1621 | _iwl_write32(priv, CSR_EEPROM_REG, addr << 1); | ||
1622 | _iwl_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD); | ||
1623 | |||
1624 | for (i = 0; i < IWL_EEPROM_ACCESS_TIMEOUT; | ||
1625 | i += IWL_EEPROM_ACCESS_DELAY) { | ||
1626 | r = _iwl_read_restricted(priv, CSR_EEPROM_REG); | ||
1627 | if (r & CSR_EEPROM_REG_READ_VALID_MSK) | ||
1628 | break; | ||
1629 | udelay(IWL_EEPROM_ACCESS_DELAY); | ||
1630 | } | ||
1631 | |||
1632 | if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) { | ||
1633 | IWL_ERROR("Time out reading EEPROM[%d]", addr); | ||
1634 | rc = -ETIMEDOUT; | ||
1635 | goto done; | ||
1636 | } | ||
1637 | e[addr / 2] = le16_to_cpu(r >> 16); | ||
1638 | } | ||
1639 | rc = 0; | ||
1640 | |||
1641 | done: | ||
1642 | iwl_eeprom_release_semaphore(priv); | ||
1643 | return rc; | ||
1644 | } | ||
1645 | |||
1646 | /****************************************************************************** | ||
1647 | * | ||
1648 | * Misc. internal state and helper functions | ||
1649 | * | ||
1650 | ******************************************************************************/ | ||
1651 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
1652 | |||
1653 | /** | ||
1654 | * iwl_report_frame - dump frame to syslog during debug sessions | ||
1655 | * | ||
1656 | * hack this function to show different aspects of received frames, | ||
1657 | * including selective frame dumps. | ||
1658 | * group100 parameter selects whether to show 1 out of 100 good frames. | ||
1659 | * | ||
1660 | * TODO: ieee80211_hdr stuff is common to 3945 and 4965, so frame type | ||
1661 | * info output is okay, but some of this stuff (e.g. iwl_rx_frame_stats) | ||
1662 | * is 3945-specific and gives bad output for 4965. Need to split the | ||
1663 | * functionality, keep common stuff here. | ||
1664 | */ | ||
1665 | void iwl_report_frame(struct iwl_priv *priv, | ||
1666 | struct iwl_rx_packet *pkt, | ||
1667 | struct ieee80211_hdr *header, int group100) | ||
1668 | { | ||
1669 | u32 to_us; | ||
1670 | u32 print_summary = 0; | ||
1671 | u32 print_dump = 0; /* set to 1 to dump all frames' contents */ | ||
1672 | u32 hundred = 0; | ||
1673 | u32 dataframe = 0; | ||
1674 | u16 fc; | ||
1675 | u16 seq_ctl; | ||
1676 | u16 channel; | ||
1677 | u16 phy_flags; | ||
1678 | int rate_sym; | ||
1679 | u16 length; | ||
1680 | u16 status; | ||
1681 | u16 bcn_tmr; | ||
1682 | u32 tsf_low; | ||
1683 | u64 tsf; | ||
1684 | u8 rssi; | ||
1685 | u8 agc; | ||
1686 | u16 sig_avg; | ||
1687 | u16 noise_diff; | ||
1688 | struct iwl_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); | ||
1689 | struct iwl_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); | ||
1690 | struct iwl_rx_frame_end *rx_end = IWL_RX_END(pkt); | ||
1691 | u8 *data = IWL_RX_DATA(pkt); | ||
1692 | |||
1693 | /* MAC header */ | ||
1694 | fc = le16_to_cpu(header->frame_control); | ||
1695 | seq_ctl = le16_to_cpu(header->seq_ctrl); | ||
1696 | |||
1697 | /* metadata */ | ||
1698 | channel = le16_to_cpu(rx_hdr->channel); | ||
1699 | phy_flags = le16_to_cpu(rx_hdr->phy_flags); | ||
1700 | rate_sym = rx_hdr->rate; | ||
1701 | length = le16_to_cpu(rx_hdr->len); | ||
1702 | |||
1703 | /* end-of-frame status and timestamp */ | ||
1704 | status = le32_to_cpu(rx_end->status); | ||
1705 | bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp); | ||
1706 | tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff; | ||
1707 | tsf = le64_to_cpu(rx_end->timestamp); | ||
1708 | |||
1709 | /* signal statistics */ | ||
1710 | rssi = rx_stats->rssi; | ||
1711 | agc = rx_stats->agc; | ||
1712 | sig_avg = le16_to_cpu(rx_stats->sig_avg); | ||
1713 | noise_diff = le16_to_cpu(rx_stats->noise_diff); | ||
1714 | |||
1715 | to_us = !compare_ether_addr(header->addr1, priv->mac_addr); | ||
1716 | |||
1717 | /* if data frame is to us and all is good, | ||
1718 | * (optionally) print summary for only 1 out of every 100 */ | ||
1719 | if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) == | ||
1720 | (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) { | ||
1721 | dataframe = 1; | ||
1722 | if (!group100) | ||
1723 | print_summary = 1; /* print each frame */ | ||
1724 | else if (priv->framecnt_to_us < 100) { | ||
1725 | priv->framecnt_to_us++; | ||
1726 | print_summary = 0; | ||
1727 | } else { | ||
1728 | priv->framecnt_to_us = 0; | ||
1729 | print_summary = 1; | ||
1730 | hundred = 1; | ||
1731 | } | ||
1732 | } else { | ||
1733 | /* print summary for all other frames */ | ||
1734 | print_summary = 1; | ||
1735 | } | ||
1736 | |||
1737 | if (print_summary) { | ||
1738 | char *title; | ||
1739 | u32 rate; | ||
1740 | |||
1741 | if (hundred) | ||
1742 | title = "100Frames"; | ||
1743 | else if (fc & IEEE80211_FCTL_RETRY) | ||
1744 | title = "Retry"; | ||
1745 | else if (ieee80211_is_assoc_response(fc)) | ||
1746 | title = "AscRsp"; | ||
1747 | else if (ieee80211_is_reassoc_response(fc)) | ||
1748 | title = "RasRsp"; | ||
1749 | else if (ieee80211_is_probe_response(fc)) { | ||
1750 | title = "PrbRsp"; | ||
1751 | print_dump = 1; /* dump frame contents */ | ||
1752 | } else if (ieee80211_is_beacon(fc)) { | ||
1753 | title = "Beacon"; | ||
1754 | print_dump = 1; /* dump frame contents */ | ||
1755 | } else if (ieee80211_is_atim(fc)) | ||
1756 | title = "ATIM"; | ||
1757 | else if (ieee80211_is_auth(fc)) | ||
1758 | title = "Auth"; | ||
1759 | else if (ieee80211_is_deauth(fc)) | ||
1760 | title = "DeAuth"; | ||
1761 | else if (ieee80211_is_disassoc(fc)) | ||
1762 | title = "DisAssoc"; | ||
1763 | else | ||
1764 | title = "Frame"; | ||
1765 | |||
1766 | rate = iwl_rate_index_from_plcp(rate_sym); | ||
1767 | if (rate == -1) | ||
1768 | rate = 0; | ||
1769 | else | ||
1770 | rate = iwl_rates[rate].ieee / 2; | ||
1771 | |||
1772 | /* print frame summary. | ||
1773 | * MAC addresses show just the last byte (for brevity), | ||
1774 | * but you can hack it to show more, if you'd like to. */ | ||
1775 | if (dataframe) | ||
1776 | IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, " | ||
1777 | "len=%u, rssi=%d, chnl=%d, rate=%u\n", | ||
1778 | title, fc, header->addr1[5], | ||
1779 | length, rssi, channel, rate); | ||
1780 | else { | ||
1781 | /* src/dst addresses assume managed mode */ | ||
1782 | IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, " | ||
1783 | "src=0x%02x, rssi=%u, tim=%lu usec, " | ||
1784 | "phy=0x%02x, chnl=%d\n", | ||
1785 | title, fc, header->addr1[5], | ||
1786 | header->addr3[5], rssi, | ||
1787 | tsf_low - priv->scan_start_tsf, | ||
1788 | phy_flags, channel); | ||
1789 | } | ||
1790 | } | ||
1791 | if (print_dump) | ||
1792 | iwl_print_hex_dump(IWL_DL_RX, data, length); | ||
1793 | } | ||
1794 | #endif | ||
1795 | |||
1796 | static void iwl_unset_hw_setting(struct iwl_priv *priv) | ||
1797 | { | ||
1798 | if (priv->hw_setting.shared_virt) | ||
1799 | pci_free_consistent(priv->pci_dev, | ||
1800 | sizeof(struct iwl_shared), | ||
1801 | priv->hw_setting.shared_virt, | ||
1802 | priv->hw_setting.shared_phys); | ||
1803 | } | ||
1804 | |||
1805 | /** | ||
1806 | * iwl_supported_rate_to_ie - fill in the supported rate in IE field | ||
1807 | * | ||
1808 | * Return: bitmask of the supported rates that were inserted in the IE | ||
1809 | */ | ||
1810 | static u16 iwl_supported_rate_to_ie(u8 *ie, u16 supported_rate, | ||
1811 | u16 basic_rate, int max_count) | ||
1812 | { | ||
1813 | u16 ret_rates = 0, bit; | ||
1814 | int i; | ||
1815 | u8 *rates; | ||
1816 | |||
1817 | rates = &(ie[1]); | ||
1818 | |||
1819 | for (bit = 1, i = 0; i < IWL_RATE_COUNT; i++, bit <<= 1) { | ||
1820 | if (bit & supported_rate) { | ||
1821 | ret_rates |= bit; | ||
1822 | rates[*ie] = iwl_rates[i].ieee | | ||
1823 | ((bit & basic_rate) ? 0x80 : 0x00); | ||
1824 | *ie = *ie + 1; | ||
1825 | if (*ie >= max_count) | ||
1826 | break; | ||
1827 | } | ||
1828 | } | ||
1829 | |||
1830 | return ret_rates; | ||
1831 | } | ||
1832 | |||
1833 | #ifdef CONFIG_IWLWIFI_HT | ||
1834 | static void iwl_set_ht_capab(struct ieee80211_hw *hw, | ||
1835 | struct ieee80211_ht_capability *ht_cap, | ||
1836 | u8 use_wide_chan); | ||
1837 | #endif | ||
1838 | |||
1839 | /** | ||
1840 | * iwl_fill_probe_req - fill in all required fields and IE for probe request | ||
1841 | */ | ||
1842 | static u16 iwl_fill_probe_req(struct iwl_priv *priv, | ||
1843 | struct ieee80211_mgmt *frame, | ||
1844 | int left, int is_direct) | ||
1845 | { | ||
1846 | int len = 0; | ||
1847 | u8 *pos = NULL; | ||
1848 | u16 ret_rates; | ||
1849 | |||
1850 | /* Make sure there is enough space for the probe request, | ||
1851 | * two mandatory IEs and the data */ | ||
1852 | left -= 24; | ||
1853 | if (left < 0) | ||
1854 | return 0; | ||
1855 | len += 24; | ||
1856 | |||
1857 | frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); | ||
1858 | memcpy(frame->da, BROADCAST_ADDR, ETH_ALEN); | ||
1859 | memcpy(frame->sa, priv->mac_addr, ETH_ALEN); | ||
1860 | memcpy(frame->bssid, BROADCAST_ADDR, ETH_ALEN); | ||
1861 | frame->seq_ctrl = 0; | ||
1862 | |||
1863 | /* fill in our indirect SSID IE */ | ||
1864 | /* ...next IE... */ | ||
1865 | |||
1866 | left -= 2; | ||
1867 | if (left < 0) | ||
1868 | return 0; | ||
1869 | len += 2; | ||
1870 | pos = &(frame->u.probe_req.variable[0]); | ||
1871 | *pos++ = WLAN_EID_SSID; | ||
1872 | *pos++ = 0; | ||
1873 | |||
1874 | /* fill in our direct SSID IE... */ | ||
1875 | if (is_direct) { | ||
1876 | /* ...next IE... */ | ||
1877 | left -= 2 + priv->essid_len; | ||
1878 | if (left < 0) | ||
1879 | return 0; | ||
1880 | /* ... fill it in... */ | ||
1881 | *pos++ = WLAN_EID_SSID; | ||
1882 | *pos++ = priv->essid_len; | ||
1883 | memcpy(pos, priv->essid, priv->essid_len); | ||
1884 | pos += priv->essid_len; | ||
1885 | len += 2 + priv->essid_len; | ||
1886 | } | ||
1887 | |||
1888 | /* fill in supported rate */ | ||
1889 | /* ...next IE... */ | ||
1890 | left -= 2; | ||
1891 | if (left < 0) | ||
1892 | return 0; | ||
1893 | /* ... fill it in... */ | ||
1894 | *pos++ = WLAN_EID_SUPP_RATES; | ||
1895 | *pos = 0; | ||
1896 | ret_rates = priv->active_rate = priv->rates_mask; | ||
1897 | priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; | ||
1898 | |||
1899 | iwl_supported_rate_to_ie(pos, priv->active_rate, | ||
1900 | priv->active_rate_basic, left); | ||
1901 | len += 2 + *pos; | ||
1902 | pos += (*pos) + 1; | ||
1903 | ret_rates = ~ret_rates & priv->active_rate; | ||
1904 | |||
1905 | if (ret_rates == 0) | ||
1906 | goto fill_end; | ||
1907 | |||
1908 | /* fill in supported extended rate */ | ||
1909 | /* ...next IE... */ | ||
1910 | left -= 2; | ||
1911 | if (left < 0) | ||
1912 | return 0; | ||
1913 | /* ... fill it in... */ | ||
1914 | *pos++ = WLAN_EID_EXT_SUPP_RATES; | ||
1915 | *pos = 0; | ||
1916 | iwl_supported_rate_to_ie(pos, ret_rates, priv->active_rate_basic, left); | ||
1917 | if (*pos > 0) | ||
1918 | len += 2 + *pos; | ||
1919 | |||
1920 | #ifdef CONFIG_IWLWIFI_HT | ||
1921 | if (is_direct && priv->is_ht_enabled) { | ||
1922 | u8 use_wide_chan = 1; | ||
1923 | |||
1924 | if (priv->channel_width != IWL_CHANNEL_WIDTH_40MHZ) | ||
1925 | use_wide_chan = 0; | ||
1926 | pos += (*pos) + 1; | ||
1927 | *pos++ = WLAN_EID_HT_CAPABILITY; | ||
1928 | *pos++ = sizeof(struct ieee80211_ht_capability); | ||
1929 | iwl_set_ht_capab(NULL, (struct ieee80211_ht_capability *)pos, | ||
1930 | use_wide_chan); | ||
1931 | len += 2 + sizeof(struct ieee80211_ht_capability); | ||
1932 | } | ||
1933 | #endif /*CONFIG_IWLWIFI_HT */ | ||
1934 | |||
1935 | fill_end: | ||
1936 | return (u16)len; | ||
1937 | } | ||
1938 | |||
1939 | /* | ||
1940 | * QoS support | ||
1941 | */ | ||
1942 | #ifdef CONFIG_IWLWIFI_QOS | ||
1943 | static int iwl_send_qos_params_command(struct iwl_priv *priv, | ||
1944 | struct iwl_qosparam_cmd *qos) | ||
1945 | { | ||
1946 | |||
1947 | return iwl_send_cmd_pdu(priv, REPLY_QOS_PARAM, | ||
1948 | sizeof(struct iwl_qosparam_cmd), qos); | ||
1949 | } | ||
1950 | |||
1951 | static void iwl_reset_qos(struct iwl_priv *priv) | ||
1952 | { | ||
1953 | u16 cw_min = 15; | ||
1954 | u16 cw_max = 1023; | ||
1955 | u8 aifs = 2; | ||
1956 | u8 is_legacy = 0; | ||
1957 | unsigned long flags; | ||
1958 | int i; | ||
1959 | |||
1960 | spin_lock_irqsave(&priv->lock, flags); | ||
1961 | priv->qos_data.qos_active = 0; | ||
1962 | |||
1963 | if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) { | ||
1964 | if (priv->qos_data.qos_enable) | ||
1965 | priv->qos_data.qos_active = 1; | ||
1966 | if (!(priv->active_rate & 0xfff0)) { | ||
1967 | cw_min = 31; | ||
1968 | is_legacy = 1; | ||
1969 | } | ||
1970 | } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { | ||
1971 | if (priv->qos_data.qos_enable) | ||
1972 | priv->qos_data.qos_active = 1; | ||
1973 | } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) { | ||
1974 | cw_min = 31; | ||
1975 | is_legacy = 1; | ||
1976 | } | ||
1977 | |||
1978 | if (priv->qos_data.qos_active) | ||
1979 | aifs = 3; | ||
1980 | |||
1981 | priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min); | ||
1982 | priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max); | ||
1983 | priv->qos_data.def_qos_parm.ac[0].aifsn = aifs; | ||
1984 | priv->qos_data.def_qos_parm.ac[0].edca_txop = 0; | ||
1985 | priv->qos_data.def_qos_parm.ac[0].reserved1 = 0; | ||
1986 | |||
1987 | if (priv->qos_data.qos_active) { | ||
1988 | i = 1; | ||
1989 | priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min); | ||
1990 | priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max); | ||
1991 | priv->qos_data.def_qos_parm.ac[i].aifsn = 7; | ||
1992 | priv->qos_data.def_qos_parm.ac[i].edca_txop = 0; | ||
1993 | priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; | ||
1994 | |||
1995 | i = 2; | ||
1996 | priv->qos_data.def_qos_parm.ac[i].cw_min = | ||
1997 | cpu_to_le16((cw_min + 1) / 2 - 1); | ||
1998 | priv->qos_data.def_qos_parm.ac[i].cw_max = | ||
1999 | cpu_to_le16(cw_max); | ||
2000 | priv->qos_data.def_qos_parm.ac[i].aifsn = 2; | ||
2001 | if (is_legacy) | ||
2002 | priv->qos_data.def_qos_parm.ac[i].edca_txop = | ||
2003 | cpu_to_le16(6016); | ||
2004 | else | ||
2005 | priv->qos_data.def_qos_parm.ac[i].edca_txop = | ||
2006 | cpu_to_le16(3008); | ||
2007 | priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; | ||
2008 | |||
2009 | i = 3; | ||
2010 | priv->qos_data.def_qos_parm.ac[i].cw_min = | ||
2011 | cpu_to_le16((cw_min + 1) / 4 - 1); | ||
2012 | priv->qos_data.def_qos_parm.ac[i].cw_max = | ||
2013 | cpu_to_le16((cw_max + 1) / 2 - 1); | ||
2014 | priv->qos_data.def_qos_parm.ac[i].aifsn = 2; | ||
2015 | priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; | ||
2016 | if (is_legacy) | ||
2017 | priv->qos_data.def_qos_parm.ac[i].edca_txop = | ||
2018 | cpu_to_le16(3264); | ||
2019 | else | ||
2020 | priv->qos_data.def_qos_parm.ac[i].edca_txop = | ||
2021 | cpu_to_le16(1504); | ||
2022 | } else { | ||
2023 | for (i = 1; i < 4; i++) { | ||
2024 | priv->qos_data.def_qos_parm.ac[i].cw_min = | ||
2025 | cpu_to_le16(cw_min); | ||
2026 | priv->qos_data.def_qos_parm.ac[i].cw_max = | ||
2027 | cpu_to_le16(cw_max); | ||
2028 | priv->qos_data.def_qos_parm.ac[i].aifsn = aifs; | ||
2029 | priv->qos_data.def_qos_parm.ac[i].edca_txop = 0; | ||
2030 | priv->qos_data.def_qos_parm.ac[i].reserved1 = 0; | ||
2031 | } | ||
2032 | } | ||
2033 | IWL_DEBUG_QOS("set QoS to default\n"); | ||
2034 | |||
2035 | spin_unlock_irqrestore(&priv->lock, flags); | ||
2036 | } | ||
2037 | |||
2038 | static void iwl_activate_qos(struct iwl_priv *priv, u8 force) | ||
2039 | { | ||
2040 | unsigned long flags; | ||
2041 | |||
2042 | if (priv == NULL) | ||
2043 | return; | ||
2044 | |||
2045 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
2046 | return; | ||
2047 | |||
2048 | if (!priv->qos_data.qos_enable) | ||
2049 | return; | ||
2050 | |||
2051 | spin_lock_irqsave(&priv->lock, flags); | ||
2052 | priv->qos_data.def_qos_parm.qos_flags = 0; | ||
2053 | |||
2054 | if (priv->qos_data.qos_cap.q_AP.queue_request && | ||
2055 | !priv->qos_data.qos_cap.q_AP.txop_request) | ||
2056 | priv->qos_data.def_qos_parm.qos_flags |= | ||
2057 | QOS_PARAM_FLG_TXOP_TYPE_MSK; | ||
2058 | |||
2059 | if (priv->qos_data.qos_active) | ||
2060 | priv->qos_data.def_qos_parm.qos_flags |= | ||
2061 | QOS_PARAM_FLG_UPDATE_EDCA_MSK; | ||
2062 | |||
2063 | spin_unlock_irqrestore(&priv->lock, flags); | ||
2064 | |||
2065 | if (force || iwl_is_associated(priv)) { | ||
2066 | IWL_DEBUG_QOS("send QoS cmd with QoS active %d\n", | ||
2067 | priv->qos_data.qos_active); | ||
2068 | |||
2069 | iwl_send_qos_params_command(priv, | ||
2070 | &(priv->qos_data.def_qos_parm)); | ||
2071 | } | ||
2072 | } | ||
2073 | |||
2074 | #endif /* CONFIG_IWLWIFI_QOS */ | ||
2075 | /* | ||
2076 | * Power management (not Tx power!) functions | ||
2077 | */ | ||
2078 | #define MSEC_TO_USEC 1024 | ||
2079 | |||
2080 | #define NOSLP __constant_cpu_to_le16(0), 0, 0 | ||
2081 | #define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0 | ||
2082 | #define SLP_TIMEOUT(T) __constant_cpu_to_le32((T) * MSEC_TO_USEC) | ||
2083 | #define SLP_VEC(X0, X1, X2, X3, X4) {__constant_cpu_to_le32(X0), \ | ||
2084 | __constant_cpu_to_le32(X1), \ | ||
2085 | __constant_cpu_to_le32(X2), \ | ||
2086 | __constant_cpu_to_le32(X3), \ | ||
2087 | __constant_cpu_to_le32(X4)} | ||
2088 | |||
2089 | |||
2090 | /* default power management (not Tx power) table values */ | ||
2091 | /* for tim 0-10 */ | ||
2092 | static struct iwl_power_vec_entry range_0[IWL_POWER_AC] = { | ||
2093 | {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0}, | ||
2094 | {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0}, | ||
2095 | {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), SLP_VEC(2, 4, 6, 7, 7)}, 0}, | ||
2096 | {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), SLP_VEC(2, 6, 9, 9, 10)}, 0}, | ||
2097 | {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 10)}, 1}, | ||
2098 | {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), SLP_VEC(4, 7, 10, 10, 10)}, 1} | ||
2099 | }; | ||
2100 | |||
2101 | /* for tim > 10 */ | ||
2102 | static struct iwl_power_vec_entry range_1[IWL_POWER_AC] = { | ||
2103 | {{NOSLP, SLP_TIMEOUT(0), SLP_TIMEOUT(0), SLP_VEC(0, 0, 0, 0, 0)}, 0}, | ||
2104 | {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(500), | ||
2105 | SLP_VEC(1, 2, 3, 4, 0xFF)}, 0}, | ||
2106 | {{SLP, SLP_TIMEOUT(200), SLP_TIMEOUT(300), | ||
2107 | SLP_VEC(2, 4, 6, 7, 0xFF)}, 0}, | ||
2108 | {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(100), | ||
2109 | SLP_VEC(2, 6, 9, 9, 0xFF)}, 0}, | ||
2110 | {{SLP, SLP_TIMEOUT(50), SLP_TIMEOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0}, | ||
2111 | {{SLP, SLP_TIMEOUT(25), SLP_TIMEOUT(25), | ||
2112 | SLP_VEC(4, 7, 10, 10, 0xFF)}, 0} | ||
2113 | }; | ||
2114 | |||
2115 | int iwl_power_init_handle(struct iwl_priv *priv) | ||
2116 | { | ||
2117 | int rc = 0, i; | ||
2118 | struct iwl_power_mgr *pow_data; | ||
2119 | int size = sizeof(struct iwl_power_vec_entry) * IWL_POWER_AC; | ||
2120 | u16 pci_pm; | ||
2121 | |||
2122 | IWL_DEBUG_POWER("Initialize power \n"); | ||
2123 | |||
2124 | pow_data = &(priv->power_data); | ||
2125 | |||
2126 | memset(pow_data, 0, sizeof(*pow_data)); | ||
2127 | |||
2128 | pow_data->active_index = IWL_POWER_RANGE_0; | ||
2129 | pow_data->dtim_val = 0xffff; | ||
2130 | |||
2131 | memcpy(&pow_data->pwr_range_0[0], &range_0[0], size); | ||
2132 | memcpy(&pow_data->pwr_range_1[0], &range_1[0], size); | ||
2133 | |||
2134 | rc = pci_read_config_word(priv->pci_dev, PCI_LINK_CTRL, &pci_pm); | ||
2135 | if (rc != 0) | ||
2136 | return 0; | ||
2137 | else { | ||
2138 | struct iwl_powertable_cmd *cmd; | ||
2139 | |||
2140 | IWL_DEBUG_POWER("adjust power command flags\n"); | ||
2141 | |||
2142 | for (i = 0; i < IWL_POWER_AC; i++) { | ||
2143 | cmd = &pow_data->pwr_range_0[i].cmd; | ||
2144 | |||
2145 | if (pci_pm & 0x1) | ||
2146 | cmd->flags &= ~IWL_POWER_PCI_PM_MSK; | ||
2147 | else | ||
2148 | cmd->flags |= IWL_POWER_PCI_PM_MSK; | ||
2149 | } | ||
2150 | } | ||
2151 | return rc; | ||
2152 | } | ||
2153 | |||
2154 | static int iwl_update_power_cmd(struct iwl_priv *priv, | ||
2155 | struct iwl_powertable_cmd *cmd, u32 mode) | ||
2156 | { | ||
2157 | int rc = 0, i; | ||
2158 | u8 skip; | ||
2159 | u32 max_sleep = 0; | ||
2160 | struct iwl_power_vec_entry *range; | ||
2161 | u8 period = 0; | ||
2162 | struct iwl_power_mgr *pow_data; | ||
2163 | |||
2164 | if (mode > IWL_POWER_INDEX_5) { | ||
2165 | IWL_DEBUG_POWER("Error: invalid power mode\n"); | ||
2166 | return -1; | ||
2167 | } | ||
2168 | pow_data = &(priv->power_data); | ||
2169 | |||
2170 | if (pow_data->active_index == IWL_POWER_RANGE_0) | ||
2171 | range = &pow_data->pwr_range_0[0]; | ||
2172 | else | ||
2173 | range = &pow_data->pwr_range_1[0]; | ||
2174 | |||
2175 | memcpy(cmd, &range[mode].cmd, sizeof(struct iwl_powertable_cmd)); | ||
2176 | |||
2177 | #ifdef IWL_MAC80211_DISABLE | ||
2178 | if (priv->assoc_network != NULL) { | ||
2179 | unsigned long flags; | ||
2180 | |||
2181 | period = priv->assoc_network->tim.tim_period; | ||
2182 | } | ||
2183 | #endif /*IWL_MAC80211_DISABLE */ | ||
2184 | skip = range[mode].no_dtim; | ||
2185 | |||
2186 | if (period == 0) { | ||
2187 | period = 1; | ||
2188 | skip = 0; | ||
2189 | } | ||
2190 | |||
2191 | if (skip == 0) { | ||
2192 | max_sleep = period; | ||
2193 | cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK; | ||
2194 | } else { | ||
2195 | __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]; | ||
2196 | max_sleep = (le32_to_cpu(slp_itrvl) / period) * period; | ||
2197 | cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK; | ||
2198 | } | ||
2199 | |||
2200 | for (i = 0; i < IWL_POWER_VEC_SIZE; i++) { | ||
2201 | if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep) | ||
2202 | cmd->sleep_interval[i] = cpu_to_le32(max_sleep); | ||
2203 | } | ||
2204 | |||
2205 | IWL_DEBUG_POWER("Flags value = 0x%08X\n", cmd->flags); | ||
2206 | IWL_DEBUG_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout)); | ||
2207 | IWL_DEBUG_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout)); | ||
2208 | IWL_DEBUG_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n", | ||
2209 | le32_to_cpu(cmd->sleep_interval[0]), | ||
2210 | le32_to_cpu(cmd->sleep_interval[1]), | ||
2211 | le32_to_cpu(cmd->sleep_interval[2]), | ||
2212 | le32_to_cpu(cmd->sleep_interval[3]), | ||
2213 | le32_to_cpu(cmd->sleep_interval[4])); | ||
2214 | |||
2215 | return rc; | ||
2216 | } | ||
2217 | |||
2218 | static int iwl_send_power_mode(struct iwl_priv *priv, u32 mode) | ||
2219 | { | ||
2220 | u32 final_mode = mode; | ||
2221 | int rc; | ||
2222 | struct iwl_powertable_cmd cmd; | ||
2223 | |||
2224 | /* If on battery, set to 3, | ||
2225 | * if plugged into AC power, set to CAM ("continuously aware mode"), | ||
2226 | * else user level */ | ||
2227 | switch (mode) { | ||
2228 | case IWL_POWER_BATTERY: | ||
2229 | final_mode = IWL_POWER_INDEX_3; | ||
2230 | break; | ||
2231 | case IWL_POWER_AC: | ||
2232 | final_mode = IWL_POWER_MODE_CAM; | ||
2233 | break; | ||
2234 | default: | ||
2235 | final_mode = mode; | ||
2236 | break; | ||
2237 | } | ||
2238 | |||
2239 | cmd.keep_alive_beacons = 0; | ||
2240 | |||
2241 | iwl_update_power_cmd(priv, &cmd, final_mode); | ||
2242 | |||
2243 | rc = iwl_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd); | ||
2244 | |||
2245 | if (final_mode == IWL_POWER_MODE_CAM) | ||
2246 | clear_bit(STATUS_POWER_PMI, &priv->status); | ||
2247 | else | ||
2248 | set_bit(STATUS_POWER_PMI, &priv->status); | ||
2249 | |||
2250 | return rc; | ||
2251 | } | ||
2252 | |||
2253 | int iwl_is_network_packet(struct iwl_priv *priv, struct ieee80211_hdr *header) | ||
2254 | { | ||
2255 | /* Filter incoming packets to determine if they are targeted toward | ||
2256 | * this network, discarding packets coming from ourselves */ | ||
2257 | switch (priv->iw_mode) { | ||
2258 | case IEEE80211_IF_TYPE_IBSS: /* Header: Dest. | Source | BSSID */ | ||
2259 | /* packets from our adapter are dropped (echo) */ | ||
2260 | if (!compare_ether_addr(header->addr2, priv->mac_addr)) | ||
2261 | return 0; | ||
2262 | /* {broad,multi}cast packets to our IBSS go through */ | ||
2263 | if (is_multicast_ether_addr(header->addr1)) | ||
2264 | return !compare_ether_addr(header->addr3, priv->bssid); | ||
2265 | /* packets to our adapter go through */ | ||
2266 | return !compare_ether_addr(header->addr1, priv->mac_addr); | ||
2267 | case IEEE80211_IF_TYPE_STA: /* Header: Dest. | AP{BSSID} | Source */ | ||
2268 | /* packets from our adapter are dropped (echo) */ | ||
2269 | if (!compare_ether_addr(header->addr3, priv->mac_addr)) | ||
2270 | return 0; | ||
2271 | /* {broad,multi}cast packets to our BSS go through */ | ||
2272 | if (is_multicast_ether_addr(header->addr1)) | ||
2273 | return !compare_ether_addr(header->addr2, priv->bssid); | ||
2274 | /* packets to our adapter go through */ | ||
2275 | return !compare_ether_addr(header->addr1, priv->mac_addr); | ||
2276 | } | ||
2277 | |||
2278 | return 1; | ||
2279 | } | ||
2280 | |||
2281 | #define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x | ||
2282 | |||
2283 | const char *iwl_get_tx_fail_reason(u32 status) | ||
2284 | { | ||
2285 | switch (status & TX_STATUS_MSK) { | ||
2286 | case TX_STATUS_SUCCESS: | ||
2287 | return "SUCCESS"; | ||
2288 | TX_STATUS_ENTRY(SHORT_LIMIT); | ||
2289 | TX_STATUS_ENTRY(LONG_LIMIT); | ||
2290 | TX_STATUS_ENTRY(FIFO_UNDERRUN); | ||
2291 | TX_STATUS_ENTRY(MGMNT_ABORT); | ||
2292 | TX_STATUS_ENTRY(NEXT_FRAG); | ||
2293 | TX_STATUS_ENTRY(LIFE_EXPIRE); | ||
2294 | TX_STATUS_ENTRY(DEST_PS); | ||
2295 | TX_STATUS_ENTRY(ABORTED); | ||
2296 | TX_STATUS_ENTRY(BT_RETRY); | ||
2297 | TX_STATUS_ENTRY(STA_INVALID); | ||
2298 | TX_STATUS_ENTRY(FRAG_DROPPED); | ||
2299 | TX_STATUS_ENTRY(TID_DISABLE); | ||
2300 | TX_STATUS_ENTRY(FRAME_FLUSHED); | ||
2301 | TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL); | ||
2302 | TX_STATUS_ENTRY(TX_LOCKED); | ||
2303 | TX_STATUS_ENTRY(NO_BEACON_ON_RADAR); | ||
2304 | } | ||
2305 | |||
2306 | return "UNKNOWN"; | ||
2307 | } | ||
2308 | |||
2309 | /** | ||
2310 | * iwl_scan_cancel - Cancel any currently executing HW scan | ||
2311 | * | ||
2312 | * NOTE: priv->mutex is not required before calling this function | ||
2313 | */ | ||
2314 | static int iwl_scan_cancel(struct iwl_priv *priv) | ||
2315 | { | ||
2316 | if (!test_bit(STATUS_SCAN_HW, &priv->status)) { | ||
2317 | clear_bit(STATUS_SCANNING, &priv->status); | ||
2318 | return 0; | ||
2319 | } | ||
2320 | |||
2321 | if (test_bit(STATUS_SCANNING, &priv->status)) { | ||
2322 | if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) { | ||
2323 | IWL_DEBUG_SCAN("Queuing scan abort.\n"); | ||
2324 | set_bit(STATUS_SCAN_ABORTING, &priv->status); | ||
2325 | queue_work(priv->workqueue, &priv->abort_scan); | ||
2326 | |||
2327 | } else | ||
2328 | IWL_DEBUG_SCAN("Scan abort already in progress.\n"); | ||
2329 | |||
2330 | return test_bit(STATUS_SCANNING, &priv->status); | ||
2331 | } | ||
2332 | |||
2333 | return 0; | ||
2334 | } | ||
2335 | |||
2336 | /** | ||
2337 | * iwl_scan_cancel_timeout - Cancel any currently executing HW scan | ||
2338 | * @ms: amount of time to wait (in milliseconds) for scan to abort | ||
2339 | * | ||
2340 | * NOTE: priv->mutex must be held before calling this function | ||
2341 | */ | ||
2342 | static int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms) | ||
2343 | { | ||
2344 | unsigned long now = jiffies; | ||
2345 | int ret; | ||
2346 | |||
2347 | ret = iwl_scan_cancel(priv); | ||
2348 | if (ret && ms) { | ||
2349 | mutex_unlock(&priv->mutex); | ||
2350 | while (!time_after(jiffies, now + msecs_to_jiffies(ms)) && | ||
2351 | test_bit(STATUS_SCANNING, &priv->status)) | ||
2352 | msleep(1); | ||
2353 | mutex_lock(&priv->mutex); | ||
2354 | |||
2355 | return test_bit(STATUS_SCANNING, &priv->status); | ||
2356 | } | ||
2357 | |||
2358 | return ret; | ||
2359 | } | ||
2360 | |||
2361 | static void iwl_sequence_reset(struct iwl_priv *priv) | ||
2362 | { | ||
2363 | /* Reset ieee stats */ | ||
2364 | |||
2365 | /* We don't reset the net_device_stats (ieee->stats) on | ||
2366 | * re-association */ | ||
2367 | |||
2368 | priv->last_seq_num = -1; | ||
2369 | priv->last_frag_num = -1; | ||
2370 | priv->last_packet_time = 0; | ||
2371 | |||
2372 | iwl_scan_cancel(priv); | ||
2373 | } | ||
2374 | |||
2375 | #define MAX_UCODE_BEACON_INTERVAL 4096 | ||
2376 | #define INTEL_CONN_LISTEN_INTERVAL __constant_cpu_to_le16(0xA) | ||
2377 | |||
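| /* The uCode cannot program beacon intervals longer than | ||
| * MAX_UCODE_BEACON_INTERVAL, so scale an oversized interval down by an | ||
| * integer factor so the result stays below that limit. */ | ||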
2378 | static __le16 iwl_adjust_beacon_interval(u16 beacon_val) | ||
2379 | { | ||
2380 | u16 new_val = 0; | ||
2381 | u16 beacon_factor = 0; | ||
2382 | |||
2383 | beacon_factor = | ||
2384 | (beacon_val + MAX_UCODE_BEACON_INTERVAL) | ||
2385 | / MAX_UCODE_BEACON_INTERVAL; | ||
2386 | new_val = beacon_val / beacon_factor; | ||
2387 | |||
2388 | return cpu_to_le16(new_val); | ||
2389 | } | ||
2390 | |||
2391 | static void iwl_setup_rxon_timing(struct iwl_priv *priv) | ||
2392 | { | ||
2393 | u64 interval_tm_unit; | ||
2394 | u64 tsf, result; | ||
2395 | unsigned long flags; | ||
2396 | struct ieee80211_conf *conf = NULL; | ||
2397 | u16 beacon_int = 0; | ||
2398 | |||
2399 | conf = ieee80211_get_hw_conf(priv->hw); | ||
2400 | |||
2401 | spin_lock_irqsave(&priv->lock, flags); | ||
2402 | priv->rxon_timing.timestamp.dw[1] = cpu_to_le32(priv->timestamp1); | ||
2403 | priv->rxon_timing.timestamp.dw[0] = cpu_to_le32(priv->timestamp0); | ||
2404 | |||
2405 | priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL; | ||
2406 | |||
2407 | tsf = priv->timestamp1; | ||
2408 | tsf = ((tsf << 32) | priv->timestamp0); | ||
2409 | |||
2410 | beacon_int = priv->beacon_int; | ||
2411 | spin_unlock_irqrestore(&priv->lock, flags); | ||
2412 | |||
2413 | if (priv->iw_mode == IEEE80211_IF_TYPE_STA) { | ||
2414 | if (beacon_int == 0) { | ||
2415 | priv->rxon_timing.beacon_interval = cpu_to_le16(100); | ||
2416 | priv->rxon_timing.beacon_init_val = cpu_to_le32(102400); | ||
2417 | } else { | ||
2418 | priv->rxon_timing.beacon_interval = | ||
2419 | cpu_to_le16(beacon_int); | ||
2420 | priv->rxon_timing.beacon_interval = | ||
2421 | iwl_adjust_beacon_interval( | ||
2422 | le16_to_cpu(priv->rxon_timing.beacon_interval)); | ||
2423 | } | ||
2424 | |||
2425 | priv->rxon_timing.atim_window = 0; | ||
2426 | } else { | ||
2427 | priv->rxon_timing.beacon_interval = | ||
2428 | iwl_adjust_beacon_interval(conf->beacon_int); | ||
2429 | /* TODO: we need to get atim_window from upper stack | ||
2430 | * for now we set it to 0 */ | ||
2431 | priv->rxon_timing.atim_window = 0; | ||
2432 | } | ||
2433 | |||
2434 | interval_tm_unit = | ||
2435 | (le16_to_cpu(priv->rxon_timing.beacon_interval) * 1024); | ||
2436 | result = do_div(tsf, interval_tm_unit); | ||
2437 | priv->rxon_timing.beacon_init_val = | ||
2438 | cpu_to_le32((u32) ((u64) interval_tm_unit - result)); | ||
2439 | |||
2440 | IWL_DEBUG_ASSOC | ||
2441 | ("beacon interval %d beacon timer %d beacon tim %d\n", | ||
2442 | le16_to_cpu(priv->rxon_timing.beacon_interval), | ||
2443 | le32_to_cpu(priv->rxon_timing.beacon_init_val), | ||
2444 | le16_to_cpu(priv->rxon_timing.atim_window)); | ||
2445 | } | ||
2446 | |||
2447 | static int iwl_scan_initiate(struct iwl_priv *priv) | ||
2448 | { | ||
2449 | if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { | ||
2450 | IWL_ERROR("APs don't scan.\n"); | ||
2451 | return 0; | ||
2452 | } | ||
2453 | |||
2454 | if (!iwl_is_ready_rf(priv)) { | ||
2455 | IWL_DEBUG_SCAN("Aborting scan due to not ready.\n"); | ||
2456 | return -EIO; | ||
2457 | } | ||
2458 | |||
2459 | if (test_bit(STATUS_SCANNING, &priv->status)) { | ||
2460 | IWL_DEBUG_SCAN("Scan already in progress.\n"); | ||
2461 | return -EAGAIN; | ||
2462 | } | ||
2463 | |||
2464 | if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { | ||
2465 | IWL_DEBUG_SCAN("Scan request while abort pending. " | ||
2466 | "Queuing.\n"); | ||
2467 | return -EAGAIN; | ||
2468 | } | ||
2469 | |||
2470 | IWL_DEBUG_INFO("Starting scan...\n"); | ||
2471 | priv->scan_bands = 2; | ||
2472 | set_bit(STATUS_SCANNING, &priv->status); | ||
2473 | priv->scan_start = jiffies; | ||
2474 | priv->scan_pass_start = priv->scan_start; | ||
2475 | |||
2476 | queue_work(priv->workqueue, &priv->request_scan); | ||
2477 | |||
2478 | return 0; | ||
2479 | } | ||
2480 | |||
2481 | static int iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt) | ||
2482 | { | ||
2483 | struct iwl_rxon_cmd *rxon = &priv->staging_rxon; | ||
2484 | |||
2485 | if (hw_decrypt) | ||
2486 | rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; | ||
2487 | else | ||
2488 | rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK; | ||
2489 | |||
2490 | return 0; | ||
2491 | } | ||
2492 | |||
2493 | static void iwl_set_flags_for_phymode(struct iwl_priv *priv, u8 phymode) | ||
2494 | { | ||
2495 | if (phymode == MODE_IEEE80211A) { | ||
2496 | priv->staging_rxon.flags &= | ||
2497 | ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK | ||
2498 | | RXON_FLG_CCK_MSK); | ||
2499 | priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; | ||
2500 | } else { | ||
2501 | /* Copied from iwl_bg_post_associate() */ | ||
2502 | if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) | ||
2503 | priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; | ||
2504 | else | ||
2505 | priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; | ||
2506 | |||
2507 | if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) | ||
2508 | priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; | ||
2509 | |||
2510 | priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK; | ||
2511 | priv->staging_rxon.flags |= RXON_FLG_AUTO_DETECT_MSK; | ||
2512 | priv->staging_rxon.flags &= ~RXON_FLG_CCK_MSK; | ||
2513 | } | ||
2514 | } | ||
2515 | |||
2516 | /* | ||
2517 | * initialize rxon structure with default values from eeprom | ||
2518 | */ | ||
2519 | static void iwl_connection_init_rx_config(struct iwl_priv *priv) | ||
2520 | { | ||
2521 | const struct iwl_channel_info *ch_info; | ||
2522 | |||
2523 | memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon)); | ||
2524 | |||
2525 | switch (priv->iw_mode) { | ||
2526 | case IEEE80211_IF_TYPE_AP: | ||
2527 | priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP; | ||
2528 | break; | ||
2529 | |||
2530 | case IEEE80211_IF_TYPE_STA: | ||
2531 | priv->staging_rxon.dev_type = RXON_DEV_TYPE_ESS; | ||
2532 | priv->staging_rxon.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; | ||
2533 | break; | ||
2534 | |||
2535 | case IEEE80211_IF_TYPE_IBSS: | ||
2536 | priv->staging_rxon.dev_type = RXON_DEV_TYPE_IBSS; | ||
2537 | priv->staging_rxon.flags = RXON_FLG_SHORT_PREAMBLE_MSK; | ||
2538 | priv->staging_rxon.filter_flags = RXON_FILTER_BCON_AWARE_MSK | | ||
2539 | RXON_FILTER_ACCEPT_GRP_MSK; | ||
2540 | break; | ||
2541 | |||
2542 | case IEEE80211_IF_TYPE_MNTR: | ||
2543 | priv->staging_rxon.dev_type = RXON_DEV_TYPE_SNIFFER; | ||
2544 | priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK | | ||
2545 | RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK; | ||
2546 | break; | ||
2547 | } | ||
2548 | |||
2549 | #if 0 | ||
2550 | /* TODO: Figure out when short_preamble would be set and cache from | ||
2551 | * that */ | ||
2552 | if (!hw_to_local(priv->hw)->short_preamble) | ||
2553 | priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; | ||
2554 | else | ||
2555 | priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; | ||
2556 | #endif | ||
2557 | |||
2558 | ch_info = iwl_get_channel_info(priv, priv->phymode, | ||
2559 | le16_to_cpu(priv->staging_rxon.channel)); | ||
2560 | |||
2561 | if (!ch_info) | ||
2562 | ch_info = &priv->channel_info[0]; | ||
2563 | |||
2564 | /* | ||
2565 | * in some cases the A-band channels are all non-IBSS; | ||
2566 | * in that case force a B/G channel | ||
2567 | */ | ||
2568 | if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && | ||
2569 | !(is_channel_ibss(ch_info))) | ||
2570 | ch_info = &priv->channel_info[0]; | ||
2571 | |||
2572 | priv->staging_rxon.channel = cpu_to_le16(ch_info->channel); | ||
2573 | if (is_channel_a_band(ch_info)) | ||
2574 | priv->phymode = MODE_IEEE80211A; | ||
2575 | else | ||
2576 | priv->phymode = MODE_IEEE80211G; | ||
2577 | |||
2578 | iwl_set_flags_for_phymode(priv, priv->phymode); | ||
2579 | |||
2580 | priv->staging_rxon.ofdm_basic_rates = | ||
2581 | (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; | ||
2582 | priv->staging_rxon.cck_basic_rates = | ||
2583 | (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; | ||
2584 | |||
2585 | priv->staging_rxon.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK | | ||
2586 | RXON_FLG_CHANNEL_MODE_PURE_40_MSK); | ||
2587 | memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); | ||
2588 | memcpy(priv->staging_rxon.wlap_bssid_addr, priv->mac_addr, ETH_ALEN); | ||
2589 | priv->staging_rxon.ofdm_ht_single_stream_basic_rates = 0xff; | ||
2590 | priv->staging_rxon.ofdm_ht_dual_stream_basic_rates = 0xff; | ||
2591 | iwl4965_set_rxon_chain(priv); | ||
2592 | } | ||
2593 | |||
2594 | static int iwl_set_mode(struct iwl_priv *priv, int mode) | ||
2595 | { | ||
2596 | if (!iwl_is_ready_rf(priv)) | ||
2597 | return -EAGAIN; | ||
2598 | |||
2599 | if (mode == IEEE80211_IF_TYPE_IBSS) { | ||
2600 | const struct iwl_channel_info *ch_info; | ||
2601 | |||
2602 | ch_info = iwl_get_channel_info(priv, | ||
2603 | priv->phymode, | ||
2604 | le16_to_cpu(priv->staging_rxon.channel)); | ||
2605 | |||
2606 | if (!ch_info || !is_channel_ibss(ch_info)) { | ||
2607 | IWL_ERROR("channel %d not IBSS channel\n", | ||
2608 | le16_to_cpu(priv->staging_rxon.channel)); | ||
2609 | return -EINVAL; | ||
2610 | } | ||
2611 | } | ||
2612 | |||
2613 | cancel_delayed_work(&priv->scan_check); | ||
2614 | if (iwl_scan_cancel_timeout(priv, 100)) { | ||
2615 | IWL_WARNING("Aborted scan still in progress after 100ms\n"); | ||
2616 | IWL_DEBUG_MAC80211("leaving - scan abort failed.\n"); | ||
2617 | return -EAGAIN; | ||
2618 | } | ||
2619 | |||
2620 | priv->iw_mode = mode; | ||
2621 | |||
2622 | iwl_connection_init_rx_config(priv); | ||
2623 | memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); | ||
2624 | |||
2625 | iwl_clear_stations_table(priv); | ||
2626 | |||
2627 | iwl_commit_rxon(priv); | ||
2628 | |||
2629 | return 0; | ||
2630 | } | ||
2631 | |||
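| /* | ||
| * Fill in the security fields (sec_ctl, key) of a REPLY_TX command from | ||
| * the station's cached key: CCMP and WEP are offloaded to the uCode, | ||
| * while the TKIP path is currently compiled out. | ||
| */ | ||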
2632 | static void iwl_build_tx_cmd_hwcrypto(struct iwl_priv *priv, | ||
2633 | struct ieee80211_tx_control *ctl, | ||
2634 | struct iwl_cmd *cmd, | ||
2635 | struct sk_buff *skb_frag, | ||
2636 | int last_frag) | ||
2637 | { | ||
2638 | struct iwl_hw_key *keyinfo = &priv->stations[ctl->key_idx].keyinfo; | ||
2639 | |||
2640 | switch (keyinfo->alg) { | ||
2641 | case ALG_CCMP: | ||
2642 | cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM; | ||
2643 | memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen); | ||
2644 | IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n"); | ||
2645 | break; | ||
2646 | |||
2647 | case ALG_TKIP: | ||
2648 | #if 0 | ||
2649 | cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP; | ||
2650 | |||
2651 | if (last_frag) | ||
2652 | memcpy(cmd->cmd.tx.tkip_mic.byte, skb_frag->tail - 8, | ||
2653 | 8); | ||
2654 | else | ||
2655 | memset(cmd->cmd.tx.tkip_mic.byte, 0, 8); | ||
2656 | #endif | ||
2657 | break; | ||
2658 | |||
2659 | case ALG_WEP: | ||
2660 | cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP | | ||
2661 | (ctl->key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; | ||
2662 | |||
2663 | if (keyinfo->keylen == 13) | ||
2664 | cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128; | ||
2665 | |||
2666 | memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen); | ||
2667 | |||
2668 | IWL_DEBUG_TX("Configuring packet for WEP encryption " | ||
2669 | "with key %d\n", ctl->key_idx); | ||
2670 | break; | ||
2671 | |||
2672 | case ALG_NONE: | ||
2673 | IWL_DEBUG_TX("Tx packet in the clear (encrypt requested).\n"); | ||
2674 | break; | ||
2675 | |||
2676 | default: | ||
2677 | printk(KERN_ERR "Unknown encode alg %d\n", keyinfo->alg); | ||
2678 | break; | ||
2679 | } | ||
2680 | } | ||
2681 | |||
2682 | /* | ||
2683 | * build the basic, rate-independent part of the REPLY_TX command. | ||
2684 | */ | ||
2685 | static void iwl_build_tx_cmd_basic(struct iwl_priv *priv, | ||
2686 | struct iwl_cmd *cmd, | ||
2687 | struct ieee80211_tx_control *ctrl, | ||
2688 | struct ieee80211_hdr *hdr, | ||
2689 | int is_unicast, u8 std_id) | ||
2690 | { | ||
2691 | __le16 *qc; | ||
2692 | u16 fc = le16_to_cpu(hdr->frame_control); | ||
2693 | __le32 tx_flags = cmd->cmd.tx.tx_flags; | ||
2694 | |||
2695 | cmd->cmd.tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; | ||
2696 | if (!(ctrl->flags & IEEE80211_TXCTL_NO_ACK)) { | ||
2697 | tx_flags |= TX_CMD_FLG_ACK_MSK; | ||
2698 | if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) | ||
2699 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
2700 | if (ieee80211_is_probe_response(fc) && | ||
2701 | !(le16_to_cpu(hdr->seq_ctrl) & 0xf)) | ||
2702 | tx_flags |= TX_CMD_FLG_TSF_MSK; | ||
2703 | } else { | ||
2704 | tx_flags &= (~TX_CMD_FLG_ACK_MSK); | ||
2705 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
2706 | } | ||
2707 | |||
2708 | cmd->cmd.tx.sta_id = std_id; | ||
2709 | if (ieee80211_get_morefrag(hdr)) | ||
2710 | tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; | ||
2711 | |||
2712 | qc = ieee80211_get_qos_ctrl(hdr); | ||
2713 | if (qc) { | ||
2714 | cmd->cmd.tx.tid_tspec = (u8) (le16_to_cpu(*qc) & 0xf); | ||
2715 | tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; | ||
2716 | } else | ||
2717 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; | ||
2718 | |||
2719 | if (ctrl->flags & IEEE80211_TXCTL_USE_RTS_CTS) { | ||
2720 | tx_flags |= TX_CMD_FLG_RTS_MSK; | ||
2721 | tx_flags &= ~TX_CMD_FLG_CTS_MSK; | ||
2722 | } else if (ctrl->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) { | ||
2723 | tx_flags &= ~TX_CMD_FLG_RTS_MSK; | ||
2724 | tx_flags |= TX_CMD_FLG_CTS_MSK; | ||
2725 | } | ||
2726 | |||
2727 | if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK)) | ||
2728 | tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; | ||
2729 | |||
2730 | tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); | ||
2731 | if ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) { | ||
2732 | if ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ || | ||
2733 | (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ) | ||
2734 | cmd->cmd.tx.timeout.pm_frame_timeout = | ||
2735 | cpu_to_le16(3); | ||
2736 | else | ||
2737 | cmd->cmd.tx.timeout.pm_frame_timeout = | ||
2738 | cpu_to_le16(2); | ||
2739 | } else | ||
2740 | cmd->cmd.tx.timeout.pm_frame_timeout = 0; | ||
2741 | |||
2742 | cmd->cmd.tx.driver_txop = 0; | ||
2743 | cmd->cmd.tx.tx_flags = tx_flags; | ||
2744 | cmd->cmd.tx.next_frame_len = 0; | ||
2745 | } | ||
2746 | |||
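| /* | ||
| * Pick the uCode station table entry used to transmit this frame: | ||
| * broadcast/non-data frames use the broadcast entry, STA mode uses the | ||
| * AP entry, AP/IBSS modes look the destination up (IBSS adds it on the | ||
| * fly) and fall back to broadcast when no match is found. | ||
| */ | ||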
2747 | static int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr) | ||
2748 | { | ||
2749 | int sta_id; | ||
2750 | u16 fc = le16_to_cpu(hdr->frame_control); | ||
2751 | |||
2752 | /* If this frame is broadcast or not data then use the broadcast | ||
2753 | * station id */ | ||
2754 | if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) || | ||
2755 | is_multicast_ether_addr(hdr->addr1)) | ||
2756 | return priv->hw_setting.bcast_sta_id; | ||
2757 | |||
2758 | switch (priv->iw_mode) { | ||
2759 | |||
2760 | /* If this frame is part of a BSS network (we're a station), then | ||
2761 | * we use the AP's station id */ | ||
2762 | case IEEE80211_IF_TYPE_STA: | ||
2763 | return IWL_AP_ID; | ||
2764 | |||
2765 | /* If we are an AP, then find the station, or use BCAST */ | ||
2766 | case IEEE80211_IF_TYPE_AP: | ||
2767 | sta_id = iwl_hw_find_station(priv, hdr->addr1); | ||
2768 | if (sta_id != IWL_INVALID_STATION) | ||
2769 | return sta_id; | ||
2770 | return priv->hw_setting.bcast_sta_id; | ||
2771 | |||
2772 | /* If this frame is part of a IBSS network, then we use the | ||
2773 | * target specific station id */ | ||
2774 | case IEEE80211_IF_TYPE_IBSS: | ||
2775 | sta_id = iwl_hw_find_station(priv, hdr->addr1); | ||
2776 | if (sta_id != IWL_INVALID_STATION) | ||
2777 | return sta_id; | ||
2778 | |||
2779 | sta_id = iwl_add_station(priv, hdr->addr1, 0, CMD_ASYNC); | ||
2780 | |||
2781 | if (sta_id != IWL_INVALID_STATION) | ||
2782 | return sta_id; | ||
2783 | |||
2784 | IWL_DEBUG_DROP("Station " MAC_FMT " not in station map. " | ||
2785 | "Defaulting to broadcast...\n", | ||
2786 | MAC_ARG(hdr->addr1)); | ||
2787 | iwl_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr)); | ||
2788 | return priv->hw_setting.bcast_sta_id; | ||
2789 | |||
2790 | default: | ||
2791 | IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode); | ||
2792 | return priv->hw_setting.bcast_sta_id; | ||
2793 | } | ||
2794 | } | ||
2795 | |||
2796 | /* | ||
2797 | * start REPLY_TX command process | ||
2798 | */ | ||
2799 | static int iwl_tx_skb(struct iwl_priv *priv, | ||
2800 | struct sk_buff *skb, struct ieee80211_tx_control *ctl) | ||
2801 | { | ||
2802 | struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; | ||
2803 | struct iwl_tfd_frame *tfd; | ||
2804 | u32 *control_flags; | ||
2805 | int txq_id = ctl->queue; | ||
2806 | struct iwl_tx_queue *txq = NULL; | ||
2807 | struct iwl_queue *q = NULL; | ||
2808 | dma_addr_t phys_addr; | ||
2809 | dma_addr_t txcmd_phys; | ||
2810 | struct iwl_cmd *out_cmd = NULL; | ||
2811 | u16 len, idx, len_org; | ||
2812 | u8 id, hdr_len, unicast; | ||
2813 | u8 sta_id; | ||
2814 | u16 seq_number = 0; | ||
2815 | u16 fc; | ||
2816 | __le16 *qc; | ||
2817 | u8 wait_write_ptr = 0; | ||
2818 | unsigned long flags; | ||
2819 | int rc; | ||
2820 | |||
2821 | spin_lock_irqsave(&priv->lock, flags); | ||
2822 | if (iwl_is_rfkill(priv)) { | ||
2823 | IWL_DEBUG_DROP("Dropping - RF KILL\n"); | ||
2824 | goto drop_unlock; | ||
2825 | } | ||
2826 | |||
2827 | if (!priv->interface_id) { | ||
2828 | IWL_DEBUG_DROP("Dropping - !priv->interface_id\n"); | ||
2829 | goto drop_unlock; | ||
2830 | } | ||
2831 | |||
2832 | if ((ctl->tx_rate & 0xFF) == IWL_INVALID_RATE) { | ||
2833 | IWL_ERROR("ERROR: No TX rate available.\n"); | ||
2834 | goto drop_unlock; | ||
2835 | } | ||
2836 | |||
2837 | unicast = !is_multicast_ether_addr(hdr->addr1); | ||
2838 | id = 0; | ||
2839 | |||
2840 | fc = le16_to_cpu(hdr->frame_control); | ||
2841 | |||
2842 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
2843 | if (ieee80211_is_auth(fc)) | ||
2844 | IWL_DEBUG_TX("Sending AUTH frame\n"); | ||
2845 | else if (ieee80211_is_assoc_request(fc)) | ||
2846 | IWL_DEBUG_TX("Sending ASSOC frame\n"); | ||
2847 | else if (ieee80211_is_reassoc_request(fc)) | ||
2848 | IWL_DEBUG_TX("Sending REASSOC frame\n"); | ||
2849 | #endif | ||
2850 | |||
2851 | if (!iwl_is_associated(priv) && | ||
2852 | ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)) { | ||
2853 | IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n"); | ||
2854 | goto drop_unlock; | ||
2855 | } | ||
2856 | |||
2857 | spin_unlock_irqrestore(&priv->lock, flags); | ||
2858 | |||
2859 | hdr_len = ieee80211_get_hdrlen(fc); | ||
2860 | sta_id = iwl_get_sta_id(priv, hdr); | ||
2861 | if (sta_id == IWL_INVALID_STATION) { | ||
2862 | IWL_DEBUG_DROP("Dropping - INVALID STATION: " MAC_FMT "\n", | ||
2863 | MAC_ARG(hdr->addr1)); | ||
2864 | goto drop; | ||
2865 | } | ||
2866 | |||
2867 | IWL_DEBUG_RATE("station Id %d\n", sta_id); | ||
2868 | |||
2869 | qc = ieee80211_get_qos_ctrl(hdr); | ||
2870 | if (qc) { | ||
2871 | u8 tid = (u8)(le16_to_cpu(*qc) & 0xf); | ||
2872 | seq_number = priv->stations[sta_id].tid[tid].seq_number & | ||
2873 | IEEE80211_SCTL_SEQ; | ||
2874 | hdr->seq_ctrl = cpu_to_le16(seq_number) | | ||
2875 | (hdr->seq_ctrl & | ||
2876 | __constant_cpu_to_le16(IEEE80211_SCTL_FRAG)); | ||
2877 | seq_number += 0x10; | ||
2878 | #ifdef CONFIG_IWLWIFI_HT | ||
2879 | #ifdef CONFIG_IWLWIFI_HT_AGG | ||
2880 | /* aggregation is on for this <sta,tid> */ | ||
2881 | if (ctl->flags & IEEE80211_TXCTL_HT_MPDU_AGG) | ||
2882 | txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; | ||
2883 | #endif /* CONFIG_IWLWIFI_HT_AGG */ | ||
2884 | #endif /* CONFIG_IWLWIFI_HT */ | ||
2885 | } | ||
2886 | txq = &priv->txq[txq_id]; | ||
2887 | q = &txq->q; | ||
2888 | |||
2889 | spin_lock_irqsave(&priv->lock, flags); | ||
2890 | |||
2891 | tfd = &txq->bd[q->first_empty]; | ||
2892 | memset(tfd, 0, sizeof(*tfd)); | ||
2893 | control_flags = (u32 *) tfd; | ||
2894 | idx = get_cmd_index(q, q->first_empty, 0); | ||
2895 | |||
2896 | memset(&(txq->txb[q->first_empty]), 0, sizeof(struct iwl_tx_info)); | ||
2897 | txq->txb[q->first_empty].skb[0] = skb; | ||
2898 | memcpy(&(txq->txb[q->first_empty].status.control), | ||
2899 | ctl, sizeof(struct ieee80211_tx_control)); | ||
2900 | out_cmd = &txq->cmd[idx]; | ||
2901 | memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); | ||
2902 | memset(&out_cmd->cmd.tx, 0, sizeof(out_cmd->cmd.tx)); | ||
2903 | out_cmd->hdr.cmd = REPLY_TX; | ||
2904 | out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | | ||
2905 | INDEX_TO_SEQ(q->first_empty))); | ||
2906 | /* copy frags header */ | ||
2907 | memcpy(out_cmd->cmd.tx.hdr, hdr, hdr_len); | ||
2908 | |||
2909 | /* hdr = (struct ieee80211_hdr *)out_cmd->cmd.tx.hdr; */ | ||
2910 | len = priv->hw_setting.tx_cmd_len + | ||
2911 | sizeof(struct iwl_cmd_header) + hdr_len; | ||
2912 | |||
2913 | len_org = len; | ||
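| /* The Tx command and attached MAC header are padded out to a dword | ||
| * boundary here; len_org is reused below as a flag so that | ||
| * TX_CMD_FLG_MH_PAD_MSK gets set whenever padding was actually added. */ | ||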
2914 | len = (len + 3) & ~3; | ||
2915 | |||
2916 | if (len_org != len) | ||
2917 | len_org = 1; | ||
2918 | else | ||
2919 | len_org = 0; | ||
2920 | |||
2921 | txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx + | ||
2922 | offsetof(struct iwl_cmd, hdr); | ||
2923 | |||
2924 | iwl_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len); | ||
2925 | |||
2926 | if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) | ||
2927 | iwl_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, 0); | ||
2928 | |||
2929 | /* 802.11 null functions have no payload... */ | ||
2930 | len = skb->len - hdr_len; | ||
2931 | if (len) { | ||
2932 | phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, | ||
2933 | len, PCI_DMA_TODEVICE); | ||
2934 | iwl_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, len); | ||
2935 | } | ||
2936 | |||
2937 | if (len_org) | ||
2938 | out_cmd->cmd.tx.tx_flags |= TX_CMD_FLG_MH_PAD_MSK; | ||
2939 | |||
2940 | len = (u16)skb->len; | ||
2941 | out_cmd->cmd.tx.len = cpu_to_le16(len); | ||
2942 | |||
2943 | /* TODO need this for burst mode later on */ | ||
2944 | iwl_build_tx_cmd_basic(priv, out_cmd, ctl, hdr, unicast, sta_id); | ||
2945 | |||
2946 | /* set is_hcca to 0; it probably will never be implemented */ | ||
2947 | iwl_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0); | ||
2948 | |||
2949 | iwl4965_tx_cmd(priv, out_cmd, sta_id, txcmd_phys, | ||
2950 | hdr, hdr_len, ctl, NULL); | ||
2951 | |||
2952 | if (!ieee80211_get_morefrag(hdr)) { | ||
2953 | txq->need_update = 1; | ||
2954 | if (qc) { | ||
2955 | u8 tid = (u8)(le16_to_cpu(*qc) & 0xf); | ||
2956 | priv->stations[sta_id].tid[tid].seq_number = seq_number; | ||
2957 | } | ||
2958 | } else { | ||
2959 | wait_write_ptr = 1; | ||
2960 | txq->need_update = 0; | ||
2961 | } | ||
2962 | |||
2963 | iwl_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload, | ||
2964 | sizeof(out_cmd->cmd.tx)); | ||
2965 | |||
2966 | iwl_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr, | ||
2967 | ieee80211_get_hdrlen(fc)); | ||
2968 | |||
2969 | iwl4965_tx_queue_update_wr_ptr(priv, txq, len); | ||
2970 | |||
2971 | q->first_empty = iwl_queue_inc_wrap(q->first_empty, q->n_bd); | ||
2972 | rc = iwl_tx_queue_update_write_ptr(priv, txq); | ||
2973 | spin_unlock_irqrestore(&priv->lock, flags); | ||
2974 | |||
2975 | if (rc) | ||
2976 | return rc; | ||
2977 | |||
2978 | if ((iwl_queue_space(q) < q->high_mark) | ||
2979 | && priv->mac80211_registered) { | ||
2980 | if (wait_write_ptr) { | ||
2981 | spin_lock_irqsave(&priv->lock, flags); | ||
2982 | txq->need_update = 1; | ||
2983 | iwl_tx_queue_update_write_ptr(priv, txq); | ||
2984 | spin_unlock_irqrestore(&priv->lock, flags); | ||
2985 | } | ||
2986 | |||
2987 | ieee80211_stop_queue(priv->hw, ctl->queue); | ||
2988 | } | ||
2989 | |||
2990 | return 0; | ||
2991 | |||
2992 | drop_unlock: | ||
2993 | spin_unlock_irqrestore(&priv->lock, flags); | ||
2994 | drop: | ||
2995 | return -1; | ||
2996 | } | ||
2997 | |||
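| /* | ||
| * Rebuild active_rate/active_rate_basic from the rates mac80211 reports | ||
| * as supported for the current phymode, then derive the CCK and OFDM | ||
| * basic rate masks that go into the staging RXON command. | ||
| */ | ||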
2998 | static void iwl_set_rate(struct iwl_priv *priv) | ||
2999 | { | ||
3000 | const struct ieee80211_hw_mode *hw = NULL; | ||
3001 | struct ieee80211_rate *rate; | ||
3002 | int i; | ||
3003 | |||
3004 | hw = iwl_get_hw_mode(priv, priv->phymode); | ||
3005 | |||
3006 | priv->active_rate = 0; | ||
3007 | priv->active_rate_basic = 0; | ||
3008 | |||
3009 | IWL_DEBUG_RATE("Setting rates for 802.11%c\n", | ||
3010 | hw->mode == MODE_IEEE80211A ? | ||
3011 | 'a' : ((hw->mode == MODE_IEEE80211B) ? 'b' : 'g')); | ||
3012 | |||
3013 | for (i = 0; i < hw->num_rates; i++) { | ||
3014 | rate = &(hw->rates[i]); | ||
3015 | if ((rate->val < IWL_RATE_COUNT) && | ||
3016 | (rate->flags & IEEE80211_RATE_SUPPORTED)) { | ||
3017 | IWL_DEBUG_RATE("Adding rate index %d (plcp %d)%s\n", | ||
3018 | rate->val, iwl_rates[rate->val].plcp, | ||
3019 | (rate->flags & IEEE80211_RATE_BASIC) ? | ||
3020 | "*" : ""); | ||
3021 | priv->active_rate |= (1 << rate->val); | ||
3022 | if (rate->flags & IEEE80211_RATE_BASIC) | ||
3023 | priv->active_rate_basic |= (1 << rate->val); | ||
3024 | } else | ||
3025 | IWL_DEBUG_RATE("Not adding rate %d (plcp %d)\n", | ||
3026 | rate->val, iwl_rates[rate->val].plcp); | ||
3027 | } | ||
3028 | |||
3029 | IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n", | ||
3030 | priv->active_rate, priv->active_rate_basic); | ||
3031 | |||
3032 | /* | ||
3033 | * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK); | ||
3034 | * otherwise set it to the default of all CCK rates and 6, 12, 24 Mbps for | ||
3035 | * OFDM. | ||
3036 | */ | ||
3037 | if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK) | ||
3038 | priv->staging_rxon.cck_basic_rates = | ||
3039 | ((priv->active_rate_basic & | ||
3040 | IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF; | ||
3041 | else | ||
3042 | priv->staging_rxon.cck_basic_rates = | ||
3043 | (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF; | ||
3044 | |||
3045 | if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK) | ||
3046 | priv->staging_rxon.ofdm_basic_rates = | ||
3047 | ((priv->active_rate_basic & | ||
3048 | (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >> | ||
3049 | IWL_FIRST_OFDM_RATE) & 0xFF; | ||
3050 | else | ||
3051 | priv->staging_rxon.ofdm_basic_rates = | ||
3052 | (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; | ||
3053 | } | ||
3054 | |||
3055 | static void iwl_radio_kill_sw(struct iwl_priv *priv, int disable_radio) | ||
3056 | { | ||
3057 | unsigned long flags; | ||
3058 | |||
3059 | if (!!disable_radio == test_bit(STATUS_RF_KILL_SW, &priv->status)) | ||
3060 | return; | ||
3061 | |||
3062 | IWL_DEBUG_RF_KILL("Manual SW RF KILL set to: RADIO %s\n", | ||
3063 | disable_radio ? "OFF" : "ON"); | ||
3064 | |||
3065 | if (disable_radio) { | ||
3066 | iwl_scan_cancel(priv); | ||
3067 | /* FIXME: This is a workaround for AP */ | ||
3068 | if (priv->iw_mode != IEEE80211_IF_TYPE_AP) { | ||
3069 | spin_lock_irqsave(&priv->lock, flags); | ||
3070 | iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, | ||
3071 | CSR_UCODE_SW_BIT_RFKILL); | ||
3072 | spin_unlock_irqrestore(&priv->lock, flags); | ||
3073 | iwl_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0); | ||
3074 | set_bit(STATUS_RF_KILL_SW, &priv->status); | ||
3075 | } | ||
3076 | return; | ||
3077 | } | ||
3078 | |||
3079 | spin_lock_irqsave(&priv->lock, flags); | ||
3080 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
3081 | |||
3082 | clear_bit(STATUS_RF_KILL_SW, &priv->status); | ||
3083 | spin_unlock_irqrestore(&priv->lock, flags); | ||
3084 | |||
3085 | /* wake up ucode */ | ||
3086 | msleep(10); | ||
3087 | |||
3088 | spin_lock_irqsave(&priv->lock, flags); | ||
3089 | iwl_read32(priv, CSR_UCODE_DRV_GP1); | ||
3090 | if (!iwl_grab_restricted_access(priv)) | ||
3091 | iwl_release_restricted_access(priv); | ||
3092 | spin_unlock_irqrestore(&priv->lock, flags); | ||
3093 | |||
3094 | if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { | ||
3095 | IWL_DEBUG_RF_KILL("Can not turn radio back on - " | ||
3096 | "disabled by HW switch\n"); | ||
3097 | return; | ||
3098 | } | ||
3099 | |||
3100 | queue_work(priv->workqueue, &priv->restart); | ||
3101 | return; | ||
3102 | } | ||
3103 | |||
3104 | void iwl_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb, | ||
3105 | u32 decrypt_res, struct ieee80211_rx_status *stats) | ||
3106 | { | ||
3107 | u16 fc = | ||
3108 | le16_to_cpu(((struct ieee80211_hdr *)skb->data)->frame_control); | ||
3109 | |||
3110 | if (priv->active_rxon.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK) | ||
3111 | return; | ||
3112 | |||
3113 | if (!(fc & IEEE80211_FCTL_PROTECTED)) | ||
3114 | return; | ||
3115 | |||
3116 | IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res); | ||
3117 | switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { | ||
3118 | case RX_RES_STATUS_SEC_TYPE_TKIP: | ||
3119 | if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == | ||
3120 | RX_RES_STATUS_BAD_ICV_MIC) | ||
3121 | stats->flag |= RX_FLAG_MMIC_ERROR; | ||
3122 | case RX_RES_STATUS_SEC_TYPE_WEP: | ||
3123 | case RX_RES_STATUS_SEC_TYPE_CCMP: | ||
3124 | if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == | ||
3125 | RX_RES_STATUS_DECRYPT_OK) { | ||
3126 | IWL_DEBUG_RX("hw decrypt successfully!!!\n"); | ||
3127 | stats->flag |= RX_FLAG_DECRYPTED; | ||
3128 | } | ||
3129 | break; | ||
3130 | |||
3131 | default: | ||
3132 | break; | ||
3133 | } | ||
3134 | } | ||
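/*
 * Illustrative decode of the switch above (symbolic, no numeric values
 * assumed): a CCMP frame whose decrypt_res carries RX_RES_STATUS_DECRYPT_OK
 * gets RX_FLAG_DECRYPTED set in its rx status; a TKIP frame whose decrypt
 * type is RX_RES_STATUS_BAD_ICV_MIC gets RX_FLAG_MMIC_ERROR and then falls
 * through to the same decrypt-ok test.
 */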
3135 | |||
3136 | void iwl_handle_data_packet_monitor(struct iwl_priv *priv, | ||
3137 | struct iwl_rx_mem_buffer *rxb, | ||
3138 | void *data, short len, | ||
3139 | struct ieee80211_rx_status *stats, | ||
3140 | u16 phy_flags) | ||
3141 | { | ||
3142 | struct iwl_rt_rx_hdr *iwl_rt; | ||
3143 | |||
3144 | /* First cache any information we need before we overwrite | ||
3145 | * the information provided in the skb from the hardware */ | ||
3146 | s8 signal = stats->ssi; | ||
3147 | s8 noise = 0; | ||
3148 | int rate = stats->rate; | ||
3149 | u64 tsf = stats->mactime; | ||
3150 | __le16 phy_flags_hw = cpu_to_le16(phy_flags); | ||
3151 | |||
3152 | /* We received data from the HW, so stop the watchdog */ | ||
3153 | if (len > IWL_RX_BUF_SIZE - sizeof(*iwl_rt)) { | ||
3154 | IWL_DEBUG_DROP("Dropping too large packet in monitor\n"); | ||
3155 | return; | ||
3156 | } | ||
3157 | |||
3158 | /* copy the frame data to write after where the radiotap header goes */ | ||
3159 | iwl_rt = (void *)rxb->skb->data; | ||
3160 | memmove(iwl_rt->payload, data, len); | ||
3161 | |||
3162 | iwl_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION; | ||
3163 | iwl_rt->rt_hdr.it_pad = 0; /* always good to zero */ | ||
3164 | |||
3165 | /* total header + data */ | ||
3166 | iwl_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*iwl_rt)); | ||
3167 | |||
3168 | /* Set the size of the skb to the size of the frame */ | ||
3169 | skb_put(rxb->skb, sizeof(*iwl_rt) + len); | ||
3170 | |||
3171 | /* Big bitfield of all the fields we provide in radiotap */ | ||
3172 | iwl_rt->rt_hdr.it_present = | ||
3173 | cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) | | ||
3174 | (1 << IEEE80211_RADIOTAP_FLAGS) | | ||
3175 | (1 << IEEE80211_RADIOTAP_RATE) | | ||
3176 | (1 << IEEE80211_RADIOTAP_CHANNEL) | | ||
3177 | (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | | ||
3178 | (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | | ||
3179 | (1 << IEEE80211_RADIOTAP_ANTENNA)); | ||
3180 | |||
3181 | /* Zero the flags, we'll add to them as we go */ | ||
3182 | iwl_rt->rt_flags = 0; | ||
3183 | |||
3184 | iwl_rt->rt_tsf = cpu_to_le64(tsf); | ||
3185 | |||
3186 | /* Convert to dBm */ | ||
3187 | iwl_rt->rt_dbmsignal = signal; | ||
3188 | iwl_rt->rt_dbmnoise = noise; | ||
3189 | |||
3190 | /* Convert the channel frequency and set the flags */ | ||
3191 | iwl_rt->rt_channelMHz = cpu_to_le16(stats->freq); | ||
3192 | if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK)) | ||
3193 | iwl_rt->rt_chbitmask = | ||
3194 | cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ)); | ||
3195 | else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK) | ||
3196 | iwl_rt->rt_chbitmask = | ||
3197 | cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ)); | ||
3198 | else /* 802.11g */ | ||
3199 | iwl_rt->rt_chbitmask = | ||
3200 | cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ)); | ||
3201 | |||
3202 | rate = iwl_rate_index_from_plcp(rate); | ||
3203 | if (rate == -1) | ||
3204 | iwl_rt->rt_rate = 0; | ||
3205 | else | ||
3206 | iwl_rt->rt_rate = iwl_rates[rate].ieee; | ||
3207 | |||
3208 | /* antenna number */ | ||
3209 | iwl_rt->rt_antenna = | ||
3210 | le16_to_cpu(phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4; | ||
3211 | |||
3212 | /* set the preamble flag if we have it */ | ||
3213 | if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) | ||
3214 | iwl_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; | ||
3215 | |||
3216 | IWL_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len); | ||
3217 | |||
3218 | stats->flag |= RX_FLAG_RADIOTAP; | ||
3219 | ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats); | ||
3220 | rxb->skb = NULL; | ||
3221 | } | ||
3222 | |||
3223 | |||
3224 | #define IWL_PACKET_RETRY_TIME HZ | ||
3225 | |||
3226 | int is_duplicate_packet(struct iwl_priv *priv, struct ieee80211_hdr *header) | ||
3227 | { | ||
3228 | u16 sc = le16_to_cpu(header->seq_ctrl); | ||
3229 | u16 seq = (sc & IEEE80211_SCTL_SEQ) >> 4; | ||
3230 | u16 frag = sc & IEEE80211_SCTL_FRAG; | ||
3231 | u16 *last_seq, *last_frag; | ||
3232 | unsigned long *last_time; | ||
3233 | |||
3234 | switch (priv->iw_mode) { | ||
3235 | case IEEE80211_IF_TYPE_IBSS:{ | ||
3236 | struct list_head *p; | ||
3237 | struct iwl_ibss_seq *entry = NULL; | ||
3238 | u8 *mac = header->addr2; | ||
3239 | int index = mac[5] & (IWL_IBSS_MAC_HASH_SIZE - 1); | ||
3240 | |||
3241 | __list_for_each(p, &priv->ibss_mac_hash[index]) { | ||
3242 | entry = | ||
3243 | list_entry(p, struct iwl_ibss_seq, list); | ||
3244 | if (!compare_ether_addr(entry->mac, mac)) | ||
3245 | break; | ||
3246 | } | ||
3247 | if (p == &priv->ibss_mac_hash[index]) { | ||
3248 | entry = kzalloc(sizeof(*entry), GFP_ATOMIC); | ||
3249 | if (!entry) { | ||
3250 | IWL_ERROR | ||
3251 | ("Cannot malloc new mac entry\n"); | ||
3252 | return 0; | ||
3253 | } | ||
3254 | memcpy(entry->mac, mac, ETH_ALEN); | ||
3255 | entry->seq_num = seq; | ||
3256 | entry->frag_num = frag; | ||
3257 | entry->packet_time = jiffies; | ||
3258 | list_add(&entry->list, | ||
3259 | &priv->ibss_mac_hash[index]); | ||
3260 | return 0; | ||
3261 | } | ||
3262 | last_seq = &entry->seq_num; | ||
3263 | last_frag = &entry->frag_num; | ||
3264 | last_time = &entry->packet_time; | ||
3265 | break; | ||
3266 | } | ||
3267 | case IEEE80211_IF_TYPE_STA: | ||
3268 | last_seq = &priv->last_seq_num; | ||
3269 | last_frag = &priv->last_frag_num; | ||
3270 | last_time = &priv->last_packet_time; | ||
3271 | break; | ||
3272 | default: | ||
3273 | return 0; | ||
3274 | } | ||
3275 | if ((*last_seq == seq) && | ||
3276 | time_after(*last_time + IWL_PACKET_RETRY_TIME, jiffies)) { | ||
3277 | if (*last_frag == frag) | ||
3278 | goto drop; | ||
3279 | if (*last_frag + 1 != frag) | ||
3280 | /* out-of-order fragment */ | ||
3281 | goto drop; | ||
3282 | } else | ||
3283 | *last_seq = seq; | ||
3284 | |||
3285 | *last_frag = frag; | ||
3286 | *last_time = jiffies; | ||
3287 | return 0; | ||
3288 | |||
3289 | drop: | ||
3290 | return 1; | ||
3291 | } | ||
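/*
 * Illustrative trace (hypothetical values): for seq_ctrl = 0x1234 the
 * fragment number is 0x1234 & IEEE80211_SCTL_FRAG = 4 and the sequence
 * number is (0x1234 & IEEE80211_SCTL_SEQ) >> 4 = 0x123.  A second frame
 * from the same transmitter with the same sequence and fragment numbers
 * arriving within IWL_PACKET_RETRY_TIME (one second) is flagged as a
 * duplicate and dropped by the caller.
 */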
3292 | |||
3293 | #ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT | ||
3294 | |||
3295 | #include "iwl-spectrum.h" | ||
3296 | |||
3297 | #define BEACON_TIME_MASK_LOW 0x00FFFFFF | ||
3298 | #define BEACON_TIME_MASK_HIGH 0xFF000000 | ||
3299 | #define TIME_UNIT 1024 | ||
3300 | |||
3301 | /* | ||
3302 | * extended beacon time format | ||
3303 | * time in usec will be changed into a 32-bit value in 8:24 format | ||
3304 | * the high byte is the beacon count | ||
3305 | * the lower 3 bytes are the time in usec within one beacon interval | ||
3306 | */ | ||
3307 | |||
3308 | static u32 iwl_usecs_to_beacons(u32 usec, u32 beacon_interval) | ||
3309 | { | ||
3310 | u32 quot; | ||
3311 | u32 rem; | ||
3312 | u32 interval = beacon_interval * 1024; | ||
3313 | |||
3314 | if (!interval || !usec) | ||
3315 | return 0; | ||
3316 | |||
3317 | quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24); | ||
3318 | rem = (usec % interval) & BEACON_TIME_MASK_LOW; | ||
3319 | |||
3320 | return (quot << 24) + rem; | ||
3321 | } | ||
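/*
 * A worked example of the 8:24 packing above (the numbers are illustrative
 * only, not taken from the driver): with a beacon interval of 100 TU the
 * interval is 100 * 1024 = 102400 usec.  For usec = 250000,
 * quot = 250000 / 102400 = 2 and rem = 250000 % 102400 = 45200 (0xb090),
 * so iwl_usecs_to_beacons() returns (2 << 24) + 45200 = 0x0200b090.
 */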
3322 | |||
3323 | /* base is usually what we get from the ucode with each received frame, | ||
3324 | * i.e. the same as the HW timer counter counting down | ||
3325 | */ | ||
3326 | |||
3327 | static __le32 iwl_add_beacon_time(u32 base, u32 addon, u32 beacon_interval) | ||
3328 | { | ||
3329 | u32 base_low = base & BEACON_TIME_MASK_LOW; | ||
3330 | u32 addon_low = addon & BEACON_TIME_MASK_LOW; | ||
3331 | u32 interval = beacon_interval * TIME_UNIT; | ||
3332 | u32 res = (base & BEACON_TIME_MASK_HIGH) + | ||
3333 | (addon & BEACON_TIME_MASK_HIGH); | ||
3334 | |||
3335 | if (base_low > addon_low) | ||
3336 | res += base_low - addon_low; | ||
3337 | else if (base_low < addon_low) { | ||
3338 | res += interval + base_low - addon_low; | ||
3339 | res += (1 << 24); | ||
3340 | } else | ||
3341 | res += (1 << 24); | ||
3342 | |||
3343 | return cpu_to_le32(res); | ||
3344 | } | ||
3345 | |||
3346 | static int iwl_get_measurement(struct iwl_priv *priv, | ||
3347 | struct ieee80211_measurement_params *params, | ||
3348 | u8 type) | ||
3349 | { | ||
3350 | struct iwl_spectrum_cmd spectrum; | ||
3351 | struct iwl_rx_packet *res; | ||
3352 | struct iwl_host_cmd cmd = { | ||
3353 | .id = REPLY_SPECTRUM_MEASUREMENT_CMD, | ||
3354 | .data = (void *)&spectrum, | ||
3355 | .meta.flags = CMD_WANT_SKB, | ||
3356 | }; | ||
3357 | u32 add_time = le64_to_cpu(params->start_time); | ||
3358 | int rc; | ||
3359 | int spectrum_resp_status; | ||
3360 | int duration = le16_to_cpu(params->duration); | ||
3361 | |||
3362 | if (iwl_is_associated(priv)) | ||
3363 | add_time = | ||
3364 | iwl_usecs_to_beacons( | ||
3365 | le64_to_cpu(params->start_time) - priv->last_tsf, | ||
3366 | le16_to_cpu(priv->rxon_timing.beacon_interval)); | ||
3367 | |||
3368 | memset(&spectrum, 0, sizeof(spectrum)); | ||
3369 | |||
3370 | spectrum.channel_count = cpu_to_le16(1); | ||
3371 | spectrum.flags = | ||
3372 | RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK; | ||
3373 | spectrum.filter_flags = MEASUREMENT_FILTER_FLAG; | ||
3374 | cmd.len = sizeof(spectrum); | ||
3375 | spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); | ||
3376 | |||
3377 | if (iwl_is_associated(priv)) | ||
3378 | spectrum.start_time = | ||
3379 | iwl_add_beacon_time(priv->last_beacon_time, | ||
3380 | add_time, | ||
3381 | le16_to_cpu(priv->rxon_timing.beacon_interval)); | ||
3382 | else | ||
3383 | spectrum.start_time = 0; | ||
3384 | |||
3385 | spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT); | ||
3386 | spectrum.channels[0].channel = params->channel; | ||
3387 | spectrum.channels[0].type = type; | ||
3388 | if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK) | ||
3389 | spectrum.flags |= RXON_FLG_BAND_24G_MSK | | ||
3390 | RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK; | ||
3391 | |||
3392 | rc = iwl_send_cmd_sync(priv, &cmd); | ||
3393 | if (rc) | ||
3394 | return rc; | ||
3395 | |||
3396 | res = (struct iwl_rx_packet *)cmd.meta.u.skb->data; | ||
3397 | if (res->hdr.flags & IWL_CMD_FAILED_MSK) { | ||
3398 | IWL_ERROR("Bad return from REPLY_SPECTRUM_MEASUREMENT_CMD\n"); | ||
3399 | rc = -EIO; | ||
3400 | } | ||
3401 | |||
3402 | spectrum_resp_status = le16_to_cpu(res->u.spectrum.status); | ||
3403 | switch (spectrum_resp_status) { | ||
3404 | case 0: /* Command will be handled */ | ||
3405 | if (res->u.spectrum.id != 0xff) { | ||
3406 | IWL_DEBUG_INFO | ||
3407 | ("Replaced existing measurement: %d\n", | ||
3408 | res->u.spectrum.id); | ||
3409 | priv->measurement_status &= ~MEASUREMENT_READY; | ||
3410 | } | ||
3411 | priv->measurement_status |= MEASUREMENT_ACTIVE; | ||
3412 | rc = 0; | ||
3413 | break; | ||
3414 | |||
3415 | case 1: /* Command will not be handled */ | ||
3416 | rc = -EAGAIN; | ||
3417 | break; | ||
3418 | } | ||
3419 | |||
3420 | dev_kfree_skb_any(cmd.meta.u.skb); | ||
3421 | |||
3422 | return rc; | ||
3423 | } | ||
3424 | #endif | ||
3425 | |||
3426 | static void iwl_txstatus_to_ieee(struct iwl_priv *priv, | ||
3427 | struct iwl_tx_info *tx_sta) | ||
3428 | { | ||
3429 | |||
3430 | tx_sta->status.ack_signal = 0; | ||
3431 | tx_sta->status.excessive_retries = 0; | ||
3432 | tx_sta->status.queue_length = 0; | ||
3433 | tx_sta->status.queue_number = 0; | ||
3434 | |||
3435 | if (in_interrupt()) | ||
3436 | ieee80211_tx_status_irqsafe(priv->hw, | ||
3437 | tx_sta->skb[0], &(tx_sta->status)); | ||
3438 | else | ||
3439 | ieee80211_tx_status(priv->hw, | ||
3440 | tx_sta->skb[0], &(tx_sta->status)); | ||
3441 | |||
3442 | tx_sta->skb[0] = NULL; | ||
3443 | } | ||
3444 | |||
3445 | /** | ||
3446 | * iwl_tx_queue_reclaim - Reclaim Tx queue entries no longer used by the NIC. | ||
3447 | * | ||
3448 | * When the FW advances the 'R' index, all entries between the old and | ||
3449 | * new 'R' index need to be reclaimed. As a result, some free space | ||
3450 | * forms. If there is enough free space (> low mark), wake the Tx queue. | ||
3451 | */ | ||
3452 | int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index) | ||
3453 | { | ||
3454 | struct iwl_tx_queue *txq = &priv->txq[txq_id]; | ||
3455 | struct iwl_queue *q = &txq->q; | ||
3456 | int nfreed = 0; | ||
3457 | |||
3458 | if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) { | ||
3459 | IWL_ERROR("Read index for DMA queue txq id (%d), index %d, " | ||
3460 | "is out of range [0-%d] %d %d.\n", txq_id, | ||
3461 | index, q->n_bd, q->first_empty, q->last_used); | ||
3462 | return 0; | ||
3463 | } | ||
3464 | |||
3465 | for (index = iwl_queue_inc_wrap(index, q->n_bd); | ||
3466 | q->last_used != index; | ||
3467 | q->last_used = iwl_queue_inc_wrap(q->last_used, q->n_bd)) { | ||
3468 | if (txq_id != IWL_CMD_QUEUE_NUM) { | ||
3469 | iwl_txstatus_to_ieee(priv, | ||
3470 | &(txq->txb[txq->q.last_used])); | ||
3471 | iwl_hw_txq_free_tfd(priv, txq); | ||
3472 | } else if (nfreed > 1) { | ||
3473 | IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index, | ||
3474 | q->first_empty, q->last_used); | ||
3475 | queue_work(priv->workqueue, &priv->restart); | ||
3476 | } | ||
3477 | nfreed++; | ||
3478 | } | ||
3479 | |||
3480 | if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) && | ||
3481 | (txq_id != IWL_CMD_QUEUE_NUM) && | ||
3482 | priv->mac80211_registered) | ||
3483 | ieee80211_wake_queue(priv->hw, txq_id); | ||
3484 | |||
3485 | |||
3486 | return nfreed; | ||
3487 | } | ||
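/*
 * Example reclaim pass (hypothetical indexes, assuming a data queue and no
 * wrap-around): with q->last_used == 3 and index == 6, the loop reports tx
 * status and frees the TFDs for entries 3, 4, 5 and 6, nfreed ends up as 4,
 * and q->last_used is left at 7, i.e. just past the last reclaimed entry.
 */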
3488 | |||
3489 | static int iwl_is_tx_success(u32 status) | ||
3490 | { | ||
3491 | status &= TX_STATUS_MSK; | ||
3492 | return (status == TX_STATUS_SUCCESS) | ||
3493 | || (status == TX_STATUS_DIRECT_DONE); | ||
3494 | } | ||
3495 | |||
3496 | /****************************************************************************** | ||
3497 | * | ||
3498 | * Generic RX handler implementations | ||
3499 | * | ||
3500 | ******************************************************************************/ | ||
3501 | #ifdef CONFIG_IWLWIFI_HT | ||
3502 | #ifdef CONFIG_IWLWIFI_HT_AGG | ||
3503 | |||
3504 | static inline int iwl_get_ra_sta_id(struct iwl_priv *priv, | ||
3505 | struct ieee80211_hdr *hdr) | ||
3506 | { | ||
3507 | if (priv->iw_mode == IEEE80211_IF_TYPE_STA) | ||
3508 | return IWL_AP_ID; | ||
3509 | else { | ||
3510 | u8 *da = ieee80211_get_DA(hdr); | ||
3511 | return iwl_hw_find_station(priv, da); | ||
3512 | } | ||
3513 | } | ||
3514 | |||
3515 | static struct ieee80211_hdr *iwl_tx_queue_get_hdr( | ||
3516 | struct iwl_priv *priv, int txq_id, int idx) | ||
3517 | { | ||
3518 | if (priv->txq[txq_id].txb[idx].skb[0]) | ||
3519 | return (struct ieee80211_hdr *)priv->txq[txq_id]. | ||
3520 | txb[idx].skb[0]->data; | ||
3521 | return NULL; | ||
3522 | } | ||
3523 | |||
3524 | static inline u32 iwl_get_scd_ssn(struct iwl_tx_resp *tx_resp) | ||
3525 | { | ||
3526 | __le32 *scd_ssn = (__le32 *)((u32 *)&tx_resp->status + | ||
3527 | tx_resp->frame_count); | ||
3528 | return le32_to_cpu(*scd_ssn) & MAX_SN; | ||
3529 | |||
3530 | } | ||
3531 | static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv, | ||
3532 | struct iwl_ht_agg *agg, | ||
3533 | struct iwl_tx_resp *tx_resp, | ||
3534 | u16 start_idx) | ||
3535 | { | ||
3536 | u32 status; | ||
3537 | __le32 *frame_status = &tx_resp->status; | ||
3538 | struct ieee80211_tx_status *tx_status = NULL; | ||
3539 | struct ieee80211_hdr *hdr = NULL; | ||
3540 | int i, sh; | ||
3541 | int txq_id, idx; | ||
3542 | u16 seq; | ||
3543 | |||
3544 | if (agg->wait_for_ba) | ||
3545 | IWL_DEBUG_TX_REPLY("got tx response w/o block-ack\n"); | ||
3546 | |||
3547 | agg->frame_count = tx_resp->frame_count; | ||
3548 | agg->start_idx = start_idx; | ||
3549 | agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); | ||
3550 | agg->bitmap0 = agg->bitmap1 = 0; | ||
3551 | |||
3552 | if (agg->frame_count == 1) { | ||
3553 | struct iwl_tx_queue *txq; | ||
3554 | status = le32_to_cpu(frame_status[0]); | ||
3555 | |||
3556 | txq_id = agg->txq_id; | ||
3557 | txq = &priv->txq[txq_id]; | ||
3558 | /* FIXME: code repetition */ | ||
3559 | IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d \n", | ||
3560 | agg->frame_count, agg->start_idx); | ||
3561 | |||
3562 | tx_status = &(priv->txq[txq_id].txb[txq->q.last_used].status); | ||
3563 | tx_status->retry_count = tx_resp->failure_frame; | ||
3564 | tx_status->queue_number = status & 0xff; | ||
3565 | tx_status->queue_length = tx_resp->bt_kill_count; | ||
3566 | tx_status->queue_length |= tx_resp->failure_rts; | ||
3567 | |||
3568 | tx_status->flags = iwl_is_tx_success(status)? | ||
3569 | IEEE80211_TX_STATUS_ACK : 0; | ||
3570 | tx_status->control.tx_rate = | ||
3571 | iwl_hw_get_rate_n_flags(tx_resp->rate_n_flags); | ||
3572 | /* FIXME: code repetition end */ | ||
3573 | |||
3574 | IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n", | ||
3575 | status & 0xff, tx_resp->failure_frame); | ||
3576 | IWL_DEBUG_TX_REPLY("Rate Info rate_n_flags=%x\n", | ||
3577 | iwl_hw_get_rate_n_flags(tx_resp->rate_n_flags)); | ||
3578 | |||
3579 | agg->wait_for_ba = 0; | ||
3580 | } else { | ||
3581 | u64 bitmap = 0; | ||
3582 | int start = agg->start_idx; | ||
3583 | |||
3584 | for (i = 0; i < agg->frame_count; i++) { | ||
3585 | u16 sc; | ||
3586 | status = le32_to_cpu(frame_status[i]); | ||
3587 | seq = status >> 16; | ||
3588 | idx = SEQ_TO_INDEX(seq); | ||
3589 | txq_id = SEQ_TO_QUEUE(seq); | ||
3590 | |||
3591 | if (status & (AGG_TX_STATE_FEW_BYTES_MSK | | ||
3592 | AGG_TX_STATE_ABORT_MSK)) | ||
3593 | continue; | ||
3594 | |||
3595 | IWL_DEBUG_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n", | ||
3596 | agg->frame_count, txq_id, idx); | ||
3597 | |||
3598 | hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx); | ||
3599 | |||
3600 | sc = le16_to_cpu(hdr->seq_ctrl); | ||
3601 | if (idx != (SEQ_TO_SN(sc) & 0xff)) { | ||
3602 | IWL_ERROR("BUG_ON idx doesn't match seq control" | ||
3603 | " idx=%d, seq_idx=%d, seq=%d\n", | ||
3604 | idx, SEQ_TO_SN(sc), | ||
3605 | hdr->seq_ctrl); | ||
3606 | return -1; | ||
3607 | } | ||
3608 | |||
3609 | IWL_DEBUG_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", | ||
3610 | i, idx, SEQ_TO_SN(sc)); | ||
3611 | |||
3612 | sh = idx - start; | ||
3613 | if (sh > 64) { | ||
3614 | sh = (start - idx) + 0xff; | ||
3615 | bitmap = bitmap << sh; | ||
3616 | sh = 0; | ||
3617 | start = idx; | ||
3618 | } else if (sh < -64) | ||
3619 | sh = 0xff - (start - idx); | ||
3620 | else if (sh < 0) { | ||
3621 | sh = start - idx; | ||
3622 | start = idx; | ||
3623 | bitmap = bitmap << sh; | ||
3624 | sh = 0; | ||
3625 | } | ||
3626 | bitmap |= (1 << sh); | ||
3627 | IWL_DEBUG_TX_REPLY("start=%d bitmap=0x%x\n", | ||
3628 | start, (u32)(bitmap & 0xFFFFFFFF)); | ||
3629 | } | ||
3630 | |||
3631 | agg->bitmap0 = bitmap & 0xFFFFFFFF; | ||
3632 | agg->bitmap1 = bitmap >> 32; | ||
3633 | agg->start_idx = start; | ||
3634 | agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); | ||
3635 | IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%x\n", | ||
3636 | agg->frame_count, agg->start_idx, | ||
3637 | agg->bitmap0); | ||
3638 | |||
3639 | if (bitmap) | ||
3640 | agg->wait_for_ba = 1; | ||
3641 | } | ||
3642 | return 0; | ||
3643 | } | ||
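/*
 * Sketch of the bitmap accumulation above (hypothetical indexes, assuming
 * every reported frame lies within 64 entries of start_idx and none carries
 * the FEW_BYTES/ABORT status): with start_idx = 5 and frames reported at
 * indexes 5, 6 and 8, the shifts are 0, 1 and 3, so bitmap0 becomes 0xb --
 * positions 0, 1 and 3 of the aggregate are still waiting for the block-ack.
 */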
3644 | #endif | ||
3645 | #endif | ||
3646 | |||
3647 | static void iwl_rx_reply_tx(struct iwl_priv *priv, | ||
3648 | struct iwl_rx_mem_buffer *rxb) | ||
3649 | { | ||
3650 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3651 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); | ||
3652 | int txq_id = SEQ_TO_QUEUE(sequence); | ||
3653 | int index = SEQ_TO_INDEX(sequence); | ||
3654 | struct iwl_tx_queue *txq = &priv->txq[txq_id]; | ||
3655 | struct ieee80211_tx_status *tx_status; | ||
3656 | struct iwl_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; | ||
3657 | u32 status = le32_to_cpu(tx_resp->status); | ||
3658 | #ifdef CONFIG_IWLWIFI_HT | ||
3659 | #ifdef CONFIG_IWLWIFI_HT_AGG | ||
3660 | int tid, sta_id; | ||
3661 | #endif | ||
3662 | #endif | ||
3663 | |||
3664 | if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) { | ||
3665 | IWL_ERROR("Read index for DMA queue txq_id (%d) index %d " | ||
3666 | "is out of range [0-%d] %d %d\n", txq_id, | ||
3667 | index, txq->q.n_bd, txq->q.first_empty, | ||
3668 | txq->q.last_used); | ||
3669 | return; | ||
3670 | } | ||
3671 | |||
3672 | #ifdef CONFIG_IWLWIFI_HT | ||
3673 | #ifdef CONFIG_IWLWIFI_HT_AGG | ||
3674 | if (txq->sched_retry) { | ||
3675 | const u32 scd_ssn = iwl_get_scd_ssn(tx_resp); | ||
3676 | struct ieee80211_hdr *hdr = | ||
3677 | iwl_tx_queue_get_hdr(priv, txq_id, index); | ||
3678 | struct iwl_ht_agg *agg = NULL; | ||
3679 | __le16 *qc = ieee80211_get_qos_ctrl(hdr); | ||
3680 | |||
3681 | if (qc == NULL) { | ||
3682 | IWL_ERROR("BUG_ON qc is null!!!!\n"); | ||
3683 | return; | ||
3684 | } | ||
3685 | |||
3686 | tid = le16_to_cpu(*qc) & 0xf; | ||
3687 | |||
3688 | sta_id = iwl_get_ra_sta_id(priv, hdr); | ||
3689 | if (unlikely(sta_id == IWL_INVALID_STATION)) { | ||
3690 | IWL_ERROR("Station not known\n"); | ||
3691 | return; | ||
3692 | } | ||
3693 | |||
3694 | agg = &priv->stations[sta_id].tid[tid].agg; | ||
3695 | |||
3696 | iwl4965_tx_status_reply_tx(priv, agg, tx_resp, index); | ||
3697 | |||
3698 | if ((tx_resp->frame_count == 1) && | ||
3699 | !iwl_is_tx_success(status)) { | ||
3700 | /* TODO: send BAR */ | ||
3701 | } | ||
3702 | |||
3703 | if ((txq->q.last_used != (scd_ssn & 0xff))) { | ||
3704 | index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); | ||
3705 | IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn " | ||
3706 | "%d index %d\n", scd_ssn , index); | ||
3707 | iwl_tx_queue_reclaim(priv, txq_id, index); | ||
3708 | } | ||
3709 | } else { | ||
3710 | #endif /* CONFIG_IWLWIFI_HT_AGG */ | ||
3711 | #endif /* CONFIG_IWLWIFI_HT */ | ||
3712 | tx_status = &(txq->txb[txq->q.last_used].status); | ||
3713 | |||
3714 | tx_status->retry_count = tx_resp->failure_frame; | ||
3715 | tx_status->queue_number = status; | ||
3716 | tx_status->queue_length = tx_resp->bt_kill_count; | ||
3717 | tx_status->queue_length |= tx_resp->failure_rts; | ||
3718 | |||
3719 | tx_status->flags = | ||
3720 | iwl_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0; | ||
3721 | |||
3722 | tx_status->control.tx_rate = | ||
3723 | iwl_hw_get_rate_n_flags(tx_resp->rate_n_flags); | ||
3724 | |||
3725 | IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags 0x%x " | ||
3726 | "retries %d\n", txq_id, iwl_get_tx_fail_reason(status), | ||
3727 | status, le32_to_cpu(tx_resp->rate_n_flags), | ||
3728 | tx_resp->failure_frame); | ||
3729 | |||
3730 | IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index); | ||
3731 | if (index != -1) | ||
3732 | iwl_tx_queue_reclaim(priv, txq_id, index); | ||
3733 | #ifdef CONFIG_IWLWIFI_HT | ||
3734 | #ifdef CONFIG_IWLWIFI_HT_AGG | ||
3735 | } | ||
3736 | #endif /* CONFIG_IWLWIFI_HT_AGG */ | ||
3737 | #endif /* CONFIG_IWLWIFI_HT */ | ||
3738 | |||
3739 | if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) | ||
3740 | IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n"); | ||
3741 | } | ||
3742 | |||
3743 | |||
3744 | static void iwl_rx_reply_alive(struct iwl_priv *priv, | ||
3745 | struct iwl_rx_mem_buffer *rxb) | ||
3746 | { | ||
3747 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3748 | struct iwl_alive_resp *palive; | ||
3749 | struct delayed_work *pwork; | ||
3750 | |||
3751 | palive = &pkt->u.alive_frame; | ||
3752 | |||
3753 | IWL_DEBUG_INFO("Alive ucode status 0x%08X revision " | ||
3754 | "0x%01X 0x%01X\n", | ||
3755 | palive->is_valid, palive->ver_type, | ||
3756 | palive->ver_subtype); | ||
3757 | |||
3758 | if (palive->ver_subtype == INITIALIZE_SUBTYPE) { | ||
3759 | IWL_DEBUG_INFO("Initialization Alive received.\n"); | ||
3760 | memcpy(&priv->card_alive_init, | ||
3761 | &pkt->u.alive_frame, | ||
3762 | sizeof(struct iwl_init_alive_resp)); | ||
3763 | pwork = &priv->init_alive_start; | ||
3764 | } else { | ||
3765 | IWL_DEBUG_INFO("Runtime Alive received.\n"); | ||
3766 | memcpy(&priv->card_alive, &pkt->u.alive_frame, | ||
3767 | sizeof(struct iwl_alive_resp)); | ||
3768 | pwork = &priv->alive_start; | ||
3769 | } | ||
3770 | |||
3771 | /* We delay the ALIVE response by 5ms to | ||
3772 | * give the HW RF Kill time to activate... */ | ||
3773 | if (palive->is_valid == UCODE_VALID_OK) | ||
3774 | queue_delayed_work(priv->workqueue, pwork, | ||
3775 | msecs_to_jiffies(5)); | ||
3776 | else | ||
3777 | IWL_WARNING("uCode did not respond OK.\n"); | ||
3778 | } | ||
3779 | |||
3780 | static void iwl_rx_reply_add_sta(struct iwl_priv *priv, | ||
3781 | struct iwl_rx_mem_buffer *rxb) | ||
3782 | { | ||
3783 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3784 | |||
3785 | IWL_DEBUG_RX("Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); | ||
3786 | return; | ||
3787 | } | ||
3788 | |||
3789 | static void iwl_rx_reply_error(struct iwl_priv *priv, | ||
3790 | struct iwl_rx_mem_buffer *rxb) | ||
3791 | { | ||
3792 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3793 | |||
3794 | IWL_ERROR("Error Reply type 0x%08X cmd %s (0x%02X) " | ||
3795 | "seq 0x%04X ser 0x%08X\n", | ||
3796 | le32_to_cpu(pkt->u.err_resp.error_type), | ||
3797 | get_cmd_string(pkt->u.err_resp.cmd_id), | ||
3798 | pkt->u.err_resp.cmd_id, | ||
3799 | le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num), | ||
3800 | le32_to_cpu(pkt->u.err_resp.error_info)); | ||
3801 | } | ||
3802 | |||
3803 | #define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x | ||
3804 | |||
3805 | static void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | ||
3806 | { | ||
3807 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3808 | struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon; | ||
3809 | struct iwl_csa_notification *csa = &(pkt->u.csa_notif); | ||
3810 | IWL_DEBUG_11H("CSA notif: channel %d, status %d\n", | ||
3811 | le16_to_cpu(csa->channel), le32_to_cpu(csa->status)); | ||
3812 | rxon->channel = csa->channel; | ||
3813 | priv->staging_rxon.channel = csa->channel; | ||
3814 | } | ||
3815 | |||
3816 | static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, | ||
3817 | struct iwl_rx_mem_buffer *rxb) | ||
3818 | { | ||
3819 | #ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT | ||
3820 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3821 | struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif); | ||
3822 | |||
3823 | if (!report->state) { | ||
3824 | IWL_DEBUG(IWL_DL_11H | IWL_DL_INFO, | ||
3825 | "Spectrum Measure Notification: Start\n"); | ||
3826 | return; | ||
3827 | } | ||
3828 | |||
3829 | memcpy(&priv->measure_report, report, sizeof(*report)); | ||
3830 | priv->measurement_status |= MEASUREMENT_READY; | ||
3831 | #endif | ||
3832 | } | ||
3833 | |||
3834 | static void iwl_rx_pm_sleep_notif(struct iwl_priv *priv, | ||
3835 | struct iwl_rx_mem_buffer *rxb) | ||
3836 | { | ||
3837 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
3838 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3839 | struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif); | ||
3840 | IWL_DEBUG_RX("sleep mode: %d, src: %d\n", | ||
3841 | sleep->pm_sleep_mode, sleep->pm_wakeup_src); | ||
3842 | #endif | ||
3843 | } | ||
3844 | |||
3845 | static void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv, | ||
3846 | struct iwl_rx_mem_buffer *rxb) | ||
3847 | { | ||
3848 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3849 | IWL_DEBUG_RADIO("Dumping %d bytes of unhandled " | ||
3850 | "notification for %s:\n", | ||
3851 | le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd)); | ||
3852 | iwl_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len)); | ||
3853 | } | ||
3854 | |||
3855 | static void iwl_bg_beacon_update(struct work_struct *work) | ||
3856 | { | ||
3857 | struct iwl_priv *priv = | ||
3858 | container_of(work, struct iwl_priv, beacon_update); | ||
3859 | struct sk_buff *beacon; | ||
3860 | |||
3861 | /* Pull updated AP beacon from mac80211. Will fail if not in AP mode. */ | ||
3862 | beacon = ieee80211_beacon_get(priv->hw, priv->interface_id, NULL); | ||
3863 | |||
3864 | if (!beacon) { | ||
3865 | IWL_ERROR("update beacon failed\n"); | ||
3866 | return; | ||
3867 | } | ||
3868 | |||
3869 | mutex_lock(&priv->mutex); | ||
3870 | /* new beacon skb is allocated every time; dispose previous.*/ | ||
3871 | if (priv->ibss_beacon) | ||
3872 | dev_kfree_skb(priv->ibss_beacon); | ||
3873 | |||
3874 | priv->ibss_beacon = beacon; | ||
3875 | mutex_unlock(&priv->mutex); | ||
3876 | |||
3877 | iwl_send_beacon_cmd(priv); | ||
3878 | } | ||
3879 | |||
3880 | static void iwl_rx_beacon_notif(struct iwl_priv *priv, | ||
3881 | struct iwl_rx_mem_buffer *rxb) | ||
3882 | { | ||
3883 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
3884 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3885 | struct iwl_beacon_notif *beacon = &(pkt->u.beacon_status); | ||
3886 | u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); | ||
3887 | |||
3888 | IWL_DEBUG_RX("beacon status %x retries %d iss %d " | ||
3889 | "tsf %d %d rate %d\n", | ||
3890 | le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK, | ||
3891 | beacon->beacon_notify_hdr.failure_frame, | ||
3892 | le32_to_cpu(beacon->ibss_mgr_status), | ||
3893 | le32_to_cpu(beacon->high_tsf), | ||
3894 | le32_to_cpu(beacon->low_tsf), rate); | ||
3895 | #endif | ||
3896 | |||
3897 | if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && | ||
3898 | (!test_bit(STATUS_EXIT_PENDING, &priv->status))) | ||
3899 | queue_work(priv->workqueue, &priv->beacon_update); | ||
3900 | } | ||
3901 | |||
3902 | /* Service response to REPLY_SCAN_CMD (0x80) */ | ||
3903 | static void iwl_rx_reply_scan(struct iwl_priv *priv, | ||
3904 | struct iwl_rx_mem_buffer *rxb) | ||
3905 | { | ||
3906 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
3907 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3908 | struct iwl_scanreq_notification *notif = | ||
3909 | (struct iwl_scanreq_notification *)pkt->u.raw; | ||
3910 | |||
3911 | IWL_DEBUG_RX("Scan request status = 0x%x\n", notif->status); | ||
3912 | #endif | ||
3913 | } | ||
3914 | |||
3915 | /* Service SCAN_START_NOTIFICATION (0x82) */ | ||
3916 | static void iwl_rx_scan_start_notif(struct iwl_priv *priv, | ||
3917 | struct iwl_rx_mem_buffer *rxb) | ||
3918 | { | ||
3919 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3920 | struct iwl_scanstart_notification *notif = | ||
3921 | (struct iwl_scanstart_notification *)pkt->u.raw; | ||
3922 | priv->scan_start_tsf = le32_to_cpu(notif->tsf_low); | ||
3923 | IWL_DEBUG_SCAN("Scan start: " | ||
3924 | "%d [802.11%s] " | ||
3925 | "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", | ||
3926 | notif->channel, | ||
3927 | notif->band ? "bg" : "a", | ||
3928 | notif->tsf_high, | ||
3929 | notif->tsf_low, notif->status, notif->beacon_timer); | ||
3930 | } | ||
3931 | |||
3932 | /* Service SCAN_RESULTS_NOTIFICATION (0x83) */ | ||
3933 | static void iwl_rx_scan_results_notif(struct iwl_priv *priv, | ||
3934 | struct iwl_rx_mem_buffer *rxb) | ||
3935 | { | ||
3936 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3937 | struct iwl_scanresults_notification *notif = | ||
3938 | (struct iwl_scanresults_notification *)pkt->u.raw; | ||
3939 | |||
3940 | IWL_DEBUG_SCAN("Scan ch.res: " | ||
3941 | "%d [802.11%s] " | ||
3942 | "(TSF: 0x%08X:%08X) - %d " | ||
3943 | "elapsed=%lu usec (%dms since last)\n", | ||
3944 | notif->channel, | ||
3945 | notif->band ? "bg" : "a", | ||
3946 | le32_to_cpu(notif->tsf_high), | ||
3947 | le32_to_cpu(notif->tsf_low), | ||
3948 | le32_to_cpu(notif->statistics[0]), | ||
3949 | le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf, | ||
3950 | jiffies_to_msecs(elapsed_jiffies | ||
3951 | (priv->last_scan_jiffies, jiffies))); | ||
3952 | |||
3953 | priv->last_scan_jiffies = jiffies; | ||
3954 | } | ||
3955 | |||
3956 | /* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ | ||
3957 | static void iwl_rx_scan_complete_notif(struct iwl_priv *priv, | ||
3958 | struct iwl_rx_mem_buffer *rxb) | ||
3959 | { | ||
3960 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
3961 | struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw; | ||
3962 | |||
3963 | IWL_DEBUG_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", | ||
3964 | scan_notif->scanned_channels, | ||
3965 | scan_notif->tsf_high, | ||
3966 | scan_notif->tsf_low, scan_notif->status); | ||
3967 | |||
3968 | /* The HW is no longer scanning */ | ||
3969 | clear_bit(STATUS_SCAN_HW, &priv->status); | ||
3970 | |||
3971 | /* The scan completion notification came in, so kill that timer... */ | ||
3972 | cancel_delayed_work(&priv->scan_check); | ||
3973 | |||
3974 | IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n", | ||
3975 | (priv->scan_bands == 2) ? "2.4" : "5.2", | ||
3976 | jiffies_to_msecs(elapsed_jiffies | ||
3977 | (priv->scan_pass_start, jiffies))); | ||
3978 | |||
3979 | /* Remove this scanned band from the list | ||
3980 | * of pending bands to scan */ | ||
3981 | priv->scan_bands--; | ||
3982 | |||
3983 | /* If a request to abort was given, or the scan did not succeed | ||
3984 | * then we reset the scan state machine and terminate, | ||
3985 | * re-queuing another scan if one has been requested */ | ||
3986 | if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { | ||
3987 | IWL_DEBUG_INFO("Aborted scan completed.\n"); | ||
3988 | clear_bit(STATUS_SCAN_ABORTING, &priv->status); | ||
3989 | } else { | ||
3990 | /* If there are more bands on this scan pass reschedule */ | ||
3991 | if (priv->scan_bands > 0) | ||
3992 | goto reschedule; | ||
3993 | } | ||
3994 | |||
3995 | priv->last_scan_jiffies = jiffies; | ||
3996 | IWL_DEBUG_INFO("Setting scan to off\n"); | ||
3997 | |||
3998 | clear_bit(STATUS_SCANNING, &priv->status); | ||
3999 | |||
4000 | IWL_DEBUG_INFO("Scan took %dms\n", | ||
4001 | jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies))); | ||
4002 | |||
4003 | queue_work(priv->workqueue, &priv->scan_completed); | ||
4004 | |||
4005 | return; | ||
4006 | |||
4007 | reschedule: | ||
4008 | priv->scan_pass_start = jiffies; | ||
4009 | queue_work(priv->workqueue, &priv->request_scan); | ||
4010 | } | ||
4011 | |||
4012 | /* Handle notification from uCode that card's power state is changing | ||
4013 | * due to software, hardware, or critical temperature RFKILL */ | ||
4014 | static void iwl_rx_card_state_notif(struct iwl_priv *priv, | ||
4015 | struct iwl_rx_mem_buffer *rxb) | ||
4016 | { | ||
4017 | struct iwl_rx_packet *pkt = (void *)rxb->skb->data; | ||
4018 | u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); | ||
4019 | unsigned long status = priv->status; | ||
4020 | |||
4021 | IWL_DEBUG_RF_KILL("Card state received: HW:%s SW:%s\n", | ||
4022 | (flags & HW_CARD_DISABLED) ? "Kill" : "On", | ||
4023 | (flags & SW_CARD_DISABLED) ? "Kill" : "On"); | ||
4024 | |||
4025 | if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | | ||
4026 | RF_CARD_DISABLED)) { | ||
4027 | |||
4028 | iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, | ||
4029 | CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); | ||
4030 | |||
4031 | if (!iwl_grab_restricted_access(priv)) { | ||
4032 | iwl_write_restricted( | ||
4033 | priv, HBUS_TARG_MBX_C, | ||
4034 | HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); | ||
4035 | |||
4036 | iwl_release_restricted_access(priv); | ||
4037 | } | ||
4038 | |||
4039 | if (!(flags & RXON_CARD_DISABLED)) { | ||
4040 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, | ||
4041 | CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); | ||
4042 | if (!iwl_grab_restricted_access(priv)) { | ||
4043 | iwl_write_restricted( | ||
4044 | priv, HBUS_TARG_MBX_C, | ||
4045 | HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); | ||
4046 | |||
4047 | iwl_release_restricted_access(priv); | ||
4048 | } | ||
4049 | } | ||
4050 | |||
4051 | if (flags & RF_CARD_DISABLED) { | ||
4052 | iwl_write32(priv, CSR_UCODE_DRV_GP1_SET, | ||
4053 | CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); | ||
4054 | iwl_read32(priv, CSR_UCODE_DRV_GP1); | ||
4055 | if (!iwl_grab_restricted_access(priv)) | ||
4056 | iwl_release_restricted_access(priv); | ||
4057 | } | ||
4058 | } | ||
4059 | |||
4060 | if (flags & HW_CARD_DISABLED) | ||
4061 | set_bit(STATUS_RF_KILL_HW, &priv->status); | ||
4062 | else | ||
4063 | clear_bit(STATUS_RF_KILL_HW, &priv->status); | ||
4064 | |||
4065 | |||
4066 | if (flags & SW_CARD_DISABLED) | ||
4067 | set_bit(STATUS_RF_KILL_SW, &priv->status); | ||
4068 | else | ||
4069 | clear_bit(STATUS_RF_KILL_SW, &priv->status); | ||
4070 | |||
4071 | if (!(flags & RXON_CARD_DISABLED)) | ||
4072 | iwl_scan_cancel(priv); | ||
4073 | |||
4074 | if ((test_bit(STATUS_RF_KILL_HW, &status) != | ||
4075 | test_bit(STATUS_RF_KILL_HW, &priv->status)) || | ||
4076 | (test_bit(STATUS_RF_KILL_SW, &status) != | ||
4077 | test_bit(STATUS_RF_KILL_SW, &priv->status))) | ||
4078 | queue_work(priv->workqueue, &priv->rf_kill); | ||
4079 | else | ||
4080 | wake_up_interruptible(&priv->wait_command_queue); | ||
4081 | } | ||
4082 | |||
4083 | /** | ||
4084 | * iwl_setup_rx_handlers - Initialize Rx handler callbacks | ||
4085 | * | ||
4086 | * Setup the RX handlers for each of the reply types sent from the uCode | ||
4087 | * to the host. | ||
4088 | * | ||
4089 | * This function chains into the hardware specific files for them to setup | ||
4090 | * any hardware specific handlers as well. | ||
4091 | */ | ||
4092 | static void iwl_setup_rx_handlers(struct iwl_priv *priv) | ||
4093 | { | ||
4094 | priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive; | ||
4095 | priv->rx_handlers[REPLY_ADD_STA] = iwl_rx_reply_add_sta; | ||
4096 | priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error; | ||
4097 | priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa; | ||
4098 | priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] = | ||
4099 | iwl_rx_spectrum_measure_notif; | ||
4100 | priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif; | ||
4101 | priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] = | ||
4102 | iwl_rx_pm_debug_statistics_notif; | ||
4103 | priv->rx_handlers[BEACON_NOTIFICATION] = iwl_rx_beacon_notif; | ||
4104 | |||
4105 | /* NOTE: iwl_rx_statistics is different based on whether | ||
4106 | * the build is for the 3945 or the 4965. See the | ||
4107 | * corresponding implementation in iwl-XXXX.c | ||
4108 | * | ||
4109 | * The same handler is used for both the REPLY to a | ||
4110 | * discrete statistics request from the host as well as | ||
4111 | * for the periodic statistics notification from the uCode | ||
4112 | */ | ||
4113 | priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_hw_rx_statistics; | ||
4114 | priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_hw_rx_statistics; | ||
4115 | |||
4116 | priv->rx_handlers[REPLY_SCAN_CMD] = iwl_rx_reply_scan; | ||
4117 | priv->rx_handlers[SCAN_START_NOTIFICATION] = iwl_rx_scan_start_notif; | ||
4118 | priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] = | ||
4119 | iwl_rx_scan_results_notif; | ||
4120 | priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] = | ||
4121 | iwl_rx_scan_complete_notif; | ||
4122 | priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl_rx_card_state_notif; | ||
4123 | priv->rx_handlers[REPLY_TX] = iwl_rx_reply_tx; | ||
4124 | |||
4125 | /* Setup hardware specific Rx handlers */ | ||
4126 | iwl_hw_rx_handler_setup(priv); | ||
4127 | } | ||
4128 | |||
4129 | /** | ||
4130 | * iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them | ||
4131 | * @rxb: Rx buffer to reclaim | ||
4132 | * | ||
4133 | * If an Rx buffer has an async callback associated with it the callback | ||
4134 | * will be executed. The attached skb (if present) will only be freed | ||
4135 | * if the callback returns 1 | ||
4136 | */ | ||
4137 | static void iwl_tx_cmd_complete(struct iwl_priv *priv, | ||
4138 | struct iwl_rx_mem_buffer *rxb) | ||
4139 | { | ||
4140 | struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; | ||
4141 | u16 sequence = le16_to_cpu(pkt->hdr.sequence); | ||
4142 | int txq_id = SEQ_TO_QUEUE(sequence); | ||
4143 | int index = SEQ_TO_INDEX(sequence); | ||
4144 | int huge = sequence & SEQ_HUGE_FRAME; | ||
4145 | int cmd_index; | ||
4146 | struct iwl_cmd *cmd; | ||
4147 | |||
4148 | /* If a Tx command is being handled and it isn't in the actual | ||
4149 | * command queue, then a command routing bug has been introduced | ||
4150 | * in the queue management code. */ | ||
4151 | if (txq_id != IWL_CMD_QUEUE_NUM) | ||
4152 | IWL_ERROR("Error wrong command queue %d command id 0x%X\n", | ||
4153 | txq_id, pkt->hdr.cmd); | ||
4154 | BUG_ON(txq_id != IWL_CMD_QUEUE_NUM); | ||
4155 | |||
4156 | cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge); | ||
4157 | cmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index]; | ||
4158 | |||
4159 | /* Input error checking is done when commands are added to queue. */ | ||
4160 | if (cmd->meta.flags & CMD_WANT_SKB) { | ||
4161 | cmd->meta.source->u.skb = rxb->skb; | ||
4162 | rxb->skb = NULL; | ||
4163 | } else if (cmd->meta.u.callback && | ||
4164 | !cmd->meta.u.callback(priv, cmd, rxb->skb)) | ||
4165 | rxb->skb = NULL; | ||
4166 | |||
4167 | iwl_tx_queue_reclaim(priv, txq_id, index); | ||
4168 | |||
4169 | if (!(cmd->meta.flags & CMD_ASYNC)) { | ||
4170 | clear_bit(STATUS_HCMD_ACTIVE, &priv->status); | ||
4171 | wake_up_interruptible(&priv->wait_command_queue); | ||
4172 | } | ||
4173 | } | ||
4174 | |||
4175 | /************************** RX-FUNCTIONS ****************************/ | ||
4176 | /* | ||
4177 | * Rx theory of operation | ||
4178 | * | ||
4179 | * The host allocates 32 DMA target addresses and passes the host address | ||
4180 | * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is | ||
4181 | * 0 to 31 | ||
4182 | * | ||
4183 | * Rx Queue Indexes | ||
4184 | * The host/firmware share two index registers for managing the Rx buffers. | ||
4185 | * | ||
4186 | * The READ index maps to the first position that the firmware may be writing | ||
4187 | * to -- the driver can read up to (but not including) this position and get | ||
4188 | * good data. | ||
4189 | * The READ index is managed by the firmware once the card is enabled. | ||
4190 | * | ||
4191 | * The WRITE index maps to the last position the driver has read from -- the | ||
4192 | * position preceding WRITE is the last slot the firmware can place a packet. | ||
4193 | * | ||
4194 | * The queue is empty (no good data) if WRITE = READ - 1, and is full if | ||
4195 | * WRITE = READ. | ||
4196 | * | ||
4197 | * During initialization the host sets up the READ queue position to the first | ||
4198 | * INDEX position, and WRITE to the last (READ - 1 wrapped) | ||
4199 | * | ||
4200 | * When the firmware places a packet in a buffer it will advance the READ index | ||
4201 | * and fire the RX interrupt. The driver can then query the READ index and | ||
4202 | * process as many packets as possible, moving the WRITE index forward as it | ||
4203 | * resets the Rx queue buffers with new memory. | ||
4204 | * | ||
4205 | * The management in the driver is as follows: | ||
4206 | * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When | ||
4207 | * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled | ||
4208 | * to replenish the iwl->rxq->rx_free. | ||
4209 | * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the | ||
4210 | * iwl->rxq is replenished and the READ INDEX is updated (updating the | ||
4211 | * 'processed' and 'read' driver indexes as well) | ||
4212 | * + A received packet is processed and handed to the kernel network stack, | ||
4213 | * detached from the iwl->rxq. The driver 'processed' index is updated. | ||
4214 | * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free | ||
4215 | * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ | ||
4216 | * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there | ||
4217 | * were enough free buffers and RX_STALLED is set it is cleared. | ||
4218 | * | ||
4219 | * | ||
4220 | * Driver sequence: | ||
4221 | * | ||
4222 | * iwl_rx_queue_alloc() Allocates rx_free | ||
4223 | * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls | ||
4224 | * iwl_rx_queue_restock | ||
4225 | * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx | ||
4226 | * queue, updates firmware pointers, and updates | ||
4227 | * the WRITE index. If insufficient rx_free buffers | ||
4228 | * are available, schedules iwl_rx_replenish | ||
4229 | * | ||
4230 | * -- enable interrupts -- | ||
4231 | * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the | ||
4232 | * READ INDEX, detaching the SKB from the pool. | ||
4233 | * Moves the packet buffer from queue to rx_used. | ||
4234 | * Calls iwl_rx_queue_restock to refill any empty | ||
4235 | * slots. | ||
4236 | * ... | ||
4237 | * | ||
4238 | */ | ||
4239 | |||
4240 | /** | ||
4241 | * iwl_rx_queue_space - Return number of free slots available in queue. | ||
4242 | */ | ||
4243 | static int iwl_rx_queue_space(const struct iwl_rx_queue *q) | ||
4244 | { | ||
4245 | int s = q->read - q->write; | ||
4246 | if (s <= 0) | ||
4247 | s += RX_QUEUE_SIZE; | ||
4248 | /* keep some buffer to not confuse full and empty queue */ | ||
4249 | s -= 2; | ||
4250 | if (s < 0) | ||
4251 | s = 0; | ||
4252 | return s; | ||
4253 | } | ||
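/*
 * Worked example (hypothetical indexes): with read = 12 and write = 4 the
 * raw distance is 8, and the 2-slot guard leaves 6 usable slots; the guard
 * keeps a completely full queue distinguishable from an empty one.
 */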
4254 | |||
4255 | /** | ||
4256 | * iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue | ||
4257 | * | ||
4258 | * NOTE: This function has 3945 and 4965 specific code sections | ||
4259 | * but is declared in base due to the majority of the | ||
4260 | * implementation being the same (only a numeric constant is | ||
4261 | * different) | ||
4262 | * | ||
4263 | */ | ||
4264 | int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q) | ||
4265 | { | ||
4266 | u32 reg = 0; | ||
4267 | int rc = 0; | ||
4268 | unsigned long flags; | ||
4269 | |||
4270 | spin_lock_irqsave(&q->lock, flags); | ||
4271 | |||
4272 | if (q->need_update == 0) | ||
4273 | goto exit_unlock; | ||
4274 | |||
4275 | if (test_bit(STATUS_POWER_PMI, &priv->status)) { | ||
4276 | reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); | ||
4277 | |||
4278 | if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { | ||
4279 | iwl_set_bit(priv, CSR_GP_CNTRL, | ||
4280 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | ||
4281 | goto exit_unlock; | ||
4282 | } | ||
4283 | |||
4284 | rc = iwl_grab_restricted_access(priv); | ||
4285 | if (rc) | ||
4286 | goto exit_unlock; | ||
4287 | |||
4288 | iwl_write_restricted(priv, FH_RSCSR_CHNL0_WPTR, | ||
4289 | q->write & ~0x7); | ||
4290 | iwl_release_restricted_access(priv); | ||
4291 | } else | ||
4292 | iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7); | ||
4293 | |||
4294 | |||
4295 | q->need_update = 0; | ||
4296 | |||
4297 | exit_unlock: | ||
4298 | spin_unlock_irqrestore(&q->lock, flags); | ||
4299 | return rc; | ||
4300 | } | ||
4301 | |||
4302 | /** | ||
4303 | * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer pointer. | ||
4304 | * | ||
4305 | * NOTE: This function has 3945 and 4965 specific code paths in it. | ||
4306 | */ | ||
4307 | static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv, | ||
4308 | dma_addr_t dma_addr) | ||
4309 | { | ||
4310 | return cpu_to_le32((u32)(dma_addr >> 8)); | ||
4311 | } | ||
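/*
 * Illustrative conversion (hypothetical address): a DMA address of
 * 0x12345600 becomes cpu_to_le32(0x00123456) -- the low 8 bits are dropped,
 * so the read buffer descriptor effectively stores the address in
 * 256-byte units.
 */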
4312 | |||
4313 | |||
4314 | /** | ||
4315 | * iwl_rx_queue_restock - refill RX queue from pre-allocated pool | ||
4316 | * | ||
4317 | * If there are slots in the RX queue that need to be restocked, | ||
4318 | * and we have free pre-allocated buffers, fill the ranks as much | ||
4319 | * as we can pulling from rx_free. | ||
4320 | * | ||
4321 | * This moves the 'write' index forward to catch up with 'processed', and | ||
4322 | * also updates the memory address in the firmware to reference the new | ||
4323 | * target buffer. | ||
4324 | */ | ||
4325 | int iwl_rx_queue_restock(struct iwl_priv *priv) | ||
4326 | { | ||
4327 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
4328 | struct list_head *element; | ||
4329 | struct iwl_rx_mem_buffer *rxb; | ||
4330 | unsigned long flags; | ||
4331 | int write, rc; | ||
4332 | |||
4333 | spin_lock_irqsave(&rxq->lock, flags); | ||
4334 | write = rxq->write & ~0x7; | ||
4335 | while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) { | ||
4336 | element = rxq->rx_free.next; | ||
4337 | rxb = list_entry(element, struct iwl_rx_mem_buffer, list); | ||
4338 | list_del(element); | ||
4339 | rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr); | ||
4340 | rxq->queue[rxq->write] = rxb; | ||
4341 | rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; | ||
4342 | rxq->free_count--; | ||
4343 | } | ||
4344 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
4345 | /* If the pre-allocated buffer pool is dropping low, schedule to | ||
4346 | * refill it */ | ||
4347 | if (rxq->free_count <= RX_LOW_WATERMARK) | ||
4348 | queue_work(priv->workqueue, &priv->rx_replenish); | ||
4349 | |||
4350 | |||
4351 | /* If we've added more space for the firmware to place data, tell it */ | ||
4352 | if ((write != (rxq->write & ~0x7)) | ||
4353 | || (abs(rxq->write - rxq->read) > 7)) { | ||
4354 | spin_lock_irqsave(&rxq->lock, flags); | ||
4355 | rxq->need_update = 1; | ||
4356 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
4357 | rc = iwl_rx_queue_update_write_ptr(priv, rxq); | ||
4358 | if (rc) | ||
4359 | return rc; | ||
4360 | } | ||
4361 | |||
4362 | return 0; | ||
4363 | } | ||
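/*
 * Example of the 8-buffer alignment above (hypothetical indexes): if
 * rxq->write advances from 13 to 16 during restock, the aligned value
 * changes from 8 to 16 and the firmware write pointer is updated; an
 * advance from 13 to 15 leaves the aligned value at 8, so the pointer is
 * only pushed if write and read are more than 7 buffers apart.
 */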
4364 | |||
4365 | /** | ||
4366 | * iwl_rx_replenish - Move all used packets from rx_used to rx_free | ||
4367 | * | ||
4368 | * When moving to rx_free an SKB is allocated for the slot. | ||
4369 | * | ||
4370 | * Also restock the Rx queue via iwl_rx_queue_restock. | ||
4371 | * This is called as a scheduled work item (except during initialization) | ||
4372 | */ | ||
4373 | void iwl_rx_replenish(void *data) | ||
4374 | { | ||
4375 | struct iwl_priv *priv = data; | ||
4376 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
4377 | struct list_head *element; | ||
4378 | struct iwl_rx_mem_buffer *rxb; | ||
4379 | unsigned long flags; | ||
4380 | spin_lock_irqsave(&rxq->lock, flags); | ||
4381 | while (!list_empty(&rxq->rx_used)) { | ||
4382 | element = rxq->rx_used.next; | ||
4383 | rxb = list_entry(element, struct iwl_rx_mem_buffer, list); | ||
4384 | rxb->skb = | ||
4385 | alloc_skb(IWL_RX_BUF_SIZE, __GFP_NOWARN | GFP_ATOMIC); | ||
4386 | if (!rxb->skb) { | ||
4387 | if (net_ratelimit()) | ||
4388 | printk(KERN_CRIT DRV_NAME | ||
4389 | ": Can not allocate SKB buffers\n"); | ||
4390 | /* We don't reschedule replenish work here -- we will | ||
4391 | * call the restock method and if it still needs | ||
4392 | * more buffers it will schedule replenish */ | ||
4393 | break; | ||
4394 | } | ||
4395 | priv->alloc_rxb_skb++; | ||
4396 | list_del(element); | ||
4397 | rxb->dma_addr = | ||
4398 | pci_map_single(priv->pci_dev, rxb->skb->data, | ||
4399 | IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); | ||
4400 | list_add_tail(&rxb->list, &rxq->rx_free); | ||
4401 | rxq->free_count++; | ||
4402 | } | ||
4403 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
4404 | |||
4405 | spin_lock_irqsave(&priv->lock, flags); | ||
4406 | iwl_rx_queue_restock(priv); | ||
4407 | spin_unlock_irqrestore(&priv->lock, flags); | ||
4408 | } | ||
4409 | |||
4410 | /* Assumes that the skb field of the buffers in 'pool' is kept accurate. | ||
4411 | * If an SKB has been detached, the POOL needs to have its SKB set to NULL. | ||
4412 | * This free routine walks the list of POOL entries and, if the SKB is | ||
4413 | * non-NULL, it is unmapped and freed. | ||
4414 | */ | ||
4415 | void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | ||
4416 | { | ||
4417 | int i; | ||
4418 | for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { | ||
4419 | if (rxq->pool[i].skb != NULL) { | ||
4420 | pci_unmap_single(priv->pci_dev, | ||
4421 | rxq->pool[i].dma_addr, | ||
4422 | IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); | ||
4423 | dev_kfree_skb(rxq->pool[i].skb); | ||
4424 | } | ||
4425 | } | ||
4426 | |||
4427 | pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd, | ||
4428 | rxq->dma_addr); | ||
4429 | rxq->bd = NULL; | ||
4430 | } | ||
4431 | |||
4432 | int iwl_rx_queue_alloc(struct iwl_priv *priv) | ||
4433 | { | ||
4434 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
4435 | struct pci_dev *dev = priv->pci_dev; | ||
4436 | int i; | ||
4437 | |||
4438 | spin_lock_init(&rxq->lock); | ||
4439 | INIT_LIST_HEAD(&rxq->rx_free); | ||
4440 | INIT_LIST_HEAD(&rxq->rx_used); | ||
4441 | rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr); | ||
4442 | if (!rxq->bd) | ||
4443 | return -ENOMEM; | ||
4444 | /* Fill the rx_used queue with _all_ of the Rx buffers */ | ||
4445 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) | ||
4446 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | ||
4447 | /* Set us so that we have processed and used all buffers, but have | ||
4448 | * not restocked the Rx queue with fresh buffers */ | ||
4449 | rxq->read = rxq->write = 0; | ||
4450 | rxq->free_count = 0; | ||
4451 | rxq->need_update = 0; | ||
4452 | return 0; | ||
4453 | } | ||
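/* Note on sizing (deduced from the alloc/free calls above, not from a
 * separate spec): the receive buffer descriptor ring rxq->bd holds
 * RX_QUEUE_SIZE entries of one 32-bit word each, hence the
 * 4 * RX_QUEUE_SIZE byte allocation; the restock path elsewhere in this
 * file presumably fills those words with receive buffer addresses. */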
4454 | |||
4455 | void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq) | ||
4456 | { | ||
4457 | unsigned long flags; | ||
4458 | int i; | ||
4459 | spin_lock_irqsave(&rxq->lock, flags); | ||
4460 | INIT_LIST_HEAD(&rxq->rx_free); | ||
4461 | INIT_LIST_HEAD(&rxq->rx_used); | ||
4462 | /* Fill the rx_used queue with _all_ of the Rx buffers */ | ||
4463 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { | ||
4464 | /* In the reset function, these buffers may have been allocated | ||
4465 | * to an SKB, so we need to unmap and free potential storage */ | ||
4466 | if (rxq->pool[i].skb != NULL) { | ||
4467 | pci_unmap_single(priv->pci_dev, | ||
4468 | rxq->pool[i].dma_addr, | ||
4469 | IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); | ||
4470 | priv->alloc_rxb_skb--; | ||
4471 | dev_kfree_skb(rxq->pool[i].skb); | ||
4472 | rxq->pool[i].skb = NULL; | ||
4473 | } | ||
4474 | list_add_tail(&rxq->pool[i].list, &rxq->rx_used); | ||
4475 | } | ||
4476 | |||
4477 | /* Set us so that we have processed and used all buffers, but have | ||
4478 | * not restocked the Rx queue with fresh buffers */ | ||
4479 | rxq->read = rxq->write = 0; | ||
4480 | rxq->free_count = 0; | ||
4481 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
4482 | } | ||
4483 | |||
4484 | /* Convert linear signal-to-noise ratio into dB */ | ||
4485 | static u8 ratio2dB[100] = { | ||
4486 | /* 0 1 2 3 4 5 6 7 8 9 */ | ||
4487 | 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */ | ||
4488 | 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */ | ||
4489 | 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */ | ||
4490 | 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */ | ||
4491 | 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */ | ||
4492 | 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */ | ||
4493 | 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */ | ||
4494 | 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */ | ||
4495 | 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */ | ||
4496 | 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */ | ||
4497 | }; | ||
4498 | |||
4499 | /* Calculates a relative dB value from a ratio of linear | ||
4500 | * (i.e. not dB) signal levels. | ||
4501 | * Conversion assumes that levels are voltages (20*log), not powers (10*log). */ | ||
4502 | int iwl_calc_db_from_ratio(int sig_ratio) | ||
4503 | { | ||
4504 | /* Anything above 1000:1 just report as 60 dB */ | ||
4505 | if (sig_ratio > 1000) | ||
4506 | return 60; | ||
4507 | |||
4508 | /* At or above 100:1, divide by 10 and use table, | ||
4509 | * add 20 dB to make up for divide by 10 */ | ||
4510 | if (sig_ratio >= 100) | ||
4511 | return (20 + (int)ratio2dB[sig_ratio/10]); | ||
4512 | |||
4513 | /* We shouldn't see this */ | ||
4514 | if (sig_ratio < 1) | ||
4515 | return 0; | ||
4516 | |||
4517 | /* Use table for ratios 1:1 - 99:1 */ | ||
4518 | return (int)ratio2dB[sig_ratio]; | ||
4519 | } | ||
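/* Worked examples (illustration only): a 10:1 voltage ratio is
 * 20*log10(10) = 20 dB, and indeed ratio2dB[10] = 20.  A 500:1 ratio
 * takes the divide-by-10 branch: 20 + ratio2dB[50] = 20 + 34 = 54 dB,
 * close to the exact 20*log10(500) = 53.98 dB. */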
4520 | |||
4521 | #define PERFECT_RSSI (-20) /* dBm */ | ||
4522 | #define WORST_RSSI (-95) /* dBm */ | ||
4523 | #define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI) | ||
4524 | |||
4525 | /* Calculate an indication of rx signal quality (a percentage, not dBm!). | ||
4526 | * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info | ||
4527 | * about formulas used below. */ | ||
4528 | int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm) | ||
4529 | { | ||
4530 | int sig_qual; | ||
4531 | int degradation = PERFECT_RSSI - rssi_dbm; | ||
4532 | |||
4533 | /* If we get a noise measurement, use signal-to-noise ratio (SNR) | ||
4534 | * as indicator; formula is (signal dbm - noise dbm). | ||
4535 | * SNR at or above 40 is a great signal (100%). | ||
4536 | * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator. | ||
4537 | * Weakest usable signal is usually 10 - 15 dB SNR. */ | ||
4538 | if (noise_dbm) { | ||
4539 | if (rssi_dbm - noise_dbm >= 40) | ||
4540 | return 100; | ||
4541 | else if (rssi_dbm < noise_dbm) | ||
4542 | return 0; | ||
4543 | sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2; | ||
4544 | |||
4545 | /* Else use just the signal level. | ||
4546 | * This formula is a least squares fit of data points collected and | ||
4547 | * compared with a reference system that had a percentage (%) display | ||
4548 | * for signal quality. */ | ||
4549 | } else | ||
4550 | sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation * | ||
4551 | (15 * RSSI_RANGE + 62 * degradation)) / | ||
4552 | (RSSI_RANGE * RSSI_RANGE); | ||
4553 | |||
4554 | if (sig_qual > 100) | ||
4555 | sig_qual = 100; | ||
4556 | else if (sig_qual < 1) | ||
4557 | sig_qual = 0; | ||
4558 | |||
4559 | return sig_qual; | ||
4560 | } | ||
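/* Worked examples (illustration only, values picked for this note):
 * with rssi_dbm = -60 and noise_dbm = -85, SNR = 25 dB and
 * sig_qual = (25 * 5) / 2 = 62%.  With no noise measurement and
 * rssi_dbm = -60, degradation = 40 and the fit above yields
 * (100*75*75 - 40*(15*75 + 62*40)) / (75*75) = 74%. */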
4561 | |||
4562 | /** | ||
4563 | * iwl_rx_handle - Main entry function for receiving responses from the uCode | ||
4564 | * | ||
4565 | * Uses the priv->rx_handlers callback function array to invoke | ||
4566 | * the appropriate handlers, including command responses, | ||
4567 | * frame-received notifications, and other notifications. | ||
4568 | */ | ||
4569 | static void iwl_rx_handle(struct iwl_priv *priv) | ||
4570 | { | ||
4571 | struct iwl_rx_mem_buffer *rxb; | ||
4572 | struct iwl_rx_packet *pkt; | ||
4573 | struct iwl_rx_queue *rxq = &priv->rxq; | ||
4574 | u32 r, i; | ||
4575 | int reclaim; | ||
4576 | unsigned long flags; | ||
4577 | |||
4578 | r = iwl_hw_get_rx_read(priv); | ||
4579 | i = rxq->read; | ||
4580 | |||
4581 | /* Rx interrupt, but nothing sent from uCode */ | ||
4582 | if (i == r) | ||
4583 | IWL_DEBUG(IWL_DL_RX | IWL_DL_ISR, "r = %d, i = %d\n", r, i); | ||
4584 | |||
4585 | while (i != r) { | ||
4586 | rxb = rxq->queue[i]; | ||
4587 | |||
4588 | /* If an RXB doesn't have a queue slot associated with it | ||
4589 | * then a bug has been introduced in the queue refilling | ||
4590 | * routines -- catch it here */ | ||
4591 | BUG_ON(rxb == NULL); | ||
4592 | |||
4593 | rxq->queue[i] = NULL; | ||
4594 | |||
4595 | pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, | ||
4596 | IWL_RX_BUF_SIZE, | ||
4597 | PCI_DMA_FROMDEVICE); | ||
4598 | pkt = (struct iwl_rx_packet *)rxb->skb->data; | ||
4599 | |||
4600 | /* Reclaim a command buffer only if this packet is a response | ||
4601 | * to a (driver-originated) command. | ||
4602 | * If the packet (e.g. Rx frame) originated from uCode, | ||
4603 | * there is no command buffer to reclaim. | ||
4604 | * uCode should set the SEQ_RX_FRAME bit if uCode-originated, | ||
4605 | * but apparently a few don't get set; catch them here. */ | ||
4606 | reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && | ||
4607 | (pkt->hdr.cmd != REPLY_RX_PHY_CMD) && | ||
4608 | (pkt->hdr.cmd != REPLY_4965_RX) && | ||
4609 | (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && | ||
4610 | (pkt->hdr.cmd != REPLY_TX); | ||
4611 | |||
4612 | /* Based on type of command response or notification, | ||
4613 | * handle those that need handling via function in | ||
4614 | * rx_handlers table. See iwl_setup_rx_handlers() */ | ||
4615 | if (priv->rx_handlers[pkt->hdr.cmd]) { | ||
4616 | IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR, | ||
4617 | "r = %d, i = %d, %s, 0x%02x\n", r, i, | ||
4618 | get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); | ||
4619 | priv->rx_handlers[pkt->hdr.cmd] (priv, rxb); | ||
4620 | } else { | ||
4621 | /* No handling needed */ | ||
4622 | IWL_DEBUG(IWL_DL_HOST_COMMAND | IWL_DL_RX | IWL_DL_ISR, | ||
4623 | "r %d i %d No handler needed for %s, 0x%02x\n", | ||
4624 | r, i, get_cmd_string(pkt->hdr.cmd), | ||
4625 | pkt->hdr.cmd); | ||
4626 | } | ||
4627 | |||
4628 | if (reclaim) { | ||
4629 | /* Invoke any callbacks, transfer the skb to caller, | ||
4630 | * and fire off the (possibly) blocking iwl_send_cmd() | ||
4631 | * as we reclaim the driver command queue */ | ||
4632 | if (rxb && rxb->skb) | ||
4633 | iwl_tx_cmd_complete(priv, rxb); | ||
4634 | else | ||
4635 | IWL_WARNING("Claim null rxb?\n"); | ||
4636 | } | ||
4637 | |||
4638 | /* For now we just don't re-use anything. We can tweak this | ||
4639 | * later to try and re-use notification packets and SKBs that | ||
4640 | * fail to Rx correctly */ | ||
4641 | if (rxb->skb != NULL) { | ||
4642 | priv->alloc_rxb_skb--; | ||
4643 | dev_kfree_skb_any(rxb->skb); | ||
4644 | rxb->skb = NULL; | ||
4645 | } | ||
4646 | |||
4647 | pci_unmap_single(priv->pci_dev, rxb->dma_addr, | ||
4648 | IWL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE); | ||
4649 | spin_lock_irqsave(&rxq->lock, flags); | ||
4650 | list_add_tail(&rxb->list, &priv->rxq.rx_used); | ||
4651 | spin_unlock_irqrestore(&rxq->lock, flags); | ||
4652 | i = (i + 1) & RX_QUEUE_MASK; | ||
4653 | } | ||
4654 | |||
4655 | /* Backtrack one entry */ | ||
4656 | priv->rxq.read = i; | ||
4657 | iwl_rx_queue_restock(priv); | ||
4658 | } | ||
4659 | |||
4660 | int iwl_tx_queue_update_write_ptr(struct iwl_priv *priv, | ||
4661 | struct iwl_tx_queue *txq) | ||
4662 | { | ||
4663 | u32 reg = 0; | ||
4664 | int rc = 0; | ||
4665 | int txq_id = txq->q.id; | ||
4666 | |||
4667 | if (txq->need_update == 0) | ||
4668 | return rc; | ||
4669 | |||
4670 | /* if we're trying to save power */ | ||
4671 | if (test_bit(STATUS_POWER_PMI, &priv->status)) { | ||
4672 | /* wake up nic if it's powered down ... | ||
4673 | * uCode will wake up, and interrupt us again, so next | ||
4674 | * time we'll skip this part. */ | ||
4675 | reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); | ||
4676 | |||
4677 | if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { | ||
4678 | IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg); | ||
4679 | iwl_set_bit(priv, CSR_GP_CNTRL, | ||
4680 | CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | ||
4681 | return rc; | ||
4682 | } | ||
4683 | |||
4684 | /* restore this queue's parameters in nic hardware. */ | ||
4685 | rc = iwl_grab_restricted_access(priv); | ||
4686 | if (rc) | ||
4687 | return rc; | ||
4688 | iwl_write_restricted(priv, HBUS_TARG_WRPTR, | ||
4689 | txq->q.first_empty | (txq_id << 8)); | ||
4690 | iwl_release_restricted_access(priv); | ||
4691 | |||
4692 | /* else not in power-save mode, uCode will never sleep when we're | ||
4693 | * trying to tx (during RFKILL, we're not trying to tx). */ | ||
4694 | } else | ||
4695 | iwl_write32(priv, HBUS_TARG_WRPTR, | ||
4696 | txq->q.first_empty | (txq_id << 8)); | ||
4697 | |||
4698 | txq->need_update = 0; | ||
4699 | |||
4700 | return rc; | ||
4701 | } | ||
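/* Note on the HBUS_TARG_WRPTR writes above (layout read off the
 * expression, not from a register spec): the Tx write index
 * (q.first_empty) goes in the low bits and the queue number (txq_id)
 * in bits 8 and up, so one register serves all Tx queues. */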
4702 | |||
4703 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
4704 | static void iwl_print_rx_config_cmd(struct iwl_rxon_cmd *rxon) | ||
4705 | { | ||
4706 | IWL_DEBUG_RADIO("RX CONFIG:\n"); | ||
4707 | iwl_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); | ||
4708 | IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel)); | ||
4709 | IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags)); | ||
4710 | IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n", | ||
4711 | le32_to_cpu(rxon->filter_flags)); | ||
4712 | IWL_DEBUG_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type); | ||
4713 | IWL_DEBUG_RADIO("u8 ofdm_basic_rates: 0x%02x\n", | ||
4714 | rxon->ofdm_basic_rates); | ||
4715 | IWL_DEBUG_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates); | ||
4716 | IWL_DEBUG_RADIO("u8[6] node_addr: " MAC_FMT "\n", | ||
4717 | MAC_ARG(rxon->node_addr)); | ||
4718 | IWL_DEBUG_RADIO("u8[6] bssid_addr: " MAC_FMT "\n", | ||
4719 | MAC_ARG(rxon->bssid_addr)); | ||
4720 | IWL_DEBUG_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id)); | ||
4721 | } | ||
4722 | #endif | ||
4723 | |||
4724 | static void iwl_enable_interrupts(struct iwl_priv *priv) | ||
4725 | { | ||
4726 | IWL_DEBUG_ISR("Enabling interrupts\n"); | ||
4727 | set_bit(STATUS_INT_ENABLED, &priv->status); | ||
4728 | iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK); | ||
4729 | } | ||
4730 | |||
4731 | static inline void iwl_disable_interrupts(struct iwl_priv *priv) | ||
4732 | { | ||
4733 | clear_bit(STATUS_INT_ENABLED, &priv->status); | ||
4734 | |||
4735 | /* disable interrupts from uCode/NIC to host */ | ||
4736 | iwl_write32(priv, CSR_INT_MASK, 0x00000000); | ||
4737 | |||
4738 | /* acknowledge/clear/reset any interrupts still pending | ||
4739 | * from uCode or flow handler (Rx/Tx DMA) */ | ||
4740 | iwl_write32(priv, CSR_INT, 0xffffffff); | ||
4741 | iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff); | ||
4742 | IWL_DEBUG_ISR("Disabled interrupts\n"); | ||
4743 | } | ||
4744 | |||
4745 | static const char *desc_lookup(int i) | ||
4746 | { | ||
4747 | switch (i) { | ||
4748 | case 1: | ||
4749 | return "FAIL"; | ||
4750 | case 2: | ||
4751 | return "BAD_PARAM"; | ||
4752 | case 3: | ||
4753 | return "BAD_CHECKSUM"; | ||
4754 | case 4: | ||
4755 | return "NMI_INTERRUPT"; | ||
4756 | case 5: | ||
4757 | return "SYSASSERT"; | ||
4758 | case 6: | ||
4759 | return "FATAL_ERROR"; | ||
4760 | } | ||
4761 | |||
4762 | return "UNKNOWN"; | ||
4763 | } | ||
4764 | |||
4765 | #define ERROR_START_OFFSET (1 * sizeof(u32)) | ||
4766 | #define ERROR_ELEM_SIZE (7 * sizeof(u32)) | ||
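/* Error log layout in SRAM as read by iwl_dump_nic_error_log() below
 * (offsets in 32-bit words from 'base', taken from the reads in that
 * function rather than a separate spec): [0] count, [1] desc,
 * [3] blink1, [4] blink2, [5] ilink1, [6] ilink2, [7] data1,
 * [8] data2, [9] line, [11] time. */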
4767 | |||
4768 | static void iwl_dump_nic_error_log(struct iwl_priv *priv) | ||
4769 | { | ||
4770 | u32 data2, line; | ||
4771 | u32 desc, time, count, base, data1; | ||
4772 | u32 blink1, blink2, ilink1, ilink2; | ||
4773 | int rc; | ||
4774 | |||
4775 | base = le32_to_cpu(priv->card_alive.error_event_table_ptr); | ||
4776 | |||
4777 | if (!iwl_hw_valid_rtc_data_addr(base)) { | ||
4778 | IWL_ERROR("Not valid error log pointer 0x%08X\n", base); | ||
4779 | return; | ||
4780 | } | ||
4781 | |||
4782 | rc = iwl_grab_restricted_access(priv); | ||
4783 | if (rc) { | ||
4784 | IWL_WARNING("Can not read from adapter at this time.\n"); | ||
4785 | return; | ||
4786 | } | ||
4787 | |||
4788 | count = iwl_read_restricted_mem(priv, base); | ||
4789 | |||
4790 | if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { | ||
4791 | IWL_ERROR("Start IWL Error Log Dump:\n"); | ||
4792 | IWL_ERROR("Status: 0x%08lX, Config: %08X count: %d\n", | ||
4793 | priv->status, priv->config, count); | ||
4794 | } | ||
4795 | |||
4796 | desc = iwl_read_restricted_mem(priv, base + 1 * sizeof(u32)); | ||
4797 | blink1 = iwl_read_restricted_mem(priv, base + 3 * sizeof(u32)); | ||
4798 | blink2 = iwl_read_restricted_mem(priv, base + 4 * sizeof(u32)); | ||
4799 | ilink1 = iwl_read_restricted_mem(priv, base + 5 * sizeof(u32)); | ||
4800 | ilink2 = iwl_read_restricted_mem(priv, base + 6 * sizeof(u32)); | ||
4801 | data1 = iwl_read_restricted_mem(priv, base + 7 * sizeof(u32)); | ||
4802 | data2 = iwl_read_restricted_mem(priv, base + 8 * sizeof(u32)); | ||
4803 | line = iwl_read_restricted_mem(priv, base + 9 * sizeof(u32)); | ||
4804 | time = iwl_read_restricted_mem(priv, base + 11 * sizeof(u32)); | ||
4805 | |||
4806 | IWL_ERROR("Desc Time " | ||
4807 | "data1 data2 line\n"); | ||
4808 | IWL_ERROR("%-13s (#%d) %010u 0x%08X 0x%08X %u\n", | ||
4809 | desc_lookup(desc), desc, time, data1, data2, line); | ||
4810 | IWL_ERROR("blink1 blink2 ilink1 ilink2\n"); | ||
4811 | IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2, | ||
4812 | ilink1, ilink2); | ||
4813 | |||
4814 | iwl_release_restricted_access(priv); | ||
4815 | } | ||
4816 | |||
4817 | #define EVENT_START_OFFSET (4 * sizeof(u32)) | ||
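/* Event log layout in SRAM as read by the two functions below (derived
 * from their code, not from a separate spec): a 4-word header of
 * capacity, mode, num_wraps and next_entry, followed at
 * EVENT_START_OFFSET by entries of 2 words (event id, data) when
 * mode == 0, or 3 words (event id, timestamp, data) when mode == 1. */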
4818 | |||
4819 | /** | ||
4820 | * iwl_print_event_log - Dump error event log to syslog | ||
4821 | * | ||
4822 | * NOTE: Must be called with iwl_grab_restricted_access() already obtained! | ||
4823 | */ | ||
4824 | static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx, | ||
4825 | u32 num_events, u32 mode) | ||
4826 | { | ||
4827 | u32 i; | ||
4828 | u32 base; /* SRAM byte address of event log header */ | ||
4829 | u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */ | ||
4830 | u32 ptr; /* SRAM byte address of log data */ | ||
4831 | u32 ev, time, data; /* event log data */ | ||
4832 | |||
4833 | if (num_events == 0) | ||
4834 | return; | ||
4835 | |||
4836 | base = le32_to_cpu(priv->card_alive.log_event_table_ptr); | ||
4837 | |||
4838 | if (mode == 0) | ||
4839 | event_size = 2 * sizeof(u32); | ||
4840 | else | ||
4841 | event_size = 3 * sizeof(u32); | ||
4842 | |||
4843 | ptr = base + EVENT_START_OFFSET + (start_idx * event_size); | ||
4844 | |||
4845 | /* "time" is actually "data" for mode 0 (no timestamp). | ||
4846 | * place event id # at far right for easier visual parsing. */ | ||
4847 | for (i = 0; i < num_events; i++) { | ||
4848 | ev = iwl_read_restricted_mem(priv, ptr); | ||
4849 | ptr += sizeof(u32); | ||
4850 | time = iwl_read_restricted_mem(priv, ptr); | ||
4851 | ptr += sizeof(u32); | ||
4852 | if (mode == 0) | ||
4853 | IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */ | ||
4854 | else { | ||
4855 | data = iwl_read_restricted_mem(priv, ptr); | ||
4856 | ptr += sizeof(u32); | ||
4857 | IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev); | ||
4858 | } | ||
4859 | } | ||
4860 | } | ||
4861 | |||
4862 | static void iwl_dump_nic_event_log(struct iwl_priv *priv) | ||
4863 | { | ||
4864 | int rc; | ||
4865 | u32 base; /* SRAM byte address of event log header */ | ||
4866 | u32 capacity; /* event log capacity in # entries */ | ||
4867 | u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */ | ||
4868 | u32 num_wraps; /* # times uCode wrapped to top of log */ | ||
4869 | u32 next_entry; /* index of next entry to be written by uCode */ | ||
4870 | u32 size; /* # entries that we'll print */ | ||
4871 | |||
4872 | base = le32_to_cpu(priv->card_alive.log_event_table_ptr); | ||
4873 | if (!iwl_hw_valid_rtc_data_addr(base)) { | ||
4874 | IWL_ERROR("Invalid event log pointer 0x%08X\n", base); | ||
4875 | return; | ||
4876 | } | ||
4877 | |||
4878 | rc = iwl_grab_restricted_access(priv); | ||
4879 | if (rc) { | ||
4880 | IWL_WARNING("Can not read from adapter at this time.\n"); | ||
4881 | return; | ||
4882 | } | ||
4883 | |||
4884 | /* event log header */ | ||
4885 | capacity = iwl_read_restricted_mem(priv, base); | ||
4886 | mode = iwl_read_restricted_mem(priv, base + (1 * sizeof(u32))); | ||
4887 | num_wraps = iwl_read_restricted_mem(priv, base + (2 * sizeof(u32))); | ||
4888 | next_entry = iwl_read_restricted_mem(priv, base + (3 * sizeof(u32))); | ||
4889 | |||
4890 | size = num_wraps ? capacity : next_entry; | ||
4891 | |||
4892 | /* bail out if nothing in log */ | ||
4893 | if (size == 0) { | ||
4894 | IWL_ERROR("Start IPW Event Log Dump: nothing in log\n"); | ||
4895 | iwl_release_restricted_access(priv); | ||
4896 | return; | ||
4897 | } | ||
4898 | |||
4899 | IWL_ERROR("Start IPW Event Log Dump: display count %d, wraps %d\n", | ||
4900 | size, num_wraps); | ||
4901 | |||
4902 | /* if uCode has wrapped back to top of log, start at the oldest entry, | ||
4903 | * i.e. the next one that uCode would fill. */ | ||
4904 | if (num_wraps) | ||
4905 | iwl_print_event_log(priv, next_entry, | ||
4906 | capacity - next_entry, mode); | ||
4907 | |||
4908 | /* (then/else) start at top of log */ | ||
4909 | iwl_print_event_log(priv, 0, next_entry, mode); | ||
4910 | |||
4911 | iwl_release_restricted_access(priv); | ||
4912 | } | ||
4913 | |||
4914 | /** | ||
4915 | * iwl_irq_handle_error - called for HW or SW error interrupt from card | ||
4916 | */ | ||
4917 | static void iwl_irq_handle_error(struct iwl_priv *priv) | ||
4918 | { | ||
4919 | /* Set the FW error flag -- cleared on iwl_down */ | ||
4920 | set_bit(STATUS_FW_ERROR, &priv->status); | ||
4921 | |||
4922 | /* Cancel currently queued command. */ | ||
4923 | clear_bit(STATUS_HCMD_ACTIVE, &priv->status); | ||
4924 | |||
4925 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
4926 | if (iwl_debug_level & IWL_DL_FW_ERRORS) { | ||
4927 | iwl_dump_nic_error_log(priv); | ||
4928 | iwl_dump_nic_event_log(priv); | ||
4929 | iwl_print_rx_config_cmd(&priv->staging_rxon); | ||
4930 | } | ||
4931 | #endif | ||
4932 | |||
4933 | wake_up_interruptible(&priv->wait_command_queue); | ||
4934 | |||
4935 | /* Keep the restart process from trying to send host | ||
4936 | * commands by clearing the READY status bit */ | ||
4937 | clear_bit(STATUS_READY, &priv->status); | ||
4938 | |||
4939 | if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) { | ||
4940 | IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS, | ||
4941 | "Restarting adapter due to uCode error.\n"); | ||
4942 | |||
4943 | if (iwl_is_associated(priv)) { | ||
4944 | memcpy(&priv->recovery_rxon, &priv->active_rxon, | ||
4945 | sizeof(priv->recovery_rxon)); | ||
4946 | priv->error_recovering = 1; | ||
4947 | } | ||
4948 | queue_work(priv->workqueue, &priv->restart); | ||
4949 | } | ||
4950 | } | ||
4951 | |||
4952 | static void iwl_error_recovery(struct iwl_priv *priv) | ||
4953 | { | ||
4954 | unsigned long flags; | ||
4955 | |||
4956 | memcpy(&priv->staging_rxon, &priv->recovery_rxon, | ||
4957 | sizeof(priv->staging_rxon)); | ||
4958 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
4959 | iwl_commit_rxon(priv); | ||
4960 | |||
4961 | iwl_rxon_add_station(priv, priv->bssid, 1); | ||
4962 | |||
4963 | spin_lock_irqsave(&priv->lock, flags); | ||
4964 | priv->assoc_id = le16_to_cpu(priv->staging_rxon.assoc_id); | ||
4965 | priv->error_recovering = 0; | ||
4966 | spin_unlock_irqrestore(&priv->lock, flags); | ||
4967 | } | ||
4968 | |||
4969 | static void iwl_irq_tasklet(struct iwl_priv *priv) | ||
4970 | { | ||
4971 | u32 inta, handled = 0; | ||
4972 | u32 inta_fh; | ||
4973 | unsigned long flags; | ||
4974 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
4975 | u32 inta_mask; | ||
4976 | #endif | ||
4977 | |||
4978 | spin_lock_irqsave(&priv->lock, flags); | ||
4979 | |||
4980 | /* Ack/clear/reset pending uCode interrupts. | ||
4981 | * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, | ||
4982 | * and will clear only when CSR_FH_INT_STATUS gets cleared. */ | ||
4983 | inta = iwl_read32(priv, CSR_INT); | ||
4984 | iwl_write32(priv, CSR_INT, inta); | ||
4985 | |||
4986 | /* Ack/clear/reset pending flow-handler (DMA) interrupts. | ||
4987 | * Any new interrupts that happen after this, either while we're | ||
4988 | * in this tasklet, or later, will show up in next ISR/tasklet. */ | ||
4989 | inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); | ||
4990 | iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh); | ||
4991 | |||
4992 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
4993 | if (iwl_debug_level & IWL_DL_ISR) { | ||
4994 | inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */ | ||
4995 | IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", | ||
4996 | inta, inta_mask, inta_fh); | ||
4997 | } | ||
4998 | #endif | ||
4999 | |||
5000 | /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not | ||
5001 | * atomic, make sure that inta covers all the interrupts that | ||
5002 | * we've discovered, even if FH interrupt came in just after | ||
5003 | * reading CSR_INT. */ | ||
5004 | if (inta_fh & CSR_FH_INT_RX_MASK) | ||
5005 | inta |= CSR_INT_BIT_FH_RX; | ||
5006 | if (inta_fh & CSR_FH_INT_TX_MASK) | ||
5007 | inta |= CSR_INT_BIT_FH_TX; | ||
5008 | |||
5009 | /* Now service all interrupt bits discovered above. */ | ||
5010 | if (inta & CSR_INT_BIT_HW_ERR) { | ||
5011 | IWL_ERROR("Microcode HW error detected. Restarting.\n"); | ||
5012 | |||
5013 | /* Tell the device to stop sending interrupts */ | ||
5014 | iwl_disable_interrupts(priv); | ||
5015 | |||
5016 | iwl_irq_handle_error(priv); | ||
5017 | |||
5018 | handled |= CSR_INT_BIT_HW_ERR; | ||
5019 | |||
5020 | spin_unlock_irqrestore(&priv->lock, flags); | ||
5021 | |||
5022 | return; | ||
5023 | } | ||
5024 | |||
5025 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
5026 | if (iwl_debug_level & (IWL_DL_ISR)) { | ||
5027 | /* NIC fires this, but we don't use it, redundant with WAKEUP */ | ||
5028 | if (inta & CSR_INT_BIT_MAC_CLK_ACTV) | ||
5029 | IWL_DEBUG_ISR("Microcode started or stopped.\n"); | ||
5030 | |||
5031 | /* Alive notification via Rx interrupt will do the real work */ | ||
5032 | if (inta & CSR_INT_BIT_ALIVE) | ||
5033 | IWL_DEBUG_ISR("Alive interrupt\n"); | ||
5034 | } | ||
5035 | #endif | ||
5036 | /* Safely ignore these bits for debug checks below */ | ||
5037 | inta &= ~(CSR_INT_BIT_MAC_CLK_ACTV | CSR_INT_BIT_ALIVE); | ||
5038 | |||
5039 | /* HW RF KILL switch toggled (4965 only) */ | ||
5040 | if (inta & CSR_INT_BIT_RF_KILL) { | ||
5041 | int hw_rf_kill = 0; | ||
5042 | if (!(iwl_read32(priv, CSR_GP_CNTRL) & | ||
5043 | CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) | ||
5044 | hw_rf_kill = 1; | ||
5045 | |||
5046 | IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL | IWL_DL_ISR, | ||
5047 | "RF_KILL bit toggled to %s.\n", | ||
5048 | hw_rf_kill ? "disable radio":"enable radio"); | ||
5049 | |||
5050 | /* Queue restart only if RF_KILL switch was set to "kill" | ||
5051 | * when we loaded driver, and is now set to "enable". | ||
5052 | * After we're Alive, RF_KILL gets handled by | ||
5053 | * iwl_rx_card_state_notif() */ | ||
5054 | if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status)) | ||
5055 | queue_work(priv->workqueue, &priv->restart); | ||
5056 | |||
5057 | handled |= CSR_INT_BIT_RF_KILL; | ||
5058 | } | ||
5059 | |||
5060 | /* Chip got too hot and stopped itself (4965 only) */ | ||
5061 | if (inta & CSR_INT_BIT_CT_KILL) { | ||
5062 | IWL_ERROR("Microcode CT kill error detected.\n"); | ||
5063 | handled |= CSR_INT_BIT_CT_KILL; | ||
5064 | } | ||
5065 | |||
5066 | /* Error detected by uCode */ | ||
5067 | if (inta & CSR_INT_BIT_SW_ERR) { | ||
5068 | IWL_ERROR("Microcode SW error detected. Restarting 0x%X.\n", | ||
5069 | inta); | ||
5070 | iwl_irq_handle_error(priv); | ||
5071 | handled |= CSR_INT_BIT_SW_ERR; | ||
5072 | } | ||
5073 | |||
5074 | /* uCode wakes up after power-down sleep */ | ||
5075 | if (inta & CSR_INT_BIT_WAKEUP) { | ||
5076 | IWL_DEBUG_ISR("Wakeup interrupt\n"); | ||
5077 | iwl_rx_queue_update_write_ptr(priv, &priv->rxq); | ||
5078 | iwl_tx_queue_update_write_ptr(priv, &priv->txq[0]); | ||
5079 | iwl_tx_queue_update_write_ptr(priv, &priv->txq[1]); | ||
5080 | iwl_tx_queue_update_write_ptr(priv, &priv->txq[2]); | ||
5081 | iwl_tx_queue_update_write_ptr(priv, &priv->txq[3]); | ||
5082 | iwl_tx_queue_update_write_ptr(priv, &priv->txq[4]); | ||
5083 | iwl_tx_queue_update_write_ptr(priv, &priv->txq[5]); | ||
5084 | |||
5085 | handled |= CSR_INT_BIT_WAKEUP; | ||
5086 | } | ||
5087 | |||
5088 | /* All uCode command responses, including Tx command responses, | ||
5089 | * Rx "responses" (frame-received notification), and other | ||
5090 | * notifications from uCode come through here */ | ||
5091 | if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { | ||
5092 | iwl_rx_handle(priv); | ||
5093 | handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); | ||
5094 | } | ||
5095 | |||
5096 | if (inta & CSR_INT_BIT_FH_TX) { | ||
5097 | IWL_DEBUG_ISR("Tx interrupt\n"); | ||
5098 | handled |= CSR_INT_BIT_FH_TX; | ||
5099 | } | ||
5100 | |||
5101 | if (inta & ~handled) | ||
5102 | IWL_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled); | ||
5103 | |||
5104 | if (inta & ~CSR_INI_SET_MASK) { | ||
5105 | IWL_WARNING("Disabled INTA bits 0x%08x were pending\n", | ||
5106 | inta & ~CSR_INI_SET_MASK); | ||
5107 | IWL_WARNING(" with FH_INT = 0x%08x\n", inta_fh); | ||
5108 | } | ||
5109 | |||
5110 | /* Re-enable all interrupts */ | ||
5111 | iwl_enable_interrupts(priv); | ||
5112 | |||
5113 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
5114 | if (iwl_debug_level & (IWL_DL_ISR)) { | ||
5115 | inta = iwl_read32(priv, CSR_INT); | ||
5116 | inta_mask = iwl_read32(priv, CSR_INT_MASK); | ||
5117 | inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); | ||
5118 | IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " | ||
5119 | "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); | ||
5120 | } | ||
5121 | #endif | ||
5122 | spin_unlock_irqrestore(&priv->lock, flags); | ||
5123 | } | ||
5124 | |||
5125 | static irqreturn_t iwl_isr(int irq, void *data) | ||
5126 | { | ||
5127 | struct iwl_priv *priv = data; | ||
5128 | u32 inta, inta_mask; | ||
5129 | u32 inta_fh; | ||
5130 | if (!priv) | ||
5131 | return IRQ_NONE; | ||
5132 | |||
5133 | spin_lock(&priv->lock); | ||
5134 | |||
5135 | /* Disable (but don't clear!) interrupts here to avoid | ||
5136 | * back-to-back ISRs and sporadic interrupts from our NIC. | ||
5137 | * If we have something to service, the tasklet will re-enable ints. | ||
5138 | * If we *don't* have something, we'll re-enable before leaving here. */ | ||
5139 | inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */ | ||
5140 | iwl_write32(priv, CSR_INT_MASK, 0x00000000); | ||
5141 | |||
5142 | /* Discover which interrupts are active/pending */ | ||
5143 | inta = iwl_read32(priv, CSR_INT); | ||
5144 | inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS); | ||
5145 | |||
5146 | /* Ignore interrupt if there's nothing in NIC to service. | ||
5147 | * This may be due to IRQ shared with another device, | ||
5148 | * or due to sporadic interrupts thrown from our NIC. */ | ||
5149 | if (!inta && !inta_fh) { | ||
5150 | IWL_DEBUG_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n"); | ||
5151 | goto none; | ||
5152 | } | ||
5153 | |||
5154 | if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) { | ||
5155 | /* Hardware disappeared */ | ||
5156 | IWL_WARNING("HARDWARE GONE?? INTA == 0x%08x\n", inta); | ||
5157 | goto none; | ||
5158 | } | ||
5159 | |||
5160 | IWL_DEBUG_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", | ||
5161 | inta, inta_mask, inta_fh); | ||
5162 | |||
5163 | /* iwl_irq_tasklet() will service interrupts and re-enable them */ | ||
5164 | tasklet_schedule(&priv->irq_tasklet); | ||
5165 | spin_unlock(&priv->lock); | ||
5166 | |||
5167 | return IRQ_HANDLED; | ||
5168 | |||
5169 | none: | ||
5170 | /* re-enable interrupts here since we don't have anything to service. */ | ||
5171 | iwl_enable_interrupts(priv); | ||
5172 | spin_unlock(&priv->lock); | ||
5173 | return IRQ_NONE; | ||
5174 | } | ||
5175 | |||
5176 | /************************** EEPROM BANDS **************************** | ||
5177 | * | ||
5178 | * The iwl_eeprom_band definitions below provide the mapping from the | ||
5179 | * EEPROM contents to the specific channel number supported for each | ||
5180 | * band. | ||
5181 | * | ||
5182 | * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3 | ||
5183 | * definition below maps to physical channel 42 in the 5.2GHz spectrum. | ||
5184 | * The specific geography and calibration information for that channel | ||
5185 | * is contained in the eeprom map itself. | ||
5186 | * | ||
5187 | * During init, we copy the eeprom information and channel map | ||
5188 | * information into priv->channel_info_24/52 and priv->channel_map_24/52 | ||
5189 | * | ||
5190 | * channel_map_24/52 provides the index in the channel_info array for a | ||
5191 | * given channel. We have to have two separate maps as there is channel | ||
5192 | * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and | ||
5193 | * band_2 | ||
5194 | * | ||
5195 | * A value of 0xff stored in the channel_map indicates that the channel | ||
5196 | * is not supported by the hardware at all. | ||
5197 | * | ||
5198 | * A value of 0xfe in the channel_map indicates that the channel is not | ||
5199 | * valid for Tx with the current hardware. This means that | ||
5200 | * while the system can tune and receive on a given channel, it may not | ||
5201 | * be able to associate or transmit any frames on that | ||
5202 | * channel. There is no corresponding channel information for that | ||
5203 | * entry. | ||
5204 | * | ||
5205 | *********************************************************************/ | ||
5206 | |||
5207 | /* 2.4 GHz */ | ||
5208 | static const u8 iwl_eeprom_band_1[14] = { | ||
5209 | 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 | ||
5210 | }; | ||
5211 | |||
5212 | /* 5.2 GHz bands */ | ||
5213 | static const u8 iwl_eeprom_band_2[] = { | ||
5214 | 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 | ||
5215 | }; | ||
5216 | |||
5217 | static const u8 iwl_eeprom_band_3[] = { /* 5205-5320MHz */ | ||
5218 | 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 | ||
5219 | }; | ||
5220 | |||
5221 | static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */ | ||
5222 | 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 | ||
5223 | }; | ||
5224 | |||
5225 | static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */ | ||
5226 | 145, 149, 153, 157, 161, 165 | ||
5227 | }; | ||
5228 | |||
5229 | static u8 iwl_eeprom_band_6[] = { /* 2.4 FAT channel */ | ||
5230 | 1, 2, 3, 4, 5, 6, 7 | ||
5231 | }; | ||
5232 | |||
5233 | static u8 iwl_eeprom_band_7[] = { /* 5.2 FAT channel */ | ||
5234 | 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157 | ||
5235 | }; | ||
5236 | |||
5237 | static void iwl_init_band_reference(const struct iwl_priv *priv, int band, | ||
5238 | int *eeprom_ch_count, | ||
5239 | const struct iwl_eeprom_channel | ||
5240 | **eeprom_ch_info, | ||
5241 | const u8 **eeprom_ch_index) | ||
5242 | { | ||
5243 | switch (band) { | ||
5244 | case 1: /* 2.4GHz band */ | ||
5245 | *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1); | ||
5246 | *eeprom_ch_info = priv->eeprom.band_1_channels; | ||
5247 | *eeprom_ch_index = iwl_eeprom_band_1; | ||
5248 | break; | ||
5249 | case 2: /* 5.2GHz band */ | ||
5250 | *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2); | ||
5251 | *eeprom_ch_info = priv->eeprom.band_2_channels; | ||
5252 | *eeprom_ch_index = iwl_eeprom_band_2; | ||
5253 | break; | ||
5254 | case 3: /* 5.2GHz band */ | ||
5255 | *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3); | ||
5256 | *eeprom_ch_info = priv->eeprom.band_3_channels; | ||
5257 | *eeprom_ch_index = iwl_eeprom_band_3; | ||
5258 | break; | ||
5259 | case 4: /* 5.2GHz band */ | ||
5260 | *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4); | ||
5261 | *eeprom_ch_info = priv->eeprom.band_4_channels; | ||
5262 | *eeprom_ch_index = iwl_eeprom_band_4; | ||
5263 | break; | ||
5264 | case 5: /* 5.2GHz band */ | ||
5265 | *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5); | ||
5266 | *eeprom_ch_info = priv->eeprom.band_5_channels; | ||
5267 | *eeprom_ch_index = iwl_eeprom_band_5; | ||
5268 | break; | ||
5269 | case 6: | ||
5270 | *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6); | ||
5271 | *eeprom_ch_info = priv->eeprom.band_24_channels; | ||
5272 | *eeprom_ch_index = iwl_eeprom_band_6; | ||
5273 | break; | ||
5274 | case 7: | ||
5275 | *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7); | ||
5276 | *eeprom_ch_info = priv->eeprom.band_52_channels; | ||
5277 | *eeprom_ch_index = iwl_eeprom_band_7; | ||
5278 | break; | ||
5279 | default: | ||
5280 | BUG(); | ||
5281 | return; | ||
5282 | } | ||
5283 | } | ||
5284 | |||
5285 | const struct iwl_channel_info *iwl_get_channel_info(const struct iwl_priv *priv, | ||
5286 | int phymode, u16 channel) | ||
5287 | { | ||
5288 | int i; | ||
5289 | |||
5290 | switch (phymode) { | ||
5291 | case MODE_IEEE80211A: | ||
5292 | for (i = 14; i < priv->channel_count; i++) { | ||
5293 | if (priv->channel_info[i].channel == channel) | ||
5294 | return &priv->channel_info[i]; | ||
5295 | } | ||
5296 | break; | ||
5297 | |||
5298 | case MODE_IEEE80211B: | ||
5299 | case MODE_IEEE80211G: | ||
5300 | if (channel >= 1 && channel <= 14) | ||
5301 | return &priv->channel_info[channel - 1]; | ||
5302 | break; | ||
5303 | |||
5304 | } | ||
5305 | |||
5306 | return NULL; | ||
5307 | } | ||
5308 | |||
5309 | #define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \ | ||
5310 | ? # x " " : "") | ||
5311 | |||
5312 | static int iwl_init_channel_map(struct iwl_priv *priv) | ||
5313 | { | ||
5314 | int eeprom_ch_count = 0; | ||
5315 | const u8 *eeprom_ch_index = NULL; | ||
5316 | const struct iwl_eeprom_channel *eeprom_ch_info = NULL; | ||
5317 | int band, ch; | ||
5318 | struct iwl_channel_info *ch_info; | ||
5319 | |||
5320 | if (priv->channel_count) { | ||
5321 | IWL_DEBUG_INFO("Channel map already initialized.\n"); | ||
5322 | return 0; | ||
5323 | } | ||
5324 | |||
5325 | if (priv->eeprom.version < 0x2f) { | ||
5326 | IWL_WARNING("Unsupported EEPROM version: 0x%04X\n", | ||
5327 | priv->eeprom.version); | ||
5328 | return -EINVAL; | ||
5329 | } | ||
5330 | |||
5331 | IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n"); | ||
5332 | |||
5333 | priv->channel_count = | ||
5334 | ARRAY_SIZE(iwl_eeprom_band_1) + | ||
5335 | ARRAY_SIZE(iwl_eeprom_band_2) + | ||
5336 | ARRAY_SIZE(iwl_eeprom_band_3) + | ||
5337 | ARRAY_SIZE(iwl_eeprom_band_4) + | ||
5338 | ARRAY_SIZE(iwl_eeprom_band_5); | ||
5339 | |||
5340 | IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count); | ||
5341 | |||
5342 | priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) * | ||
5343 | priv->channel_count, GFP_KERNEL); | ||
5344 | if (!priv->channel_info) { | ||
5345 | IWL_ERROR("Could not allocate channel_info\n"); | ||
5346 | priv->channel_count = 0; | ||
5347 | return -ENOMEM; | ||
5348 | } | ||
5349 | |||
5350 | ch_info = priv->channel_info; | ||
5351 | |||
5352 | /* Loop through the 5 EEPROM bands adding them in order to the | ||
5353 | * channel map we maintain (which contains more information than | ||
5354 | * just what is in the EEPROM) */ | ||
5355 | for (band = 1; band <= 5; band++) { | ||
5356 | |||
5357 | iwl_init_band_reference(priv, band, &eeprom_ch_count, | ||
5358 | &eeprom_ch_info, &eeprom_ch_index); | ||
5359 | |||
5360 | /* Loop through each band adding each of the channels */ | ||
5361 | for (ch = 0; ch < eeprom_ch_count; ch++) { | ||
5362 | ch_info->channel = eeprom_ch_index[ch]; | ||
5363 | ch_info->phymode = (band == 1) ? MODE_IEEE80211B : | ||
5364 | MODE_IEEE80211A; | ||
5365 | |||
5366 | /* permanently store EEPROM's channel regulatory flags | ||
5367 | * and max power in channel info database. */ | ||
5368 | ch_info->eeprom = eeprom_ch_info[ch]; | ||
5369 | |||
5370 | /* Copy the run-time flags so they are there even on | ||
5371 | * invalid channels */ | ||
5372 | ch_info->flags = eeprom_ch_info[ch].flags; | ||
5373 | |||
5374 | if (!(is_channel_valid(ch_info))) { | ||
5375 | IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - " | ||
5376 | "No traffic\n", | ||
5377 | ch_info->channel, | ||
5378 | ch_info->flags, | ||
5379 | is_channel_a_band(ch_info) ? | ||
5380 | "5.2" : "2.4"); | ||
5381 | ch_info++; | ||
5382 | continue; | ||
5383 | } | ||
5384 | |||
5385 | /* Initialize regulatory-based run-time data */ | ||
5386 | ch_info->max_power_avg = ch_info->curr_txpow = | ||
5387 | eeprom_ch_info[ch].max_power_avg; | ||
5388 | ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; | ||
5389 | ch_info->min_power = 0; | ||
5390 | |||
5391 | IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x" | ||
5392 | " %ddBm): Ad-Hoc %ssupported\n", | ||
5393 | ch_info->channel, | ||
5394 | is_channel_a_band(ch_info) ? | ||
5395 | "5.2" : "2.4", | ||
5396 | CHECK_AND_PRINT(IBSS), | ||
5397 | CHECK_AND_PRINT(ACTIVE), | ||
5398 | CHECK_AND_PRINT(RADAR), | ||
5399 | CHECK_AND_PRINT(WIDE), | ||
5400 | CHECK_AND_PRINT(NARROW), | ||
5401 | CHECK_AND_PRINT(DFS), | ||
5402 | eeprom_ch_info[ch].flags, | ||
5403 | eeprom_ch_info[ch].max_power_avg, | ||
5404 | ((eeprom_ch_info[ch]. | ||
5405 | flags & EEPROM_CHANNEL_IBSS) | ||
5406 | && !(eeprom_ch_info[ch]. | ||
5407 | flags & EEPROM_CHANNEL_RADAR)) | ||
5408 | ? "" : "not "); | ||
5409 | |||
5410 | /* Set the user_txpower_limit to the highest power | ||
5411 | * supported by any channel */ | ||
5412 | if (eeprom_ch_info[ch].max_power_avg > | ||
5413 | priv->user_txpower_limit) | ||
5414 | priv->user_txpower_limit = | ||
5415 | eeprom_ch_info[ch].max_power_avg; | ||
5416 | |||
5417 | ch_info++; | ||
5418 | } | ||
5419 | } | ||
5420 | |||
5421 | for (band = 6; band <= 7; band++) { | ||
5422 | int phymode; | ||
5423 | u8 fat_extension_chan; | ||
5424 | |||
5425 | iwl_init_band_reference(priv, band, &eeprom_ch_count, | ||
5426 | &eeprom_ch_info, &eeprom_ch_index); | ||
5427 | |||
5428 | phymode = (band == 6) ? MODE_IEEE80211B : MODE_IEEE80211A; | ||
5429 | /* Loop through each band adding each of the channels */ | ||
5430 | for (ch = 0; ch < eeprom_ch_count; ch++) { | ||
5431 | |||
5432 | if ((band == 6) && | ||
5433 | ((eeprom_ch_index[ch] == 5) || | ||
5434 | (eeprom_ch_index[ch] == 6) || | ||
5435 | (eeprom_ch_index[ch] == 7))) | ||
5436 | fat_extension_chan = HT_IE_EXT_CHANNEL_MAX; | ||
5437 | else | ||
5438 | fat_extension_chan = HT_IE_EXT_CHANNEL_ABOVE; | ||
5439 | |||
5440 | iwl4965_set_fat_chan_info(priv, phymode, | ||
5441 | eeprom_ch_index[ch], | ||
5442 | &(eeprom_ch_info[ch]), | ||
5443 | fat_extension_chan); | ||
5444 | |||
5445 | iwl4965_set_fat_chan_info(priv, phymode, | ||
5446 | (eeprom_ch_index[ch] + 4), | ||
5447 | &(eeprom_ch_info[ch]), | ||
5448 | HT_IE_EXT_CHANNEL_BELOW); | ||
5449 | } | ||
5450 | } | ||
5451 | |||
5452 | return 0; | ||
5453 | } | ||
5454 | |||
5455 | /* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after | ||
5456 | * sending probe req. This should be set long enough to hear probe responses | ||
5457 | * from more than one AP. */ | ||
5458 | #define IWL_ACTIVE_DWELL_TIME_24 (20) /* all times in msec */ | ||
5459 | #define IWL_ACTIVE_DWELL_TIME_52 (10) | ||
5460 | |||
5461 | /* For faster active scanning, scan will move to the next channel if fewer than | ||
5462 | * PLCP_QUIET_THRESH packets are heard on this channel within | ||
5463 | * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell | ||
5464 | * time if it's a quiet channel (nothing responded to our probe, and there's | ||
5465 | * no other traffic). | ||
5466 | * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */ | ||
5467 | #define IWL_PLCP_QUIET_THRESH __constant_cpu_to_le16(1) /* packets */ | ||
5468 | #define IWL_ACTIVE_QUIET_TIME __constant_cpu_to_le16(5) /* msec */ | ||
5469 | |||
5470 | /* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel. | ||
5471 | * Must be set longer than active dwell time. | ||
5472 | * For the most reliable scan, set > AP beacon interval (typically 100msec). */ | ||
5473 | #define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */ | ||
5474 | #define IWL_PASSIVE_DWELL_TIME_52 (10) | ||
5475 | #define IWL_PASSIVE_DWELL_BASE (100) | ||
5476 | #define IWL_CHANNEL_TUNE_TIME 5 | ||
5477 | |||
5478 | static inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv, int phymode) | ||
5479 | { | ||
5480 | if (phymode == MODE_IEEE80211A) | ||
5481 | return IWL_ACTIVE_DWELL_TIME_52; | ||
5482 | else | ||
5483 | return IWL_ACTIVE_DWELL_TIME_24; | ||
5484 | } | ||
5485 | |||
5486 | static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, int phymode) | ||
5487 | { | ||
5488 | u16 active = iwl_get_active_dwell_time(priv, phymode); | ||
5489 | u16 passive = (phymode != MODE_IEEE80211A) ? | ||
5490 | IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 : | ||
5491 | IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52; | ||
5492 | |||
5493 | if (iwl_is_associated(priv)) { | ||
5494 | /* If we're associated, we clamp the maximum passive | ||
5495 | * dwell time to be 98% of the beacon interval (minus | ||
5496 | * 2 * channel tune time) */ | ||
5497 | passive = priv->beacon_int; | ||
5498 | if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive) | ||
5499 | passive = IWL_PASSIVE_DWELL_BASE; | ||
5500 | passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; | ||
5501 | } | ||
5502 | |||
5503 | if (passive <= active) | ||
5504 | passive = active + 1; | ||
5505 | |||
5506 | return passive; | ||
5507 | } | ||
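/* Worked example (illustration only): unassociated on 2.4 GHz the
 * passive dwell is 100 + 20 = 120 ms.  When associated with a typical
 * 100 ms beacon interval (larger intervals are first capped at 100),
 * it becomes (100 * 98) / 100 - 2 * 5 = 88 ms, still comfortably above
 * the 20 ms active dwell. */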
5508 | |||
5509 | static int iwl_get_channels_for_scan(struct iwl_priv *priv, int phymode, | ||
5510 | u8 is_active, u8 direct_mask, | ||
5511 | struct iwl_scan_channel *scan_ch) | ||
5512 | { | ||
5513 | const struct ieee80211_channel *channels = NULL; | ||
5514 | const struct ieee80211_hw_mode *hw_mode; | ||
5515 | const struct iwl_channel_info *ch_info; | ||
5516 | u16 passive_dwell = 0; | ||
5517 | u16 active_dwell = 0; | ||
5518 | int added, i; | ||
5519 | |||
5520 | hw_mode = iwl_get_hw_mode(priv, phymode); | ||
5521 | if (!hw_mode) | ||
5522 | return 0; | ||
5523 | |||
5524 | channels = hw_mode->channels; | ||
5525 | |||
5526 | active_dwell = iwl_get_active_dwell_time(priv, phymode); | ||
5527 | passive_dwell = iwl_get_passive_dwell_time(priv, phymode); | ||
5528 | |||
5529 | for (i = 0, added = 0; i < hw_mode->num_channels; i++) { | ||
5530 | if (channels[i].chan == | ||
5531 | le16_to_cpu(priv->active_rxon.channel)) { | ||
5532 | if (iwl_is_associated(priv)) { | ||
5533 | IWL_DEBUG_SCAN | ||
5534 | ("Skipping current channel %d\n", | ||
5535 | le16_to_cpu(priv->active_rxon.channel)); | ||
5536 | continue; | ||
5537 | } | ||
5538 | } else if (priv->only_active_channel) | ||
5539 | continue; | ||
5540 | |||
5541 | scan_ch->channel = channels[i].chan; | ||
5542 | |||
5543 | ch_info = iwl_get_channel_info(priv, phymode, scan_ch->channel); | ||
5544 | if (!is_channel_valid(ch_info)) { | ||
5545 | IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n", | ||
5546 | scan_ch->channel); | ||
5547 | continue; | ||
5548 | } | ||
5549 | |||
5550 | if (!is_active || is_channel_passive(ch_info) || | ||
5551 | !(channels[i].flag & IEEE80211_CHAN_W_ACTIVE_SCAN)) | ||
5552 | scan_ch->type = 0; /* passive */ | ||
5553 | else | ||
5554 | scan_ch->type = 1; /* active */ | ||
5555 | |||
5556 | if (scan_ch->type & 1) | ||
5557 | scan_ch->type |= (direct_mask << 1); | ||
5558 | |||
5559 | if (is_channel_narrow(ch_info)) | ||
5560 | scan_ch->type |= (1 << 7); | ||
5561 | |||
5562 | scan_ch->active_dwell = cpu_to_le16(active_dwell); | ||
5563 | scan_ch->passive_dwell = cpu_to_le16(passive_dwell); | ||
5564 | |||
5565 | /* Set power levels to defaults */ | ||
5566 | scan_ch->tpc.dsp_atten = 110; | ||
5567 | /* scan_pwr_info->tpc.dsp_atten; */ | ||
5568 | |||
5569 | /*scan_pwr_info->tpc.tx_gain; */ | ||
5570 | if (phymode == MODE_IEEE80211A) | ||
5571 | scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; | ||
5572 | else { | ||
5573 | scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); | ||
5574 | /* NOTE: if we were doing 6Mb OFDM for scans we'd use | ||
5575 | * power level | ||
5576 | scan_ch->tpc.tx_gain = ((1<<5) | (2 << 3)) | 3; | ||
5577 | */ | ||
5578 | } | ||
5579 | |||
5580 | IWL_DEBUG_SCAN("Scanning %d [%s %d]\n", | ||
5581 | scan_ch->channel, | ||
5582 | (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE", | ||
5583 | (scan_ch->type & 1) ? | ||
5584 | active_dwell : passive_dwell); | ||
5585 | |||
5586 | scan_ch++; | ||
5587 | added++; | ||
5588 | } | ||
5589 | |||
5590 | IWL_DEBUG_SCAN("total channels to scan %d\n", added); | ||
5591 | return added; | ||
5592 | } | ||
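/* Bit layout of scan_ch->type as built above (read off this function,
 * not from a firmware spec): bit 0 selects active (1) vs. passive (0)
 * scanning, direct_mask is shifted into the bits from bit 1 up (active
 * channels only), and bit 7 flags a narrow channel. */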
5593 | |||
5594 | static void iwl_reset_channel_flag(struct iwl_priv *priv) | ||
5595 | { | ||
5596 | int i, j; | ||
5597 | for (i = 0; i < 3; i++) { | ||
5598 | struct ieee80211_hw_mode *hw_mode = (void *)&priv->modes[i]; | ||
5599 | for (j = 0; j < hw_mode->num_channels; j++) | ||
5600 | hw_mode->channels[j].flag = hw_mode->channels[j].val; | ||
5601 | } | ||
5602 | } | ||
5603 | |||
5604 | static void iwl_init_hw_rates(struct iwl_priv *priv, | ||
5605 | struct ieee80211_rate *rates) | ||
5606 | { | ||
5607 | int i; | ||
5608 | |||
5609 | for (i = 0; i < IWL_RATE_COUNT; i++) { | ||
5610 | rates[i].rate = iwl_rates[i].ieee * 5; | ||
5611 | rates[i].val = i; /* Rate scaling will work on indexes */ | ||
5612 | rates[i].val2 = i; | ||
5613 | rates[i].flags = IEEE80211_RATE_SUPPORTED; | ||
5614 | /* Only OFDM have the bits-per-symbol set */ | ||
5615 | if ((i <= IWL_LAST_OFDM_RATE) && (i >= IWL_FIRST_OFDM_RATE)) | ||
5616 | rates[i].flags |= IEEE80211_RATE_OFDM; | ||
5617 | else { | ||
5618 | /* | ||
5619 | * If CCK 1M then set rate flag to CCK else CCK_2 | ||
5620 | * which is CCK | PREAMBLE2 | ||
5621 | */ | ||
5622 | rates[i].flags |= (iwl_rates[i].plcp == 10) ? | ||
5623 | IEEE80211_RATE_CCK : IEEE80211_RATE_CCK_2; | ||
5624 | } | ||
5625 | |||
5626 | /* Set up which ones are basic rates... */ | ||
5627 | if (IWL_BASIC_RATES_MASK & (1 << i)) | ||
5628 | rates[i].flags |= IEEE80211_RATE_BASIC; | ||
5629 | } | ||
5630 | |||
5631 | iwl4965_init_hw_rates(priv, rates); | ||
5632 | } | ||
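/* Unit note (assuming mac80211's ieee80211_rate.rate is in 100 kbps
 * units, as it was for this stack generation): the IEEE rate values in
 * iwl_rates[] are in 500 kbps units, hence the multiply by 5 above,
 * e.g. CCK 1 Mbps: 2 * 5 = 10. */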
5633 | |||
5634 | /** | ||
5635 | * iwl_init_geos - Initialize mac80211's geo/channel info based on the EEPROM | ||
5636 | */ | ||
5637 | static int iwl_init_geos(struct iwl_priv *priv) | ||
5638 | { | ||
5639 | struct iwl_channel_info *ch; | ||
5640 | struct ieee80211_hw_mode *modes; | ||
5641 | struct ieee80211_channel *channels; | ||
5642 | struct ieee80211_channel *geo_ch; | ||
5643 | struct ieee80211_rate *rates; | ||
5644 | int i = 0; | ||
5645 | enum { | ||
5646 | A = 0, | ||
5647 | B = 1, | ||
5648 | G = 2, | ||
5649 | A_11N = 3, | ||
5650 | G_11N = 4, | ||
5651 | }; | ||
5652 | int mode_count = 5; | ||
5653 | |||
5654 | if (priv->modes) { | ||
5655 | IWL_DEBUG_INFO("Geography modes already initialized.\n"); | ||
5656 | set_bit(STATUS_GEO_CONFIGURED, &priv->status); | ||
5657 | return 0; | ||
5658 | } | ||
5659 | |||
5660 | modes = kzalloc(sizeof(struct ieee80211_hw_mode) * mode_count, | ||
5661 | GFP_KERNEL); | ||
5662 | if (!modes) | ||
5663 | return -ENOMEM; | ||
5664 | |||
5665 | channels = kzalloc(sizeof(struct ieee80211_channel) * | ||
5666 | priv->channel_count, GFP_KERNEL); | ||
5667 | if (!channels) { | ||
5668 | kfree(modes); | ||
5669 | return -ENOMEM; | ||
5670 | } | ||
5671 | |||
5672 | rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_MAX_RATES + 1)), | ||
5673 | GFP_KERNEL); | ||
5674 | if (!rates) { | ||
5675 | kfree(modes); | ||
5676 | kfree(channels); | ||
5677 | return -ENOMEM; | ||
5678 | } | ||
5679 | |||
5680 | /* 0 = 802.11a | ||
5681 | * 1 = 802.11b | ||
5682 | * 2 = 802.11g | ||
5683 | */ | ||
5684 | |||
5685 | /* 5.2GHz channels start after the 2.4GHz channels */ | ||
5686 | modes[A].mode = MODE_IEEE80211A; | ||
5687 | modes[A].channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)]; | ||
5688 | modes[A].rates = rates; | ||
5689 | modes[A].num_rates = 8; /* just OFDM */ | ||
5690 | modes[A].rates = &rates[4]; | ||
5691 | modes[A].num_channels = 0; | ||
5692 | |||
5693 | modes[B].mode = MODE_IEEE80211B; | ||
5694 | modes[B].channels = channels; | ||
5695 | modes[B].rates = rates; | ||
5696 | modes[B].num_rates = 4; /* just CCK */ | ||
5697 | modes[B].num_channels = 0; | ||
5698 | |||
5699 | modes[G].mode = MODE_IEEE80211G; | ||
5700 | modes[G].channels = channels; | ||
5701 | modes[G].rates = rates; | ||
5702 | modes[G].num_rates = 12; /* OFDM & CCK */ | ||
5703 | modes[G].num_channels = 0; | ||
5704 | |||
5705 | modes[G_11N].mode = MODE_IEEE80211G; | ||
5706 | modes[G_11N].channels = channels; | ||
5707 | modes[G_11N].num_rates = 13; /* OFDM & CCK */ | ||
5708 | modes[G_11N].rates = rates; | ||
5709 | modes[G_11N].num_channels = 0; | ||
5710 | |||
5711 | modes[A_11N].mode = MODE_IEEE80211A; | ||
5712 | modes[A_11N].channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)]; | ||
5713 | modes[A_11N].rates = &rates[4]; | ||
5714 | modes[A_11N].num_rates = 9; /* just OFDM */ | ||
5715 | modes[A_11N].num_channels = 0; | ||
5716 | |||
5717 | priv->ieee_channels = channels; | ||
5718 | priv->ieee_rates = rates; | ||
5719 | |||
5720 | iwl_init_hw_rates(priv, rates); | ||
5721 | |||
5722 | for (i = 0, geo_ch = channels; i < priv->channel_count; i++) { | ||
5723 | ch = &priv->channel_info[i]; | ||
5724 | |||
5725 | if (!is_channel_valid(ch)) { | ||
5726 | IWL_DEBUG_INFO("Channel %d [%sGHz] is restricted -- " | ||
5727 | "skipping.\n", | ||
5728 | ch->channel, is_channel_a_band(ch) ? | ||
5729 | "5.2" : "2.4"); | ||
5730 | continue; | ||
5731 | } | ||
5732 | |||
5733 | if (is_channel_a_band(ch)) { | ||
5734 | geo_ch = &modes[A].channels[modes[A].num_channels++]; | ||
5735 | modes[A_11N].num_channels++; | ||
5736 | } else { | ||
5737 | geo_ch = &modes[B].channels[modes[B].num_channels++]; | ||
5738 | modes[G].num_channels++; | ||
5739 | modes[G_11N].num_channels++; | ||
5740 | } | ||
5741 | |||
5742 | geo_ch->freq = ieee80211chan2mhz(ch->channel); | ||
5743 | geo_ch->chan = ch->channel; | ||
5744 | geo_ch->power_level = ch->max_power_avg; | ||
5745 | geo_ch->antenna_max = 0xff; | ||
5746 | |||
5747 | if (is_channel_valid(ch)) { | ||
5748 | geo_ch->flag = IEEE80211_CHAN_W_SCAN; | ||
5749 | if (ch->flags & EEPROM_CHANNEL_IBSS) | ||
5750 | geo_ch->flag |= IEEE80211_CHAN_W_IBSS; | ||
5751 | |||
5752 | if (ch->flags & EEPROM_CHANNEL_ACTIVE) | ||
5753 | geo_ch->flag |= IEEE80211_CHAN_W_ACTIVE_SCAN; | ||
5754 | |||
5755 | if (ch->flags & EEPROM_CHANNEL_RADAR) | ||
5756 | geo_ch->flag |= IEEE80211_CHAN_W_RADAR_DETECT; | ||
5757 | |||
5758 | if (ch->max_power_avg > priv->max_channel_txpower_limit) | ||
5759 | priv->max_channel_txpower_limit = | ||
5760 | ch->max_power_avg; | ||
5761 | } | ||
5762 | |||
5763 | geo_ch->val = geo_ch->flag; | ||
5764 | } | ||
5765 | |||
5766 | if ((modes[A].num_channels == 0) && priv->is_abg) { | ||
5767 | printk(KERN_INFO DRV_NAME | ||
5768 | ": Incorrectly detected BG card as ABG. Please send " | ||
5769 | "your PCI ID 0x%04X:0x%04X to maintainer.\n", | ||
5770 | priv->pci_dev->device, priv->pci_dev->subsystem_device); | ||
5771 | priv->is_abg = 0; | ||
5772 | } | ||
5773 | |||
5774 | printk(KERN_INFO DRV_NAME | ||
5775 | ": Tunable channels: %d 802.11bg, %d 802.11a channels\n", | ||
5776 | modes[G].num_channels, modes[A].num_channels); | ||
5777 | |||
5778 | /* | ||
5779 | * NOTE: We register these in order of preference -- the | ||
5780 | * stack doesn't currently (as of 7.0.6 / Apr 24 '07) pick | ||
5781 | * a phymode based on rates or AP capabilities, but seems to | ||
5782 | * configure it purely on whether the channel being configured | ||
5783 | * is supported by a mode -- and the first match is taken | ||
5784 | */ | ||
5785 | |||
5786 | if (modes[G].num_channels) | ||
5787 | ieee80211_register_hwmode(priv->hw, &modes[G]); | ||
5788 | if (modes[B].num_channels) | ||
5789 | ieee80211_register_hwmode(priv->hw, &modes[B]); | ||
5790 | if (modes[A].num_channels) | ||
5791 | ieee80211_register_hwmode(priv->hw, &modes[A]); | ||
5792 | |||
5793 | priv->modes = modes; | ||
5794 | set_bit(STATUS_GEO_CONFIGURED, &priv->status); | ||
5795 | |||
5796 | return 0; | ||
5797 | } | ||
5798 | |||
5799 | /****************************************************************************** | ||
5800 | * | ||
5801 | * uCode download functions | ||
5802 | * | ||
5803 | ******************************************************************************/ | ||
5804 | |||
5805 | static void iwl_dealloc_ucode_pci(struct iwl_priv *priv) | ||
5806 | { | ||
5807 | if (priv->ucode_code.v_addr != NULL) { | ||
5808 | pci_free_consistent(priv->pci_dev, | ||
5809 | priv->ucode_code.len, | ||
5810 | priv->ucode_code.v_addr, | ||
5811 | priv->ucode_code.p_addr); | ||
5812 | priv->ucode_code.v_addr = NULL; | ||
5813 | } | ||
5814 | if (priv->ucode_data.v_addr != NULL) { | ||
5815 | pci_free_consistent(priv->pci_dev, | ||
5816 | priv->ucode_data.len, | ||
5817 | priv->ucode_data.v_addr, | ||
5818 | priv->ucode_data.p_addr); | ||
5819 | priv->ucode_data.v_addr = NULL; | ||
5820 | } | ||
5821 | if (priv->ucode_data_backup.v_addr != NULL) { | ||
5822 | pci_free_consistent(priv->pci_dev, | ||
5823 | priv->ucode_data_backup.len, | ||
5824 | priv->ucode_data_backup.v_addr, | ||
5825 | priv->ucode_data_backup.p_addr); | ||
5826 | priv->ucode_data_backup.v_addr = NULL; | ||
5827 | } | ||
5828 | if (priv->ucode_init.v_addr != NULL) { | ||
5829 | pci_free_consistent(priv->pci_dev, | ||
5830 | priv->ucode_init.len, | ||
5831 | priv->ucode_init.v_addr, | ||
5832 | priv->ucode_init.p_addr); | ||
5833 | priv->ucode_init.v_addr = NULL; | ||
5834 | } | ||
5835 | if (priv->ucode_init_data.v_addr != NULL) { | ||
5836 | pci_free_consistent(priv->pci_dev, | ||
5837 | priv->ucode_init_data.len, | ||
5838 | priv->ucode_init_data.v_addr, | ||
5839 | priv->ucode_init_data.p_addr); | ||
5840 | priv->ucode_init_data.v_addr = NULL; | ||
5841 | } | ||
5842 | if (priv->ucode_boot.v_addr != NULL) { | ||
5843 | pci_free_consistent(priv->pci_dev, | ||
5844 | priv->ucode_boot.len, | ||
5845 | priv->ucode_boot.v_addr, | ||
5846 | priv->ucode_boot.p_addr); | ||
5847 | priv->ucode_boot.v_addr = NULL; | ||
5848 | } | ||
5849 | } | ||
5850 | |||
5851 | /** | ||
5852 | * iwl_verify_inst_full - verify runtime uCode image in card vs. host, | ||
5853 | * looking at all data. | ||
5854 | */ | ||
5855 | static int iwl_verify_inst_full(struct iwl_priv *priv, __le32 * image, u32 len) | ||
5856 | { | ||
5857 | u32 val; | ||
5858 | u32 save_len = len; | ||
5859 | int rc = 0; | ||
5860 | u32 errcnt; | ||
5861 | |||
5862 | IWL_DEBUG_INFO("ucode inst image size is %u\n", len); | ||
5863 | |||
5864 | rc = iwl_grab_restricted_access(priv); | ||
5865 | if (rc) | ||
5866 | return rc; | ||
5867 | |||
5868 | iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND); | ||
5869 | |||
5870 | errcnt = 0; | ||
5871 | for (; len > 0; len -= sizeof(u32), image++) { | ||
5872 | /* read data comes through single port, auto-incr addr */ | ||
5873 | /* NOTE: Use the debugless read so we don't flood kernel log | ||
5874 | * if IWL_DL_IO is set */ | ||
5875 | val = _iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT); | ||
5876 | if (val != le32_to_cpu(*image)) { | ||
5877 | IWL_ERROR("uCode INST section is invalid at " | ||
5878 | "offset 0x%x, is 0x%x, s/b 0x%x\n", | ||
5879 | save_len - len, val, le32_to_cpu(*image)); | ||
5880 | rc = -EIO; | ||
5881 | errcnt++; | ||
5882 | if (errcnt >= 20) | ||
5883 | break; | ||
5884 | } | ||
5885 | } | ||
5886 | |||
5887 | iwl_release_restricted_access(priv); | ||
5888 | |||
5889 | if (!errcnt) | ||
5890 | IWL_DEBUG_INFO | ||
5891 | ("ucode image in INSTRUCTION memory is good\n"); | ||
5892 | |||
5893 | return rc; | ||
5894 | } | ||
5895 | |||
5896 | |||
5897 | /** | ||
5898 | * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host, | ||
5899 | * using sample data 100 bytes apart. If these sample points are good, | ||
5900 | * it's a pretty good bet that everything between them is good, too. | ||
5901 | */ | ||
5902 | static int iwl_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len) | ||
5903 | { | ||
5904 | u32 val; | ||
5905 | int rc = 0; | ||
5906 | u32 errcnt = 0; | ||
5907 | u32 i; | ||
5908 | |||
5909 | IWL_DEBUG_INFO("ucode inst image size is %u\n", len); | ||
5910 | |||
5911 | rc = iwl_grab_restricted_access(priv); | ||
5912 | if (rc) | ||
5913 | return rc; | ||
5914 | |||
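| /* Sample one 32-bit word every 100 bytes; the image pointer advances | ||
| * by 100/sizeof(u32) words per pass to stay in step with the on-card | ||
| * address i written to HBUS_TARG_MEM_RADDR below. */ | ||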
5915 | for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) { | ||
5916 | /* read data comes through single port, auto-incr addr */ | ||
5917 | /* NOTE: Use the debugless read so we don't flood kernel log | ||
5918 | * if IWL_DL_IO is set */ | ||
5919 | iwl_write_restricted(priv, HBUS_TARG_MEM_RADDR, | ||
5920 | i + RTC_INST_LOWER_BOUND); | ||
5921 | val = _iwl_read_restricted(priv, HBUS_TARG_MEM_RDAT); | ||
5922 | if (val != le32_to_cpu(*image)) { | ||
5923 | #if 0 /* Enable this if you want to see details */ | ||
5924 | IWL_ERROR("uCode INST section is invalid at " | ||
5925 | "offset 0x%x, is 0x%x, s/b 0x%x\n", | ||
5926 | i, val, *image); | ||
5927 | #endif | ||
5928 | rc = -EIO; | ||
5929 | errcnt++; | ||
5930 | if (errcnt >= 3) | ||
5931 | break; | ||
5932 | } | ||
5933 | } | ||
5934 | |||
5935 | iwl_release_restricted_access(priv); | ||
5936 | |||
5937 | return rc; | ||
5938 | } | ||
5939 | |||
5940 | |||
5941 | /** | ||
5942 | * iwl_verify_ucode - determine which instruction image is in SRAM, | ||
5943 | * and verify its contents | ||
5944 | */ | ||
5945 | static int iwl_verify_ucode(struct iwl_priv *priv) | ||
5946 | { | ||
5947 | __le32 *image; | ||
5948 | u32 len; | ||
5949 | int rc = 0; | ||
5950 | |||
5951 | /* Try bootstrap */ | ||
5952 | image = (__le32 *)priv->ucode_boot.v_addr; | ||
5953 | len = priv->ucode_boot.len; | ||
5954 | rc = iwl_verify_inst_sparse(priv, image, len); | ||
5955 | if (rc == 0) { | ||
5956 | IWL_DEBUG_INFO("Bootstrap uCode is good in inst SRAM\n"); | ||
5957 | return 0; | ||
5958 | } | ||
5959 | |||
5960 | /* Try initialize */ | ||
5961 | image = (__le32 *)priv->ucode_init.v_addr; | ||
5962 | len = priv->ucode_init.len; | ||
5963 | rc = iwl_verify_inst_sparse(priv, image, len); | ||
5964 | if (rc == 0) { | ||
5965 | IWL_DEBUG_INFO("Initialize uCode is good in inst SRAM\n"); | ||
5966 | return 0; | ||
5967 | } | ||
5968 | |||
5969 | /* Try runtime/protocol */ | ||
5970 | image = (__le32 *)priv->ucode_code.v_addr; | ||
5971 | len = priv->ucode_code.len; | ||
5972 | rc = iwl_verify_inst_sparse(priv, image, len); | ||
5973 | if (rc == 0) { | ||
5974 | IWL_DEBUG_INFO("Runtime uCode is good in inst SRAM\n"); | ||
5975 | return 0; | ||
5976 | } | ||
5977 | |||
5978 | IWL_ERROR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n"); | ||
5979 | |||
5980 | /* Show first several data entries in instruction SRAM. | ||
5981 | * Selection of bootstrap image is arbitrary. */ | ||
5982 | image = (__le32 *)priv->ucode_boot.v_addr; | ||
5983 | len = priv->ucode_boot.len; | ||
5984 | rc = iwl_verify_inst_full(priv, image, len); | ||
5985 | |||
5986 | return rc; | ||
5987 | } | ||
5988 | |||
5989 | |||
5990 | /* check contents of special bootstrap uCode SRAM */ | ||
5991 | static int iwl_verify_bsm(struct iwl_priv *priv) | ||
5992 | { | ||
5993 | __le32 *image = priv->ucode_boot.v_addr; | ||
5994 | u32 len = priv->ucode_boot.len; | ||
5995 | u32 reg; | ||
5996 | u32 val; | ||
5997 | |||
5998 | IWL_DEBUG_INFO("Begin verify bsm\n"); | ||
5999 | |||
6000 | /* verify BSM SRAM contents */ | ||
6001 | val = iwl_read_restricted_reg(priv, BSM_WR_DWCOUNT_REG); | ||
6002 | for (reg = BSM_SRAM_LOWER_BOUND; | ||
6003 | reg < BSM_SRAM_LOWER_BOUND + len; | ||
6004 | reg += sizeof(u32), image++) { | ||
6005 | val = iwl_read_restricted_reg(priv, reg); | ||
6006 | if (val != le32_to_cpu(*image)) { | ||
6007 | IWL_ERROR("BSM uCode verification failed at " | ||
6008 | "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n", | ||
6009 | BSM_SRAM_LOWER_BOUND, | ||
6010 | reg - BSM_SRAM_LOWER_BOUND, len, | ||
6011 | val, le32_to_cpu(*image)); | ||
6012 | return -EIO; | ||
6013 | } | ||
6014 | } | ||
6015 | |||
6016 | IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n"); | ||
6017 | |||
6018 | return 0; | ||
6019 | } | ||
6020 | |||
6021 | /** | ||
6022 | * iwl_load_bsm - Load bootstrap instructions | ||
6023 | * | ||
6024 | * BSM operation: | ||
6025 | * | ||
6026 | * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program | ||
6027 | * in special SRAM that does not power down during RFKILL. When powering back | ||
6028 | * up after power-saving sleeps (or during initial uCode load), the BSM loads | ||
6029 | * the bootstrap program into the on-board processor, and starts it. | ||
6030 | * | ||
6031 | * The bootstrap program loads (via DMA) instructions and data for a new | ||
6032 | * program from host DRAM locations indicated by the host driver in the | ||
6033 | * BSM_DRAM_* registers. Once the new program is loaded, it starts | ||
6034 | * automatically. | ||
6035 | * | ||
6036 | * When initializing the NIC, the host driver points the BSM to the | ||
6037 | * "initialize" uCode image. This uCode sets up some internal data, then | ||
6038 | * notifies host via "initialize alive" that it is complete. | ||
6039 | * | ||
6040 | * The host then replaces the BSM_DRAM_* pointer values to point to the | ||
6041 | * normal runtime uCode instructions and a backup uCode data cache buffer | ||
6042 | * (filled initially with starting data values for the on-board processor), | ||
6043 | * then triggers the "initialize" uCode to load and launch the runtime uCode, | ||
6044 | * which begins normal operation. | ||
6045 | * | ||
6046 | * When doing a power-save shutdown, runtime uCode saves data SRAM into | ||
6047 | * the backup data cache in DRAM before SRAM is powered down. | ||
6048 | * | ||
6049 | * When powering back up, the BSM loads the bootstrap program. This reloads | ||
6050 | * the runtime uCode instructions and the backup data cache into SRAM, | ||
6051 | * and re-launches the runtime uCode from where it left off. | ||
6052 | */ | ||
6053 | static int iwl_load_bsm(struct iwl_priv *priv) | ||
6054 | { | ||
6055 | __le32 *image = priv->ucode_boot.v_addr; | ||
6056 | u32 len = priv->ucode_boot.len; | ||
6057 | dma_addr_t pinst; | ||
6058 | dma_addr_t pdata; | ||
6059 | u32 inst_len; | ||
6060 | u32 data_len; | ||
6061 | int rc; | ||
6062 | int i; | ||
6063 | u32 done; | ||
6064 | u32 reg_offset; | ||
6065 | |||
6066 | IWL_DEBUG_INFO("Begin load bsm\n"); | ||
6067 | |||
6068 | /* make sure bootstrap program is no larger than BSM's SRAM size */ | ||
6069 | if (len > IWL_MAX_BSM_SIZE) | ||
6070 | return -EINVAL; | ||
6071 | |||
6072 | /* Tell bootstrap uCode where to find the "Initialize" uCode | ||
6073 | * in host DRAM ... bits 31:0 for 3945, bits 35:4 for 4965. | ||
6074 | * NOTE: iwl_initialize_alive_start() will replace these values, | ||
6075 | * after the "initialize" uCode has run, to point to | ||
6076 | * runtime/protocol instructions and backup data cache. */ | ||
6077 | pinst = priv->ucode_init.p_addr >> 4; | ||
6078 | pdata = priv->ucode_init_data.p_addr >> 4; | ||
6079 | inst_len = priv->ucode_init.len; | ||
6080 | data_len = priv->ucode_init_data.len; | ||
6081 | |||
6082 | rc = iwl_grab_restricted_access(priv); | ||
6083 | if (rc) | ||
6084 | return rc; | ||
6085 | |||
6086 | iwl_write_restricted_reg(priv, BSM_DRAM_INST_PTR_REG, pinst); | ||
6087 | iwl_write_restricted_reg(priv, BSM_DRAM_DATA_PTR_REG, pdata); | ||
6088 | iwl_write_restricted_reg(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len); | ||
6089 | iwl_write_restricted_reg(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len); | ||
6090 | |||
6091 | /* Fill BSM memory with bootstrap instructions */ | ||
6092 | for (reg_offset = BSM_SRAM_LOWER_BOUND; | ||
6093 | reg_offset < BSM_SRAM_LOWER_BOUND + len; | ||
6094 | reg_offset += sizeof(u32), image++) | ||
6095 | _iwl_write_restricted_reg(priv, reg_offset, | ||
6096 | le32_to_cpu(*image)); | ||
6097 | |||
6098 | rc = iwl_verify_bsm(priv); | ||
6099 | if (rc) { | ||
6100 | iwl_release_restricted_access(priv); | ||
6101 | return rc; | ||
6102 | } | ||
6103 | |||
6104 | /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */ | ||
6105 | iwl_write_restricted_reg(priv, BSM_WR_MEM_SRC_REG, 0x0); | ||
6106 | iwl_write_restricted_reg(priv, BSM_WR_MEM_DST_REG, | ||
6107 | RTC_INST_LOWER_BOUND); | ||
6108 | iwl_write_restricted_reg(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32)); | ||
6109 | |||
6110 | /* Load bootstrap code into instruction SRAM now, | ||
6111 | * to prepare to load "initialize" uCode */ | ||
6112 | iwl_write_restricted_reg(priv, BSM_WR_CTRL_REG, | ||
6113 | BSM_WR_CTRL_REG_BIT_START); | ||
6114 | |||
6115 | /* Wait for load of bootstrap uCode to finish */ | ||
6116 | for (i = 0; i < 100; i++) { | ||
6117 | done = iwl_read_restricted_reg(priv, BSM_WR_CTRL_REG); | ||
6118 | if (!(done & BSM_WR_CTRL_REG_BIT_START)) | ||
6119 | break; | ||
6120 | udelay(10); | ||
6121 | } | ||
6122 | if (i < 100) | ||
6123 | IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i); | ||
6124 | else { | ||
6125 | IWL_ERROR("BSM write did not complete!\n"); | ||
6126 | return -EIO; | ||
6127 | } | ||
6128 | |||
6129 | /* Enable future boot loads whenever power management unit triggers it | ||
6130 | * (e.g. when powering back up after power-save shutdown) */ | ||
6131 | iwl_write_restricted_reg(priv, BSM_WR_CTRL_REG, | ||
6132 | BSM_WR_CTRL_REG_BIT_START_EN); | ||
6133 | |||
6134 | iwl_release_restricted_access(priv); | ||
6135 | |||
6136 | return 0; | ||
6137 | } | ||
6138 | |||
6139 | static void iwl_nic_start(struct iwl_priv *priv) | ||
6140 | { | ||
6141 | /* Remove all resets to allow NIC to operate */ | ||
6142 | iwl_write32(priv, CSR_RESET, 0); | ||
6143 | } | ||
6144 | |||
6145 | /** | ||
6146 | * iwl_read_ucode - Read uCode images from disk file. | ||
6147 | * | ||
6148 | * Copy into buffers for card to fetch via bus-mastering | ||
6149 | */ | ||
6150 | static int iwl_read_ucode(struct iwl_priv *priv) | ||
6151 | { | ||
6152 | struct iwl_ucode *ucode; | ||
6153 | int rc = 0; | ||
6154 | const struct firmware *ucode_raw; | ||
6155 | const char *name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode"; | ||
6156 | u8 *src; | ||
6157 | size_t len; | ||
6158 | u32 ver, inst_size, data_size, init_size, init_data_size, boot_size; | ||
6159 | |||
6160 | /* Ask kernel firmware_class module to get the boot firmware off disk. | ||
6161 | * request_firmware() is synchronous, file is in memory on return. */ | ||
6162 | rc = request_firmware(&ucode_raw, name, &priv->pci_dev->dev); | ||
6163 | if (rc < 0) { | ||
6164 | IWL_ERROR("%s firmware file req failed: Reason %d\n", name, rc); | ||
6165 | goto error; | ||
6166 | } | ||
6167 | |||
6168 | IWL_DEBUG_INFO("Got firmware '%s' file (%zd bytes) from disk\n", | ||
6169 | name, ucode_raw->size); | ||
6170 | |||
6171 | /* Make sure that we got at least our header! */ | ||
6172 | if (ucode_raw->size < sizeof(*ucode)) { | ||
6173 | IWL_ERROR("File size way too small!\n"); | ||
6174 | rc = -EINVAL; | ||
6175 | goto err_release; | ||
6176 | } | ||
6177 | |||
6178 | /* Data from ucode file: header followed by uCode images */ | ||
6179 | ucode = (void *)ucode_raw->data; | ||
6180 | |||
6181 | ver = le32_to_cpu(ucode->ver); | ||
6182 | inst_size = le32_to_cpu(ucode->inst_size); | ||
6183 | data_size = le32_to_cpu(ucode->data_size); | ||
6184 | init_size = le32_to_cpu(ucode->init_size); | ||
6185 | init_data_size = le32_to_cpu(ucode->init_data_size); | ||
6186 | boot_size = le32_to_cpu(ucode->boot_size); | ||
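| /* Image blocks follow the header in this same order: runtime inst, | ||
| * runtime data, init inst, init data, boot inst (see the per-block | ||
| * copies into DMA buffers further below). */ | ||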
6187 | |||
6188 | IWL_DEBUG_INFO("f/w package hdr ucode version = 0x%x\n", ver); | ||
6189 | IWL_DEBUG_INFO("f/w package hdr runtime inst size = %u\n", | ||
6190 | inst_size); | ||
6191 | IWL_DEBUG_INFO("f/w package hdr runtime data size = %u\n", | ||
6192 | data_size); | ||
6193 | IWL_DEBUG_INFO("f/w package hdr init inst size = %u\n", | ||
6194 | init_size); | ||
6195 | IWL_DEBUG_INFO("f/w package hdr init data size = %u\n", | ||
6196 | init_data_size); | ||
6197 | IWL_DEBUG_INFO("f/w package hdr boot inst size = %u\n", | ||
6198 | boot_size); | ||
6199 | |||
6200 | /* Verify size of file vs. image size info in file's header */ | ||
6201 | if (ucode_raw->size < sizeof(*ucode) + | ||
6202 | inst_size + data_size + init_size + | ||
6203 | init_data_size + boot_size) { | ||
6204 | |||
6205 | IWL_DEBUG_INFO("uCode file size %d too small\n", | ||
6206 | (int)ucode_raw->size); | ||
6207 | rc = -EINVAL; | ||
6208 | goto err_release; | ||
6209 | } | ||
6210 | |||
6211 | /* Verify that uCode images will fit in card's SRAM */ | ||
6212 | if (inst_size > IWL_MAX_INST_SIZE) { | ||
6213 | IWL_DEBUG_INFO("uCode instr len %d too large to fit in card\n", | ||
6214 | (int)inst_size); | ||
6215 | rc = -EINVAL; | ||
6216 | goto err_release; | ||
6217 | } | ||
6218 | |||
6219 | if (data_size > IWL_MAX_DATA_SIZE) { | ||
6220 | IWL_DEBUG_INFO("uCode data len %d too large to fit in card\n", | ||
6221 | (int)data_size); | ||
6222 | rc = -EINVAL; | ||
6223 | goto err_release; | ||
6224 | } | ||
6225 | if (init_size > IWL_MAX_INST_SIZE) { | ||
6226 | IWL_DEBUG_INFO | ||
6227 | ("uCode init instr len %d too large to fit in card\n", | ||
6228 | (int)init_size); | ||
6229 | rc = -EINVAL; | ||
6230 | goto err_release; | ||
6231 | } | ||
6232 | if (init_data_size > IWL_MAX_DATA_SIZE) { | ||
6233 | IWL_DEBUG_INFO | ||
6234 | ("uCode init data len %d too large to fit in card\n", | ||
6235 | (int)init_data_size); | ||
6236 | rc = -EINVAL; | ||
6237 | goto err_release; | ||
6238 | } | ||
6239 | if (boot_size > IWL_MAX_BSM_SIZE) { | ||
6240 | IWL_DEBUG_INFO | ||
6241 | ("uCode boot instr len %d too large to fit in bsm\n", | ||
6242 | (int)boot_size); | ||
6243 | rc = -EINVAL; | ||
6244 | goto err_release; | ||
6245 | } | ||
6246 | |||
6247 | /* Allocate ucode buffers for card's bus-master loading ... */ | ||
6248 | |||
6249 | /* Runtime instructions and 2 copies of data: | ||
6250 | * 1) unmodified from disk | ||
6251 | * 2) backup cache for save/restore during power-downs */ | ||
6252 | priv->ucode_code.len = inst_size; | ||
6253 | priv->ucode_code.v_addr = | ||
6254 | pci_alloc_consistent(priv->pci_dev, | ||
6255 | priv->ucode_code.len, | ||
6256 | &(priv->ucode_code.p_addr)); | ||
6257 | |||
6258 | priv->ucode_data.len = data_size; | ||
6259 | priv->ucode_data.v_addr = | ||
6260 | pci_alloc_consistent(priv->pci_dev, | ||
6261 | priv->ucode_data.len, | ||
6262 | &(priv->ucode_data.p_addr)); | ||
6263 | |||
6264 | priv->ucode_data_backup.len = data_size; | ||
6265 | priv->ucode_data_backup.v_addr = | ||
6266 | pci_alloc_consistent(priv->pci_dev, | ||
6267 | priv->ucode_data_backup.len, | ||
6268 | &(priv->ucode_data_backup.p_addr)); | ||
6269 | |||
6270 | |||
6271 | /* Initialization instructions and data */ | ||
6272 | priv->ucode_init.len = init_size; | ||
6273 | priv->ucode_init.v_addr = | ||
6274 | pci_alloc_consistent(priv->pci_dev, | ||
6275 | priv->ucode_init.len, | ||
6276 | &(priv->ucode_init.p_addr)); | ||
6277 | |||
6278 | priv->ucode_init_data.len = init_data_size; | ||
6279 | priv->ucode_init_data.v_addr = | ||
6280 | pci_alloc_consistent(priv->pci_dev, | ||
6281 | priv->ucode_init_data.len, | ||
6282 | &(priv->ucode_init_data.p_addr)); | ||
6283 | |||
6284 | /* Bootstrap (instructions only, no data) */ | ||
6285 | priv->ucode_boot.len = boot_size; | ||
6286 | priv->ucode_boot.v_addr = | ||
6287 | pci_alloc_consistent(priv->pci_dev, | ||
6288 | priv->ucode_boot.len, | ||
6289 | &(priv->ucode_boot.p_addr)); | ||
6290 | |||
6291 | if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || | ||
6292 | !priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr || | ||
6293 | !priv->ucode_boot.v_addr || !priv->ucode_data_backup.v_addr) | ||
6294 | goto err_pci_alloc; | ||
6295 | |||
6296 | /* Copy images into buffers for card's bus-master reads ... */ | ||
6297 | |||
6298 | /* Runtime instructions (first block of data in file) */ | ||
6299 | src = &ucode->data[0]; | ||
6300 | len = priv->ucode_code.len; | ||
6301 | IWL_DEBUG_INFO("Copying (but not loading) uCode instr len %d\n", | ||
6302 | (int)len); | ||
6303 | memcpy(priv->ucode_code.v_addr, src, len); | ||
6304 | IWL_DEBUG_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", | ||
6305 | priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); | ||
6306 | |||
6307 | /* Runtime data (2nd block) | ||
6308 | * NOTE: Copy into backup buffer will be done in iwl_up() */ | ||
6309 | src = &ucode->data[inst_size]; | ||
6310 | len = priv->ucode_data.len; | ||
6311 | IWL_DEBUG_INFO("Copying (but not loading) uCode data len %d\n", | ||
6312 | (int)len); | ||
6313 | memcpy(priv->ucode_data.v_addr, src, len); | ||
6314 | memcpy(priv->ucode_data_backup.v_addr, src, len); | ||
6315 | |||
6316 | /* Initialization instructions (3rd block) */ | ||
6317 | if (init_size) { | ||
6318 | src = &ucode->data[inst_size + data_size]; | ||
6319 | len = priv->ucode_init.len; | ||
6320 | IWL_DEBUG_INFO("Copying (but not loading) init instr len %d\n", | ||
6321 | (int)len); | ||
6322 | memcpy(priv->ucode_init.v_addr, src, len); | ||
6323 | } | ||
6324 | |||
6325 | /* Initialization data (4th block) */ | ||
6326 | if (init_data_size) { | ||
6327 | src = &ucode->data[inst_size + data_size + init_size]; | ||
6328 | len = priv->ucode_init_data.len; | ||
6329 | IWL_DEBUG_INFO("Copying (but not loading) init data len %d\n", | ||
6330 | (int)len); | ||
6331 | memcpy(priv->ucode_init_data.v_addr, src, len); | ||
6332 | } | ||
6333 | |||
6334 | /* Bootstrap instructions (5th block) */ | ||
6335 | src = &ucode->data[inst_size + data_size + init_size + init_data_size]; | ||
6336 | len = priv->ucode_boot.len; | ||
6337 | IWL_DEBUG_INFO("Copying (but not loading) boot instr len %d\n", | ||
6338 | (int)len); | ||
6339 | memcpy(priv->ucode_boot.v_addr, src, len); | ||
6340 | |||
6341 | /* We have our copies now, allow the OS to release its copy */ | ||
6342 | release_firmware(ucode_raw); | ||
6343 | return 0; | ||
6344 | |||
6345 | err_pci_alloc: | ||
6346 | IWL_ERROR("failed to allocate pci memory\n"); | ||
6347 | rc = -ENOMEM; | ||
6348 | iwl_dealloc_ucode_pci(priv); | ||
6349 | |||
6350 | err_release: | ||
6351 | release_firmware(ucode_raw); | ||
6352 | |||
6353 | error: | ||
6354 | return rc; | ||
6355 | } | ||
6356 | |||
6357 | |||
6358 | /** | ||
6359 | * iwl_set_ucode_ptrs - Set uCode address location | ||
6360 | * | ||
6361 | * Tell initialization uCode where to find runtime uCode. | ||
6362 | * | ||
6363 | * BSM registers initially contain pointers to initialization uCode. | ||
6364 | * We need to replace them to load runtime uCode inst and data, | ||
6365 | * and to save runtime data when powering down. | ||
6366 | */ | ||
6367 | static int iwl_set_ucode_ptrs(struct iwl_priv *priv) | ||
6368 | { | ||
6369 | dma_addr_t pinst; | ||
6370 | dma_addr_t pdata; | ||
6371 | int rc = 0; | ||
6372 | unsigned long flags; | ||
6373 | |||
6374 | /* bits 35:4 for 4965 */ | ||
6375 | pinst = priv->ucode_code.p_addr >> 4; | ||
6376 | pdata = priv->ucode_data_backup.p_addr >> 4; | ||
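| /* The DRAM pointer registers hold bits 35:4 of the DMA address, so | ||
| * the low 4 bits are dropped -- the buffers are presumably expected | ||
| * to be at least 16-byte aligned. */ | ||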
6377 | |||
6378 | spin_lock_irqsave(&priv->lock, flags); | ||
6379 | rc = iwl_grab_restricted_access(priv); | ||
6380 | if (rc) { | ||
6381 | spin_unlock_irqrestore(&priv->lock, flags); | ||
6382 | return rc; | ||
6383 | } | ||
6384 | |||
6385 | /* Tell bootstrap uCode where to find image to load */ | ||
6386 | iwl_write_restricted_reg(priv, BSM_DRAM_INST_PTR_REG, pinst); | ||
6387 | iwl_write_restricted_reg(priv, BSM_DRAM_DATA_PTR_REG, pdata); | ||
6388 | iwl_write_restricted_reg(priv, BSM_DRAM_DATA_BYTECOUNT_REG, | ||
6389 | priv->ucode_data.len); | ||
6390 | |||
6391 | /* Inst bytecount must be last to set up, bit 31 signals uCode | ||
6392 | * that all new ptr/size info is in place */ | ||
6393 | iwl_write_restricted_reg(priv, BSM_DRAM_INST_BYTECOUNT_REG, | ||
6394 | priv->ucode_code.len | BSM_DRAM_INST_LOAD); | ||
6395 | |||
6396 | iwl_release_restricted_access(priv); | ||
6397 | |||
6398 | spin_unlock_irqrestore(&priv->lock, flags); | ||
6399 | |||
6400 | IWL_DEBUG_INFO("Runtime uCode pointers are set.\n"); | ||
6401 | |||
6402 | return rc; | ||
6403 | } | ||
6404 | |||
6405 | /** | ||
6406 | * iwl_init_alive_start - Called after REPLY_ALIVE notification received | ||
6407 | * | ||
6408 | * Called after REPLY_ALIVE notification received from "initialize" uCode. | ||
6409 | * | ||
6410 | * The 4965 "initialize" ALIVE reply contains calibration data for: | ||
6411 | * Voltage, temperature, and MIMO tx gain correction, now stored in priv | ||
6412 | * (3945 does not contain this data). | ||
6413 | * | ||
6414 | * Tell "initialize" uCode to go ahead and load the runtime uCode. | ||
6415 | */ | ||
6416 | static void iwl_init_alive_start(struct iwl_priv *priv) | ||
6417 | { | ||
6418 | /* Check alive response for "valid" sign from uCode */ | ||
6419 | if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { | ||
6420 | /* We had an error bringing up the hardware, so take it | ||
6421 | * all the way back down so we can try again */ | ||
6422 | IWL_DEBUG_INFO("Initialize Alive failed.\n"); | ||
6423 | goto restart; | ||
6424 | } | ||
6425 | |||
6426 | /* Bootstrap uCode has loaded initialize uCode ... verify inst image. | ||
6427 | * This is a paranoid check, because we would not have gotten the | ||
6428 | * "initialize" alive if code weren't properly loaded. */ | ||
6429 | if (iwl_verify_ucode(priv)) { | ||
6430 | /* Runtime instruction load was bad; | ||
6431 | * take it all the way back down so we can try again */ | ||
6432 | IWL_DEBUG_INFO("Bad \"initialize\" uCode load.\n"); | ||
6433 | goto restart; | ||
6434 | } | ||
6435 | |||
6436 | /* Calculate temperature */ | ||
6437 | priv->temperature = iwl4965_get_temperature(priv); | ||
6438 | |||
6439 | /* Send pointers to protocol/runtime uCode image ... init code will | ||
6440 | * load and launch runtime uCode, which will send us another "Alive" | ||
6441 | * notification. */ | ||
6442 | IWL_DEBUG_INFO("Initialization Alive received.\n"); | ||
6443 | if (iwl_set_ucode_ptrs(priv)) { | ||
6444 | /* Runtime instruction load won't happen; | ||
6445 | * take it all the way back down so we can try again */ | ||
6446 | IWL_DEBUG_INFO("Couldn't set up uCode pointers.\n"); | ||
6447 | goto restart; | ||
6448 | } | ||
6449 | return; | ||
6450 | |||
6451 | restart: | ||
6452 | queue_work(priv->workqueue, &priv->restart); | ||
6453 | } | ||
6454 | |||
6455 | |||
6456 | /** | ||
6457 | * iwl_alive_start - called after REPLY_ALIVE notification received | ||
6458 | * from protocol/runtime uCode (initialization uCode's | ||
6459 | * Alive gets handled by iwl_init_alive_start()). | ||
6460 | */ | ||
6461 | static void iwl_alive_start(struct iwl_priv *priv) | ||
6462 | { | ||
6463 | int rc = 0; | ||
6464 | |||
6465 | IWL_DEBUG_INFO("Runtime Alive received.\n"); | ||
6466 | |||
6467 | if (priv->card_alive.is_valid != UCODE_VALID_OK) { | ||
6468 | /* We had an error bringing up the hardware, so take it | ||
6469 | * all the way back down so we can try again */ | ||
6470 | IWL_DEBUG_INFO("Alive failed.\n"); | ||
6471 | goto restart; | ||
6472 | } | ||
6473 | |||
6474 | /* Initialize uCode has loaded Runtime uCode ... verify inst image. | ||
6475 | * This is a paranoid check, because we would not have gotten the | ||
6476 | * "runtime" alive if code weren't properly loaded. */ | ||
6477 | if (iwl_verify_ucode(priv)) { | ||
6478 | /* Runtime instruction load was bad; | ||
6479 | * take it all the way back down so we can try again */ | ||
6480 | IWL_DEBUG_INFO("Bad runtime uCode load.\n"); | ||
6481 | goto restart; | ||
6482 | } | ||
6483 | |||
6484 | iwl_clear_stations_table(priv); | ||
6485 | |||
6486 | rc = iwl4965_alive_notify(priv); | ||
6487 | if (rc) { | ||
6488 | IWL_WARNING("Could not complete ALIVE transition [ntf]: %d\n", | ||
6489 | rc); | ||
6490 | goto restart; | ||
6491 | } | ||
6492 | |||
6493 | /* After the ALIVE response, we can process host commands */ | ||
6494 | set_bit(STATUS_ALIVE, &priv->status); | ||
6495 | |||
6496 | /* Clear out the uCode error bit if it is set */ | ||
6497 | clear_bit(STATUS_FW_ERROR, &priv->status); | ||
6498 | |||
6499 | rc = iwl_init_channel_map(priv); | ||
6500 | if (rc) { | ||
6501 | IWL_ERROR("initializing regulatory failed: %d\n", rc); | ||
6502 | return; | ||
6503 | } | ||
6504 | |||
6505 | iwl_init_geos(priv); | ||
6506 | |||
6507 | if (iwl_is_rfkill(priv)) | ||
6508 | return; | ||
6509 | |||
6510 | if (!priv->mac80211_registered) { | ||
6511 | /* Unlock so any user space entry points can call back into | ||
6512 | * the driver without a deadlock... */ | ||
6513 | mutex_unlock(&priv->mutex); | ||
6514 | iwl_rate_control_register(priv->hw); | ||
6515 | rc = ieee80211_register_hw(priv->hw); | ||
6516 | priv->hw->conf.beacon_int = 100; | ||
6517 | mutex_lock(&priv->mutex); | ||
6518 | |||
6519 | if (rc) { | ||
6520 | IWL_ERROR("Failed to register network " | ||
6521 | "device (error %d)\n", rc); | ||
6522 | return; | ||
6523 | } | ||
6524 | |||
6525 | priv->mac80211_registered = 1; | ||
6526 | |||
6527 | iwl_reset_channel_flag(priv); | ||
6528 | } else | ||
6529 | ieee80211_start_queues(priv->hw); | ||
6530 | |||
6531 | priv->active_rate = priv->rates_mask; | ||
6532 | priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; | ||
6533 | |||
6534 | iwl_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode)); | ||
6535 | |||
6536 | if (iwl_is_associated(priv)) { | ||
6537 | struct iwl_rxon_cmd *active_rxon = | ||
6538 | (struct iwl_rxon_cmd *)(&priv->active_rxon); | ||
6539 | |||
6540 | memcpy(&priv->staging_rxon, &priv->active_rxon, | ||
6541 | sizeof(priv->staging_rxon)); | ||
6542 | active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
6543 | } else { | ||
6544 | /* Initialize our rx_config data */ | ||
6545 | iwl_connection_init_rx_config(priv); | ||
6546 | memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); | ||
6547 | } | ||
6548 | |||
6549 | /* Configure BT coexistence */ | ||
6550 | iwl_send_bt_config(priv); | ||
6551 | |||
6552 | /* Configure the adapter for unassociated operation */ | ||
6553 | iwl_commit_rxon(priv); | ||
6554 | |||
6555 | /* At this point, the NIC is initialized and operational */ | ||
6556 | priv->notif_missed_beacons = 0; | ||
6557 | set_bit(STATUS_READY, &priv->status); | ||
6558 | |||
6559 | iwl4965_rf_kill_ct_config(priv); | ||
6560 | IWL_DEBUG_INFO("ALIVE processing complete.\n"); | ||
6561 | |||
6562 | if (priv->error_recovering) | ||
6563 | iwl_error_recovery(priv); | ||
6564 | |||
6565 | return; | ||
6566 | |||
6567 | restart: | ||
6568 | queue_work(priv->workqueue, &priv->restart); | ||
6569 | } | ||
6570 | |||
6571 | static void iwl_cancel_deferred_work(struct iwl_priv *priv); | ||
6572 | |||
6573 | static void __iwl_down(struct iwl_priv *priv) | ||
6574 | { | ||
6575 | unsigned long flags; | ||
6576 | int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); | ||
6577 | struct ieee80211_conf *conf = NULL; | ||
6578 | |||
6579 | IWL_DEBUG_INFO(DRV_NAME " is going down\n"); | ||
6580 | |||
6581 | conf = ieee80211_get_hw_conf(priv->hw); | ||
6582 | |||
6583 | if (!exit_pending) | ||
6584 | set_bit(STATUS_EXIT_PENDING, &priv->status); | ||
6585 | |||
6586 | iwl_clear_stations_table(priv); | ||
6587 | |||
6588 | /* Unblock any waiting calls */ | ||
6589 | wake_up_interruptible_all(&priv->wait_command_queue); | ||
6590 | |||
6591 | iwl_cancel_deferred_work(priv); | ||
6592 | |||
6593 | /* Wipe out the EXIT_PENDING status bit if we are not actually | ||
6594 | * exiting the module */ | ||
6595 | if (!exit_pending) | ||
6596 | clear_bit(STATUS_EXIT_PENDING, &priv->status); | ||
6597 | |||
6598 | /* stop and reset the on-board processor */ | ||
6599 | iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); | ||
6600 | |||
6601 | /* tell the device to stop sending interrupts */ | ||
6602 | iwl_disable_interrupts(priv); | ||
6603 | |||
6604 | if (priv->mac80211_registered) | ||
6605 | ieee80211_stop_queues(priv->hw); | ||
6606 | |||
6607 | /* If we have not previously called iwl_init() then | ||
6608 | * clear all bits but the RF Kill and SUSPEND bits and return */ | ||
6609 | if (!iwl_is_init(priv)) { | ||
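| /* test_bit() returns 0 or 1, so shifting each result back by its | ||
| * bit index rebuilds a status word containing only those flags. */ | ||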
6610 | priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << | ||
6611 | STATUS_RF_KILL_HW | | ||
6612 | test_bit(STATUS_RF_KILL_SW, &priv->status) << | ||
6613 | STATUS_RF_KILL_SW | | ||
6614 | test_bit(STATUS_IN_SUSPEND, &priv->status) << | ||
6615 | STATUS_IN_SUSPEND; | ||
6616 | goto exit; | ||
6617 | } | ||
6618 | |||
6619 | /* ...otherwise clear out all the status bits but the RF Kill and | ||
6620 | * SUSPEND bits and continue taking the NIC down. */ | ||
6621 | priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << | ||
6622 | STATUS_RF_KILL_HW | | ||
6623 | test_bit(STATUS_RF_KILL_SW, &priv->status) << | ||
6624 | STATUS_RF_KILL_SW | | ||
6625 | test_bit(STATUS_IN_SUSPEND, &priv->status) << | ||
6626 | STATUS_IN_SUSPEND | | ||
6627 | test_bit(STATUS_FW_ERROR, &priv->status) << | ||
6628 | STATUS_FW_ERROR; | ||
6629 | |||
6630 | spin_lock_irqsave(&priv->lock, flags); | ||
6631 | iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | ||
6632 | spin_unlock_irqrestore(&priv->lock, flags); | ||
6633 | |||
6634 | iwl_hw_txq_ctx_stop(priv); | ||
6635 | iwl_hw_rxq_stop(priv); | ||
6636 | |||
6637 | spin_lock_irqsave(&priv->lock, flags); | ||
6638 | if (!iwl_grab_restricted_access(priv)) { | ||
6639 | iwl_write_restricted_reg(priv, APMG_CLK_DIS_REG, | ||
6640 | APMG_CLK_VAL_DMA_CLK_RQT); | ||
6641 | iwl_release_restricted_access(priv); | ||
6642 | } | ||
6643 | spin_unlock_irqrestore(&priv->lock, flags); | ||
6644 | |||
6645 | udelay(5); | ||
6646 | |||
6647 | iwl_hw_nic_stop_master(priv); | ||
6648 | iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); | ||
6649 | iwl_hw_nic_reset(priv); | ||
6650 | |||
6651 | exit: | ||
6652 | memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); | ||
6653 | |||
6654 | if (priv->ibss_beacon) | ||
6655 | dev_kfree_skb(priv->ibss_beacon); | ||
6656 | priv->ibss_beacon = NULL; | ||
6657 | |||
6658 | /* clear out any free frames */ | ||
6659 | iwl_clear_free_frames(priv); | ||
6660 | } | ||
6661 | |||
6662 | static void iwl_down(struct iwl_priv *priv) | ||
6663 | { | ||
6664 | mutex_lock(&priv->mutex); | ||
6665 | __iwl_down(priv); | ||
6666 | mutex_unlock(&priv->mutex); | ||
6667 | } | ||
6668 | |||
6669 | #define MAX_HW_RESTARTS 5 | ||
6670 | |||
6671 | static int __iwl_up(struct iwl_priv *priv) | ||
6672 | { | ||
6673 | int rc, i; | ||
6674 | u32 hw_rf_kill = 0; | ||
6675 | |||
6676 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { | ||
6677 | IWL_WARNING("Exit pending; will not bring the NIC up\n"); | ||
6678 | return -EIO; | ||
6679 | } | ||
6680 | |||
6681 | if (test_bit(STATUS_RF_KILL_SW, &priv->status)) { | ||
6682 | IWL_WARNING("Radio disabled by SW RF kill (module " | ||
6683 | "parameter)\n"); | ||
6684 | return 0; | ||
6685 | } | ||
6686 | |||
6687 | iwl_write32(priv, CSR_INT, 0xFFFFFFFF); | ||
6688 | |||
6689 | rc = iwl_hw_nic_init(priv); | ||
6690 | if (rc) { | ||
6691 | IWL_ERROR("Unable to init NIC\n"); | ||
6692 | return rc; | ||
6693 | } | ||
6694 | |||
6695 | /* make sure rfkill handshake bits are cleared */ | ||
6696 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
6697 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, | ||
6698 | CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); | ||
6699 | |||
6700 | /* clear (again), then enable host interrupts */ | ||
6701 | iwl_write32(priv, CSR_INT, 0xFFFFFFFF); | ||
6702 | iwl_enable_interrupts(priv); | ||
6703 | |||
6704 | /* really make sure rfkill handshake bits are cleared */ | ||
6705 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
6706 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
6707 | |||
6708 | /* Copy original ucode data image from disk into backup cache. | ||
6709 | * This will be used to initialize the on-board processor's | ||
6710 | * data SRAM for a clean start when the runtime program first loads. */ | ||
6711 | memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr, | ||
6712 | priv->ucode_data.len); | ||
6713 | |||
6714 | /* If platform's RF_KILL switch is set to KILL, | ||
6715 | * wait for BIT_INT_RF_KILL interrupt before loading uCode | ||
6716 | * and getting things started */ | ||
6717 | if (!(iwl_read32(priv, CSR_GP_CNTRL) & | ||
6718 | CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) | ||
6719 | hw_rf_kill = 1; | ||
6720 | |||
6721 | if (test_bit(STATUS_RF_KILL_HW, &priv->status) || hw_rf_kill) { | ||
6722 | IWL_WARNING("Radio disabled by HW RF Kill switch\n"); | ||
6723 | return 0; | ||
6724 | } | ||
6725 | |||
6726 | for (i = 0; i < MAX_HW_RESTARTS; i++) { | ||
6727 | |||
6728 | iwl_clear_stations_table(priv); | ||
6729 | |||
6730 | /* load bootstrap state machine, | ||
6731 | * load bootstrap program into processor's memory, | ||
6732 | * prepare to load the "initialize" uCode */ | ||
6733 | rc = iwl_load_bsm(priv); | ||
6734 | |||
6735 | if (rc) { | ||
6736 | IWL_ERROR("Unable to set up bootstrap uCode: %d\n", rc); | ||
6737 | continue; | ||
6738 | } | ||
6739 | |||
6740 | /* start card; "initialize" will load runtime ucode */ | ||
6741 | iwl_nic_start(priv); | ||
6742 | |||
6743 | /* MAC Address location in EEPROM same for 3945/4965 */ | ||
6744 | get_eeprom_mac(priv, priv->mac_addr); | ||
6745 | IWL_DEBUG_INFO("MAC address: " MAC_FMT "\n", | ||
6746 | MAC_ARG(priv->mac_addr)); | ||
6747 | |||
6748 | SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr); | ||
6749 | |||
6750 | IWL_DEBUG_INFO(DRV_NAME " is coming up\n"); | ||
6751 | |||
6752 | return 0; | ||
6753 | } | ||
6754 | |||
6755 | set_bit(STATUS_EXIT_PENDING, &priv->status); | ||
6756 | __iwl_down(priv); | ||
6757 | |||
6758 | /* tried to restart and config the device for as long as our | ||
6759 | * patience could withstand */ | ||
6760 | IWL_ERROR("Unable to initialize device after %d attempts.\n", i); | ||
6761 | return -EIO; | ||
6762 | } | ||
6763 | |||
6764 | |||
6765 | /***************************************************************************** | ||
6766 | * | ||
6767 | * Workqueue callbacks | ||
6768 | * | ||
6769 | *****************************************************************************/ | ||
6770 | |||
6771 | static void iwl_bg_init_alive_start(struct work_struct *data) | ||
6772 | { | ||
6773 | struct iwl_priv *priv = | ||
6774 | container_of(data, struct iwl_priv, init_alive_start.work); | ||
6775 | |||
6776 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
6777 | return; | ||
6778 | |||
6779 | mutex_lock(&priv->mutex); | ||
6780 | iwl_init_alive_start(priv); | ||
6781 | mutex_unlock(&priv->mutex); | ||
6782 | } | ||
6783 | |||
6784 | static void iwl_bg_alive_start(struct work_struct *data) | ||
6785 | { | ||
6786 | struct iwl_priv *priv = | ||
6787 | container_of(data, struct iwl_priv, alive_start.work); | ||
6788 | |||
6789 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
6790 | return; | ||
6791 | |||
6792 | mutex_lock(&priv->mutex); | ||
6793 | iwl_alive_start(priv); | ||
6794 | mutex_unlock(&priv->mutex); | ||
6795 | } | ||
6796 | |||
6797 | static void iwl_bg_rf_kill(struct work_struct *work) | ||
6798 | { | ||
6799 | struct iwl_priv *priv = container_of(work, struct iwl_priv, rf_kill); | ||
6800 | |||
6801 | wake_up_interruptible(&priv->wait_command_queue); | ||
6802 | |||
6803 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
6804 | return; | ||
6805 | |||
6806 | mutex_lock(&priv->mutex); | ||
6807 | |||
6808 | if (!iwl_is_rfkill(priv)) { | ||
6809 | IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL, | ||
6810 | "HW and/or SW RF Kill no longer active, restarting " | ||
6811 | "device\n"); | ||
6812 | if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
6813 | queue_work(priv->workqueue, &priv->restart); | ||
6814 | } else { | ||
6815 | |||
6816 | if (!test_bit(STATUS_RF_KILL_HW, &priv->status)) | ||
6817 | IWL_DEBUG_RF_KILL("Can not turn radio back on - " | ||
6818 | "disabled by SW switch\n"); | ||
6819 | else | ||
6820 | IWL_WARNING("Radio Frequency Kill Switch is On:\n" | ||
6821 | "Kill switch must be turned off for " | ||
6822 | "wireless networking to work.\n"); | ||
6823 | } | ||
6824 | mutex_unlock(&priv->mutex); | ||
6825 | } | ||
6826 | |||
6827 | #define IWL_SCAN_CHECK_WATCHDOG (7 * HZ) | ||
6828 | |||
6829 | static void iwl_bg_scan_check(struct work_struct *data) | ||
6830 | { | ||
6831 | struct iwl_priv *priv = | ||
6832 | container_of(data, struct iwl_priv, scan_check.work); | ||
6833 | |||
6834 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
6835 | return; | ||
6836 | |||
6837 | mutex_lock(&priv->mutex); | ||
6838 | if (test_bit(STATUS_SCANNING, &priv->status) || | ||
6839 | test_bit(STATUS_SCAN_ABORTING, &priv->status)) { | ||
6840 | IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, | ||
6841 | "Scan completion watchdog resetting adapter (%dms)\n", | ||
6842 | jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG)); | ||
6843 | if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
6844 | queue_work(priv->workqueue, &priv->restart); | ||
6845 | } | ||
6846 | mutex_unlock(&priv->mutex); | ||
6847 | } | ||
6848 | |||
6849 | static void iwl_bg_request_scan(struct work_struct *data) | ||
6850 | { | ||
6851 | struct iwl_priv *priv = | ||
6852 | container_of(data, struct iwl_priv, request_scan); | ||
6853 | struct iwl_host_cmd cmd = { | ||
6854 | .id = REPLY_SCAN_CMD, | ||
6855 | .len = sizeof(struct iwl_scan_cmd), | ||
6856 | .meta.flags = CMD_SIZE_HUGE, | ||
6857 | }; | ||
6858 | int rc = 0; | ||
6859 | struct iwl_scan_cmd *scan; | ||
6860 | struct ieee80211_conf *conf = NULL; | ||
6861 | u8 direct_mask; | ||
6862 | int phymode; | ||
6863 | |||
6864 | conf = ieee80211_get_hw_conf(priv->hw); | ||
6865 | |||
6866 | mutex_lock(&priv->mutex); | ||
6867 | |||
6868 | if (!iwl_is_ready(priv)) { | ||
6869 | IWL_WARNING("request scan called when driver not ready.\n"); | ||
6870 | goto done; | ||
6871 | } | ||
6872 | |||
6873 | /* Make sure the scan wasn't cancelled before this queued work | ||
6874 | * was given the chance to run... */ | ||
6875 | if (!test_bit(STATUS_SCANNING, &priv->status)) | ||
6876 | goto done; | ||
6877 | |||
6878 | /* This should never be called or scheduled if there is currently | ||
6879 | * a scan active in the hardware. */ | ||
6880 | if (test_bit(STATUS_SCAN_HW, &priv->status)) { | ||
6881 | IWL_DEBUG_INFO("Scan requested while scan already in progress. " | ||
6882 | "Ignoring second request.\n"); | ||
6883 | rc = -EIO; | ||
6884 | goto done; | ||
6885 | } | ||
6886 | |||
6887 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { | ||
6888 | IWL_DEBUG_SCAN("Aborting scan due to device shutdown\n"); | ||
6889 | goto done; | ||
6890 | } | ||
6891 | |||
6892 | if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { | ||
6893 | IWL_DEBUG_HC("Scan request while abort pending. Queuing.\n"); | ||
6894 | goto done; | ||
6895 | } | ||
6896 | |||
6897 | if (iwl_is_rfkill(priv)) { | ||
6898 | IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n"); | ||
6899 | goto done; | ||
6900 | } | ||
6901 | |||
6902 | if (!test_bit(STATUS_READY, &priv->status)) { | ||
6903 | IWL_DEBUG_HC("Scan request while uninitialized. Queuing.\n"); | ||
6904 | goto done; | ||
6905 | } | ||
6906 | |||
6907 | if (!priv->scan_bands) { | ||
6908 | IWL_DEBUG_HC("Aborting scan due to no requested bands\n"); | ||
6909 | goto done; | ||
6910 | } | ||
6911 | |||
6912 | if (!priv->scan) { | ||
6913 | priv->scan = kmalloc(sizeof(struct iwl_scan_cmd) + | ||
6914 | IWL_MAX_SCAN_SIZE, GFP_KERNEL); | ||
6915 | if (!priv->scan) { | ||
6916 | rc = -ENOMEM; | ||
6917 | goto done; | ||
6918 | } | ||
6919 | } | ||
6920 | scan = priv->scan; | ||
6921 | memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE); | ||
6922 | |||
6923 | scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; | ||
6924 | scan->quiet_time = IWL_ACTIVE_QUIET_TIME; | ||
6925 | |||
6926 | if (iwl_is_associated(priv)) { | ||
6927 | u16 interval = 0; | ||
6928 | u32 extra; | ||
6929 | u32 suspend_time = 100; | ||
6930 | u32 scan_suspend_time = 100; | ||
6931 | unsigned long flags; | ||
6932 | |||
6933 | IWL_DEBUG_INFO("Scanning while associated...\n"); | ||
6934 | |||
6935 | spin_lock_irqsave(&priv->lock, flags); | ||
6936 | interval = priv->beacon_int; | ||
6937 | spin_unlock_irqrestore(&priv->lock, flags); | ||
6938 | |||
6939 | scan->suspend_time = 0; | ||
6940 | scan->max_out_time = cpu_to_le32(600 * 1024); | ||
6941 | if (!interval) | ||
6942 | interval = suspend_time; | ||
6943 | |||
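| /* Pack the dwell pattern for the uCode: the whole-beacon-interval | ||
| * count goes in the upper bits (<< 22); the remainder, presumably | ||
| * converted from TU to usec by the * 1024, fills the low bits. */ | ||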
6944 | extra = (suspend_time / interval) << 22; | ||
6945 | scan_suspend_time = (extra | | ||
6946 | ((suspend_time % interval) * 1024)); | ||
6947 | scan->suspend_time = cpu_to_le32(scan_suspend_time); | ||
6948 | IWL_DEBUG_SCAN("suspend_time 0x%X beacon interval %d\n", | ||
6949 | scan_suspend_time, interval); | ||
6950 | } | ||
6951 | |||
6952 | /* We should add the ability for user to lock to PASSIVE ONLY */ | ||
6953 | if (priv->one_direct_scan) { | ||
6954 | IWL_DEBUG_SCAN | ||
6955 | ("Kicking off one direct scan for '%s'\n", | ||
6956 | iwl_escape_essid(priv->direct_ssid, | ||
6957 | priv->direct_ssid_len)); | ||
6958 | scan->direct_scan[0].id = WLAN_EID_SSID; | ||
6959 | scan->direct_scan[0].len = priv->direct_ssid_len; | ||
6960 | memcpy(scan->direct_scan[0].ssid, | ||
6961 | priv->direct_ssid, priv->direct_ssid_len); | ||
6962 | direct_mask = 1; | ||
6963 | } else if (!iwl_is_associated(priv)) { | ||
6964 | scan->direct_scan[0].id = WLAN_EID_SSID; | ||
6965 | scan->direct_scan[0].len = priv->essid_len; | ||
6966 | memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len); | ||
6967 | direct_mask = 1; | ||
6968 | } else | ||
6969 | direct_mask = 0; | ||
6970 | |||
6971 | /* We don't build a direct scan probe request; the uCode will do | ||
6972 | * that based on the direct_mask added to each channel entry */ | ||
6973 | scan->tx_cmd.len = cpu_to_le16( | ||
6974 | iwl_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data, | ||
6975 | IWL_MAX_SCAN_SIZE - sizeof(scan), 0)); | ||
6976 | scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; | ||
6977 | scan->tx_cmd.sta_id = priv->hw_setting.bcast_sta_id; | ||
6978 | scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; | ||
6979 | |||
6980 | /* flags + rate selection */ | ||
6981 | |||
6982 | scan->tx_cmd.tx_flags |= cpu_to_le32(0x200); | ||
6983 | |||
6984 | switch (priv->scan_bands) { | ||
6985 | case 2: | ||
6986 | scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; | ||
6987 | scan->tx_cmd.rate_n_flags = | ||
6988 | iwl_hw_set_rate_n_flags(IWL_RATE_1M_PLCP, | ||
6989 | RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK); | ||
6990 | |||
6991 | scan->good_CRC_th = 0; | ||
6992 | phymode = MODE_IEEE80211G; | ||
6993 | break; | ||
6994 | |||
6995 | case 1: | ||
6996 | scan->tx_cmd.rate_n_flags = | ||
6997 | iwl_hw_set_rate_n_flags(IWL_RATE_6M_PLCP, | ||
6998 | RATE_MCS_ANT_B_MSK); | ||
6999 | scan->good_CRC_th = IWL_GOOD_CRC_TH; | ||
7000 | phymode = MODE_IEEE80211A; | ||
7001 | break; | ||
7002 | |||
7003 | default: | ||
7004 | IWL_WARNING("Invalid scan band count\n"); | ||
7005 | goto done; | ||
7006 | } | ||
7007 | |||
7008 | /* select Rx chains */ | ||
7009 | |||
7010 | /* Force use of chains B and C (0x6) for scan Rx. | ||
7011 | * Avoid A (0x1) because of its off-channel reception on A-band. | ||
7012 | * MIMO is not used here, but value is required to make uCode happy. */ | ||
7013 | scan->rx_chain = RXON_RX_CHAIN_DRIVER_FORCE_MSK | | ||
7014 | cpu_to_le16((0x7 << RXON_RX_CHAIN_VALID_POS) | | ||
7015 | (0x6 << RXON_RX_CHAIN_FORCE_SEL_POS) | | ||
7016 | (0x7 << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS)); | ||
7017 | |||
7018 | if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) | ||
7019 | scan->filter_flags = RXON_FILTER_PROMISC_MSK; | ||
7020 | |||
7021 | if (direct_mask) | ||
7022 | IWL_DEBUG_SCAN | ||
7023 | ("Initiating direct scan for %s.\n", | ||
7024 | iwl_escape_essid(priv->essid, priv->essid_len)); | ||
7025 | else | ||
7026 | IWL_DEBUG_SCAN("Initiating indirect scan.\n"); | ||
7027 | |||
7028 | scan->channel_count = | ||
7029 | iwl_get_channels_for_scan( | ||
7030 | priv, phymode, 1, /* active */ | ||
7031 | direct_mask, | ||
7032 | (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); | ||
7033 | |||
7034 | cmd.len += le16_to_cpu(scan->tx_cmd.len) + | ||
7035 | scan->channel_count * sizeof(struct iwl_scan_channel); | ||
7036 | cmd.data = scan; | ||
7037 | scan->len = cpu_to_le16(cmd.len); | ||
7038 | |||
7039 | set_bit(STATUS_SCAN_HW, &priv->status); | ||
7040 | rc = iwl_send_cmd_sync(priv, &cmd); | ||
7041 | if (rc) | ||
7042 | goto done; | ||
7043 | |||
7044 | queue_delayed_work(priv->workqueue, &priv->scan_check, | ||
7045 | IWL_SCAN_CHECK_WATCHDOG); | ||
7046 | |||
7047 | mutex_unlock(&priv->mutex); | ||
7048 | return; | ||
7049 | |||
7050 | done: | ||
7051 | /* inform mac80211 that the scan was aborted */ | ||
7052 | queue_work(priv->workqueue, &priv->scan_completed); | ||
7053 | mutex_unlock(&priv->mutex); | ||
7054 | } | ||
7055 | |||
7056 | static void iwl_bg_up(struct work_struct *data) | ||
7057 | { | ||
7058 | struct iwl_priv *priv = container_of(data, struct iwl_priv, up); | ||
7059 | |||
7060 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
7061 | return; | ||
7062 | |||
7063 | mutex_lock(&priv->mutex); | ||
7064 | __iwl_up(priv); | ||
7065 | mutex_unlock(&priv->mutex); | ||
7066 | } | ||
7067 | |||
7068 | static void iwl_bg_restart(struct work_struct *data) | ||
7069 | { | ||
7070 | struct iwl_priv *priv = container_of(data, struct iwl_priv, restart); | ||
7071 | |||
7072 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
7073 | return; | ||
7074 | |||
7075 | iwl_down(priv); | ||
7076 | queue_work(priv->workqueue, &priv->up); | ||
7077 | } | ||
7078 | |||
7079 | static void iwl_bg_rx_replenish(struct work_struct *data) | ||
7080 | { | ||
7081 | struct iwl_priv *priv = | ||
7082 | container_of(data, struct iwl_priv, rx_replenish); | ||
7083 | |||
7084 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
7085 | return; | ||
7086 | |||
7087 | mutex_lock(&priv->mutex); | ||
7088 | iwl_rx_replenish(priv); | ||
7089 | mutex_unlock(&priv->mutex); | ||
7090 | } | ||
7091 | |||
7092 | static void iwl_bg_post_associate(struct work_struct *data) | ||
7093 | { | ||
7094 | struct iwl_priv *priv = container_of(data, struct iwl_priv, | ||
7095 | post_associate.work); | ||
7096 | |||
7097 | int rc = 0; | ||
7098 | struct ieee80211_conf *conf = NULL; | ||
7099 | |||
7100 | if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { | ||
7101 | IWL_ERROR("%s Should not be called in AP mode\n", __FUNCTION__); | ||
7102 | return; | ||
7103 | } | ||
7104 | |||
7105 | IWL_DEBUG_ASSOC("Associated as %d to: " MAC_FMT "\n", | ||
7106 | priv->assoc_id, MAC_ARG(priv->active_rxon.bssid_addr)); | ||
7107 | |||
7108 | |||
7109 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
7110 | return; | ||
7111 | |||
7112 | mutex_lock(&priv->mutex); | ||
7113 | |||
7114 | conf = ieee80211_get_hw_conf(priv->hw); | ||
7115 | |||
7116 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
7117 | iwl_commit_rxon(priv); | ||
7118 | |||
7119 | memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd)); | ||
7120 | iwl_setup_rxon_timing(priv); | ||
7121 | rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, | ||
7122 | sizeof(priv->rxon_timing), &priv->rxon_timing); | ||
7123 | if (rc) | ||
7124 | IWL_WARNING("REPLY_RXON_TIMING failed - " | ||
7125 | "Attempting to continue.\n"); | ||
7126 | |||
7127 | priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; | ||
7128 | |||
7129 | #ifdef CONFIG_IWLWIFI_HT | ||
7130 | if (priv->is_ht_enabled && priv->current_assoc_ht.is_ht) | ||
7131 | iwl4965_set_rxon_ht(priv, &priv->current_assoc_ht); | ||
7132 | else { | ||
7133 | priv->active_rate_ht[0] = 0; | ||
7134 | priv->active_rate_ht[1] = 0; | ||
7135 | priv->current_channel_width = IWL_CHANNEL_WIDTH_20MHZ; | ||
7136 | } | ||
7137 | #endif /* CONFIG_IWLWIFI_HT*/ | ||
7138 | iwl4965_set_rxon_chain(priv); | ||
7139 | priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); | ||
7140 | |||
7141 | IWL_DEBUG_ASSOC("assoc id %d beacon interval %d\n", | ||
7142 | priv->assoc_id, priv->beacon_int); | ||
7143 | |||
7144 | if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) | ||
7145 | priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; | ||
7146 | else | ||
7147 | priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; | ||
7148 | |||
7149 | if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { | ||
7150 | if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) | ||
7151 | priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; | ||
7152 | else | ||
7153 | priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; | ||
7154 | |||
7155 | if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) | ||
7156 | priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; | ||
7157 | |||
7158 | } | ||
7159 | |||
7160 | iwl_commit_rxon(priv); | ||
7161 | |||
7162 | switch (priv->iw_mode) { | ||
7163 | case IEEE80211_IF_TYPE_STA: | ||
7164 | iwl_rate_scale_init(priv->hw, IWL_AP_ID); | ||
7165 | break; | ||
7166 | |||
7167 | case IEEE80211_IF_TYPE_IBSS: | ||
7168 | |||
7169 | /* clear out the station table */ | ||
7170 | iwl_clear_stations_table(priv); | ||
7171 | |||
7172 | iwl_rxon_add_station(priv, BROADCAST_ADDR, 0); | ||
7173 | iwl_rxon_add_station(priv, priv->bssid, 0); | ||
7174 | iwl_rate_scale_init(priv->hw, IWL_STA_ID); | ||
7175 | iwl_send_beacon_cmd(priv); | ||
7176 | |||
7177 | break; | ||
7178 | |||
7179 | default: | ||
7180 | IWL_ERROR("%s Should not be called in %d mode\n", | ||
7181 | __FUNCTION__, priv->iw_mode); | ||
7182 | break; | ||
7183 | } | ||
7184 | |||
7185 | iwl_sequence_reset(priv); | ||
7186 | |||
7187 | #ifdef CONFIG_IWLWIFI_SENSITIVITY | ||
7188 | /* Enable Rx differential gain and sensitivity calibrations */ | ||
7189 | iwl4965_chain_noise_reset(priv); | ||
7190 | priv->start_calib = 1; | ||
7191 | #endif /* CONFIG_IWLWIFI_SENSITIVITY */ | ||
7192 | |||
7193 | if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) | ||
7194 | priv->assoc_station_added = 1; | ||
7195 | |||
7196 | #ifdef CONFIG_IWLWIFI_QOS | ||
7197 | iwl_activate_qos(priv, 0); | ||
7198 | #endif /* CONFIG_IWLWIFI_QOS */ | ||
7199 | mutex_unlock(&priv->mutex); | ||
7200 | } | ||
7201 | |||
7202 | static void iwl_bg_abort_scan(struct work_struct *work) | ||
7203 | { | ||
7204 | struct iwl_priv *priv = container_of(work, struct iwl_priv, | ||
7205 | abort_scan); | ||
7206 | |||
7207 | if (!iwl_is_ready(priv)) | ||
7208 | return; | ||
7209 | |||
7210 | mutex_lock(&priv->mutex); | ||
7211 | |||
7212 | set_bit(STATUS_SCAN_ABORTING, &priv->status); | ||
7213 | iwl_send_scan_abort(priv); | ||
7214 | |||
7215 | mutex_unlock(&priv->mutex); | ||
7216 | } | ||
7217 | |||
7218 | static void iwl_bg_scan_completed(struct work_struct *work) | ||
7219 | { | ||
7220 | struct iwl_priv *priv = | ||
7221 | container_of(work, struct iwl_priv, scan_completed); | ||
7222 | |||
7223 | IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete\n"); | ||
7224 | |||
7225 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
7226 | return; | ||
7227 | |||
7228 | ieee80211_scan_completed(priv->hw); | ||
7229 | |||
7230 | /* Since setting the TXPOWER may have been deferred while | ||
7231 | * performing the scan, fire one off */ | ||
7232 | mutex_lock(&priv->mutex); | ||
7233 | iwl_hw_reg_send_txpower(priv); | ||
7234 | mutex_unlock(&priv->mutex); | ||
7235 | } | ||
7236 | |||
7237 | /***************************************************************************** | ||
7238 | * | ||
7239 | * mac80211 entry point functions | ||
7240 | * | ||
7241 | *****************************************************************************/ | ||
7242 | |||
7243 | static int iwl_mac_open(struct ieee80211_hw *hw) | ||
7244 | { | ||
7245 | struct iwl_priv *priv = hw->priv; | ||
7246 | |||
7247 | IWL_DEBUG_MAC80211("enter\n"); | ||
7248 | |||
7249 | /* we should be verifying the device is ready to be opened */ | ||
7250 | mutex_lock(&priv->mutex); | ||
7251 | |||
7252 | priv->is_open = 1; | ||
7253 | |||
7254 | if (!iwl_is_rfkill(priv)) | ||
7255 | ieee80211_start_queues(priv->hw); | ||
7256 | |||
7257 | mutex_unlock(&priv->mutex); | ||
7258 | IWL_DEBUG_MAC80211("leave\n"); | ||
7259 | return 0; | ||
7260 | } | ||
7261 | |||
7262 | static int iwl_mac_stop(struct ieee80211_hw *hw) | ||
7263 | { | ||
7264 | struct iwl_priv *priv = hw->priv; | ||
7265 | |||
7266 | IWL_DEBUG_MAC80211("enter\n"); | ||
7267 | priv->is_open = 0; | ||
7268 | /*netif_stop_queue(dev); */ | ||
7269 | flush_workqueue(priv->workqueue); | ||
7270 | IWL_DEBUG_MAC80211("leave\n"); | ||
7271 | |||
7272 | return 0; | ||
7273 | } | ||
7274 | |||
7275 | static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, | ||
7276 | struct ieee80211_tx_control *ctl) | ||
7277 | { | ||
7278 | struct iwl_priv *priv = hw->priv; | ||
7279 | |||
7280 | IWL_DEBUG_MAC80211("enter\n"); | ||
7281 | |||
7282 | if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { | ||
7283 | IWL_DEBUG_MAC80211("leave - monitor\n"); | ||
7284 | return -1; | ||
7285 | } | ||
7286 | |||
7287 | IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, | ||
7288 | ctl->tx_rate); | ||
7289 | |||
7290 | if (iwl_tx_skb(priv, skb, ctl)) | ||
7291 | dev_kfree_skb_any(skb); | ||
7292 | |||
7293 | IWL_DEBUG_MAC80211("leave\n"); | ||
7294 | return 0; | ||
7295 | } | ||
7296 | |||
7297 | static int iwl_mac_add_interface(struct ieee80211_hw *hw, | ||
7298 | struct ieee80211_if_init_conf *conf) | ||
7299 | { | ||
7300 | struct iwl_priv *priv = hw->priv; | ||
7301 | unsigned long flags; | ||
7302 | |||
7303 | IWL_DEBUG_MAC80211("enter: id %d, type %d\n", conf->if_id, conf->type); | ||
7304 | if (conf->mac_addr) | ||
7305 | IWL_DEBUG_MAC80211("enter: MAC " MAC_FMT "\n", | ||
7306 | MAC_ARG(conf->mac_addr)); | ||
7307 | |||
7308 | if (priv->interface_id) { | ||
7309 | IWL_DEBUG_MAC80211("leave - interface_id != 0\n"); | ||
7310 | return 0; | ||
7311 | } | ||
7312 | |||
7313 | spin_lock_irqsave(&priv->lock, flags); | ||
7314 | priv->interface_id = conf->if_id; | ||
7315 | |||
7316 | spin_unlock_irqrestore(&priv->lock, flags); | ||
7317 | |||
7318 | mutex_lock(&priv->mutex); | ||
7319 | iwl_set_mode(priv, conf->type); | ||
7320 | |||
7321 | IWL_DEBUG_MAC80211("leave\n"); | ||
7322 | mutex_unlock(&priv->mutex); | ||
7323 | |||
7324 | return 0; | ||
7325 | } | ||
7326 | |||
7327 | /** | ||
7328 | * iwl_mac_config - mac80211 config callback | ||
7329 | * | ||
7330 | * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to | ||
7331 | * be set inappropriately and the driver currently sets the hardware up to | ||
7332 | * use it whenever needed. | ||
7333 | */ | ||
7334 | static int iwl_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) | ||
7335 | { | ||
7336 | struct iwl_priv *priv = hw->priv; | ||
7337 | const struct iwl_channel_info *ch_info; | ||
7338 | unsigned long flags; | ||
7339 | |||
7340 | mutex_lock(&priv->mutex); | ||
7341 | IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel); | ||
7342 | |||
7343 | if (!iwl_is_ready(priv)) { | ||
7344 | IWL_DEBUG_MAC80211("leave - not ready\n"); | ||
7345 | mutex_unlock(&priv->mutex); | ||
7346 | return -EIO; | ||
7347 | } | ||
7348 | |||
7349 | /* TODO: Figure out how to get ieee80211_local->sta_scanning w/ only | ||
7350 | * what is exposed through include/ declarations */ | ||
7351 | if (unlikely(!iwl_param_disable_hw_scan && | ||
7352 | test_bit(STATUS_SCANNING, &priv->status))) { | ||
7353 | IWL_DEBUG_MAC80211("leave - scanning\n"); | ||
7354 | mutex_unlock(&priv->mutex); | ||
7355 | return 0; | ||
7356 | } | ||
7357 | |||
7358 | spin_lock_irqsave(&priv->lock, flags); | ||
7359 | |||
7360 | ch_info = iwl_get_channel_info(priv, conf->phymode, conf->channel); | ||
7361 | if (!is_channel_valid(ch_info)) { | ||
7362 | IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this SKU.\n", | ||
7363 | conf->channel, conf->phymode); | ||
7364 | IWL_DEBUG_MAC80211("leave - invalid channel\n"); | ||
7365 | spin_unlock_irqrestore(&priv->lock, flags); | ||
7366 | mutex_unlock(&priv->mutex); | ||
7367 | return -EINVAL; | ||
7368 | } | ||
7369 | |||
7370 | #ifdef CONFIG_IWLWIFI_HT | ||
7371 | /* if we are switching from HT to 2.4 GHz, clear flags | ||
7372 | * from any HT-related info since 2.4 GHz does not | ||
7373 | * support HT */ | ||
7374 | if ((le16_to_cpu(priv->staging_rxon.channel) != conf->channel) | ||
7375 | #ifdef IEEE80211_CONF_CHANNEL_SWITCH | ||
7376 | && !(conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) | ||
7377 | #endif | ||
7378 | ) | ||
7379 | priv->staging_rxon.flags = 0; | ||
7380 | #endif /* CONFIG_IWLWIFI_HT */ | ||
7381 | |||
7382 | iwl_set_rxon_channel(priv, conf->phymode, conf->channel); | ||
7383 | |||
7384 | iwl_set_flags_for_phymode(priv, conf->phymode); | ||
7385 | |||
7386 | /* The list of supported rates and rate mask can be different | ||
7387 | * for each phymode; since the phymode may have changed, reset | ||
7388 | * the rate mask to what mac80211 lists */ | ||
7389 | iwl_set_rate(priv); | ||
7390 | |||
7391 | spin_unlock_irqrestore(&priv->lock, flags); | ||
7392 | |||
7393 | #ifdef IEEE80211_CONF_CHANNEL_SWITCH | ||
7394 | if (conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) { | ||
7395 | iwl_hw_channel_switch(priv, conf->channel); | ||
7396 | mutex_unlock(&priv->mutex); | ||
7397 | return 0; | ||
7398 | } | ||
7399 | #endif | ||
7400 | |||
7401 | iwl_radio_kill_sw(priv, !conf->radio_enabled); | ||
7402 | |||
7403 | if (!conf->radio_enabled) { | ||
7404 | IWL_DEBUG_MAC80211("leave - radio disabled\n"); | ||
7405 | mutex_unlock(&priv->mutex); | ||
7406 | return 0; | ||
7407 | } | ||
7408 | |||
7409 | if (iwl_is_rfkill(priv)) { | ||
7410 | IWL_DEBUG_MAC80211("leave - RF kill\n"); | ||
7411 | mutex_unlock(&priv->mutex); | ||
7412 | return -EIO; | ||
7413 | } | ||
7414 | |||
7415 | iwl_set_rate(priv); | ||
7416 | |||
7417 | if (memcmp(&priv->active_rxon, | ||
7418 | &priv->staging_rxon, sizeof(priv->staging_rxon))) | ||
7419 | iwl_commit_rxon(priv); | ||
7420 | else | ||
7421 | IWL_DEBUG_INFO("Not re-sending the same RXON configuration.\n"); | ||
7422 | |||
7423 | IWL_DEBUG_MAC80211("leave\n"); | ||
7424 | |||
7425 | mutex_unlock(&priv->mutex); | ||
7426 | |||
7427 | return 0; | ||
7428 | } | ||
7429 | |||
7430 | static void iwl_config_ap(struct iwl_priv *priv) | ||
7431 | { | ||
7432 | int rc = 0; | ||
7433 | |||
7434 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) | ||
7435 | return; | ||
7436 | |||
7437 | /* The following should be done only at AP bring up */ | ||
7438 | if ((priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) == 0) { | ||
7439 | |||
7440 | /* RXON - unassoc (to set timing command) */ | ||
7441 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
7442 | iwl_commit_rxon(priv); | ||
7443 | |||
7444 | /* RXON Timing */ | ||
7445 | memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd)); | ||
7446 | iwl_setup_rxon_timing(priv); | ||
7447 | rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, | ||
7448 | sizeof(priv->rxon_timing), &priv->rxon_timing); | ||
7449 | if (rc) | ||
7450 | IWL_WARNING("REPLY_RXON_TIMING failed - " | ||
7451 | "Attempting to continue.\n"); | ||
7452 | |||
7453 | iwl4965_set_rxon_chain(priv); | ||
7454 | |||
7455 | /* FIXME: what should be the assoc_id for AP? */ | ||
7456 | priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); | ||
7457 | if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) | ||
7458 | priv->staging_rxon.flags |= | ||
7459 | RXON_FLG_SHORT_PREAMBLE_MSK; | ||
7460 | else | ||
7461 | priv->staging_rxon.flags &= | ||
7462 | ~RXON_FLG_SHORT_PREAMBLE_MSK; | ||
7463 | |||
7464 | if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { | ||
7465 | if (priv->assoc_capability & | ||
7466 | WLAN_CAPABILITY_SHORT_SLOT_TIME) | ||
7467 | priv->staging_rxon.flags |= | ||
7468 | RXON_FLG_SHORT_SLOT_MSK; | ||
7469 | else | ||
7470 | priv->staging_rxon.flags &= | ||
7471 | ~RXON_FLG_SHORT_SLOT_MSK; | ||
7472 | |||
7473 | if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) | ||
7474 | priv->staging_rxon.flags &= | ||
7475 | ~RXON_FLG_SHORT_SLOT_MSK; | ||
7476 | } | ||
7477 | /* restore RXON assoc */ | ||
7478 | priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; | ||
7479 | iwl_commit_rxon(priv); | ||
7480 | #ifdef CONFIG_IWLWIFI_QOS | ||
7481 | iwl_activate_qos(priv, 1); | ||
7482 | #endif | ||
7483 | iwl_rxon_add_station(priv, BROADCAST_ADDR, 0); | ||
7484 | iwl_send_beacon_cmd(priv); | ||
7485 | } else | ||
7486 | iwl_send_beacon_cmd(priv); | ||
7487 | |||
7488 | /* FIXME - we need to add code here to detect a totally new | ||
7489 | * configuration, reset the AP, unassoc, rxon timing, assoc, | ||
7490 | * clear sta table, add BCAST sta... */ | ||
7491 | } | ||
7492 | |||
7493 | static int iwl_mac_config_interface(struct ieee80211_hw *hw, int if_id, | ||
7494 | struct ieee80211_if_conf *conf) | ||
7495 | { | ||
7496 | struct iwl_priv *priv = hw->priv; | ||
7497 | unsigned long flags; | ||
7498 | int rc; | ||
7499 | |||
7500 | if (conf == NULL) | ||
7501 | return -EIO; | ||
7502 | |||
7503 | if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && | ||
7504 | (!conf->beacon || !conf->ssid_len)) { | ||
7505 | IWL_DEBUG_MAC80211 | ||
7506 | ("Leaving in AP mode because HostAPD is not ready.\n"); | ||
7507 | return 0; | ||
7508 | } | ||
7509 | |||
7510 | mutex_lock(&priv->mutex); | ||
7511 | |||
7512 | IWL_DEBUG_MAC80211("enter: interface id %d\n", if_id); | ||
7513 | if (conf->bssid) | ||
7514 | IWL_DEBUG_MAC80211("bssid: " MAC_FMT "\n", | ||
7515 | MAC_ARG(conf->bssid)); | ||
7516 | |||
7517 | if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) && | ||
7518 | !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) { | ||
7519 | IWL_DEBUG_MAC80211("leave - scanning\n"); | ||
7520 | mutex_unlock(&priv->mutex); | ||
7521 | return 0; | ||
7522 | } | ||
7523 | |||
7524 | if (priv->interface_id != if_id) { | ||
7525 | IWL_DEBUG_MAC80211("leave - interface_id != if_id\n"); | ||
7526 | mutex_unlock(&priv->mutex); | ||
7527 | return 0; | ||
7528 | } | ||
7529 | |||
7530 | if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { | ||
7531 | if (!conf->bssid) { | ||
7532 | conf->bssid = priv->mac_addr; | ||
7533 | memcpy(priv->bssid, priv->mac_addr, ETH_ALEN); | ||
7534 | IWL_DEBUG_MAC80211("bssid was set to: " MAC_FMT "\n", | ||
7535 | MAC_ARG(conf->bssid)); | ||
7536 | } | ||
7537 | if (priv->ibss_beacon) | ||
7538 | dev_kfree_skb(priv->ibss_beacon); | ||
7539 | |||
7540 | priv->ibss_beacon = conf->beacon; | ||
7541 | } | ||
7542 | |||
7543 | if (conf->bssid && !is_zero_ether_addr(conf->bssid) && | ||
7544 | !is_multicast_ether_addr(conf->bssid)) { | ||
7545 | /* If there is currently a HW scan going on in the background, | ||
7546 | * then we need to cancel it or the RXON below will fail. */ | ||
7547 | if (iwl_scan_cancel_timeout(priv, 100)) { | ||
7548 | IWL_WARNING("Aborted scan still in progress " | ||
7549 | "after 100ms\n"); | ||
7550 | IWL_DEBUG_MAC80211("leaving - scan abort failed.\n"); | ||
7551 | mutex_unlock(&priv->mutex); | ||
7552 | return -EAGAIN; | ||
7553 | } | ||
7554 | memcpy(priv->staging_rxon.bssid_addr, conf->bssid, ETH_ALEN); | ||
7555 | |||
7556 | /* TODO: Audit driver for usage of these members and see | ||
7557 | * if mac80211 deprecates them (priv->bssid looks like it | ||
7558 | * shouldn't be there, but I haven't scanned the IBSS code | ||
7559 | * to verify) - jpk */ | ||
7560 | memcpy(priv->bssid, conf->bssid, ETH_ALEN); | ||
7561 | |||
7562 | if (priv->iw_mode == IEEE80211_IF_TYPE_AP) | ||
7563 | iwl_config_ap(priv); | ||
7564 | else { | ||
7565 | priv->staging_rxon.filter_flags |= | ||
7566 | RXON_FILTER_ASSOC_MSK; | ||
7567 | rc = iwl_commit_rxon(priv); | ||
7568 | if ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && rc) | ||
7569 | iwl_rxon_add_station( | ||
7570 | priv, priv->active_rxon.bssid_addr, 1); | ||
7571 | } | ||
7572 | |||
7573 | } else { | ||
7574 | priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; | ||
7575 | iwl_commit_rxon(priv); | ||
7576 | } | ||
7577 | |||
7578 | spin_lock_irqsave(&priv->lock, flags); | ||
7579 | if (!conf->ssid_len) | ||
7580 | memset(priv->essid, 0, IW_ESSID_MAX_SIZE); | ||
7581 | else | ||
7582 | memcpy(priv->essid, conf->ssid, conf->ssid_len); | ||
7583 | |||
7584 | priv->essid_len = conf->ssid_len; | ||
7585 | spin_unlock_irqrestore(&priv->lock, flags); | ||
7586 | |||
7587 | IWL_DEBUG_MAC80211("leave\n"); | ||
7588 | mutex_unlock(&priv->mutex); | ||
7589 | |||
7590 | return 0; | ||
7591 | } | ||
7592 | |||
7593 | static void iwl_mac_remove_interface(struct ieee80211_hw *hw, | ||
7594 | struct ieee80211_if_init_conf *conf) | ||
7595 | { | ||
7596 | struct iwl_priv *priv = hw->priv; | ||
7597 | |||
7598 | IWL_DEBUG_MAC80211("enter\n"); | ||
7599 | |||
7600 | mutex_lock(&priv->mutex); | ||
7601 | if (priv->interface_id == conf->if_id) { | ||
7602 | priv->interface_id = 0; | ||
7603 | memset(priv->bssid, 0, ETH_ALEN); | ||
7604 | memset(priv->essid, 0, IW_ESSID_MAX_SIZE); | ||
7605 | priv->essid_len = 0; | ||
7606 | } | ||
7607 | mutex_unlock(&priv->mutex); | ||
7608 | |||
7609 | IWL_DEBUG_MAC80211("leave\n"); | ||
7610 | |||
7611 | } | ||
7612 | |||
7613 | #define IWL_DELAY_NEXT_SCAN (HZ*2) | ||
7614 | static int iwl_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) | ||
7615 | { | ||
7616 | int rc = 0; | ||
7617 | unsigned long flags; | ||
7618 | struct iwl_priv *priv = hw->priv; | ||
7619 | |||
7620 | IWL_DEBUG_MAC80211("enter\n"); | ||
7621 | |||
7622 | spin_lock_irqsave(&priv->lock, flags); | ||
7623 | |||
7624 | if (!iwl_is_ready_rf(priv)) { | ||
7625 | rc = -EIO; | ||
7626 | IWL_DEBUG_MAC80211("leave - not ready or exit pending\n"); | ||
7627 | goto out_unlock; | ||
7628 | } | ||
7629 | |||
7630 | if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { /* APs don't scan */ | ||
7631 | rc = -EIO; | ||
7632 | IWL_ERROR("ERROR: APs don't scan\n"); | ||
7633 | goto out_unlock; | ||
7634 | } | ||
7635 | |||
7636 | /* if we just finished a scan, ask mac80211 to retry later */ | ||
7637 | if (priv->last_scan_jiffies && | ||
7638 | time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN, | ||
7639 | jiffies)) { | ||
7640 | rc = -EAGAIN; | ||
7641 | goto out_unlock; | ||
7642 | } | ||
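| /* A non-zero SSID length means mac80211 requested a directed scan | ||
|  * for that specific SSID; remember it so the scan request built | ||
|  * later can probe for it directly. */ | ||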
7643 | if (len) { | ||
7644 | IWL_DEBUG_SCAN("direct scan for " | ||
7645 | "%s [%d]\n ", | ||
7646 | iwl_escape_essid(ssid, len), (int)len); | ||
7647 | |||
7648 | priv->one_direct_scan = 1; | ||
7649 | priv->direct_ssid_len = (u8) | ||
7650 | min((u8) len, (u8) IW_ESSID_MAX_SIZE); | ||
7651 | memcpy(priv->direct_ssid, ssid, priv->direct_ssid_len); | ||
7652 | } | ||
7653 | |||
7654 | rc = iwl_scan_initiate(priv); | ||
7655 | |||
7656 | IWL_DEBUG_MAC80211("leave\n"); | ||
7657 | |||
7658 | out_unlock: | ||
7659 | spin_unlock_irqrestore(&priv->lock, flags); | ||
7660 | |||
7661 | return rc; | ||
7662 | } | ||
7663 | |||
7664 | static int iwl_mac_set_key(struct ieee80211_hw *hw, set_key_cmd cmd, | ||
7665 | const u8 *local_addr, const u8 *addr, | ||
7666 | struct ieee80211_key_conf *key) | ||
7667 | { | ||
7668 | struct iwl_priv *priv = hw->priv; | ||
7669 | int rc = 0; | ||
7670 | u8 sta_id; | ||
7671 | |||
7672 | IWL_DEBUG_MAC80211("enter\n"); | ||
7673 | |||
7674 | if (!iwl_param_hwcrypto) { | ||
7675 | IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n"); | ||
7676 | return -EOPNOTSUPP; | ||
7677 | } | ||
7678 | |||
7679 | if (is_zero_ether_addr(addr)) | ||
7680 | /* only support pairwise keys */ | ||
7681 | return -EOPNOTSUPP; | ||
7682 | |||
7683 | sta_id = iwl_hw_find_station(priv, addr); | ||
7684 | if (sta_id == IWL_INVALID_STATION) { | ||
7685 | IWL_DEBUG_MAC80211("leave - " MAC_FMT " not in station map.\n", | ||
7686 | MAC_ARG(addr)); | ||
7687 | return -EINVAL; | ||
7688 | } | ||
7689 | |||
7690 | mutex_lock(&priv->mutex); | ||
7691 | |||
7692 | switch (cmd) { | ||
7693 | case SET_KEY: | ||
7694 | rc = iwl_update_sta_key_info(priv, key, sta_id); | ||
7695 | if (!rc) { | ||
7696 | iwl_set_rxon_hwcrypto(priv, 1); | ||
7697 | iwl_commit_rxon(priv); | ||
7698 | key->hw_key_idx = sta_id; | ||
7699 | IWL_DEBUG_MAC80211("set_key success, using hwcrypto\n"); | ||
7700 | key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; | ||
7701 | } | ||
7702 | break; | ||
7703 | case DISABLE_KEY: | ||
7704 | rc = iwl_clear_sta_key_info(priv, sta_id); | ||
7705 | if (!rc) { | ||
7706 | iwl_set_rxon_hwcrypto(priv, 0); | ||
7707 | iwl_commit_rxon(priv); | ||
7708 | IWL_DEBUG_MAC80211("disable hwcrypto key\n"); | ||
7709 | } | ||
7710 | break; | ||
7711 | default: | ||
7712 | rc = -EINVAL; | ||
7713 | } | ||
7714 | |||
7715 | IWL_DEBUG_MAC80211("leave\n"); | ||
7716 | mutex_unlock(&priv->mutex); | ||
7717 | |||
7718 | return rc; | ||
7719 | } | ||
7720 | |||
7721 | static int iwl_mac_conf_tx(struct ieee80211_hw *hw, int queue, | ||
7722 | const struct ieee80211_tx_queue_params *params) | ||
7723 | { | ||
7724 | struct iwl_priv *priv = hw->priv; | ||
7725 | #ifdef CONFIG_IWLWIFI_QOS | ||
7726 | unsigned long flags; | ||
7727 | int q; | ||
7728 | #endif /* CONFIG_IWLWIFI_QOS */ | ||
7729 | |||
7730 | IWL_DEBUG_MAC80211("enter\n"); | ||
7731 | |||
7732 | if (!iwl_is_ready_rf(priv)) { | ||
7733 | IWL_DEBUG_MAC80211("leave - RF not ready\n"); | ||
7734 | return -EIO; | ||
7735 | } | ||
7736 | |||
7737 | if (queue >= AC_NUM) { | ||
7738 | IWL_DEBUG_MAC80211("leave - queue >= AC_NUM %d\n", queue); | ||
7739 | return 0; | ||
7740 | } | ||
7741 | |||
7742 | #ifdef CONFIG_IWLWIFI_QOS | ||
7743 | if (!priv->qos_data.qos_enable) { | ||
7744 | priv->qos_data.qos_active = 0; | ||
7745 | IWL_DEBUG_MAC80211("leave - qos not enabled\n"); | ||
7746 | return 0; | ||
7747 | } | ||
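| /* mac80211 hands us queue 0 as the highest-priority AC; the uCode | ||
|  * QOS parameter table appears to be indexed in the opposite order, | ||
|  * so mirror the index before filling in the EDCA parameters. */ | ||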
7748 | q = AC_NUM - 1 - queue; | ||
7749 | |||
7750 | spin_lock_irqsave(&priv->lock, flags); | ||
7751 | |||
7752 | priv->qos_data.def_qos_parm.ac[q].cw_min = cpu_to_le16(params->cw_min); | ||
7753 | priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max); | ||
7754 | priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; | ||
7755 | priv->qos_data.def_qos_parm.ac[q].edca_txop = | ||
7756 | cpu_to_le16((params->burst_time * 100)); | ||
7757 | |||
7758 | priv->qos_data.def_qos_parm.ac[q].reserved1 = 0; | ||
7759 | priv->qos_data.qos_active = 1; | ||
7760 | |||
7761 | spin_unlock_irqrestore(&priv->lock, flags); | ||
7762 | |||
7763 | mutex_lock(&priv->mutex); | ||
7764 | if (priv->iw_mode == IEEE80211_IF_TYPE_AP) | ||
7765 | iwl_activate_qos(priv, 1); | ||
7766 | else if (priv->assoc_id && iwl_is_associated(priv)) | ||
7767 | iwl_activate_qos(priv, 0); | ||
7768 | |||
7769 | mutex_unlock(&priv->mutex); | ||
7770 | |||
7771 | #endif /*CONFIG_IWLWIFI_QOS */ | ||
7772 | |||
7773 | IWL_DEBUG_MAC80211("leave\n"); | ||
7774 | return 0; | ||
7775 | } | ||
7776 | |||
7777 | static int iwl_mac_get_tx_stats(struct ieee80211_hw *hw, | ||
7778 | struct ieee80211_tx_queue_stats *stats) | ||
7779 | { | ||
7780 | struct iwl_priv *priv = hw->priv; | ||
7781 | int i, avail; | ||
7782 | struct iwl_tx_queue *txq; | ||
7783 | struct iwl_queue *q; | ||
7784 | unsigned long flags; | ||
7785 | |||
7786 | IWL_DEBUG_MAC80211("enter\n"); | ||
7787 | |||
7788 | if (!iwl_is_ready_rf(priv)) { | ||
7789 | IWL_DEBUG_MAC80211("leave - RF not ready\n"); | ||
7790 | return -EIO; | ||
7791 | } | ||
7792 | |||
7793 | spin_lock_irqsave(&priv->lock, flags); | ||
7794 | |||
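| /* For each AC report how many descriptors are currently in use | ||
|  * (window size minus free space), a limit derived from the queue's | ||
|  * high-water mark, and the total window size. */ | ||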
7795 | for (i = 0; i < AC_NUM; i++) { | ||
7796 | txq = &priv->txq[i]; | ||
7797 | q = &txq->q; | ||
7798 | avail = iwl_queue_space(q); | ||
7799 | |||
7800 | stats->data[i].len = q->n_window - avail; | ||
7801 | stats->data[i].limit = q->n_window - q->high_mark; | ||
7802 | stats->data[i].count = q->n_window; | ||
7803 | |||
7804 | } | ||
7805 | spin_unlock_irqrestore(&priv->lock, flags); | ||
7806 | |||
7807 | IWL_DEBUG_MAC80211("leave\n"); | ||
7808 | |||
7809 | return 0; | ||
7810 | } | ||
7811 | |||
7812 | static int iwl_mac_get_stats(struct ieee80211_hw *hw, | ||
7813 | struct ieee80211_low_level_stats *stats) | ||
7814 | { | ||
7815 | IWL_DEBUG_MAC80211("enter\n"); | ||
7816 | IWL_DEBUG_MAC80211("leave\n"); | ||
7817 | |||
7818 | return 0; | ||
7819 | } | ||
7820 | |||
7821 | static u64 iwl_mac_get_tsf(struct ieee80211_hw *hw) | ||
7822 | { | ||
7823 | IWL_DEBUG_MAC80211("enter\n"); | ||
7824 | IWL_DEBUG_MAC80211("leave\n"); | ||
7825 | |||
7826 | return 0; | ||
7827 | } | ||
7828 | |||
7829 | static void iwl_mac_reset_tsf(struct ieee80211_hw *hw) | ||
7830 | { | ||
7831 | struct iwl_priv *priv = hw->priv; | ||
7832 | unsigned long flags; | ||
7833 | |||
7834 | mutex_lock(&priv->mutex); | ||
7835 | IWL_DEBUG_MAC80211("enter\n"); | ||
7836 | |||
7837 | priv->lq_mngr.lq_ready = 0; | ||
7838 | #ifdef CONFIG_IWLWIFI_HT | ||
7839 | spin_lock_irqsave(&priv->lock, flags); | ||
7840 | memset(&priv->current_assoc_ht, 0, sizeof(struct sta_ht_info)); | ||
7841 | spin_unlock_irqrestore(&priv->lock, flags); | ||
7842 | #ifdef CONFIG_IWLWIFI_HT_AGG | ||
7843 | /* if (priv->lq_mngr.agg_ctrl.granted_ba) | ||
7844 | iwl4965_turn_off_agg(priv, TID_ALL_SPECIFIED);*/ | ||
7845 | |||
7846 | memset(&(priv->lq_mngr.agg_ctrl), 0, sizeof(struct iwl_agg_control)); | ||
7847 | priv->lq_mngr.agg_ctrl.tid_traffic_load_threshold = 10; | ||
7848 | priv->lq_mngr.agg_ctrl.ba_timeout = 5000; | ||
7849 | priv->lq_mngr.agg_ctrl.auto_agg = 1; | ||
7850 | |||
7851 | if (priv->lq_mngr.agg_ctrl.auto_agg) | ||
7852 | priv->lq_mngr.agg_ctrl.requested_ba = TID_ALL_ENABLED; | ||
7853 | #endif /*CONFIG_IWLWIFI_HT_AGG */ | ||
7854 | #endif /* CONFIG_IWLWIFI_HT */ | ||
7855 | |||
7856 | #ifdef CONFIG_IWLWIFI_QOS | ||
7857 | iwl_reset_qos(priv); | ||
7858 | #endif | ||
7859 | |||
7860 | cancel_delayed_work(&priv->post_associate); | ||
7861 | |||
7862 | spin_lock_irqsave(&priv->lock, flags); | ||
7863 | priv->assoc_id = 0; | ||
7864 | priv->assoc_capability = 0; | ||
7865 | priv->call_post_assoc_from_beacon = 0; | ||
7866 | priv->assoc_station_added = 0; | ||
7867 | |||
7868 | /* new association: get rid of the old IBSS beacon skb */ | ||
7869 | if (priv->ibss_beacon) | ||
7870 | dev_kfree_skb(priv->ibss_beacon); | ||
7871 | |||
7872 | priv->ibss_beacon = NULL; | ||
7873 | |||
7874 | priv->beacon_int = priv->hw->conf.beacon_int; | ||
7875 | priv->timestamp1 = 0; | ||
7876 | priv->timestamp0 = 0; | ||
7877 | if (priv->iw_mode == IEEE80211_IF_TYPE_STA) | ||
7878 | priv->beacon_int = 0; | ||
7879 | |||
7880 | spin_unlock_irqrestore(&priv->lock, flags); | ||
7881 | |||
7882 | /* Per mac80211.h: This is only used in IBSS mode... */ | ||
7883 | if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { | ||
7884 | IWL_DEBUG_MAC80211("leave - not in IBSS\n"); | ||
7885 | mutex_unlock(&priv->mutex); | ||
7886 | return; | ||
7887 | } | ||
7888 | |||
7889 | if (!iwl_is_ready_rf(priv)) { | ||
7890 | IWL_DEBUG_MAC80211("leave - not ready\n"); | ||
7891 | mutex_unlock(&priv->mutex); | ||
7892 | return; | ||
7893 | } | ||
7894 | |||
7895 | priv->only_active_channel = 0; | ||
7896 | |||
7897 | iwl_set_rate(priv); | ||
7898 | |||
7899 | mutex_unlock(&priv->mutex); | ||
7900 | |||
7901 | IWL_DEBUG_MAC80211("leave\n"); | ||
7902 | |||
7903 | } | ||
7904 | |||
7905 | static int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, | ||
7906 | struct ieee80211_tx_control *control) | ||
7907 | { | ||
7908 | struct iwl_priv *priv = hw->priv; | ||
7909 | unsigned long flags; | ||
7910 | |||
7911 | mutex_lock(&priv->mutex); | ||
7912 | IWL_DEBUG_MAC80211("enter\n"); | ||
7913 | |||
7914 | if (!iwl_is_ready_rf(priv)) { | ||
7915 | IWL_DEBUG_MAC80211("leave - RF not ready\n"); | ||
7916 | mutex_unlock(&priv->mutex); | ||
7917 | return -EIO; | ||
7918 | } | ||
7919 | |||
7920 | if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) { | ||
7921 | IWL_DEBUG_MAC80211("leave - not IBSS\n"); | ||
7922 | mutex_unlock(&priv->mutex); | ||
7923 | return -EIO; | ||
7924 | } | ||
7925 | |||
7926 | spin_lock_irqsave(&priv->lock, flags); | ||
7927 | |||
7928 | if (priv->ibss_beacon) | ||
7929 | dev_kfree_skb(priv->ibss_beacon); | ||
7930 | |||
7931 | priv->ibss_beacon = skb; | ||
7932 | |||
7933 | priv->assoc_id = 0; | ||
7934 | |||
7935 | IWL_DEBUG_MAC80211("leave\n"); | ||
7936 | spin_unlock_irqrestore(&priv->lock, flags); | ||
7937 | |||
7938 | #ifdef CONFIG_IWLWIFI_QOS | ||
7939 | iwl_reset_qos(priv); | ||
7940 | #endif | ||
7941 | |||
7942 | queue_work(priv->workqueue, &priv->post_associate.work); | ||
7943 | |||
7944 | mutex_unlock(&priv->mutex); | ||
7945 | |||
7946 | return 0; | ||
7947 | } | ||
7948 | |||
7949 | #ifdef CONFIG_IWLWIFI_HT | ||
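| /* Bitfield overlays of the 802.11n HT information element fields, so | ||
|  * individual capability bits can be read and written by name while the | ||
|  * 'val' member carries the raw on-air value. */ | ||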
7950 | union ht_cap_info { | ||
7951 | struct { | ||
7952 | u16 advanced_coding_cap :1; | ||
7953 | u16 supported_chan_width_set :1; | ||
7954 | u16 mimo_power_save_mode :2; | ||
7955 | u16 green_field :1; | ||
7956 | u16 short_GI20 :1; | ||
7957 | u16 short_GI40 :1; | ||
7958 | u16 tx_stbc :1; | ||
7959 | u16 rx_stbc :1; | ||
7960 | u16 beam_forming :1; | ||
7961 | u16 delayed_ba :1; | ||
7962 | u16 maximal_amsdu_size :1; | ||
7963 | u16 cck_mode_at_40MHz :1; | ||
7964 | u16 psmp_support :1; | ||
7965 | u16 stbc_ctrl_frame_support :1; | ||
7966 | u16 sig_txop_protection_support :1; | ||
7967 | }; | ||
7968 | u16 val; | ||
7969 | } __attribute__ ((packed)); | ||
7970 | |||
7971 | union ht_param_info { | ||
7972 | struct { | ||
7973 | u8 max_rx_ampdu_factor :2; | ||
7974 | u8 mpdu_density :3; | ||
7975 | u8 reserved :3; | ||
7976 | }; | ||
7977 | u8 val; | ||
7978 | } __attribute__ ((packed)); | ||
7979 | |||
7980 | union ht_exra_param_info { | ||
7981 | struct { | ||
7982 | u8 ext_chan_offset :2; | ||
7983 | u8 tx_chan_width :1; | ||
7984 | u8 rifs_mode :1; | ||
7985 | u8 controlled_access_only :1; | ||
7986 | u8 service_interval_granularity :3; | ||
7987 | }; | ||
7988 | u8 val; | ||
7989 | } __attribute__ ((packed)); | ||
7990 | |||
7991 | union ht_operation_mode { | ||
7992 | struct { | ||
7993 | u16 op_mode :2; | ||
7994 | u16 non_GF :1; | ||
7995 | u16 reserved :13; | ||
7996 | }; | ||
7997 | u16 val; | ||
7998 | } __attribute__ ((packed)); | ||
7999 | |||
8000 | |||
8001 | static int sta_ht_info_init(struct ieee80211_ht_capability *ht_cap, | ||
8002 | struct ieee80211_ht_additional_info *ht_extra, | ||
8003 | struct sta_ht_info *ht_info_ap, | ||
8004 | struct sta_ht_info *ht_info) | ||
8005 | { | ||
8006 | union ht_cap_info cap; | ||
8007 | union ht_operation_mode op_mode; | ||
8008 | union ht_param_info param_info; | ||
8009 | union ht_exra_param_info extra_param_info; | ||
8010 | |||
8011 | IWL_DEBUG_MAC80211("enter: \n"); | ||
8012 | |||
8013 | if (!ht_info) { | ||
8014 | IWL_DEBUG_MAC80211("leave: ht_info is NULL\n"); | ||
8015 | return -1; | ||
8016 | } | ||
8017 | |||
8018 | if (ht_cap) { | ||
8019 | cap.val = (u16) le16_to_cpu(ht_cap->capabilities_info); | ||
8020 | param_info.val = ht_cap->mac_ht_params_info; | ||
8021 | ht_info->is_ht = 1; | ||
8022 | if (cap.short_GI20) | ||
8023 | ht_info->sgf |= 0x1; | ||
8024 | if (cap.short_GI40) | ||
8025 | ht_info->sgf |= 0x2; | ||
8026 | ht_info->is_green_field = cap.green_field; | ||
8027 | ht_info->max_amsdu_size = cap.maximal_amsdu_size; | ||
8028 | ht_info->supported_chan_width = cap.supported_chan_width_set; | ||
8029 | ht_info->tx_mimo_ps_mode = cap.mimo_power_save_mode; | ||
8030 | memcpy(ht_info->supp_rates, ht_cap->supported_mcs_set, 16); | ||
8031 | |||
8032 | ht_info->ampdu_factor = param_info.max_rx_ampdu_factor; | ||
8033 | ht_info->mpdu_density = param_info.mpdu_density; | ||
8034 | |||
8035 | IWL_DEBUG_MAC80211("SISO mask 0x%X MIMO mask 0x%X \n", | ||
8036 | ht_cap->supported_mcs_set[0], | ||
8037 | ht_cap->supported_mcs_set[1]); | ||
8038 | |||
8039 | if (ht_info_ap) { | ||
8040 | ht_info->control_channel = ht_info_ap->control_channel; | ||
8041 | ht_info->extension_chan_offset = | ||
8042 | ht_info_ap->extension_chan_offset; | ||
8043 | ht_info->tx_chan_width = ht_info_ap->tx_chan_width; | ||
8044 | ht_info->operating_mode = ht_info_ap->operating_mode; | ||
8045 | } | ||
8046 | |||
8047 | if (ht_extra) { | ||
8048 | extra_param_info.val = ht_extra->ht_param; | ||
8049 | ht_info->control_channel = ht_extra->control_chan; | ||
8050 | ht_info->extension_chan_offset = | ||
8051 | extra_param_info.ext_chan_offset; | ||
8052 | ht_info->tx_chan_width = extra_param_info.tx_chan_width; | ||
8053 | op_mode.val = (u16) | ||
8054 | le16_to_cpu(ht_extra->operation_mode); | ||
8055 | ht_info->operating_mode = op_mode.op_mode; | ||
8056 | IWL_DEBUG_MAC80211("control channel %d\n", | ||
8057 | ht_extra->control_chan); | ||
8058 | } | ||
8059 | } else | ||
8060 | ht_info->is_ht = 0; | ||
8061 | |||
8062 | IWL_DEBUG_MAC80211("leave\n"); | ||
8063 | return 0; | ||
8064 | } | ||
8065 | |||
8066 | static int iwl_mac_conf_ht(struct ieee80211_hw *hw, | ||
8067 | struct ieee80211_ht_capability *ht_cap, | ||
8068 | struct ieee80211_ht_additional_info *ht_extra) | ||
8069 | { | ||
8070 | struct iwl_priv *priv = hw->priv; | ||
8071 | int rs; | ||
8072 | |||
8073 | IWL_DEBUG_MAC80211("enter: \n"); | ||
8074 | |||
8075 | rs = sta_ht_info_init(ht_cap, ht_extra, NULL, &priv->current_assoc_ht); | ||
8076 | iwl4965_set_rxon_chain(priv); | ||
8077 | |||
8078 | if (priv && priv->assoc_id && | ||
8079 | (priv->iw_mode == IEEE80211_IF_TYPE_STA)) { | ||
8080 | unsigned long flags; | ||
8081 | |||
8082 | spin_lock_irqsave(&priv->lock, flags); | ||
8083 | if (priv->beacon_int) | ||
8084 | queue_work(priv->workqueue, &priv->post_associate.work); | ||
8085 | else | ||
8086 | priv->call_post_assoc_from_beacon = 1; | ||
8087 | spin_unlock_irqrestore(&priv->lock, flags); | ||
8088 | } | ||
8089 | |||
8090 | IWL_DEBUG_MAC80211("leave: control channel %d\n", | ||
8091 | ht_extra->control_chan); | ||
8092 | return rs; | ||
8093 | |||
8094 | } | ||
8095 | |||
8096 | static void iwl_set_ht_capab(struct ieee80211_hw *hw, | ||
8097 | struct ieee80211_ht_capability *ht_cap, | ||
8098 | u8 use_wide_chan) | ||
8099 | { | ||
8100 | union ht_cap_info cap; | ||
8101 | union ht_param_info param_info; | ||
8102 | |||
8103 | memset(&cap, 0, sizeof(union ht_cap_info)); | ||
8104 | memset(¶m_info, 0, sizeof(union ht_param_info)); | ||
8105 | |||
8106 | cap.maximal_amsdu_size = HT_IE_MAX_AMSDU_SIZE_4K; | ||
8107 | cap.green_field = 1; | ||
8108 | cap.short_GI20 = 1; | ||
8109 | cap.short_GI40 = 1; | ||
8110 | cap.supported_chan_width_set = use_wide_chan; | ||
8111 | cap.mimo_power_save_mode = 0x3; | ||
8112 | |||
8113 | param_info.max_rx_ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; | ||
8114 | param_info.mpdu_density = CFG_HT_MPDU_DENSITY_DEF; | ||
8115 | ht_cap->capabilities_info = (__le16) cpu_to_le16(cap.val); | ||
8116 | ht_cap->mac_ht_params_info = (u8) param_info.val; | ||
8117 | |||
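| /* Bytes 0 and 1 of the Rx MCS bitmask advertise MCS 0-15 (one and two | ||
|  * spatial streams); byte 4 bit 0 presumably corresponds to MCS 32, the | ||
|  * 40 MHz duplicate format, so it is only set when wide channels are in | ||
|  * use. */ | ||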
8118 | ht_cap->supported_mcs_set[0] = 0xff; | ||
8119 | ht_cap->supported_mcs_set[1] = 0xff; | ||
8120 | ht_cap->supported_mcs_set[4] = | ||
8121 | (cap.supported_chan_width_set) ? 0x1: 0x0; | ||
8122 | } | ||
8123 | |||
8124 | static void iwl_mac_get_ht_capab(struct ieee80211_hw *hw, | ||
8125 | struct ieee80211_ht_capability *ht_cap) | ||
8126 | { | ||
8127 | u8 use_wide_channel = 1; | ||
8128 | struct iwl_priv *priv = hw->priv; | ||
8129 | |||
8130 | IWL_DEBUG_MAC80211("enter: \n"); | ||
8131 | if (priv->channel_width != IWL_CHANNEL_WIDTH_40MHZ) | ||
8132 | use_wide_channel = 0; | ||
8133 | |||
8134 | /* no fat (40 MHz) tx allowed on 2.4 GHz */ | ||
8135 | if (priv->phymode != MODE_IEEE80211A) | ||
8136 | use_wide_channel = 0; | ||
8137 | |||
8138 | iwl_set_ht_capab(hw, ht_cap, use_wide_channel); | ||
8139 | IWL_DEBUG_MAC80211("leave: \n"); | ||
8140 | } | ||
8141 | #endif /*CONFIG_IWLWIFI_HT*/ | ||
8142 | |||
8143 | /***************************************************************************** | ||
8144 | * | ||
8145 | * sysfs attributes | ||
8146 | * | ||
8147 | *****************************************************************************/ | ||
8148 | |||
8149 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
8150 | |||
8151 | /* | ||
8152 | * The following adds a new attribute to the sysfs representation | ||
8153 | * of this device driver (i.e. a new file in /sys/bus/pci/drivers/<DRV_NAME>/) | ||
8154 | * used for controlling the debug level. | ||
8155 | * | ||
8156 | * See the level definitions in iwl for details. | ||
8157 | */ | ||
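| /* Example (the directory name comes from DRV_NAME, e.g. "iwl4965"): | ||
|  *   cat /sys/bus/pci/drivers/iwl4965/debug_level | ||
|  *   echo 0x1 > /sys/bus/pci/drivers/iwl4965/debug_level | ||
|  * where the value written is a bitmask of the debug level flags | ||
|  * (the IWL_DL_* definitions). */ | ||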
8158 | |||
8159 | static ssize_t show_debug_level(struct device_driver *d, char *buf) | ||
8160 | { | ||
8161 | return sprintf(buf, "0x%08X\n", iwl_debug_level); | ||
8162 | } | ||
8163 | static ssize_t store_debug_level(struct device_driver *d, | ||
8164 | const char *buf, size_t count) | ||
8165 | { | ||
8166 | char *p = (char *)buf; | ||
8167 | u32 val; | ||
8168 | |||
8169 | val = simple_strtoul(p, &p, 0); | ||
8170 | if (p == buf) | ||
8171 | printk(KERN_INFO DRV_NAME | ||
8172 | ": %s is not in hex or decimal form.\n", buf); | ||
8173 | else | ||
8174 | iwl_debug_level = val; | ||
8175 | |||
8176 | return strnlen(buf, count); | ||
8177 | } | ||
8178 | |||
8179 | static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, | ||
8180 | show_debug_level, store_debug_level); | ||
8181 | |||
8182 | #endif /* CONFIG_IWLWIFI_DEBUG */ | ||
8183 | |||
8184 | static ssize_t show_rf_kill(struct device *d, | ||
8185 | struct device_attribute *attr, char *buf) | ||
8186 | { | ||
8187 | /* | ||
8188 | * 0 - RF kill not enabled | ||
8189 | * 1 - SW based RF kill active (sysfs) | ||
8190 | * 2 - HW based RF kill active | ||
8191 | * 3 - Both HW and SW based RF kill active | ||
8192 | */ | ||
8193 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
8194 | int val = (test_bit(STATUS_RF_KILL_SW, &priv->status) ? 0x1 : 0x0) | | ||
8195 | (test_bit(STATUS_RF_KILL_HW, &priv->status) ? 0x2 : 0x0); | ||
8196 | |||
8197 | return sprintf(buf, "%i\n", val); | ||
8198 | } | ||
8199 | |||
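| /* Writing '1' engages the software RF kill switch, anything else | ||
|  * releases it; the combined SW/HW state can be read back as | ||
|  * described above. */ | ||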
8200 | static ssize_t store_rf_kill(struct device *d, | ||
8201 | struct device_attribute *attr, | ||
8202 | const char *buf, size_t count) | ||
8203 | { | ||
8204 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
8205 | |||
8206 | mutex_lock(&priv->mutex); | ||
8207 | iwl_radio_kill_sw(priv, buf[0] == '1'); | ||
8208 | mutex_unlock(&priv->mutex); | ||
8209 | |||
8210 | return count; | ||
8211 | } | ||
8212 | |||
8213 | static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill); | ||
8214 | |||
8215 | static ssize_t show_temperature(struct device *d, | ||
8216 | struct device_attribute *attr, char *buf) | ||
8217 | { | ||
8218 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
8219 | |||
8220 | if (!iwl_is_alive(priv)) | ||
8221 | return -EAGAIN; | ||
8222 | |||
8223 | return sprintf(buf, "%d\n", iwl_hw_get_temperature(priv)); | ||
8224 | } | ||
8225 | |||
8226 | static DEVICE_ATTR(temperature, S_IRUGO, show_temperature, NULL); | ||
8227 | |||
8228 | static ssize_t show_rs_window(struct device *d, | ||
8229 | struct device_attribute *attr, | ||
8230 | char *buf) | ||
8231 | { | ||
8232 | struct iwl_priv *priv = d->driver_data; | ||
8233 | return iwl_fill_rs_info(priv->hw, buf, IWL_AP_ID); | ||
8234 | } | ||
8235 | static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL); | ||
8236 | |||
8237 | static ssize_t show_tx_power(struct device *d, | ||
8238 | struct device_attribute *attr, char *buf) | ||
8239 | { | ||
8240 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
8241 | return sprintf(buf, "%d\n", priv->user_txpower_limit); | ||
8242 | } | ||
8243 | |||
8244 | static ssize_t store_tx_power(struct device *d, | ||
8245 | struct device_attribute *attr, | ||
8246 | const char *buf, size_t count) | ||
8247 | { | ||
8248 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
8249 | char *p = (char *)buf; | ||
8250 | u32 val; | ||
8251 | |||
8252 | val = simple_strtoul(p, &p, 10); | ||
8253 | if (p == buf) | ||
8254 | printk(KERN_INFO DRV_NAME | ||
8255 | ": %s is not in decimal form.\n", buf); | ||
8256 | else | ||
8257 | iwl_hw_reg_set_txpower(priv, val); | ||
8258 | |||
8259 | return count; | ||
8260 | } | ||
8261 | |||
8262 | static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power); | ||
8263 | |||
8264 | static ssize_t show_flags(struct device *d, | ||
8265 | struct device_attribute *attr, char *buf) | ||
8266 | { | ||
8267 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
8268 | |||
8269 | return sprintf(buf, "0x%04X\n", priv->active_rxon.flags); | ||
8270 | } | ||
8271 | |||
8272 | static ssize_t store_flags(struct device *d, | ||
8273 | struct device_attribute *attr, | ||
8274 | const char *buf, size_t count) | ||
8275 | { | ||
8276 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
8277 | u32 flags = simple_strtoul(buf, NULL, 0); | ||
8278 | |||
8279 | mutex_lock(&priv->mutex); | ||
8280 | if (le32_to_cpu(priv->staging_rxon.flags) != flags) { | ||
8281 | /* Cancel any currently running scans... */ | ||
8282 | if (iwl_scan_cancel_timeout(priv, 100)) | ||
8283 | IWL_WARNING("Could not cancel scan.\n"); | ||
8284 | else { | ||
8285 | IWL_DEBUG_INFO("Committing rxon.flags = 0x%04X\n", | ||
8286 | flags); | ||
8287 | priv->staging_rxon.flags = cpu_to_le32(flags); | ||
8288 | iwl_commit_rxon(priv); | ||
8289 | } | ||
8290 | } | ||
8291 | mutex_unlock(&priv->mutex); | ||
8292 | |||
8293 | return count; | ||
8294 | } | ||
8295 | |||
8296 | static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags); | ||
8297 | |||
8298 | static ssize_t show_filter_flags(struct device *d, | ||
8299 | struct device_attribute *attr, char *buf) | ||
8300 | { | ||
8301 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
8302 | |||
8303 | return sprintf(buf, "0x%04X\n", | ||
8304 | le32_to_cpu(priv->active_rxon.filter_flags)); | ||
8305 | } | ||
8306 | |||
8307 | static ssize_t store_filter_flags(struct device *d, | ||
8308 | struct device_attribute *attr, | ||
8309 | const char *buf, size_t count) | ||
8310 | { | ||
8311 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
8312 | u32 filter_flags = simple_strtoul(buf, NULL, 0); | ||
8313 | |||
8314 | mutex_lock(&priv->mutex); | ||
8315 | if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) { | ||
8316 | /* Cancel any currently running scans... */ | ||
8317 | if (iwl_scan_cancel_timeout(priv, 100)) | ||
8318 | IWL_WARNING("Could not cancel scan.\n"); | ||
8319 | else { | ||
8320 | IWL_DEBUG_INFO("Committing rxon.filter_flags = " | ||
8321 | "0x%04X\n", filter_flags); | ||
8322 | priv->staging_rxon.filter_flags = | ||
8323 | cpu_to_le32(filter_flags); | ||
8324 | iwl_commit_rxon(priv); | ||
8325 | } | ||
8326 | } | ||
8327 | mutex_unlock(&priv->mutex); | ||
8328 | |||
8329 | return count; | ||
8330 | } | ||
8331 | |||
8332 | static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, | ||
8333 | store_filter_flags); | ||
8334 | |||
8335 | static ssize_t show_tune(struct device *d, | ||
8336 | struct device_attribute *attr, char *buf) | ||
8337 | { | ||
8338 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
8339 | |||
8340 | return sprintf(buf, "0x%04X\n", | ||
8341 | (priv->phymode << 8) | | ||
8342 | le16_to_cpu(priv->active_rxon.channel)); | ||
8343 | } | ||
8344 | |||
8345 | static void iwl_set_flags_for_phymode(struct iwl_priv *priv, u8 phymode); | ||
8346 | |||
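| /* The tune value packs the phymode in the high byte and the channel | ||
|  * number in the low byte, matching what show_tune() reports; e.g. | ||
|  * writing 0x0206 would request channel 6 in phymode 2 (assumed here | ||
|  * to be MODE_IEEE80211G). */ | ||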
8347 | static ssize_t store_tune(struct device *d, | ||
8348 | struct device_attribute *attr, | ||
8349 | const char *buf, size_t count) | ||
8350 | { | ||
8351 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
8352 | char *p = (char *)buf; | ||
8353 | u16 tune = simple_strtoul(p, &p, 0); | ||
8354 | u8 phymode = (tune >> 8) & 0xff; | ||
8355 | u16 channel = tune & 0xff; | ||
8356 | |||
8357 | IWL_DEBUG_INFO("Tune request to:%d channel:%d\n", phymode, channel); | ||
8358 | |||
8359 | mutex_lock(&priv->mutex); | ||
8360 | if ((le16_to_cpu(priv->staging_rxon.channel) != channel) || | ||
8361 | (priv->phymode != phymode)) { | ||
8362 | const struct iwl_channel_info *ch_info; | ||
8363 | |||
8364 | ch_info = iwl_get_channel_info(priv, phymode, channel); | ||
8365 | if (!ch_info) { | ||
8366 | IWL_WARNING("Requested invalid phymode/channel " | ||
8367 | "combination: %d %d\n", phymode, channel); | ||
8368 | mutex_unlock(&priv->mutex); | ||
8369 | return -EINVAL; | ||
8370 | } | ||
8371 | |||
8372 | /* Cancel any currently running scans... */ | ||
8373 | if (iwl_scan_cancel_timeout(priv, 100)) | ||
8374 | IWL_WARNING("Could not cancel scan.\n"); | ||
8375 | else { | ||
8376 | IWL_DEBUG_INFO("Committing phymode and " | ||
8377 | "rxon.channel = %d %d\n", | ||
8378 | phymode, channel); | ||
8379 | |||
8380 | iwl_set_rxon_channel(priv, phymode, channel); | ||
8381 | iwl_set_flags_for_phymode(priv, phymode); | ||
8382 | |||
8383 | iwl_set_rate(priv); | ||
8384 | iwl_commit_rxon(priv); | ||
8385 | } | ||
8386 | } | ||
8387 | mutex_unlock(&priv->mutex); | ||
8388 | |||
8389 | return count; | ||
8390 | } | ||
8391 | |||
8392 | static DEVICE_ATTR(tune, S_IWUSR | S_IRUGO, show_tune, store_tune); | ||
8393 | |||
8394 | #ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT | ||
8395 | |||
8396 | static ssize_t show_measurement(struct device *d, | ||
8397 | struct device_attribute *attr, char *buf) | ||
8398 | { | ||
8399 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
8400 | struct iwl_spectrum_notification measure_report; | ||
8401 | u32 size = sizeof(measure_report), len = 0, ofs = 0; | ||
8402 | u8 *data = (u8 *)&measure_report; | ||
8403 | unsigned long flags; | ||
8404 | |||
8405 | spin_lock_irqsave(&priv->lock, flags); | ||
8406 | if (!(priv->measurement_status & MEASUREMENT_READY)) { | ||
8407 | spin_unlock_irqrestore(&priv->lock, flags); | ||
8408 | return 0; | ||
8409 | } | ||
8410 | memcpy(&measure_report, &priv->measure_report, size); | ||
8411 | priv->measurement_status = 0; | ||
8412 | spin_unlock_irqrestore(&priv->lock, flags); | ||
8413 | |||
8414 | while (size && (PAGE_SIZE - len)) { | ||
8415 | hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, | ||
8416 | PAGE_SIZE - len, 1); | ||
8417 | len = strlen(buf); | ||
8418 | if (PAGE_SIZE - len) | ||
8419 | buf[len++] = '\n'; | ||
8420 | |||
8421 | ofs += 16; | ||
8422 | size -= min(size, 16U); | ||
8423 | } | ||
8424 | |||
8425 | return len; | ||
8426 | } | ||
8427 | |||
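| /* Input is a channel number optionally followed by a measurement type | ||
|  * (e.g. "7 0"); with an empty or zero channel the current RXON channel | ||
|  * is kept, and the type defaults to IWL_MEASURE_BASIC. */ | ||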
8428 | static ssize_t store_measurement(struct device *d, | ||
8429 | struct device_attribute *attr, | ||
8430 | const char *buf, size_t count) | ||
8431 | { | ||
8432 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
8433 | struct ieee80211_measurement_params params = { | ||
8434 | .channel = le16_to_cpu(priv->active_rxon.channel), | ||
8435 | .start_time = cpu_to_le64(priv->last_tsf), | ||
8436 | .duration = cpu_to_le16(1), | ||
8437 | }; | ||
8438 | u8 type = IWL_MEASURE_BASIC; | ||
8439 | u8 buffer[32]; | ||
8440 | u8 channel; | ||
8441 | |||
8442 | if (count) { | ||
8443 | char *p = buffer; | ||
8444 | strncpy(buffer, buf, min(sizeof(buffer), count)); | ||
8445 | channel = simple_strtoul(p, NULL, 0); | ||
8446 | if (channel) | ||
8447 | params.channel = channel; | ||
8448 | |||
8449 | p = buffer; | ||
8450 | while (*p && *p != ' ') | ||
8451 | p++; | ||
8452 | if (*p) | ||
8453 | type = simple_strtoul(p + 1, NULL, 0); | ||
8454 | } | ||
8455 | |||
8456 | IWL_DEBUG_INFO("Invoking measurement of type %d on " | ||
8457 | "channel %d (for '%s')\n", type, params.channel, buf); | ||
8458 | iwl_get_measurement(priv, ¶ms, type); | ||
8459 | |||
8460 | return count; | ||
8461 | } | ||
8462 | |||
8463 | static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, | ||
8464 | show_measurement, store_measurement); | ||
8465 | #endif /* CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT */ | ||
8466 | |||
8467 | static ssize_t store_retry_rate(struct device *d, | ||
8468 | struct device_attribute *attr, | ||
8469 | const char *buf, size_t count) | ||
8470 | { | ||
8471 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
8472 | |||
8473 | priv->retry_rate = simple_strtoul(buf, NULL, 0); | ||
8474 | if (priv->retry_rate <= 0) | ||
8475 | priv->retry_rate = 1; | ||
8476 | |||
8477 | return count; | ||
8478 | } | ||
8479 | |||
8480 | static ssize_t show_retry_rate(struct device *d, | ||
8481 | struct device_attribute *attr, char *buf) | ||
8482 | { | ||
8483 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
8484 | return sprintf(buf, "%d", priv->retry_rate); | ||
8485 | } | ||
8486 | |||
8487 | static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, show_retry_rate, | ||
8488 | store_retry_rate); | ||
8489 | |||
8490 | static ssize_t store_power_level(struct device *d, | ||
8491 | struct device_attribute *attr, | ||
8492 | const char *buf, size_t count) | ||
8493 | { | ||
8494 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
8495 | int rc; | ||
8496 | int mode; | ||
8497 | |||
8498 | mode = simple_strtoul(buf, NULL, 0); | ||
8499 | mutex_lock(&priv->mutex); | ||
8500 | |||
8501 | if (!iwl_is_ready(priv)) { | ||
8502 | rc = -EAGAIN; | ||
8503 | goto out; | ||
8504 | } | ||
8505 | |||
8506 | if ((mode < 1) || (mode > IWL_POWER_LIMIT) || (mode == IWL_POWER_AC)) | ||
8507 | mode = IWL_POWER_AC; | ||
8508 | else | ||
8509 | mode |= IWL_POWER_ENABLED; | ||
8510 | |||
8511 | if (mode != priv->power_mode) { | ||
8512 | rc = iwl_send_power_mode(priv, IWL_POWER_LEVEL(mode)); | ||
8513 | if (rc) { | ||
8514 | IWL_DEBUG_MAC80211("failed setting power mode.\n"); | ||
8515 | goto out; | ||
8516 | } | ||
8517 | priv->power_mode = mode; | ||
8518 | } | ||
8519 | |||
8520 | rc = count; | ||
8521 | |||
8522 | out: | ||
8523 | mutex_unlock(&priv->mutex); | ||
8524 | return rc; | ||
8525 | } | ||
8526 | |||
8527 | #define MAX_WX_STRING 80 | ||
8528 | |||
8529 | /* Values are in microseconds */ | ||
8530 | static const s32 timeout_duration[] = { | ||
8531 | 350000, | ||
8532 | 250000, | ||
8533 | 75000, | ||
8534 | 37000, | ||
8535 | 25000, | ||
8536 | }; | ||
8537 | static const s32 period_duration[] = { | ||
8538 | 400000, | ||
8539 | 700000, | ||
8540 | 1000000, | ||
8541 | 1000000, | ||
8542 | 1000000 | ||
8543 | }; | ||
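| /* Indexed by power level: level N reports timeout_duration[N - 1] and | ||
|  * period_duration[N - 1] in show_power_level() below. */ | ||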
8544 | |||
8545 | static ssize_t show_power_level(struct device *d, | ||
8546 | struct device_attribute *attr, char *buf) | ||
8547 | { | ||
8548 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
8549 | int level = IWL_POWER_LEVEL(priv->power_mode); | ||
8550 | char *p = buf; | ||
8551 | |||
8552 | p += sprintf(p, "%d ", level); | ||
8553 | switch (level) { | ||
8554 | case IWL_POWER_MODE_CAM: | ||
8555 | case IWL_POWER_AC: | ||
8556 | p += sprintf(p, "(AC)"); | ||
8557 | break; | ||
8558 | case IWL_POWER_BATTERY: | ||
8559 | p += sprintf(p, "(BATTERY)"); | ||
8560 | break; | ||
8561 | default: | ||
8562 | p += sprintf(p, | ||
8563 | "(Timeout %dms, Period %dms)", | ||
8564 | timeout_duration[level - 1] / 1000, | ||
8565 | period_duration[level - 1] / 1000); | ||
8566 | } | ||
8567 | |||
8568 | if (!(priv->power_mode & IWL_POWER_ENABLED)) | ||
8569 | p += sprintf(p, " OFF\n"); | ||
8570 | else | ||
8571 | p += sprintf(p, " \n"); | ||
8572 | |||
8573 | return (p - buf + 1); | ||
8574 | |||
8575 | } | ||
8576 | |||
8577 | static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level, | ||
8578 | store_power_level); | ||
8579 | |||
8580 | static ssize_t show_channels(struct device *d, | ||
8581 | struct device_attribute *attr, char *buf) | ||
8582 | { | ||
8583 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
8584 | int len = 0, i; | ||
8585 | struct ieee80211_channel *channels = NULL; | ||
8586 | const struct ieee80211_hw_mode *hw_mode = NULL; | ||
8587 | int count = 0; | ||
8588 | |||
8589 | if (!iwl_is_ready(priv)) | ||
8590 | return -EAGAIN; | ||
8591 | |||
8592 | hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211G); | ||
8593 | if (!hw_mode) | ||
8594 | hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211B); | ||
8595 | if (hw_mode) { | ||
8596 | channels = hw_mode->channels; | ||
8597 | count = hw_mode->num_channels; | ||
8598 | } | ||
8599 | |||
8600 | len += | ||
8601 | sprintf(&buf[len], | ||
8602 | "Displaying %d channels in 2.4GHz band " | ||
8603 | "(802.11bg):\n", count); | ||
8604 | |||
8605 | for (i = 0; i < count; i++) | ||
8606 | len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n", | ||
8607 | channels[i].chan, | ||
8608 | channels[i].power_level, | ||
8609 | channels[i]. | ||
8610 | flag & IEEE80211_CHAN_W_RADAR_DETECT ? | ||
8611 | " (IEEE 802.11h required)" : "", | ||
8612 | (!(channels[i].flag & IEEE80211_CHAN_W_IBSS) | ||
8613 | || (channels[i]. | ||
8614 | flag & | ||
8615 | IEEE80211_CHAN_W_RADAR_DETECT)) ? "" : | ||
8616 | ", IBSS", | ||
8617 | channels[i]. | ||
8618 | flag & IEEE80211_CHAN_W_ACTIVE_SCAN ? | ||
8619 | "active/passive" : "passive only"); | ||
8620 | |||
8621 | hw_mode = iwl_get_hw_mode(priv, MODE_IEEE80211A); | ||
8622 | if (hw_mode) { | ||
8623 | channels = hw_mode->channels; | ||
8624 | count = hw_mode->num_channels; | ||
8625 | } else { | ||
8626 | channels = NULL; | ||
8627 | count = 0; | ||
8628 | } | ||
8629 | |||
8630 | len += sprintf(&buf[len], "Displaying %d channels in 5.2GHz band " | ||
8631 | "(802.11a):\n", count); | ||
8632 | |||
8633 | for (i = 0; i < count; i++) | ||
8634 | len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n", | ||
8635 | channels[i].chan, | ||
8636 | channels[i].power_level, | ||
8637 | channels[i]. | ||
8638 | flag & IEEE80211_CHAN_W_RADAR_DETECT ? | ||
8639 | " (IEEE 802.11h required)" : "", | ||
8640 | (!(channels[i].flag & IEEE80211_CHAN_W_IBSS) | ||
8641 | || (channels[i]. | ||
8642 | flag & | ||
8643 | IEEE80211_CHAN_W_RADAR_DETECT)) ? "" : | ||
8644 | ", IBSS", | ||
8645 | channels[i]. | ||
8646 | flag & IEEE80211_CHAN_W_ACTIVE_SCAN ? | ||
8647 | "active/passive" : "passive only"); | ||
8648 | |||
8649 | return len; | ||
8650 | } | ||
8651 | |||
8652 | static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL); | ||
8653 | |||
8654 | static ssize_t show_statistics(struct device *d, | ||
8655 | struct device_attribute *attr, char *buf) | ||
8656 | { | ||
8657 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
8658 | u32 size = sizeof(struct iwl_notif_statistics); | ||
8659 | u32 len = 0, ofs = 0; | ||
8660 | u8 *data = (u8 *)&priv->statistics; | ||
8661 | int rc = 0; | ||
8662 | |||
8663 | if (!iwl_is_alive(priv)) | ||
8664 | return -EAGAIN; | ||
8665 | |||
8666 | mutex_lock(&priv->mutex); | ||
8667 | rc = iwl_send_statistics_request(priv); | ||
8668 | mutex_unlock(&priv->mutex); | ||
8669 | |||
8670 | if (rc) { | ||
8671 | len = sprintf(buf, | ||
8672 | "Error sending statistics request: 0x%08X\n", rc); | ||
8673 | return len; | ||
8674 | } | ||
8675 | |||
8676 | while (size && (PAGE_SIZE - len)) { | ||
8677 | hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len, | ||
8678 | PAGE_SIZE - len, 1); | ||
8679 | len = strlen(buf); | ||
8680 | if (PAGE_SIZE - len) | ||
8681 | buf[len++] = '\n'; | ||
8682 | |||
8683 | ofs += 16; | ||
8684 | size -= min(size, 16U); | ||
8685 | } | ||
8686 | |||
8687 | return len; | ||
8688 | } | ||
8689 | |||
8690 | static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL); | ||
8691 | |||
8692 | static ssize_t show_antenna(struct device *d, | ||
8693 | struct device_attribute *attr, char *buf) | ||
8694 | { | ||
8695 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
8696 | |||
8697 | if (!iwl_is_alive(priv)) | ||
8698 | return -EAGAIN; | ||
8699 | |||
8700 | return sprintf(buf, "%d\n", priv->antenna); | ||
8701 | } | ||
8702 | |||
8703 | static ssize_t store_antenna(struct device *d, | ||
8704 | struct device_attribute *attr, | ||
8705 | const char *buf, size_t count) | ||
8706 | { | ||
8707 | int ant; | ||
8708 | struct iwl_priv *priv = dev_get_drvdata(d); | ||
8709 | |||
8710 | if (count == 0) | ||
8711 | return 0; | ||
8712 | |||
8713 | if (sscanf(buf, "%1i", &ant) != 1) { | ||
8714 | IWL_DEBUG_INFO("not in hex or decimal form.\n"); | ||
8715 | return count; | ||
8716 | } | ||
8717 | |||
8718 | if ((ant >= 0) && (ant <= 2)) { | ||
8719 | IWL_DEBUG_INFO("Setting antenna select to %d.\n", ant); | ||
8720 | priv->antenna = (enum iwl_antenna)ant; | ||
8721 | } else | ||
8722 | IWL_DEBUG_INFO("Bad antenna select value %d.\n", ant); | ||
8723 | |||
8724 | |||
8725 | return count; | ||
8726 | } | ||
8727 | |||
8728 | static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna); | ||
8729 | |||
8730 | static ssize_t show_status(struct device *d, | ||
8731 | struct device_attribute *attr, char *buf) | ||
8732 | { | ||
8733 | struct iwl_priv *priv = (struct iwl_priv *)d->driver_data; | ||
8734 | if (!iwl_is_alive(priv)) | ||
8735 | return -EAGAIN; | ||
8736 | return sprintf(buf, "0x%08x\n", (int)priv->status); | ||
8737 | } | ||
8738 | |||
8739 | static DEVICE_ATTR(status, S_IRUGO, show_status, NULL); | ||
8740 | |||
8741 | static ssize_t dump_error_log(struct device *d, | ||
8742 | struct device_attribute *attr, | ||
8743 | const char *buf, size_t count) | ||
8744 | { | ||
8745 | char *p = (char *)buf; | ||
8746 | |||
8747 | if (p[0] == '1') | ||
8748 | iwl_dump_nic_error_log((struct iwl_priv *)d->driver_data); | ||
8749 | |||
8750 | return strnlen(buf, count); | ||
8751 | } | ||
8752 | |||
8753 | static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log); | ||
8754 | |||
8755 | static ssize_t dump_event_log(struct device *d, | ||
8756 | struct device_attribute *attr, | ||
8757 | const char *buf, size_t count) | ||
8758 | { | ||
8759 | char *p = (char *)buf; | ||
8760 | |||
8761 | if (p[0] == '1') | ||
8762 | iwl_dump_nic_event_log((struct iwl_priv *)d->driver_data); | ||
8763 | |||
8764 | return strnlen(buf, count); | ||
8765 | } | ||
8766 | |||
8767 | static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log); | ||
8768 | |||
8769 | /***************************************************************************** | ||
8770 | * | ||
8771 | * driver setup and teardown | ||
8772 | * | ||
8773 | *****************************************************************************/ | ||
8774 | |||
8775 | static void iwl_setup_deferred_work(struct iwl_priv *priv) | ||
8776 | { | ||
8777 | priv->workqueue = create_workqueue(DRV_NAME); | ||
8778 | |||
8779 | init_waitqueue_head(&priv->wait_command_queue); | ||
8780 | |||
8781 | INIT_WORK(&priv->up, iwl_bg_up); | ||
8782 | INIT_WORK(&priv->restart, iwl_bg_restart); | ||
8783 | INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish); | ||
8784 | INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); | ||
8785 | INIT_WORK(&priv->request_scan, iwl_bg_request_scan); | ||
8786 | INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); | ||
8787 | INIT_WORK(&priv->rf_kill, iwl_bg_rf_kill); | ||
8788 | INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update); | ||
8789 | INIT_DELAYED_WORK(&priv->post_associate, iwl_bg_post_associate); | ||
8790 | INIT_DELAYED_WORK(&priv->init_alive_start, iwl_bg_init_alive_start); | ||
8791 | INIT_DELAYED_WORK(&priv->alive_start, iwl_bg_alive_start); | ||
8792 | INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); | ||
8793 | |||
8794 | iwl_hw_setup_deferred_work(priv); | ||
8795 | |||
8796 | tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) | ||
8797 | iwl_irq_tasklet, (unsigned long)priv); | ||
8798 | } | ||
8799 | |||
8800 | static void iwl_cancel_deferred_work(struct iwl_priv *priv) | ||
8801 | { | ||
8802 | iwl_hw_cancel_deferred_work(priv); | ||
8803 | |||
8804 | cancel_delayed_work(&priv->scan_check); | ||
8805 | cancel_delayed_work(&priv->alive_start); | ||
8806 | cancel_delayed_work(&priv->post_associate); | ||
8807 | cancel_work_sync(&priv->beacon_update); | ||
8808 | } | ||
8809 | |||
8810 | static struct attribute *iwl_sysfs_entries[] = { | ||
8811 | &dev_attr_antenna.attr, | ||
8812 | &dev_attr_channels.attr, | ||
8813 | &dev_attr_dump_errors.attr, | ||
8814 | &dev_attr_dump_events.attr, | ||
8815 | &dev_attr_flags.attr, | ||
8816 | &dev_attr_filter_flags.attr, | ||
8817 | #ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT | ||
8818 | &dev_attr_measurement.attr, | ||
8819 | #endif | ||
8820 | &dev_attr_power_level.attr, | ||
8821 | &dev_attr_retry_rate.attr, | ||
8822 | &dev_attr_rf_kill.attr, | ||
8823 | &dev_attr_rs_window.attr, | ||
8824 | &dev_attr_statistics.attr, | ||
8825 | &dev_attr_status.attr, | ||
8826 | &dev_attr_temperature.attr, | ||
8827 | &dev_attr_tune.attr, | ||
8828 | &dev_attr_tx_power.attr, | ||
8829 | |||
8830 | NULL | ||
8831 | }; | ||
8832 | |||
8833 | static struct attribute_group iwl_attribute_group = { | ||
8834 | .name = NULL, /* put in device directory */ | ||
8835 | .attrs = iwl_sysfs_entries, | ||
8836 | }; | ||
8837 | |||
8838 | static struct ieee80211_ops iwl_hw_ops = { | ||
8839 | .tx = iwl_mac_tx, | ||
8840 | .open = iwl_mac_open, | ||
8841 | .stop = iwl_mac_stop, | ||
8842 | .add_interface = iwl_mac_add_interface, | ||
8843 | .remove_interface = iwl_mac_remove_interface, | ||
8844 | .config = iwl_mac_config, | ||
8845 | .config_interface = iwl_mac_config_interface, | ||
8846 | .set_key = iwl_mac_set_key, | ||
8847 | .get_stats = iwl_mac_get_stats, | ||
8848 | .get_tx_stats = iwl_mac_get_tx_stats, | ||
8849 | .conf_tx = iwl_mac_conf_tx, | ||
8850 | .get_tsf = iwl_mac_get_tsf, | ||
8851 | .reset_tsf = iwl_mac_reset_tsf, | ||
8852 | .beacon_update = iwl_mac_beacon_update, | ||
8853 | #ifdef CONFIG_IWLWIFI_HT | ||
8854 | .conf_ht = iwl_mac_conf_ht, | ||
8855 | .get_ht_capab = iwl_mac_get_ht_capab, | ||
8856 | #ifdef CONFIG_IWLWIFI_HT_AGG | ||
8857 | .ht_tx_agg_start = iwl_mac_ht_tx_agg_start, | ||
8858 | .ht_tx_agg_stop = iwl_mac_ht_tx_agg_stop, | ||
8859 | .ht_rx_agg_start = iwl_mac_ht_rx_agg_start, | ||
8860 | .ht_rx_agg_stop = iwl_mac_ht_rx_agg_stop, | ||
8861 | #endif /* CONFIG_IWLWIFI_HT_AGG */ | ||
8862 | #endif /* CONFIG_IWLWIFI_HT */ | ||
8863 | .hw_scan = iwl_mac_hw_scan | ||
8864 | }; | ||
8865 | |||
8866 | static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | ||
8867 | { | ||
8868 | int err = 0; | ||
8869 | struct iwl_priv *priv; | ||
8870 | struct ieee80211_hw *hw; | ||
8871 | int i; | ||
8872 | |||
8873 | if (iwl_param_disable_hw_scan) { | ||
8874 | IWL_DEBUG_INFO("Disabling hw_scan\n"); | ||
8875 | iwl_hw_ops.hw_scan = NULL; | ||
8876 | } | ||
8877 | |||
8878 | if ((iwl_param_queues_num > IWL_MAX_NUM_QUEUES) || | ||
8879 | (iwl_param_queues_num < IWL_MIN_NUM_QUEUES)) { | ||
8880 | IWL_ERROR("invalid queues_num, should be between %d and %d\n", | ||
8881 | IWL_MIN_NUM_QUEUES, IWL_MAX_NUM_QUEUES); | ||
8882 | err = -EINVAL; | ||
8883 | goto out; | ||
8884 | } | ||
8885 | |||
8886 | /* mac80211 allocates memory for this device instance, including | ||
8887 | * space for this driver's private structure */ | ||
8888 | hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwl_hw_ops); | ||
8889 | if (hw == NULL) { | ||
8890 | IWL_ERROR("Can not allocate network device\n"); | ||
8891 | err = -ENOMEM; | ||
8892 | goto out; | ||
8893 | } | ||
8894 | SET_IEEE80211_DEV(hw, &pdev->dev); | ||
8895 | |||
8896 | IWL_DEBUG_INFO("*** LOAD DRIVER ***\n"); | ||
8897 | priv = hw->priv; | ||
8898 | priv->hw = hw; | ||
8899 | |||
8900 | priv->pci_dev = pdev; | ||
8901 | priv->antenna = (enum iwl_antenna)iwl_param_antenna; | ||
8902 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
8903 | iwl_debug_level = iwl_param_debug; | ||
8904 | atomic_set(&priv->restrict_refcnt, 0); | ||
8905 | #endif | ||
8906 | priv->retry_rate = 1; | ||
8907 | |||
8908 | priv->ibss_beacon = NULL; | ||
8909 | |||
8910 | /* Tell mac80211 and its clients (e.g. Wireless Extensions) | ||
8911 | * the range of signal quality values that we'll provide. | ||
8912 | * Negative values for level/noise indicate that we'll provide dBm. | ||
8913 | * For WE, at least, non-0 values here *enable* display of values | ||
8914 | * in app (iwconfig). */ | ||
8915 | hw->max_rssi = -20; /* signal level, negative indicates dBm */ | ||
8916 | hw->max_noise = -20; /* noise level, negative indicates dBm */ | ||
8917 | hw->max_signal = 100; /* link quality indication (%) */ | ||
8918 | |||
8919 | /* Tell mac80211 our Tx characteristics */ | ||
8920 | hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE; | ||
8921 | |||
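| /* Four hardware queues cover the four EDCA access categories; when HT | ||
|  * aggregation support is built in, additional queues are presumably | ||
|  * exposed so aggregation sessions get queues of their own. */ | ||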
8922 | hw->queues = 4; | ||
8923 | #ifdef CONFIG_IWLWIFI_HT | ||
8924 | #ifdef CONFIG_IWLWIFI_HT_AGG | ||
8925 | hw->queues = 16; | ||
8926 | #endif /* CONFIG_IWLWIFI_HT_AGG */ | ||
8927 | #endif /* CONFIG_IWLWIFI_HT */ | ||
8928 | |||
8929 | spin_lock_init(&priv->lock); | ||
8930 | spin_lock_init(&priv->power_data.lock); | ||
8931 | spin_lock_init(&priv->sta_lock); | ||
8932 | spin_lock_init(&priv->hcmd_lock); | ||
8933 | spin_lock_init(&priv->lq_mngr.lock); | ||
8934 | |||
8935 | for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) | ||
8936 | INIT_LIST_HEAD(&priv->ibss_mac_hash[i]); | ||
8937 | |||
8938 | INIT_LIST_HEAD(&priv->free_frames); | ||
8939 | |||
8940 | mutex_init(&priv->mutex); | ||
8941 | if (pci_enable_device(pdev)) { | ||
8942 | err = -ENODEV; | ||
8943 | goto out_ieee80211_free_hw; | ||
8944 | } | ||
8945 | |||
8946 | pci_set_master(pdev); | ||
8947 | |||
8948 | iwl_clear_stations_table(priv); | ||
8949 | |||
8950 | priv->data_retry_limit = -1; | ||
8951 | priv->ieee_channels = NULL; | ||
8952 | priv->ieee_rates = NULL; | ||
8953 | priv->phymode = -1; | ||
8954 | |||
8955 | err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); | ||
8956 | if (!err) | ||
8957 | err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); | ||
8958 | if (err) { | ||
8959 | printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n"); | ||
8960 | goto out_pci_disable_device; | ||
8961 | } | ||
8962 | |||
8963 | pci_set_drvdata(pdev, priv); | ||
8964 | err = pci_request_regions(pdev, DRV_NAME); | ||
8965 | if (err) | ||
8966 | goto out_pci_disable_device; | ||
8967 | /* We disable the RETRY_TIMEOUT register (0x41) to keep | ||
8968 | * PCI Tx retries from interfering with C3 CPU state */ | ||
8969 | pci_write_config_byte(pdev, 0x41, 0x00); | ||
8970 | priv->hw_base = pci_iomap(pdev, 0, 0); | ||
8971 | if (!priv->hw_base) { | ||
8972 | err = -ENODEV; | ||
8973 | goto out_pci_release_regions; | ||
8974 | } | ||
8975 | |||
8976 | IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n", | ||
8977 | (unsigned long long) pci_resource_len(pdev, 0)); | ||
8978 | IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base); | ||
8979 | |||
8980 | /* Initialize module parameter values here */ | ||
8981 | |||
8982 | if (iwl_param_disable) { | ||
8983 | set_bit(STATUS_RF_KILL_SW, &priv->status); | ||
8984 | IWL_DEBUG_INFO("Radio disabled.\n"); | ||
8985 | } | ||
8986 | |||
8987 | priv->iw_mode = IEEE80211_IF_TYPE_STA; | ||
8988 | |||
8989 | priv->ps_mode = 0; | ||
8990 | priv->use_ant_b_for_management_frame = 1; /* start with ant B */ | ||
8991 | priv->is_ht_enabled = 1; | ||
8992 | priv->channel_width = IWL_CHANNEL_WIDTH_40MHZ; | ||
8993 | priv->valid_antenna = 0x7; /* assume all 3 connected */ | ||
8994 | priv->ps_mode = IWL_MIMO_PS_NONE; | ||
8995 | priv->cck_power_index_compensation = iwl_read32( | ||
8996 | priv, CSR_HW_REV_WA_REG); | ||
8997 | |||
8998 | iwl4965_set_rxon_chain(priv); | ||
8999 | |||
9000 | printk(KERN_INFO DRV_NAME | ||
9001 | ": Detected Intel Wireless WiFi Link 4965AGN\n"); | ||
9002 | |||
9003 | /* Device-specific setup */ | ||
9004 | if (iwl_hw_set_hw_setting(priv)) { | ||
9005 | IWL_ERROR("failed to set hw settings\n"); | ||
9007 | goto out_iounmap; | ||
9008 | } | ||
9009 | |||
9010 | #ifdef CONFIG_IWLWIFI_QOS | ||
9011 | if (iwl_param_qos_enable) | ||
9012 | priv->qos_data.qos_enable = 1; | ||
9013 | |||
9014 | iwl_reset_qos(priv); | ||
9015 | |||
9016 | priv->qos_data.qos_active = 0; | ||
9017 | priv->qos_data.qos_cap.val = 0; | ||
9018 | #endif /* CONFIG_IWLWIFI_QOS */ | ||
9019 | |||
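| /* Default the RXON to 802.11g channel 6 until mac80211 configures us */ | ||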
9020 | iwl_set_rxon_channel(priv, MODE_IEEE80211G, 6); | ||
9021 | iwl_setup_deferred_work(priv); | ||
9022 | iwl_setup_rx_handlers(priv); | ||
9023 | |||
9024 | priv->rates_mask = IWL_RATES_MASK; | ||
9025 | /* If power management is turned on, default to AC mode */ | ||
9026 | priv->power_mode = IWL_POWER_AC; | ||
9027 | priv->user_txpower_limit = IWL_DEFAULT_TX_POWER; | ||
9028 | |||
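| /* Try MSI; if it cannot be enabled the device falls back to legacy | ||
| * INTx interrupts, so the return value is intentionally ignored */ | ||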
9029 | pci_enable_msi(pdev); | ||
9030 | |||
9031 | err = request_irq(pdev->irq, iwl_isr, IRQF_SHARED, DRV_NAME, priv); | ||
9032 | if (err) { | ||
9033 | IWL_ERROR("Error allocating IRQ %d\n", pdev->irq); | ||
9034 | goto out_disable_msi; | ||
9035 | } | ||
9036 | |||
9037 | mutex_lock(&priv->mutex); | ||
9038 | |||
9039 | err = sysfs_create_group(&pdev->dev.kobj, &iwl_attribute_group); | ||
9040 | if (err) { | ||
9041 | IWL_ERROR("failed to create sysfs device attributes\n"); | ||
9042 | mutex_unlock(&priv->mutex); | ||
9043 | goto out_release_irq; | ||
9044 | } | ||
9045 | |||
9046 | /* fetch ucode file from disk, alloc and copy to bus-master buffers ... | ||
9047 | * ucode filename and max sizes are card-specific. */ | ||
9048 | err = iwl_read_ucode(priv); | ||
9049 | if (err) { | ||
9050 | IWL_ERROR("Could not read microcode: %d\n", err); | ||
9051 | mutex_unlock(&priv->mutex); | ||
9052 | goto out_pci_alloc; | ||
9053 | } | ||
9054 | |||
9055 | mutex_unlock(&priv->mutex); | ||
9056 | |||
9057 | IWL_DEBUG_INFO("Queueing UP work.\n"); | ||
9058 | |||
9059 | queue_work(priv->workqueue, &priv->up); | ||
9060 | |||
9061 | return 0; | ||
9062 | |||
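| /* Error unwind: release resources in the reverse order they were acquired */ | ||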
9063 | out_pci_alloc: | ||
9064 | iwl_dealloc_ucode_pci(priv); | ||
9065 | |||
9066 | sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); | ||
9067 | |||
9068 | out_release_irq: | ||
9069 | free_irq(pdev->irq, priv); | ||
9070 | |||
9071 | out_disable_msi: | ||
9072 | pci_disable_msi(pdev); | ||
9073 | destroy_workqueue(priv->workqueue); | ||
9074 | priv->workqueue = NULL; | ||
9075 | iwl_unset_hw_setting(priv); | ||
9076 | |||
9077 | out_iounmap: | ||
9078 | pci_iounmap(pdev, priv->hw_base); | ||
9079 | out_pci_release_regions: | ||
9080 | pci_release_regions(pdev); | ||
9081 | out_pci_disable_device: | ||
9082 | pci_disable_device(pdev); | ||
9083 | pci_set_drvdata(pdev, NULL); | ||
9084 | out_ieee80211_free_hw: | ||
9085 | ieee80211_free_hw(priv->hw); | ||
9086 | out: | ||
9087 | return err; | ||
9088 | } | ||
9089 | |||
9090 | static void iwl_pci_remove(struct pci_dev *pdev) | ||
9091 | { | ||
9092 | struct iwl_priv *priv = pci_get_drvdata(pdev); | ||
9093 | struct list_head *p, *q; | ||
9094 | int i; | ||
9095 | |||
9096 | if (!priv) | ||
9097 | return; | ||
9098 | |||
9099 | IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n"); | ||
9100 | |||
9101 | mutex_lock(&priv->mutex); | ||
9102 | set_bit(STATUS_EXIT_PENDING, &priv->status); | ||
9103 | __iwl_down(priv); | ||
9104 | mutex_unlock(&priv->mutex); | ||
9105 | |||
9106 | /* Free MAC hash list for ADHOC */ | ||
9107 | for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) { | ||
9108 | list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) { | ||
9109 | list_del(p); | ||
9110 | kfree(list_entry(p, struct iwl_ibss_seq, list)); | ||
9111 | } | ||
9112 | } | ||
9113 | |||
9114 | sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group); | ||
9115 | |||
9116 | iwl_dealloc_ucode_pci(priv); | ||
9117 | |||
9118 | if (priv->rxq.bd) | ||
9119 | iwl_rx_queue_free(priv, &priv->rxq); | ||
9120 | iwl_hw_txq_ctx_free(priv); | ||
9121 | |||
9122 | iwl_unset_hw_setting(priv); | ||
9123 | iwl_clear_stations_table(priv); | ||
9124 | |||
9125 | if (priv->mac80211_registered) { | ||
9126 | ieee80211_unregister_hw(priv->hw); | ||
9127 | iwl_rate_control_unregister(priv->hw); | ||
9128 | } | ||
9129 | |||
9130 | /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes | ||
9131 | * priv->workqueue... so we can't take down the workqueue | ||
9132 | * until now... */ | ||
9133 | destroy_workqueue(priv->workqueue); | ||
9134 | priv->workqueue = NULL; | ||
9135 | |||
9136 | free_irq(pdev->irq, priv); | ||
9137 | pci_disable_msi(pdev); | ||
9138 | pci_iounmap(pdev, priv->hw_base); | ||
9139 | pci_release_regions(pdev); | ||
9140 | pci_disable_device(pdev); | ||
9141 | pci_set_drvdata(pdev, NULL); | ||
9142 | |||
9143 | kfree(priv->channel_info); | ||
9144 | |||
9145 | kfree(priv->ieee_channels); | ||
9146 | kfree(priv->ieee_rates); | ||
9147 | |||
9148 | if (priv->ibss_beacon) | ||
9149 | dev_kfree_skb(priv->ibss_beacon); | ||
9150 | |||
9151 | ieee80211_free_hw(priv->hw); | ||
9152 | } | ||
9153 | |||
9154 | #ifdef CONFIG_PM | ||
9155 | |||
9156 | static int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state) | ||
9157 | { | ||
9158 | struct iwl_priv *priv = pci_get_drvdata(pdev); | ||
9159 | |||
9160 | mutex_lock(&priv->mutex); | ||
9161 | |||
9162 | set_bit(STATUS_IN_SUSPEND, &priv->status); | ||
9163 | |||
9164 | /* Take down the device; powers it off, etc. */ | ||
9165 | __iwl_down(priv); | ||
9166 | |||
9167 | if (priv->mac80211_registered) | ||
9168 | ieee80211_stop_queues(priv->hw); | ||
9169 | |||
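| /* Save PCI config space and put the adapter into D3hot until resume */ | ||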
9170 | pci_save_state(pdev); | ||
9171 | pci_disable_device(pdev); | ||
9172 | pci_set_power_state(pdev, PCI_D3hot); | ||
9173 | |||
9174 | mutex_unlock(&priv->mutex); | ||
9175 | |||
9176 | return 0; | ||
9177 | } | ||
9178 | |||
9179 | static void iwl_resume(struct iwl_priv *priv) | ||
9180 | { | ||
9181 | unsigned long flags; | ||
9182 | |||
9183 | /* The following is a temporary workaround for the | ||
9184 | * suspend / resume path not fully initializing the NIC correctly. | ||
9185 | * Without all of the following, resume will not attempt to take | ||
9186 | * down the NIC (it shouldn't really need to) and will just try | ||
9187 | * and bring the NIC back up. However that fails during the | ||
9188 | * ucode verification process. This then causes iwl_down to be | ||
9189 | * called *after* iwl_hw_nic_init() has succeeded -- which | ||
9190 | * then lets the next init sequence succeed. So, we've | ||
9191 | * replicated all of that NIC init code here... */ | ||
9192 | |||
9193 | iwl_write32(priv, CSR_INT, 0xFFFFFFFF); | ||
9194 | |||
9195 | iwl_hw_nic_init(priv); | ||
9196 | |||
9197 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
9198 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, | ||
9199 | CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); | ||
9200 | iwl_write32(priv, CSR_INT, 0xFFFFFFFF); | ||
9201 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
9202 | iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); | ||
9203 | |||
9204 | /* tell the device to stop sending interrupts */ | ||
9205 | iwl_disable_interrupts(priv); | ||
9206 | |||
9207 | spin_lock_irqsave(&priv->lock, flags); | ||
9208 | iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); | ||
9209 | |||
9210 | if (!iwl_grab_restricted_access(priv)) { | ||
9211 | iwl_write_restricted_reg(priv, APMG_CLK_DIS_REG, | ||
9212 | APMG_CLK_VAL_DMA_CLK_RQT); | ||
9213 | iwl_release_restricted_access(priv); | ||
9214 | } | ||
9215 | spin_unlock_irqrestore(&priv->lock, flags); | ||
9216 | |||
9217 | udelay(5); | ||
9218 | |||
9219 | iwl_hw_nic_reset(priv); | ||
9220 | |||
9221 | /* Bring the device back up */ | ||
9222 | clear_bit(STATUS_IN_SUSPEND, &priv->status); | ||
9223 | queue_work(priv->workqueue, &priv->up); | ||
9224 | } | ||
9225 | |||
9226 | static int iwl_pci_resume(struct pci_dev *pdev) | ||
9227 | { | ||
9228 | struct iwl_priv *priv = pci_get_drvdata(pdev); | ||
9229 | int err; | ||
9230 | |||
9231 | printk(KERN_INFO DRV_NAME ": Coming out of suspend...\n"); | ||
9232 | |||
9233 | mutex_lock(&priv->mutex); | ||
9234 | |||
9235 | pci_set_power_state(pdev, PCI_D0); | ||
9236 | err = pci_enable_device(pdev); | ||
| if (err) { | ||
| mutex_unlock(&priv->mutex); | ||
| return err; | ||
| } | ||
9237 | pci_restore_state(pdev); | ||
9238 | |||
9239 | /* | ||
9240 | * Suspend/Resume resets the PCI configuration space, so we have to | ||
9241 | * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries | ||
9242 | * from interfering with C3 CPU state. pci_restore_state won't help | ||
9243 | * here since it only restores the first 64 bytes pci config header. | ||
9244 | * here since it only restores the first 64 bytes of the PCI config header. | ||
9245 | pci_write_config_byte(pdev, 0x41, 0x00); | ||
9246 | |||
9247 | iwl_resume(priv); | ||
9248 | mutex_unlock(&priv->mutex); | ||
9249 | |||
9250 | return 0; | ||
9251 | } | ||
9252 | |||
9253 | #endif /* CONFIG_PM */ | ||
9254 | |||
9255 | /***************************************************************************** | ||
9256 | * | ||
9257 | * driver and module entry point | ||
9258 | * | ||
9259 | *****************************************************************************/ | ||
9260 | |||
9261 | static struct pci_driver iwl_driver = { | ||
9262 | .name = DRV_NAME, | ||
9263 | .id_table = iwl_hw_card_ids, | ||
9264 | .probe = iwl_pci_probe, | ||
9265 | .remove = __devexit_p(iwl_pci_remove), | ||
9266 | #ifdef CONFIG_PM | ||
9267 | .suspend = iwl_pci_suspend, | ||
9268 | .resume = iwl_pci_resume, | ||
9269 | #endif | ||
9270 | }; | ||
9271 | |||
9272 | static int __init iwl_init(void) | ||
9273 | { | ||
9274 | |||
9275 | int ret; | ||
9276 | printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); | ||
9277 | printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); | ||
9278 | ret = pci_register_driver(&iwl_driver); | ||
9279 | if (ret) { | ||
9280 | IWL_ERROR("Unable to initialize PCI module\n"); | ||
9281 | return ret; | ||
9282 | } | ||
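| /* With debugging built in, expose a driver-wide debug_level attribute in sysfs */ | ||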
9283 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
9284 | ret = driver_create_file(&iwl_driver.driver, &driver_attr_debug_level); | ||
9285 | if (ret) { | ||
9286 | IWL_ERROR("Unable to create driver sysfs file\n"); | ||
9287 | pci_unregister_driver(&iwl_driver); | ||
9288 | return ret; | ||
9289 | } | ||
9290 | #endif | ||
9291 | |||
9292 | return ret; | ||
9293 | } | ||
9294 | |||
9295 | static void __exit iwl_exit(void) | ||
9296 | { | ||
9297 | #ifdef CONFIG_IWLWIFI_DEBUG | ||
9298 | driver_remove_file(&iwl_driver.driver, &driver_attr_debug_level); | ||
9299 | #endif | ||
9300 | pci_unregister_driver(&iwl_driver); | ||
9301 | } | ||
9302 | |||
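| /* Module parameters: mode 0444 makes them readable in sysfs but settable | ||
| * only at module load time */ | ||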
9303 | module_param_named(antenna, iwl_param_antenna, int, 0444); | ||
9304 | MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); | ||
9305 | module_param_named(disable, iwl_param_disable, int, 0444); | ||
9306 | MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])"); | ||
9307 | module_param_named(hwcrypto, iwl_param_hwcrypto, int, 0444); | ||
9308 | MODULE_PARM_DESC(hwcrypto, | ||
9309 | "use hardware crypto engine (default 0 [software])"); | ||
9310 | module_param_named(debug, iwl_param_debug, int, 0444); | ||
9311 | MODULE_PARM_DESC(debug, "debug output mask"); | ||
9312 | module_param_named(disable_hw_scan, iwl_param_disable_hw_scan, int, 0444); | ||
9313 | MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)"); | ||
9314 | |||
9315 | module_param_named(queues_num, iwl_param_queues_num, int, 0444); | ||
9316 | MODULE_PARM_DESC(queues_num, "number of hw queues."); | ||
9317 | |||
9318 | /* QoS */ | ||
9319 | module_param_named(qos_enable, iwl_param_qos_enable, int, 0444); | ||
9320 | MODULE_PARM_DESC(qos_enable, "enable all QoS functionality"); | ||
9321 | |||
9322 | module_exit(iwl_exit); | ||
9323 | module_init(iwl_init); | ||