author John W. Linville <linville@tuxdriver.com> 2008-11-11 16:22:09 -0500
committer John W. Linville <linville@tuxdriver.com> 2008-11-21 11:08:17 -0500
commit 0795cd29b6fe05107b40080cb1fccadb96320c96 (patch)
tree 36618d9489bb4e9dc8abd2505e48528c92facb65 /drivers/net/wireless/ipw2200.c
parent 2ba4b32ecf748d5f45f298fc9677fa46d1dd9aff (diff)
ipw2x00: relocate ipw2100/ipw2200 to common directory
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/ipw2200.c')
-rw-r--r-- drivers/net/wireless/ipw2200.c 11984
1 file changed, 0 insertions, 11984 deletions
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
deleted file mode 100644
index d2a2b7586d08..000000000000
--- a/drivers/net/wireless/ipw2200.c
+++ /dev/null
@@ -1,11984 +0,0 @@
1/******************************************************************************
2
3 Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31******************************************************************************/
32
33#include "ipw2200.h"
34
35
36#ifndef KBUILD_EXTMOD
37#define VK "k"
38#else
39#define VK
40#endif
41
42#ifdef CONFIG_IPW2200_DEBUG
43#define VD "d"
44#else
45#define VD
46#endif
47
48#ifdef CONFIG_IPW2200_MONITOR
49#define VM "m"
50#else
51#define VM
52#endif
53
54#ifdef CONFIG_IPW2200_PROMISCUOUS
55#define VP "p"
56#else
57#define VP
58#endif
59
60#ifdef CONFIG_IPW2200_RADIOTAP
61#define VR "r"
62#else
63#define VR
64#endif
65
66#ifdef CONFIG_IPW2200_QOS
67#define VQ "q"
68#else
69#define VQ
70#endif
71
72#define IPW2200_VERSION "1.2.2" VK VD VM VP VR VQ
73#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
74#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
75#define DRV_VERSION IPW2200_VERSION
76
77#define ETH_P_80211_STATS (ETH_P_80211_RAW + 1)
78
79MODULE_DESCRIPTION(DRV_DESCRIPTION);
80MODULE_VERSION(DRV_VERSION);
81MODULE_AUTHOR(DRV_COPYRIGHT);
82MODULE_LICENSE("GPL");
83
84static int cmdlog = 0;
85static int debug = 0;
86static int channel = 0;
87static int mode = 0;
88
89static u32 ipw_debug_level;
90static int associate;
91static int auto_create = 1;
92static int led = 0;
93static int disable = 0;
94static int bt_coexist = 0;
95static int hwcrypto = 0;
96static int roaming = 1;
97static const char ipw_modes[] = {
98 'a', 'b', 'g', '?'
99};
100static int antenna = CFG_SYS_ANTENNA_BOTH;
101
102#ifdef CONFIG_IPW2200_PROMISCUOUS
103static int rtap_iface = 0; /* def: 0 -- do not create rtap interface */
104#endif
105
106
107#ifdef CONFIG_IPW2200_QOS
108static int qos_enable = 0;
109static int qos_burst_enable = 0;
110static int qos_no_ack_mask = 0;
111static int burst_duration_CCK = 0;
112static int burst_duration_OFDM = 0;
113
114static struct ieee80211_qos_parameters def_qos_parameters_OFDM = {
115 {QOS_TX0_CW_MIN_OFDM, QOS_TX1_CW_MIN_OFDM, QOS_TX2_CW_MIN_OFDM,
116 QOS_TX3_CW_MIN_OFDM},
117 {QOS_TX0_CW_MAX_OFDM, QOS_TX1_CW_MAX_OFDM, QOS_TX2_CW_MAX_OFDM,
118 QOS_TX3_CW_MAX_OFDM},
119 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
120 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
121 {QOS_TX0_TXOP_LIMIT_OFDM, QOS_TX1_TXOP_LIMIT_OFDM,
122 QOS_TX2_TXOP_LIMIT_OFDM, QOS_TX3_TXOP_LIMIT_OFDM}
123};
124
125static struct ieee80211_qos_parameters def_qos_parameters_CCK = {
126 {QOS_TX0_CW_MIN_CCK, QOS_TX1_CW_MIN_CCK, QOS_TX2_CW_MIN_CCK,
127 QOS_TX3_CW_MIN_CCK},
128 {QOS_TX0_CW_MAX_CCK, QOS_TX1_CW_MAX_CCK, QOS_TX2_CW_MAX_CCK,
129 QOS_TX3_CW_MAX_CCK},
130 {QOS_TX0_AIFS, QOS_TX1_AIFS, QOS_TX2_AIFS, QOS_TX3_AIFS},
131 {QOS_TX0_ACM, QOS_TX1_ACM, QOS_TX2_ACM, QOS_TX3_ACM},
132 {QOS_TX0_TXOP_LIMIT_CCK, QOS_TX1_TXOP_LIMIT_CCK, QOS_TX2_TXOP_LIMIT_CCK,
133 QOS_TX3_TXOP_LIMIT_CCK}
134};
135
136static struct ieee80211_qos_parameters def_parameters_OFDM = {
137 {DEF_TX0_CW_MIN_OFDM, DEF_TX1_CW_MIN_OFDM, DEF_TX2_CW_MIN_OFDM,
138 DEF_TX3_CW_MIN_OFDM},
139 {DEF_TX0_CW_MAX_OFDM, DEF_TX1_CW_MAX_OFDM, DEF_TX2_CW_MAX_OFDM,
140 DEF_TX3_CW_MAX_OFDM},
141 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
142 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
143 {DEF_TX0_TXOP_LIMIT_OFDM, DEF_TX1_TXOP_LIMIT_OFDM,
144 DEF_TX2_TXOP_LIMIT_OFDM, DEF_TX3_TXOP_LIMIT_OFDM}
145};
146
147static struct ieee80211_qos_parameters def_parameters_CCK = {
148 {DEF_TX0_CW_MIN_CCK, DEF_TX1_CW_MIN_CCK, DEF_TX2_CW_MIN_CCK,
149 DEF_TX3_CW_MIN_CCK},
150 {DEF_TX0_CW_MAX_CCK, DEF_TX1_CW_MAX_CCK, DEF_TX2_CW_MAX_CCK,
151 DEF_TX3_CW_MAX_CCK},
152 {DEF_TX0_AIFS, DEF_TX1_AIFS, DEF_TX2_AIFS, DEF_TX3_AIFS},
153 {DEF_TX0_ACM, DEF_TX1_ACM, DEF_TX2_ACM, DEF_TX3_ACM},
154 {DEF_TX0_TXOP_LIMIT_CCK, DEF_TX1_TXOP_LIMIT_CCK, DEF_TX2_TXOP_LIMIT_CCK,
155 DEF_TX3_TXOP_LIMIT_CCK}
156};
157
158static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
159
160static int from_priority_to_tx_queue[] = {
161 IPW_TX_QUEUE_1, IPW_TX_QUEUE_2, IPW_TX_QUEUE_2, IPW_TX_QUEUE_1,
162 IPW_TX_QUEUE_3, IPW_TX_QUEUE_3, IPW_TX_QUEUE_4, IPW_TX_QUEUE_4
163};
164
165static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv);
166
167static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
168 *qos_param);
169static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
170 *qos_param);
171#endif /* CONFIG_IPW2200_QOS */
172
173static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev);
174static void ipw_remove_current_network(struct ipw_priv *priv);
175static void ipw_rx(struct ipw_priv *priv);
176static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
177 struct clx2_tx_queue *txq, int qindex);
178static int ipw_queue_reset(struct ipw_priv *priv);
179
180static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
181 int len, int sync);
182
183static void ipw_tx_queue_free(struct ipw_priv *);
184
185static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
186static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
187static void ipw_rx_queue_replenish(void *);
188static int ipw_up(struct ipw_priv *);
189static void ipw_bg_up(struct work_struct *work);
190static void ipw_down(struct ipw_priv *);
191static void ipw_bg_down(struct work_struct *work);
192static int ipw_config(struct ipw_priv *);
193static int init_supported_rates(struct ipw_priv *priv,
194 struct ipw_supported_rates *prates);
195static void ipw_set_hwcrypto_keys(struct ipw_priv *);
196static void ipw_send_wep_keys(struct ipw_priv *, int);
197
198static int snprint_line(char *buf, size_t count,
199 const u8 * data, u32 len, u32 ofs)
200{
201 int out, i, j, l;
202 char c;
203
204 out = snprintf(buf, count, "%08X", ofs);
205
206 for (l = 0, i = 0; i < 2; i++) {
207 out += snprintf(buf + out, count - out, " ");
208 for (j = 0; j < 8 && l < len; j++, l++)
209 out += snprintf(buf + out, count - out, "%02X ",
210 data[(i * 8 + j)]);
211 for (; j < 8; j++)
212 out += snprintf(buf + out, count - out, " ");
213 }
214
215 out += snprintf(buf + out, count - out, " ");
216 for (l = 0, i = 0; i < 2; i++) {
217 out += snprintf(buf + out, count - out, " ");
218 for (j = 0; j < 8 && l < len; j++, l++) {
219 c = data[(i * 8 + j)];
220 if (!isascii(c) || !isprint(c))
221 c = '.';
222
223 out += snprintf(buf + out, count - out, "%c", c);
224 }
225
226 for (; j < 8; j++)
227 out += snprintf(buf + out, count - out, " ");
228 }
229
230 return out;
231}
232
233static void printk_buf(int level, const u8 * data, u32 len)
234{
235 char line[81];
236 u32 ofs = 0;
237 if (!(ipw_debug_level & level))
238 return;
239
240 while (len) {
241 snprint_line(line, sizeof(line), &data[ofs],
242 min(len, 16U), ofs);
243 printk(KERN_DEBUG "%s\n", line);
244 ofs += 16;
245 len -= min(len, 16U);
246 }
247}
248
249static int snprintk_buf(u8 * output, size_t size, const u8 * data, size_t len)
250{
251 size_t out = size;
252 u32 ofs = 0;
253 int total = 0;
254
255 while (size && len) {
256 out = snprint_line(output, size, &data[ofs],
257 min_t(size_t, len, 16U), ofs);
258
259 ofs += 16;
260 output += out;
261 size -= out;
262 len -= min_t(size_t, len, 16U);
263 total += out;
264 }
265 return total;
266}
267
268/* alias for 32-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
269static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
270#define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
271
272/* alias for 8-bit indirect read (for SRAM/reg above 4K), with debug wrapper */
273static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
274#define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
275
276/* 8-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
277static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
278static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
279{
280 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__,
281 __LINE__, (u32) (b), (u32) (c));
282 _ipw_write_reg8(a, b, c);
283}
284
285/* 16-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
286static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
287static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
288{
289 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__,
290 __LINE__, (u32) (b), (u32) (c));
291 _ipw_write_reg16(a, b, c);
292}
293
294/* 32-bit indirect write (for SRAM/reg above 4K), with debug wrapper */
295static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
296static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
297{
298 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__,
299 __LINE__, (u32) (b), (u32) (c));
300 _ipw_write_reg32(a, b, c);
301}
302
303/* 8-bit direct write (low 4K) */
304#define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
305
306/* 8-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
307#define ipw_write8(ipw, ofs, val) do { \
308 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
309 _ipw_write8(ipw, ofs, val); \
310 } while (0)
311
312/* 16-bit direct write (low 4K) */
313#define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
314
315/* 16-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
316#define ipw_write16(ipw, ofs, val) do { \
317 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
318 _ipw_write16(ipw, ofs, val); } while (0)
319
320/* 32-bit direct write (low 4K) */
321#define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
322
323/* 32-bit direct write (for low 4K of SRAM/regs), with debug wrapper */
324#define ipw_write32(ipw, ofs, val) do { \
325 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
326 _ipw_write32(ipw, ofs, val); } while (0)
327
328/* 8-bit direct read (low 4K) */
329#define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
330
331/* 8-bit direct read (low 4K), with debug wrapper */
332static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
333{
334 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32) (ofs));
335 return _ipw_read8(ipw, ofs);
336}
337
338/* alias to 8-bit direct read (low 4K of SRAM/regs), with debug wrapper */
339#define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
340
341/* 16-bit direct read (low 4K) */
342#define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
343
344/* 16-bit direct read (low 4K), with debug wrapper */
345static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
346{
347 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32) (ofs));
348 return _ipw_read16(ipw, ofs);
349}
350
351/* alias to 16-bit direct read (low 4K of SRAM/regs), with debug wrapper */
352#define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
353
354/* 32-bit direct read (low 4K) */
355#define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
356
357/* 32-bit direct read (low 4K), with debug wrapper */
358static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs)
359{
360 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32) (ofs));
361 return _ipw_read32(ipw, ofs);
362}
363
364/* alias to 32-bit direct read (low 4K of SRAM/regs), with debug wrapper */
365#define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
366
367/* multi-byte read (above 4K), with debug wrapper */
368static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
369static inline void __ipw_read_indirect(const char *f, int l,
370 struct ipw_priv *a, u32 b, u8 * c, int d)
371{
372 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", f, l, (u32) (b),
373 d);
374 _ipw_read_indirect(a, b, c, d);
375}
376
377/* alias to multi-byte read (SRAM/regs above 4K), with debug wrapper */
378#define ipw_read_indirect(a, b, c, d) __ipw_read_indirect(__FILE__, __LINE__, a, b, c, d)
379
380/* alias to multi-byte write (SRAM/regs above 4K), with debug wrapper */
381static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * data,
382 int num);
383#define ipw_write_indirect(a, b, c, d) do { \
384 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
385 _ipw_write_indirect(a, b, c, d); } while (0)
386
387/* 32-bit indirect write (above 4K) */
388static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value)
389{
390 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n", priv, reg, value);
391 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
392 _ipw_write32(priv, IPW_INDIRECT_DATA, value);
393}
394
395/* 8-bit indirect write (above 4K) */
396static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
397{
398 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
399 u32 dif_len = reg - aligned_addr;
400
401 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
402 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
403 _ipw_write8(priv, IPW_INDIRECT_DATA + dif_len, value);
404}
405
406/* 16-bit indirect write (above 4K) */
407static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value)
408{
409 u32 aligned_addr = reg & IPW_INDIRECT_ADDR_MASK; /* dword align */
410 u32 dif_len = (reg - aligned_addr) & (~0x1ul);
411
412 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
413 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
414 _ipw_write16(priv, IPW_INDIRECT_DATA + dif_len, value);
415}
416
417/* 8-bit indirect read (above 4K) */
418static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
419{
420 u32 word;
421 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
422 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
423 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
424 return (word >> ((reg & 0x3) * 8)) & 0xff;
425}
426
427/* 32-bit indirect read (above 4K) */
428static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
429{
430 u32 value;
431
432 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
433
434 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
435 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
436 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
437 return value;
438}
439
440/* General purpose, no alignment requirement, iterative (multi-byte) read, */
441/* for area above 1st 4K of SRAM/reg space */
442static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
443 int num)
444{
445 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
446 u32 dif_len = addr - aligned_addr;
447 u32 i;
448
449 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
450
451 if (num <= 0) {
452 return;
453 }
454
455 /* Read the first dword (or portion) byte by byte */
456 if (unlikely(dif_len)) {
457 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
458 /* Start reading at aligned_addr + dif_len */
459 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--)
460 *buf++ = _ipw_read8(priv, IPW_INDIRECT_DATA + i);
461 aligned_addr += 4;
462 }
463
464 /* Read all of the middle dwords as dwords, with auto-increment */
465 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
466 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
467 *(u32 *) buf = _ipw_read32(priv, IPW_AUTOINC_DATA);
468
469 /* Read the last dword (or portion) byte by byte */
470 if (unlikely(num)) {
471 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
472 for (i = 0; num > 0; i++, num--)
473 *buf++ = ipw_read8(priv, IPW_INDIRECT_DATA + i);
474 }
475}
476
477/* General purpose, no alignment requirement, iterative (multi-byte) write, */
478/* for area above 1st 4K of SRAM/reg space */
479static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
480 int num)
481{
482 u32 aligned_addr = addr & IPW_INDIRECT_ADDR_MASK; /* dword align */
483 u32 dif_len = addr - aligned_addr;
484 u32 i;
485
486 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
487
488 if (num <= 0) {
489 return;
490 }
491
492 /* Write the first dword (or portion) byte by byte */
493 if (unlikely(dif_len)) {
494 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
495 /* Start writing at aligned_addr + dif_len */
496 for (i = dif_len; ((i < 4) && (num > 0)); i++, num--, buf++)
497 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
498 aligned_addr += 4;
499 }
500
501 /* Write all of the middle dwords as dwords, with auto-increment */
502 _ipw_write32(priv, IPW_AUTOINC_ADDR, aligned_addr);
503 for (; num >= 4; buf += 4, aligned_addr += 4, num -= 4)
504 _ipw_write32(priv, IPW_AUTOINC_DATA, *(u32 *) buf);
505
506 /* Write the last dword (or portion) byte by byte */
507 if (unlikely(num)) {
508 _ipw_write32(priv, IPW_INDIRECT_ADDR, aligned_addr);
509 for (i = 0; num > 0; i++, num--, buf++)
510 _ipw_write8(priv, IPW_INDIRECT_DATA + i, *buf);
511 }
512}
513
514/* General purpose, no alignment requirement, iterative (multi-byte) write, */
515/* for 1st 4K of SRAM/regs space */
516static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
517 int num)
518{
519 memcpy_toio((priv->hw_base + addr), buf, num);
520}
521
522/* Set bit(s) in low 4K of SRAM/regs */
523static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
524{
525 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
526}
527
528/* Clear bit(s) in low 4K of SRAM/regs */
529static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
530{
531 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
532}
533
534static inline void __ipw_enable_interrupts(struct ipw_priv *priv)
535{
536 if (priv->status & STATUS_INT_ENABLED)
537 return;
538 priv->status |= STATUS_INT_ENABLED;
539 ipw_write32(priv, IPW_INTA_MASK_R, IPW_INTA_MASK_ALL);
540}
541
542static inline void __ipw_disable_interrupts(struct ipw_priv *priv)
543{
544 if (!(priv->status & STATUS_INT_ENABLED))
545 return;
546 priv->status &= ~STATUS_INT_ENABLED;
547 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
548}
549
550static inline void ipw_enable_interrupts(struct ipw_priv *priv)
551{
552 unsigned long flags;
553
554 spin_lock_irqsave(&priv->irq_lock, flags);
555 __ipw_enable_interrupts(priv);
556 spin_unlock_irqrestore(&priv->irq_lock, flags);
557}
558
559static inline void ipw_disable_interrupts(struct ipw_priv *priv)
560{
561 unsigned long flags;
562
563 spin_lock_irqsave(&priv->irq_lock, flags);
564 __ipw_disable_interrupts(priv);
565 spin_unlock_irqrestore(&priv->irq_lock, flags);
566}
567
568static char *ipw_error_desc(u32 val)
569{
570 switch (val) {
571 case IPW_FW_ERROR_OK:
572 return "ERROR_OK";
573 case IPW_FW_ERROR_FAIL:
574 return "ERROR_FAIL";
575 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
576 return "MEMORY_UNDERFLOW";
577 case IPW_FW_ERROR_MEMORY_OVERFLOW:
578 return "MEMORY_OVERFLOW";
579 case IPW_FW_ERROR_BAD_PARAM:
580 return "BAD_PARAM";
581 case IPW_FW_ERROR_BAD_CHECKSUM:
582 return "BAD_CHECKSUM";
583 case IPW_FW_ERROR_NMI_INTERRUPT:
584 return "NMI_INTERRUPT";
585 case IPW_FW_ERROR_BAD_DATABASE:
586 return "BAD_DATABASE";
587 case IPW_FW_ERROR_ALLOC_FAIL:
588 return "ALLOC_FAIL";
589 case IPW_FW_ERROR_DMA_UNDERRUN:
590 return "DMA_UNDERRUN";
591 case IPW_FW_ERROR_DMA_STATUS:
592 return "DMA_STATUS";
593 case IPW_FW_ERROR_DINO_ERROR:
594 return "DINO_ERROR";
595 case IPW_FW_ERROR_EEPROM_ERROR:
596 return "EEPROM_ERROR";
597 case IPW_FW_ERROR_SYSASSERT:
598 return "SYSASSERT";
599 case IPW_FW_ERROR_FATAL_ERROR:
600 return "FATAL_ERROR";
601 default:
602 return "UNKNOWN_ERROR";
603 }
604}
605
606static void ipw_dump_error_log(struct ipw_priv *priv,
607 struct ipw_fw_error *error)
608{
609 u32 i;
610
611 if (!error) {
612 IPW_ERROR("Error allocating and capturing error log. "
613 "Nothing to dump.\n");
614 return;
615 }
616
617 IPW_ERROR("Start IPW Error Log Dump:\n");
618 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
619 error->status, error->config);
620
621 for (i = 0; i < error->elem_len; i++)
622 IPW_ERROR("%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
623 ipw_error_desc(error->elem[i].desc),
624 error->elem[i].time,
625 error->elem[i].blink1,
626 error->elem[i].blink2,
627 error->elem[i].link1,
628 error->elem[i].link2, error->elem[i].data);
629 for (i = 0; i < error->log_len; i++)
630 IPW_ERROR("%i\t0x%08x\t%i\n",
631 error->log[i].time,
632 error->log[i].data, error->log[i].event);
633}
634
635static inline int ipw_is_init(struct ipw_priv *priv)
636{
637 return (priv->status & STATUS_INIT) ? 1 : 0;
638}
639
640static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val, u32 * len)
641{
642 u32 addr, field_info, field_len, field_count, total_len;
643
644 IPW_DEBUG_ORD("ordinal = %i\n", ord);
645
646 if (!priv || !val || !len) {
647 IPW_DEBUG_ORD("Invalid argument\n");
648 return -EINVAL;
649 }
650
651 /* verify device ordinal tables have been initialized */
652 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
653 IPW_DEBUG_ORD("Access ordinals before initialization\n");
654 return -EINVAL;
655 }
656
657 switch (IPW_ORD_TABLE_ID_MASK & ord) {
658 case IPW_ORD_TABLE_0_MASK:
659 /*
660 * TABLE 0: Direct access to a table of 32 bit values
661 *
662 * This is a very simple table with the data directly
663 * read from the table
664 */
665
666 /* remove the table id from the ordinal */
667 ord &= IPW_ORD_TABLE_VALUE_MASK;
668
669 /* boundary check */
670 if (ord > priv->table0_len) {
671 IPW_DEBUG_ORD("ordinal value (%i) longer than "
672 "max (%i)\n", ord, priv->table0_len);
673 return -EINVAL;
674 }
675
676 /* verify we have enough room to store the value */
677 if (*len < sizeof(u32)) {
678 IPW_DEBUG_ORD("ordinal buffer length too small, "
679 "need %zd\n", sizeof(u32));
680 return -EINVAL;
681 }
682
683 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
684 ord, priv->table0_addr + (ord << 2));
685
686 *len = sizeof(u32);
687 ord <<= 2;
688 *((u32 *) val) = ipw_read32(priv, priv->table0_addr + ord);
689 break;
690
691 case IPW_ORD_TABLE_1_MASK:
692 /*
693 * TABLE 1: Indirect access to a table of 32 bit values
694 *
695 * This is a fairly large table of u32 values each
696 * representing starting addr for the data (which is
697 * also a u32)
698 */
699
700 /* remove the table id from the ordinal */
701 ord &= IPW_ORD_TABLE_VALUE_MASK;
702
703 /* boundary check */
704 if (ord > priv->table1_len) {
705 IPW_DEBUG_ORD("ordinal value too long\n");
706 return -EINVAL;
707 }
708
709 /* verify we have enough room to store the value */
710 if (*len < sizeof(u32)) {
711 IPW_DEBUG_ORD("ordinal buffer length too small, "
712 "need %zd\n", sizeof(u32));
713 return -EINVAL;
714 }
715
716 *((u32 *) val) =
717 ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
718 *len = sizeof(u32);
719 break;
720
721 case IPW_ORD_TABLE_2_MASK:
722 /*
723 * TABLE 2: Indirect access to a table of variable sized values
724 *
725 * This table consists of six values, each containing
726 * - dword containing the starting offset of the data
727 * - dword containing the length in the first 16 bits
728 * and the count in the second 16 bits
729 */
730
731 /* remove the table id from the ordinal */
732 ord &= IPW_ORD_TABLE_VALUE_MASK;
733
734 /* boundary check */
735 if (ord > priv->table2_len) {
736 IPW_DEBUG_ORD("ordinal value too long\n");
737 return -EINVAL;
738 }
739
740 /* get the address of statistic */
741 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
742
743 /* get the second DW of statistics ;
744 * two 16-bit words - first is length, second is count */
745 field_info =
746 ipw_read_reg32(priv,
747 priv->table2_addr + (ord << 3) +
748 sizeof(u32));
749
750 /* get each entry length */
751 field_len = *((u16 *) & field_info);
752
753 /* get number of entries */
754 field_count = *(((u16 *) & field_info) + 1);
755
756 /* abort if not enough memory */
757 total_len = field_len * field_count;
758 if (total_len > *len) {
759 *len = total_len;
760 return -EINVAL;
761 }
762
763 *len = total_len;
764 if (!total_len)
765 return 0;
766
767 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
768 "field_info = 0x%08x\n",
769 addr, total_len, field_info);
770 ipw_read_indirect(priv, addr, val, total_len);
771 break;
772
773 default:
774 IPW_DEBUG_ORD("Invalid ordinal!\n");
775 return -EINVAL;
776
777 }
778
779 return 0;
780}
781
782static void ipw_init_ordinals(struct ipw_priv *priv)
783{
784 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
785 priv->table0_len = ipw_read32(priv, priv->table0_addr);
786
787 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
788 priv->table0_addr, priv->table0_len);
789
790 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
791 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
792
793 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
794 priv->table1_addr, priv->table1_len);
795
796 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
797 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
798 priv->table2_len &= 0x0000ffff; /* use lower 16 bits */
799
800 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
801 priv->table2_addr, priv->table2_len);
802
803}
804
805static u32 ipw_register_toggle(u32 reg)
806{
807 reg &= ~IPW_START_STANDBY;
808 if (reg & IPW_GATE_ODMA)
809 reg &= ~IPW_GATE_ODMA;
810 if (reg & IPW_GATE_IDMA)
811 reg &= ~IPW_GATE_IDMA;
812 if (reg & IPW_GATE_ADMA)
813 reg &= ~IPW_GATE_ADMA;
814 return reg;
815}
816
817/*
818 * LED behavior:
819 * - On radio ON, turn on any LEDs that require to be on during start
820 * - On initialization, start unassociated blink
821 * - On association, disable unassociated blink
822 * - On disassociation, start unassociated blink
823 * - On radio OFF, turn off any LEDs started during radio on
824 *
825 */
826#define LD_TIME_LINK_ON msecs_to_jiffies(300)
827#define LD_TIME_LINK_OFF msecs_to_jiffies(2700)
828#define LD_TIME_ACT_ON msecs_to_jiffies(250)
829
830static void ipw_led_link_on(struct ipw_priv *priv)
831{
832 unsigned long flags;
833 u32 led;
834
835 /* If configured to not use LEDs, or nic_type is 1,
836 * then we don't toggle a LINK led */
837 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
838 return;
839
840 spin_lock_irqsave(&priv->lock, flags);
841
842 if (!(priv->status & STATUS_RF_KILL_MASK) &&
843 !(priv->status & STATUS_LED_LINK_ON)) {
844 IPW_DEBUG_LED("Link LED On\n");
845 led = ipw_read_reg32(priv, IPW_EVENT_REG);
846 led |= priv->led_association_on;
847
848 led = ipw_register_toggle(led);
849
850 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
851 ipw_write_reg32(priv, IPW_EVENT_REG, led);
852
853 priv->status |= STATUS_LED_LINK_ON;
854
855 /* If we aren't associated, schedule turning the LED off */
856 if (!(priv->status & STATUS_ASSOCIATED))
857 queue_delayed_work(priv->workqueue,
858 &priv->led_link_off,
859 LD_TIME_LINK_ON);
860 }
861
862 spin_unlock_irqrestore(&priv->lock, flags);
863}
864
865static void ipw_bg_led_link_on(struct work_struct *work)
866{
867 struct ipw_priv *priv =
868 container_of(work, struct ipw_priv, led_link_on.work);
869 mutex_lock(&priv->mutex);
870 ipw_led_link_on(priv);
871 mutex_unlock(&priv->mutex);
872}
873
874static void ipw_led_link_off(struct ipw_priv *priv)
875{
876 unsigned long flags;
877 u32 led;
878
879 /* If configured not to use LEDs, or nic type is 1,
880 * then we don't toggle the LINK led. */
881 if (priv->config & CFG_NO_LED || priv->nic_type == EEPROM_NIC_TYPE_1)
882 return;
883
884 spin_lock_irqsave(&priv->lock, flags);
885
886 if (priv->status & STATUS_LED_LINK_ON) {
887 led = ipw_read_reg32(priv, IPW_EVENT_REG);
888 led &= priv->led_association_off;
889 led = ipw_register_toggle(led);
890
891 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
892 ipw_write_reg32(priv, IPW_EVENT_REG, led);
893
894 IPW_DEBUG_LED("Link LED Off\n");
895
896 priv->status &= ~STATUS_LED_LINK_ON;
897
898 /* If we aren't associated and the radio is on, schedule
899 * turning the LED on (blink while unassociated) */
900 if (!(priv->status & STATUS_RF_KILL_MASK) &&
901 !(priv->status & STATUS_ASSOCIATED))
902 queue_delayed_work(priv->workqueue, &priv->led_link_on,
903 LD_TIME_LINK_OFF);
904
905 }
906
907 spin_unlock_irqrestore(&priv->lock, flags);
908}
909
910static void ipw_bg_led_link_off(struct work_struct *work)
911{
912 struct ipw_priv *priv =
913 container_of(work, struct ipw_priv, led_link_off.work);
914 mutex_lock(&priv->mutex);
915 ipw_led_link_off(priv);
916 mutex_unlock(&priv->mutex);
917}
918
919static void __ipw_led_activity_on(struct ipw_priv *priv)
920{
921 u32 led;
922
923 if (priv->config & CFG_NO_LED)
924 return;
925
926 if (priv->status & STATUS_RF_KILL_MASK)
927 return;
928
929 if (!(priv->status & STATUS_LED_ACT_ON)) {
930 led = ipw_read_reg32(priv, IPW_EVENT_REG);
931 led |= priv->led_activity_on;
932
933 led = ipw_register_toggle(led);
934
935 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
936 ipw_write_reg32(priv, IPW_EVENT_REG, led);
937
938 IPW_DEBUG_LED("Activity LED On\n");
939
940 priv->status |= STATUS_LED_ACT_ON;
941
942 cancel_delayed_work(&priv->led_act_off);
943 queue_delayed_work(priv->workqueue, &priv->led_act_off,
944 LD_TIME_ACT_ON);
945 } else {
946 /* Reschedule LED off for full time period */
947 cancel_delayed_work(&priv->led_act_off);
948 queue_delayed_work(priv->workqueue, &priv->led_act_off,
949 LD_TIME_ACT_ON);
950 }
951}
952
953#if 0
954void ipw_led_activity_on(struct ipw_priv *priv)
955{
956 unsigned long flags;
957 spin_lock_irqsave(&priv->lock, flags);
958 __ipw_led_activity_on(priv);
959 spin_unlock_irqrestore(&priv->lock, flags);
960}
961#endif /* 0 */
962
963static void ipw_led_activity_off(struct ipw_priv *priv)
964{
965 unsigned long flags;
966 u32 led;
967
968 if (priv->config & CFG_NO_LED)
969 return;
970
971 spin_lock_irqsave(&priv->lock, flags);
972
973 if (priv->status & STATUS_LED_ACT_ON) {
974 led = ipw_read_reg32(priv, IPW_EVENT_REG);
975 led &= priv->led_activity_off;
976
977 led = ipw_register_toggle(led);
978
979 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
980 ipw_write_reg32(priv, IPW_EVENT_REG, led);
981
982 IPW_DEBUG_LED("Activity LED Off\n");
983
984 priv->status &= ~STATUS_LED_ACT_ON;
985 }
986
987 spin_unlock_irqrestore(&priv->lock, flags);
988}
989
990static void ipw_bg_led_activity_off(struct work_struct *work)
991{
992 struct ipw_priv *priv =
993 container_of(work, struct ipw_priv, led_act_off.work);
994 mutex_lock(&priv->mutex);
995 ipw_led_activity_off(priv);
996 mutex_unlock(&priv->mutex);
997}
998
999static void ipw_led_band_on(struct ipw_priv *priv)
1000{
1001 unsigned long flags;
1002 u32 led;
1003
1004 /* Only nic type 1 supports mode LEDs */
1005 if (priv->config & CFG_NO_LED ||
1006 priv->nic_type != EEPROM_NIC_TYPE_1 || !priv->assoc_network)
1007 return;
1008
1009 spin_lock_irqsave(&priv->lock, flags);
1010
1011 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1012 if (priv->assoc_network->mode == IEEE_A) {
1013 led |= priv->led_ofdm_on;
1014 led &= priv->led_association_off;
1015 IPW_DEBUG_LED("Mode LED On: 802.11a\n");
1016 } else if (priv->assoc_network->mode == IEEE_G) {
1017 led |= priv->led_ofdm_on;
1018 led |= priv->led_association_on;
1019 IPW_DEBUG_LED("Mode LED On: 802.11g\n");
1020 } else {
1021 led &= priv->led_ofdm_off;
1022 led |= priv->led_association_on;
1023 IPW_DEBUG_LED("Mode LED On: 802.11b\n");
1024 }
1025
1026 led = ipw_register_toggle(led);
1027
1028 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1029 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1030
1031 spin_unlock_irqrestore(&priv->lock, flags);
1032}
1033
1034static void ipw_led_band_off(struct ipw_priv *priv)
1035{
1036 unsigned long flags;
1037 u32 led;
1038
1039 /* Only nic type 1 supports mode LEDs */
1040 if (priv->config & CFG_NO_LED || priv->nic_type != EEPROM_NIC_TYPE_1)
1041 return;
1042
1043 spin_lock_irqsave(&priv->lock, flags);
1044
1045 led = ipw_read_reg32(priv, IPW_EVENT_REG);
1046 led &= priv->led_ofdm_off;
1047 led &= priv->led_association_off;
1048
1049 led = ipw_register_toggle(led);
1050
1051 IPW_DEBUG_LED("Reg: 0x%08X\n", led);
1052 ipw_write_reg32(priv, IPW_EVENT_REG, led);
1053
1054 spin_unlock_irqrestore(&priv->lock, flags);
1055}
1056
1057static void ipw_led_radio_on(struct ipw_priv *priv)
1058{
1059 ipw_led_link_on(priv);
1060}
1061
1062static void ipw_led_radio_off(struct ipw_priv *priv)
1063{
1064 ipw_led_activity_off(priv);
1065 ipw_led_link_off(priv);
1066}
1067
1068static void ipw_led_link_up(struct ipw_priv *priv)
1069{
1070 /* Set the Link Led on for all nic types */
1071 ipw_led_link_on(priv);
1072}
1073
1074static void ipw_led_link_down(struct ipw_priv *priv)
1075{
1076 ipw_led_activity_off(priv);
1077 ipw_led_link_off(priv);
1078
1079 if (priv->status & STATUS_RF_KILL_MASK)
1080 ipw_led_radio_off(priv);
1081}
1082
1083static void ipw_led_init(struct ipw_priv *priv)
1084{
1085 priv->nic_type = priv->eeprom[EEPROM_NIC_TYPE];
1086
1087 /* Set the default PINs for the link and activity leds */
1088 priv->led_activity_on = IPW_ACTIVITY_LED;
1089 priv->led_activity_off = ~(IPW_ACTIVITY_LED);
1090
1091 priv->led_association_on = IPW_ASSOCIATED_LED;
1092 priv->led_association_off = ~(IPW_ASSOCIATED_LED);
1093
1094 /* Set the default PINs for the OFDM leds */
1095 priv->led_ofdm_on = IPW_OFDM_LED;
1096 priv->led_ofdm_off = ~(IPW_OFDM_LED);
1097
1098 switch (priv->nic_type) {
1099 case EEPROM_NIC_TYPE_1:
1100 /* In this NIC type, the LEDs are reversed.... */
1101 priv->led_activity_on = IPW_ASSOCIATED_LED;
1102 priv->led_activity_off = ~(IPW_ASSOCIATED_LED);
1103 priv->led_association_on = IPW_ACTIVITY_LED;
1104 priv->led_association_off = ~(IPW_ACTIVITY_LED);
1105
1106 if (!(priv->config & CFG_NO_LED))
1107 ipw_led_band_on(priv);
1108
1109 /* And we don't blink link LEDs for this nic, so
1110 * just return here */
1111 return;
1112
1113 case EEPROM_NIC_TYPE_3:
1114 case EEPROM_NIC_TYPE_2:
1115 case EEPROM_NIC_TYPE_4:
1116 case EEPROM_NIC_TYPE_0:
1117 break;
1118
1119 default:
1120 IPW_DEBUG_INFO("Unknown NIC type from EEPROM: %d\n",
1121 priv->nic_type);
1122 priv->nic_type = EEPROM_NIC_TYPE_0;
1123 break;
1124 }
1125
1126 if (!(priv->config & CFG_NO_LED)) {
1127 if (priv->status & STATUS_ASSOCIATED)
1128 ipw_led_link_on(priv);
1129 else
1130 ipw_led_link_off(priv);
1131 }
1132}
1133
1134static void ipw_led_shutdown(struct ipw_priv *priv)
1135{
1136 ipw_led_activity_off(priv);
1137 ipw_led_link_off(priv);
1138 ipw_led_band_off(priv);
1139 cancel_delayed_work(&priv->led_link_on);
1140 cancel_delayed_work(&priv->led_link_off);
1141 cancel_delayed_work(&priv->led_act_off);
1142}
1143
1144/*
1145 * The following adds a new attribute to the sysfs representation
1146 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
1147 * used for controlling the debug level.
1148 *
1149 * See the level definitions in ipw for details.
1150 */
1151static ssize_t show_debug_level(struct device_driver *d, char *buf)
1152{
1153 return sprintf(buf, "0x%08X\n", ipw_debug_level);
1154}
1155
1156static ssize_t store_debug_level(struct device_driver *d, const char *buf,
1157 size_t count)
1158{
1159 char *p = (char *)buf;
1160 u32 val;
1161
1162 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1163 p++;
1164 if (p[0] == 'x' || p[0] == 'X')
1165 p++;
1166 val = simple_strtoul(p, &p, 16);
1167 } else
1168 val = simple_strtoul(p, &p, 10);
1169 if (p == buf)
1170 printk(KERN_INFO DRV_NAME
1171 ": %s is not in hex or decimal form.\n", buf);
1172 else
1173 ipw_debug_level = val;
1174
1175 return strnlen(buf, count);
1176}
1177
1178static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
1179 show_debug_level, store_debug_level);
1180
1181static inline u32 ipw_get_event_log_len(struct ipw_priv *priv)
1182{
1183 /* length = 1st dword in log */
1184 return ipw_read_reg32(priv, ipw_read32(priv, IPW_EVENT_LOG));
1185}
1186
1187static void ipw_capture_event_log(struct ipw_priv *priv,
1188 u32 log_len, struct ipw_event *log)
1189{
1190 u32 base;
1191
1192 if (log_len) {
1193 base = ipw_read32(priv, IPW_EVENT_LOG);
1194 ipw_read_indirect(priv, base + sizeof(base) + sizeof(u32),
1195 (u8 *) log, sizeof(*log) * log_len);
1196 }
1197}
1198
1199static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
1200{
1201 struct ipw_fw_error *error;
1202 u32 log_len = ipw_get_event_log_len(priv);
1203 u32 base = ipw_read32(priv, IPW_ERROR_LOG);
1204 u32 elem_len = ipw_read_reg32(priv, base);
1205
1206 error = kmalloc(sizeof(*error) +
1207 sizeof(*error->elem) * elem_len +
1208 sizeof(*error->log) * log_len, GFP_ATOMIC);
1209 if (!error) {
1210 IPW_ERROR("Memory allocation for firmware error log "
1211 "failed.\n");
1212 return NULL;
1213 }
1214 error->jiffies = jiffies;
1215 error->status = priv->status;
1216 error->config = priv->config;
1217 error->elem_len = elem_len;
1218 error->log_len = log_len;
1219 error->elem = (struct ipw_error_elem *)error->payload;
1220 error->log = (struct ipw_event *)(error->elem + elem_len);
1221
1222 ipw_capture_event_log(priv, log_len, error->log);
1223
1224 if (elem_len)
1225 ipw_read_indirect(priv, base + sizeof(base), (u8 *) error->elem,
1226 sizeof(*error->elem) * elem_len);
1227
1228 return error;
1229}
1230
1231static ssize_t show_event_log(struct device *d,
1232 struct device_attribute *attr, char *buf)
1233{
1234 struct ipw_priv *priv = dev_get_drvdata(d);
1235 u32 log_len = ipw_get_event_log_len(priv);
1236 u32 log_size;
1237 struct ipw_event *log;
1238 u32 len = 0, i;
1239
1240 /* not using min() because of its strict type checking */
1241 log_size = PAGE_SIZE / sizeof(*log) > log_len ?
1242 sizeof(*log) * log_len : PAGE_SIZE;
1243 log = kzalloc(log_size, GFP_KERNEL);
1244 if (!log) {
1245 IPW_ERROR("Unable to allocate memory for log\n");
1246 return 0;
1247 }
1248 log_len = log_size / sizeof(*log);
1249 ipw_capture_event_log(priv, log_len, log);
1250
1251 len += snprintf(buf + len, PAGE_SIZE - len, "%08X", log_len);
1252 for (i = 0; i < log_len; i++)
1253 len += snprintf(buf + len, PAGE_SIZE - len,
1254 "\n%08X%08X%08X",
1255 log[i].time, log[i].event, log[i].data);
1256 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1257 kfree(log);
1258 return len;
1259}
1260
1261static DEVICE_ATTR(event_log, S_IRUGO, show_event_log, NULL);
1262
1263static ssize_t show_error(struct device *d,
1264 struct device_attribute *attr, char *buf)
1265{
1266 struct ipw_priv *priv = dev_get_drvdata(d);
1267 u32 len = 0, i;
1268 if (!priv->error)
1269 return 0;
1270 len += snprintf(buf + len, PAGE_SIZE - len,
1271 "%08lX%08X%08X%08X",
1272 priv->error->jiffies,
1273 priv->error->status,
1274 priv->error->config, priv->error->elem_len);
1275 for (i = 0; i < priv->error->elem_len; i++)
1276 len += snprintf(buf + len, PAGE_SIZE - len,
1277 "\n%08X%08X%08X%08X%08X%08X%08X",
1278 priv->error->elem[i].time,
1279 priv->error->elem[i].desc,
1280 priv->error->elem[i].blink1,
1281 priv->error->elem[i].blink2,
1282 priv->error->elem[i].link1,
1283 priv->error->elem[i].link2,
1284 priv->error->elem[i].data);
1285
1286 len += snprintf(buf + len, PAGE_SIZE - len,
1287 "\n%08X", priv->error->log_len);
1288 for (i = 0; i < priv->error->log_len; i++)
1289 len += snprintf(buf + len, PAGE_SIZE - len,
1290 "\n%08X%08X%08X",
1291 priv->error->log[i].time,
1292 priv->error->log[i].event,
1293 priv->error->log[i].data);
1294 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1295 return len;
1296}
1297
1298static ssize_t clear_error(struct device *d,
1299 struct device_attribute *attr,
1300 const char *buf, size_t count)
1301{
1302 struct ipw_priv *priv = dev_get_drvdata(d);
1303
1304 kfree(priv->error);
1305 priv->error = NULL;
1306 return count;
1307}
1308
1309static DEVICE_ATTR(error, S_IRUGO | S_IWUSR, show_error, clear_error);
1310
1311static ssize_t show_cmd_log(struct device *d,
1312 struct device_attribute *attr, char *buf)
1313{
1314 struct ipw_priv *priv = dev_get_drvdata(d);
1315 u32 len = 0, i;
1316 if (!priv->cmdlog)
1317 return 0;
1318 for (i = (priv->cmdlog_pos + 1) % priv->cmdlog_len;
1319 (i != priv->cmdlog_pos) && (PAGE_SIZE - len);
1320 i = (i + 1) % priv->cmdlog_len) {
1321 len +=
1322 snprintf(buf + len, PAGE_SIZE - len,
1323 "\n%08lX%08X%08X%08X\n", priv->cmdlog[i].jiffies,
1324 priv->cmdlog[i].retcode, priv->cmdlog[i].cmd.cmd,
1325 priv->cmdlog[i].cmd.len);
1326 len +=
1327 snprintk_buf(buf + len, PAGE_SIZE - len,
1328 (u8 *) priv->cmdlog[i].cmd.param,
1329 priv->cmdlog[i].cmd.len);
1330 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1331 }
1332 len += snprintf(buf + len, PAGE_SIZE - len, "\n");
1333 return len;
1334}
1335
1336static DEVICE_ATTR(cmd_log, S_IRUGO, show_cmd_log, NULL);
1337
1338#ifdef CONFIG_IPW2200_PROMISCUOUS
1339static void ipw_prom_free(struct ipw_priv *priv);
1340static int ipw_prom_alloc(struct ipw_priv *priv);
1341static ssize_t store_rtap_iface(struct device *d,
1342 struct device_attribute *attr,
1343 const char *buf, size_t count)
1344{
1345 struct ipw_priv *priv = dev_get_drvdata(d);
1346 int rc = 0;
1347
1348 if (count < 1)
1349 return -EINVAL;
1350
1351 switch (buf[0]) {
1352 case '0':
1353 if (!rtap_iface)
1354 return count;
1355
1356 if (netif_running(priv->prom_net_dev)) {
1357 IPW_WARNING("Interface is up. Cannot unregister.\n");
1358 return count;
1359 }
1360
1361 ipw_prom_free(priv);
1362 rtap_iface = 0;
1363 break;
1364
1365 case '1':
1366 if (rtap_iface)
1367 return count;
1368
1369 rc = ipw_prom_alloc(priv);
1370 if (!rc)
1371 rtap_iface = 1;
1372 break;
1373
1374 default:
1375 return -EINVAL;
1376 }
1377
1378 if (rc) {
1379 IPW_ERROR("Failed to register promiscuous network "
1380 "device (error %d).\n", rc);
1381 }
1382
1383 return count;
1384}
1385
1386static ssize_t show_rtap_iface(struct device *d,
1387 struct device_attribute *attr,
1388 char *buf)
1389{
1390 struct ipw_priv *priv = dev_get_drvdata(d);
1391 if (rtap_iface)
1392 return sprintf(buf, "%s", priv->prom_net_dev->name);
1393 else {
1394 buf[0] = '-';
1395 buf[1] = '1';
1396 buf[2] = '\0';
1397 return 3;
1398 }
1399}
1400
1401static DEVICE_ATTR(rtap_iface, S_IWUSR | S_IRUSR, show_rtap_iface,
1402 store_rtap_iface);
1403
1404static ssize_t store_rtap_filter(struct device *d,
1405 struct device_attribute *attr,
1406 const char *buf, size_t count)
1407{
1408 struct ipw_priv *priv = dev_get_drvdata(d);
1409
1410 if (!priv->prom_priv) {
1411 IPW_ERROR("Attempting to set filter without "
1412 "rtap_iface enabled.\n");
1413 return -EPERM;
1414 }
1415
1416 priv->prom_priv->filter = simple_strtol(buf, NULL, 0);
1417
1418 IPW_DEBUG_INFO("Setting rtap filter to " BIT_FMT16 "\n",
1419 BIT_ARG16(priv->prom_priv->filter));
1420
1421 return count;
1422}
1423
1424static ssize_t show_rtap_filter(struct device *d,
1425 struct device_attribute *attr,
1426 char *buf)
1427{
1428 struct ipw_priv *priv = dev_get_drvdata(d);
1429 return sprintf(buf, "0x%04X",
1430 priv->prom_priv ? priv->prom_priv->filter : 0);
1431}
1432
1433static DEVICE_ATTR(rtap_filter, S_IWUSR | S_IRUSR, show_rtap_filter,
1434 store_rtap_filter);
1435#endif
1436
1437static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
1438 char *buf)
1439{
1440 struct ipw_priv *priv = dev_get_drvdata(d);
1441 return sprintf(buf, "%d\n", priv->ieee->scan_age);
1442}
1443
1444static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
1445 const char *buf, size_t count)
1446{
1447 struct ipw_priv *priv = dev_get_drvdata(d);
1448 struct net_device *dev = priv->net_dev;
1449 char buffer[] = "00000000";
1450 unsigned long len =
1451 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
1452 unsigned long val;
1453 char *p = buffer;
1454
1455 IPW_DEBUG_INFO("enter\n");
1456
1457 strncpy(buffer, buf, len);
1458 buffer[len] = 0;
1459
1460 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
1461 p++;
1462 if (p[0] == 'x' || p[0] == 'X')
1463 p++;
1464 val = simple_strtoul(p, &p, 16);
1465 } else
1466 val = simple_strtoul(p, &p, 10);
1467 if (p == buffer) {
1468 IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name);
1469 } else {
1470 priv->ieee->scan_age = val;
1471 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
1472 }
1473
1474 IPW_DEBUG_INFO("exit\n");
1475 return len;
1476}
1477
1478static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
1479
1480static ssize_t show_led(struct device *d, struct device_attribute *attr,
1481 char *buf)
1482{
1483 struct ipw_priv *priv = dev_get_drvdata(d);
1484 return sprintf(buf, "%d\n", (priv->config & CFG_NO_LED) ? 0 : 1);
1485}
1486
1487static ssize_t store_led(struct device *d, struct device_attribute *attr,
1488 const char *buf, size_t count)
1489{
1490 struct ipw_priv *priv = dev_get_drvdata(d);
1491
1492 IPW_DEBUG_INFO("enter\n");
1493
1494 if (count == 0)
1495 return 0;
1496
1497 if (*buf == 0) {
1498 IPW_DEBUG_LED("Disabling LED control.\n");
1499 priv->config |= CFG_NO_LED;
1500 ipw_led_shutdown(priv);
1501 } else {
1502 IPW_DEBUG_LED("Enabling LED control.\n");
1503 priv->config &= ~CFG_NO_LED;
1504 ipw_led_init(priv);
1505 }
1506
1507 IPW_DEBUG_INFO("exit\n");
1508 return count;
1509}
1510
1511static DEVICE_ATTR(led, S_IWUSR | S_IRUGO, show_led, store_led);
1512
1513static ssize_t show_status(struct device *d,
1514 struct device_attribute *attr, char *buf)
1515{
1516 struct ipw_priv *p = d->driver_data;
1517 return sprintf(buf, "0x%08x\n", (int)p->status);
1518}
1519
1520static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
1521
1522static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
1523 char *buf)
1524{
1525 struct ipw_priv *p = d->driver_data;
1526 return sprintf(buf, "0x%08x\n", (int)p->config);
1527}
1528
1529static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
1530
1531static ssize_t show_nic_type(struct device *d,
1532 struct device_attribute *attr, char *buf)
1533{
1534 struct ipw_priv *priv = d->driver_data;
1535 return sprintf(buf, "TYPE: %d\n", priv->nic_type);
1536}
1537
1538static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
1539
1540static ssize_t show_ucode_version(struct device *d,
1541 struct device_attribute *attr, char *buf)
1542{
1543 u32 len = sizeof(u32), tmp = 0;
1544 struct ipw_priv *p = d->driver_data;
1545
1546 if (ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
1547 return 0;
1548
1549 return sprintf(buf, "0x%08x\n", tmp);
1550}
1551
1552static DEVICE_ATTR(ucode_version, S_IWUSR | S_IRUGO, show_ucode_version, NULL);
1553
1554static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
1555 char *buf)
1556{
1557 u32 len = sizeof(u32), tmp = 0;
1558 struct ipw_priv *p = d->driver_data;
1559
1560 if (ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
1561 return 0;
1562
1563 return sprintf(buf, "0x%08x\n", tmp);
1564}
1565
1566static DEVICE_ATTR(rtc, S_IWUSR | S_IRUGO, show_rtc, NULL);
1567
1568/*
1569 * Add a device attribute to view/control the delay between eeprom
1570 * operations.
1571 */
1572static ssize_t show_eeprom_delay(struct device *d,
1573 struct device_attribute *attr, char *buf)
1574{
1575 int n = ((struct ipw_priv *)d->driver_data)->eeprom_delay;
1576 return sprintf(buf, "%i\n", n);
1577}
1578static ssize_t store_eeprom_delay(struct device *d,
1579 struct device_attribute *attr,
1580 const char *buf, size_t count)
1581{
1582 struct ipw_priv *p = d->driver_data;
1583 sscanf(buf, "%i", &p->eeprom_delay);
1584 return strnlen(buf, count);
1585}
1586
1587static DEVICE_ATTR(eeprom_delay, S_IWUSR | S_IRUGO,
1588 show_eeprom_delay, store_eeprom_delay);
1589
1590static ssize_t show_command_event_reg(struct device *d,
1591 struct device_attribute *attr, char *buf)
1592{
1593 u32 reg = 0;
1594 struct ipw_priv *p = d->driver_data;
1595
1596 reg = ipw_read_reg32(p, IPW_INTERNAL_CMD_EVENT);
1597 return sprintf(buf, "0x%08x\n", reg);
1598}
1599static ssize_t store_command_event_reg(struct device *d,
1600 struct device_attribute *attr,
1601 const char *buf, size_t count)
1602{
1603 u32 reg;
1604 struct ipw_priv *p = d->driver_data;
1605
1606 sscanf(buf, "%x", &reg);
1607 ipw_write_reg32(p, IPW_INTERNAL_CMD_EVENT, reg);
1608 return strnlen(buf, count);
1609}
1610
1611static DEVICE_ATTR(command_event_reg, S_IWUSR | S_IRUGO,
1612 show_command_event_reg, store_command_event_reg);
1613
1614static ssize_t show_mem_gpio_reg(struct device *d,
1615 struct device_attribute *attr, char *buf)
1616{
1617 u32 reg = 0;
1618 struct ipw_priv *p = d->driver_data;
1619
1620 reg = ipw_read_reg32(p, 0x301100);
1621 return sprintf(buf, "0x%08x\n", reg);
1622}
1623static ssize_t store_mem_gpio_reg(struct device *d,
1624 struct device_attribute *attr,
1625 const char *buf, size_t count)
1626{
1627 u32 reg;
1628 struct ipw_priv *p = d->driver_data;
1629
1630 sscanf(buf, "%x", &reg);
1631 ipw_write_reg32(p, 0x301100, reg);
1632 return strnlen(buf, count);
1633}
1634
1635static DEVICE_ATTR(mem_gpio_reg, S_IWUSR | S_IRUGO,
1636 show_mem_gpio_reg, store_mem_gpio_reg);
1637
1638static ssize_t show_indirect_dword(struct device *d,
1639 struct device_attribute *attr, char *buf)
1640{
1641 u32 reg = 0;
1642 struct ipw_priv *priv = d->driver_data;
1643
1644 if (priv->status & STATUS_INDIRECT_DWORD)
1645 reg = ipw_read_reg32(priv, priv->indirect_dword);
1646 else
1647 reg = 0;
1648
1649 return sprintf(buf, "0x%08x\n", reg);
1650}
1651static ssize_t store_indirect_dword(struct device *d,
1652 struct device_attribute *attr,
1653 const char *buf, size_t count)
1654{
1655 struct ipw_priv *priv = d->driver_data;
1656
1657 sscanf(buf, "%x", &priv->indirect_dword);
1658 priv->status |= STATUS_INDIRECT_DWORD;
1659 return strnlen(buf, count);
1660}
1661
1662static DEVICE_ATTR(indirect_dword, S_IWUSR | S_IRUGO,
1663 show_indirect_dword, store_indirect_dword);
1664
1665static ssize_t show_indirect_byte(struct device *d,
1666 struct device_attribute *attr, char *buf)
1667{
1668 u8 reg = 0;
1669 struct ipw_priv *priv = d->driver_data;
1670
1671 if (priv->status & STATUS_INDIRECT_BYTE)
1672 reg = ipw_read_reg8(priv, priv->indirect_byte);
1673 else
1674 reg = 0;
1675
1676 return sprintf(buf, "0x%02x\n", reg);
1677}
1678static ssize_t store_indirect_byte(struct device *d,
1679 struct device_attribute *attr,
1680 const char *buf, size_t count)
1681{
1682 struct ipw_priv *priv = d->driver_data;
1683
1684 sscanf(buf, "%x", &priv->indirect_byte);
1685 priv->status |= STATUS_INDIRECT_BYTE;
1686 return strnlen(buf, count);
1687}
1688
1689static DEVICE_ATTR(indirect_byte, S_IWUSR | S_IRUGO,
1690 show_indirect_byte, store_indirect_byte);
1691
1692static ssize_t show_direct_dword(struct device *d,
1693 struct device_attribute *attr, char *buf)
1694{
1695 u32 reg = 0;
1696 struct ipw_priv *priv = d->driver_data;
1697
1698 if (priv->status & STATUS_DIRECT_DWORD)
1699 reg = ipw_read32(priv, priv->direct_dword);
1700 else
1701 reg = 0;
1702
1703 return sprintf(buf, "0x%08x\n", reg);
1704}
1705static ssize_t store_direct_dword(struct device *d,
1706 struct device_attribute *attr,
1707 const char *buf, size_t count)
1708{
1709 struct ipw_priv *priv = d->driver_data;
1710
1711 sscanf(buf, "%x", &priv->direct_dword);
1712 priv->status |= STATUS_DIRECT_DWORD;
1713 return strnlen(buf, count);
1714}
1715
1716static DEVICE_ATTR(direct_dword, S_IWUSR | S_IRUGO,
1717 show_direct_dword, store_direct_dword);
1718
1719static int rf_kill_active(struct ipw_priv *priv)
1720{
1721 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
1722 priv->status |= STATUS_RF_KILL_HW;
1723 else
1724 priv->status &= ~STATUS_RF_KILL_HW;
1725
1726 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
1727}
1728
1729static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
1730 char *buf)
1731{
1732 /* 0 - RF kill not enabled
1733 1 - SW based RF kill active (sysfs)
1734 2 - HW based RF kill active
1735 3 - Both HW and SW based RF kill active */
1736 struct ipw_priv *priv = d->driver_data;
1737 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
1738 (rf_kill_active(priv) ? 0x2 : 0x0);
1739 return sprintf(buf, "%i\n", val);
1740}
1741
1742static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
1743{
1744 if ((disable_radio ? 1 : 0) ==
1745 ((priv->status & STATUS_RF_KILL_SW) ? 1 : 0))
1746 return 0;
1747
1748 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
1749 disable_radio ? "OFF" : "ON");
1750
1751 if (disable_radio) {
1752 priv->status |= STATUS_RF_KILL_SW;
1753
1754 if (priv->workqueue) {
1755 cancel_delayed_work(&priv->request_scan);
1756 cancel_delayed_work(&priv->request_direct_scan);
1757 cancel_delayed_work(&priv->request_passive_scan);
1758 cancel_delayed_work(&priv->scan_event);
1759 }
1760 queue_work(priv->workqueue, &priv->down);
1761 } else {
1762 priv->status &= ~STATUS_RF_KILL_SW;
1763 if (rf_kill_active(priv)) {
1764 IPW_DEBUG_RF_KILL("Cannot turn radio back on - "
1765 "disabled by HW switch\n");
1766 /* Make sure the RF_KILL check timer is running */
1767 cancel_delayed_work(&priv->rf_kill);
1768 queue_delayed_work(priv->workqueue, &priv->rf_kill,
1769 round_jiffies_relative(2 * HZ));
1770 } else
1771 queue_work(priv->workqueue, &priv->up);
1772 }
1773
1774 return 1;
1775}
1776
1777static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
1778 const char *buf, size_t count)
1779{
1780 struct ipw_priv *priv = d->driver_data;
1781
1782 ipw_radio_kill_sw(priv, buf[0] == '1');
1783
1784 return count;
1785}
1786
1787static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
1788
1789static ssize_t show_speed_scan(struct device *d, struct device_attribute *attr,
1790 char *buf)
1791{
1792 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1793 int pos = 0, len = 0;
1794 if (priv->config & CFG_SPEED_SCAN) {
1795 while (priv->speed_scan[pos] != 0)
1796 len += sprintf(&buf[len], "%d ",
1797 priv->speed_scan[pos++]);
1798 return len + sprintf(&buf[len], "\n");
1799 }
1800
1801 return sprintf(buf, "0\n");
1802}
1803
1804static ssize_t store_speed_scan(struct device *d, struct device_attribute *attr,
1805 const char *buf, size_t count)
1806{
1807 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1808 int channel, pos = 0;
1809 const char *p = buf;
1810
1811 /* list of space separated channels to scan, optionally ending with 0 */
1812 while ((channel = simple_strtol(p, NULL, 0))) {
1813 if (pos == MAX_SPEED_SCAN - 1) {
1814 priv->speed_scan[pos] = 0;
1815 break;
1816 }
1817
1818 if (ieee80211_is_valid_channel(priv->ieee, channel))
1819 priv->speed_scan[pos++] = channel;
1820 else
1821 IPW_WARNING("Skipping invalid channel request: %d\n",
1822 channel);
1823 p = strchr(p, ' ');
1824 if (!p)
1825 break;
1826 while (*p == ' ' || *p == '\t')
1827 p++;
1828 }
1829
1830 if (pos == 0)
1831 priv->config &= ~CFG_SPEED_SCAN;
1832 else {
1833 priv->speed_scan_pos = 0;
1834 priv->config |= CFG_SPEED_SCAN;
1835 }
1836
1837 return count;
1838}
1839
1840static DEVICE_ATTR(speed_scan, S_IWUSR | S_IRUGO, show_speed_scan,
1841 store_speed_scan);
1842
1843static ssize_t show_net_stats(struct device *d, struct device_attribute *attr,
1844 char *buf)
1845{
1846 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1847 return sprintf(buf, "%c\n", (priv->config & CFG_NET_STATS) ? '1' : '0');
1848}
1849
1850static ssize_t store_net_stats(struct device *d, struct device_attribute *attr,
1851 const char *buf, size_t count)
1852{
1853 struct ipw_priv *priv = (struct ipw_priv *)d->driver_data;
1854 if (buf[0] == '1')
1855 priv->config |= CFG_NET_STATS;
1856 else
1857 priv->config &= ~CFG_NET_STATS;
1858
1859 return count;
1860}
1861
1862static DEVICE_ATTR(net_stats, S_IWUSR | S_IRUGO,
1863 show_net_stats, store_net_stats);
1864
1865static ssize_t show_channels(struct device *d,
1866 struct device_attribute *attr,
1867 char *buf)
1868{
1869 struct ipw_priv *priv = dev_get_drvdata(d);
1870 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
1871 int len = 0, i;
1872
1873 len = sprintf(&buf[len],
1874		      "Displaying %d channels in 2.4GHz band "
1875 "(802.11bg):\n", geo->bg_channels);
1876
1877 for (i = 0; i < geo->bg_channels; i++) {
1878 len += sprintf(&buf[len], "%d: BSS%s%s, %s, Band %s.\n",
1879 geo->bg[i].channel,
1880 geo->bg[i].flags & IEEE80211_CH_RADAR_DETECT ?
1881 " (radar spectrum)" : "",
1882 ((geo->bg[i].flags & IEEE80211_CH_NO_IBSS) ||
1883 (geo->bg[i].flags & IEEE80211_CH_RADAR_DETECT))
1884 ? "" : ", IBSS",
1885 geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY ?
1886 "passive only" : "active/passive",
1887 geo->bg[i].flags & IEEE80211_CH_B_ONLY ?
1888 "B" : "B/G");
1889 }
1890
1891 len += sprintf(&buf[len],
1892		       "Displaying %d channels in 5.2GHz band "
1893 "(802.11a):\n", geo->a_channels);
1894 for (i = 0; i < geo->a_channels; i++) {
1895 len += sprintf(&buf[len], "%d: BSS%s%s, %s.\n",
1896 geo->a[i].channel,
1897 geo->a[i].flags & IEEE80211_CH_RADAR_DETECT ?
1898 " (radar spectrum)" : "",
1899 ((geo->a[i].flags & IEEE80211_CH_NO_IBSS) ||
1900 (geo->a[i].flags & IEEE80211_CH_RADAR_DETECT))
1901 ? "" : ", IBSS",
1902 geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY ?
1903 "passive only" : "active/passive");
1904 }
1905
1906 return len;
1907}
1908
1909static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
1910
1911static void notify_wx_assoc_event(struct ipw_priv *priv)
1912{
1913 union iwreq_data wrqu;
1914 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1915 if (priv->status & STATUS_ASSOCIATED)
1916 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
1917 else
1918 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
1919 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1920}
1921
1922static void ipw_irq_tasklet(struct ipw_priv *priv)
1923{
1924 u32 inta, inta_mask, handled = 0;
1925 unsigned long flags;
1926 int rc = 0;
1927
1928 spin_lock_irqsave(&priv->irq_lock, flags);
1929
1930 inta = ipw_read32(priv, IPW_INTA_RW);
1931 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
1932 inta &= (IPW_INTA_MASK_ALL & inta_mask);
1933
1934 /* Add any cached INTA values that need to be handled */
1935 inta |= priv->isr_inta;
1936
1937 spin_unlock_irqrestore(&priv->irq_lock, flags);
1938
1939 spin_lock_irqsave(&priv->lock, flags);
1940
1941 /* handle all the justifications for the interrupt */
1942 if (inta & IPW_INTA_BIT_RX_TRANSFER) {
1943 ipw_rx(priv);
1944 handled |= IPW_INTA_BIT_RX_TRANSFER;
1945 }
1946
1947 if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
1948 IPW_DEBUG_HC("Command completed.\n");
1949 rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
1950 priv->status &= ~STATUS_HCMD_ACTIVE;
1951 wake_up_interruptible(&priv->wait_command_queue);
1952 handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
1953 }
1954
1955 if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
1956 IPW_DEBUG_TX("TX_QUEUE_1\n");
1957 rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
1958 handled |= IPW_INTA_BIT_TX_QUEUE_1;
1959 }
1960
1961 if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
1962 IPW_DEBUG_TX("TX_QUEUE_2\n");
1963 rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
1964 handled |= IPW_INTA_BIT_TX_QUEUE_2;
1965 }
1966
1967 if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
1968 IPW_DEBUG_TX("TX_QUEUE_3\n");
1969 rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
1970 handled |= IPW_INTA_BIT_TX_QUEUE_3;
1971 }
1972
1973 if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
1974 IPW_DEBUG_TX("TX_QUEUE_4\n");
1975 rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
1976 handled |= IPW_INTA_BIT_TX_QUEUE_4;
1977 }
1978
1979 if (inta & IPW_INTA_BIT_STATUS_CHANGE) {
1980 IPW_WARNING("STATUS_CHANGE\n");
1981 handled |= IPW_INTA_BIT_STATUS_CHANGE;
1982 }
1983
1984 if (inta & IPW_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1985 IPW_WARNING("TX_PERIOD_EXPIRED\n");
1986 handled |= IPW_INTA_BIT_BEACON_PERIOD_EXPIRED;
1987 }
1988
1989 if (inta & IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1990 IPW_WARNING("HOST_CMD_DONE\n");
1991 handled |= IPW_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1992 }
1993
1994 if (inta & IPW_INTA_BIT_FW_INITIALIZATION_DONE) {
1995 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1996 handled |= IPW_INTA_BIT_FW_INITIALIZATION_DONE;
1997 }
1998
1999 if (inta & IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
2000 IPW_WARNING("PHY_OFF_DONE\n");
2001 handled |= IPW_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
2002 }
2003
2004 if (inta & IPW_INTA_BIT_RF_KILL_DONE) {
2005 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
2006 priv->status |= STATUS_RF_KILL_HW;
2007 wake_up_interruptible(&priv->wait_command_queue);
2008 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2009 cancel_delayed_work(&priv->request_scan);
2010 cancel_delayed_work(&priv->request_direct_scan);
2011 cancel_delayed_work(&priv->request_passive_scan);
2012 cancel_delayed_work(&priv->scan_event);
2013 schedule_work(&priv->link_down);
2014 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
2015 handled |= IPW_INTA_BIT_RF_KILL_DONE;
2016 }
2017
2018 if (inta & IPW_INTA_BIT_FATAL_ERROR) {
2019 IPW_WARNING("Firmware error detected. Restarting.\n");
2020 if (priv->error) {
2021 IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
2022 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
2023 struct ipw_fw_error *error =
2024 ipw_alloc_error_log(priv);
2025 ipw_dump_error_log(priv, error);
2026 kfree(error);
2027 }
2028 } else {
2029 priv->error = ipw_alloc_error_log(priv);
2030 if (priv->error)
2031 IPW_DEBUG_FW("Sysfs 'error' log captured.\n");
2032 else
2033 IPW_DEBUG_FW("Error allocating sysfs 'error' "
2034 "log.\n");
2035 if (ipw_debug_level & IPW_DL_FW_ERRORS)
2036 ipw_dump_error_log(priv, priv->error);
2037 }
2038
2039 /* XXX: If hardware encryption is for WPA/WPA2,
2040 * we have to notify the supplicant. */
2041 if (priv->ieee->sec.encrypt) {
2042 priv->status &= ~STATUS_ASSOCIATED;
2043 notify_wx_assoc_event(priv);
2044 }
2045
2046 /* Keep the restart process from trying to send host
2047 * commands by clearing the INIT status bit */
2048 priv->status &= ~STATUS_INIT;
2049
2050 /* Cancel currently queued command. */
2051 priv->status &= ~STATUS_HCMD_ACTIVE;
2052 wake_up_interruptible(&priv->wait_command_queue);
2053
2054 queue_work(priv->workqueue, &priv->adapter_restart);
2055 handled |= IPW_INTA_BIT_FATAL_ERROR;
2056 }
2057
2058 if (inta & IPW_INTA_BIT_PARITY_ERROR) {
2059 IPW_ERROR("Parity error\n");
2060 handled |= IPW_INTA_BIT_PARITY_ERROR;
2061 }
2062
2063 if (handled != inta) {
2064 IPW_ERROR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
2065 }
2066
2067 spin_unlock_irqrestore(&priv->lock, flags);
2068
2069 /* enable all interrupts */
2070 ipw_enable_interrupts(priv);
2071}
2072
2073#define IPW_CMD(x) case IPW_CMD_ ## x : return #x
2074static char *get_cmd_string(u8 cmd)
2075{
2076 switch (cmd) {
2077 IPW_CMD(HOST_COMPLETE);
2078 IPW_CMD(POWER_DOWN);
2079 IPW_CMD(SYSTEM_CONFIG);
2080 IPW_CMD(MULTICAST_ADDRESS);
2081 IPW_CMD(SSID);
2082 IPW_CMD(ADAPTER_ADDRESS);
2083 IPW_CMD(PORT_TYPE);
2084 IPW_CMD(RTS_THRESHOLD);
2085 IPW_CMD(FRAG_THRESHOLD);
2086 IPW_CMD(POWER_MODE);
2087 IPW_CMD(WEP_KEY);
2088 IPW_CMD(TGI_TX_KEY);
2089 IPW_CMD(SCAN_REQUEST);
2090 IPW_CMD(SCAN_REQUEST_EXT);
2091 IPW_CMD(ASSOCIATE);
2092 IPW_CMD(SUPPORTED_RATES);
2093 IPW_CMD(SCAN_ABORT);
2094 IPW_CMD(TX_FLUSH);
2095 IPW_CMD(QOS_PARAMETERS);
2096 IPW_CMD(DINO_CONFIG);
2097 IPW_CMD(RSN_CAPABILITIES);
2098 IPW_CMD(RX_KEY);
2099 IPW_CMD(CARD_DISABLE);
2100 IPW_CMD(SEED_NUMBER);
2101 IPW_CMD(TX_POWER);
2102 IPW_CMD(COUNTRY_INFO);
2103 IPW_CMD(AIRONET_INFO);
2104 IPW_CMD(AP_TX_POWER);
2105 IPW_CMD(CCKM_INFO);
2106 IPW_CMD(CCX_VER_INFO);
2107 IPW_CMD(SET_CALIBRATION);
2108 IPW_CMD(SENSITIVITY_CALIB);
2109 IPW_CMD(RETRY_LIMIT);
2110 IPW_CMD(IPW_PRE_POWER_DOWN);
2111 IPW_CMD(VAP_BEACON_TEMPLATE);
2112 IPW_CMD(VAP_DTIM_PERIOD);
2113 IPW_CMD(EXT_SUPPORTED_RATES);
2114 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
2115 IPW_CMD(VAP_QUIET_INTERVALS);
2116 IPW_CMD(VAP_CHANNEL_SWITCH);
2117 IPW_CMD(VAP_MANDATORY_CHANNELS);
2118 IPW_CMD(VAP_CELL_PWR_LIMIT);
2119 IPW_CMD(VAP_CF_PARAM_SET);
2120 IPW_CMD(VAP_SET_BEACONING_STATE);
2121 IPW_CMD(MEASUREMENT);
2122 IPW_CMD(POWER_CAPABILITY);
2123 IPW_CMD(SUPPORTED_CHANNELS);
2124 IPW_CMD(TPC_REPORT);
2125 IPW_CMD(WME_INFO);
2126 IPW_CMD(PRODUCTION_COMMAND);
2127 default:
2128 return "UNKNOWN";
2129 }
2130}
2131
2132#define HOST_COMPLETE_TIMEOUT HZ
2133
2134static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
2135{
2136 int rc = 0;
2137 unsigned long flags;
2138
2139 spin_lock_irqsave(&priv->lock, flags);
2140 if (priv->status & STATUS_HCMD_ACTIVE) {
2141 IPW_ERROR("Failed to send %s: Already sending a command.\n",
2142 get_cmd_string(cmd->cmd));
2143 spin_unlock_irqrestore(&priv->lock, flags);
2144 return -EAGAIN;
2145 }
2146
2147 priv->status |= STATUS_HCMD_ACTIVE;
2148
2149 if (priv->cmdlog) {
2150 priv->cmdlog[priv->cmdlog_pos].jiffies = jiffies;
2151 priv->cmdlog[priv->cmdlog_pos].cmd.cmd = cmd->cmd;
2152 priv->cmdlog[priv->cmdlog_pos].cmd.len = cmd->len;
2153 memcpy(priv->cmdlog[priv->cmdlog_pos].cmd.param, cmd->param,
2154 cmd->len);
2155 priv->cmdlog[priv->cmdlog_pos].retcode = -1;
2156 }
2157
2158 IPW_DEBUG_HC("%s command (#%d) %d bytes: 0x%08X\n",
2159 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len,
2160 priv->status);
2161
2162#ifndef DEBUG_CMD_WEP_KEY
2163 if (cmd->cmd == IPW_CMD_WEP_KEY)
2164		IPW_DEBUG_HC("WEP_KEY command masked out for security.\n");
2165 else
2166#endif
2167 printk_buf(IPW_DL_HOST_COMMAND, (u8 *) cmd->param, cmd->len);
2168
2169 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, cmd->param, cmd->len, 0);
2170 if (rc) {
2171 priv->status &= ~STATUS_HCMD_ACTIVE;
2172 IPW_ERROR("Failed to send %s: Reason %d\n",
2173 get_cmd_string(cmd->cmd), rc);
2174 spin_unlock_irqrestore(&priv->lock, flags);
2175 goto exit;
2176 }
2177 spin_unlock_irqrestore(&priv->lock, flags);
2178
2179 rc = wait_event_interruptible_timeout(priv->wait_command_queue,
2180 !(priv->
2181 status & STATUS_HCMD_ACTIVE),
2182 HOST_COMPLETE_TIMEOUT);
2183 if (rc == 0) {
2184 spin_lock_irqsave(&priv->lock, flags);
2185 if (priv->status & STATUS_HCMD_ACTIVE) {
2186 IPW_ERROR("Failed to send %s: Command timed out.\n",
2187 get_cmd_string(cmd->cmd));
2188 priv->status &= ~STATUS_HCMD_ACTIVE;
2189 spin_unlock_irqrestore(&priv->lock, flags);
2190 rc = -EIO;
2191 goto exit;
2192 }
2193 spin_unlock_irqrestore(&priv->lock, flags);
2194 } else
2195 rc = 0;
2196
2197 if (priv->status & STATUS_RF_KILL_HW) {
2198 IPW_ERROR("Failed to send %s: Aborted due to RF kill switch.\n",
2199 get_cmd_string(cmd->cmd));
2200 rc = -EIO;
2201 goto exit;
2202 }
2203
2204 exit:
2205 if (priv->cmdlog) {
2206 priv->cmdlog[priv->cmdlog_pos++].retcode = rc;
2207 priv->cmdlog_pos %= priv->cmdlog_len;
2208 }
2209 return rc;
2210}
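
/*
 * Illustrative sketch only, not part of the driver: a userspace analogue of
 * the send-and-wait pattern used by __ipw_send_cmd() above, rebuilt with
 * POSIX threads.  A "busy" flag plays the role of STATUS_HCMD_ACTIVE; the
 * completer clears it and signals, the sender waits with a timeout and treats
 * expiry as -EIO, much as the driver does.  All names here are hypothetical.
 * Build with: cc -pthread example.c
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done = PTHREAD_COND_INITIALIZER;
static int busy;

static void *firmware_side(void *arg)
{
	(void)arg;
	usleep(200 * 1000);		/* pretend the command takes 200 ms */
	pthread_mutex_lock(&lock);
	busy = 0;			/* "command completed" */
	pthread_cond_signal(&done);
	pthread_mutex_unlock(&lock);
	return NULL;
}

static int send_cmd_and_wait(void)
{
	struct timespec deadline;
	pthread_t t;
	int rc = 0;

	pthread_mutex_lock(&lock);
	if (busy) {			/* already sending a command */
		pthread_mutex_unlock(&lock);
		return -EAGAIN;
	}
	busy = 1;
	pthread_mutex_unlock(&lock);

	pthread_create(&t, NULL, firmware_side, NULL);

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;		/* stand-in for HOST_COMPLETE_TIMEOUT */

	pthread_mutex_lock(&lock);
	while (busy && rc == 0)
		rc = pthread_cond_timedwait(&done, &lock, &deadline);
	if (busy) {			/* timed out: give up on the command */
		busy = 0;
		rc = -EIO;
	} else {
		rc = 0;
	}
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return rc;
}

int main(void)
{
	printf("command returned %d\n", send_cmd_and_wait());
	return 0;
}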
2211
2212static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
2213{
2214 struct host_cmd cmd = {
2215 .cmd = command,
2216 };
2217
2218 return __ipw_send_cmd(priv, &cmd);
2219}
2220
2221static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
2222 void *data)
2223{
2224 struct host_cmd cmd = {
2225 .cmd = command,
2226 .len = len,
2227 .param = data,
2228 };
2229
2230 return __ipw_send_cmd(priv, &cmd);
2231}
2232
2233static int ipw_send_host_complete(struct ipw_priv *priv)
2234{
2235 if (!priv) {
2236 IPW_ERROR("Invalid args\n");
2237 return -1;
2238 }
2239
2240 return ipw_send_cmd_simple(priv, IPW_CMD_HOST_COMPLETE);
2241}
2242
2243static int ipw_send_system_config(struct ipw_priv *priv)
2244{
2245 return ipw_send_cmd_pdu(priv, IPW_CMD_SYSTEM_CONFIG,
2246 sizeof(priv->sys_config),
2247 &priv->sys_config);
2248}
2249
2250static int ipw_send_ssid(struct ipw_priv *priv, u8 * ssid, int len)
2251{
2252 if (!priv || !ssid) {
2253 IPW_ERROR("Invalid args\n");
2254 return -1;
2255 }
2256
2257 return ipw_send_cmd_pdu(priv, IPW_CMD_SSID, min(len, IW_ESSID_MAX_SIZE),
2258 ssid);
2259}
2260
2261static int ipw_send_adapter_address(struct ipw_priv *priv, u8 * mac)
2262{
2263 if (!priv || !mac) {
2264 IPW_ERROR("Invalid args\n");
2265 return -1;
2266 }
2267
2268 IPW_DEBUG_INFO("%s: Setting MAC to %pM\n",
2269 priv->net_dev->name, mac);
2270
2271 return ipw_send_cmd_pdu(priv, IPW_CMD_ADAPTER_ADDRESS, ETH_ALEN, mac);
2272}
2273
2274/*
2275 * NOTE: This must be executed from our workqueue, as it results in udelay
2276 * being called, which may corrupt the keyboard if executed on the default
2277 * workqueue.
2278 */
2279static void ipw_adapter_restart(void *adapter)
2280{
2281 struct ipw_priv *priv = adapter;
2282
2283 if (priv->status & STATUS_RF_KILL_MASK)
2284 return;
2285
2286 ipw_down(priv);
2287
2288 if (priv->assoc_network &&
2289 (priv->assoc_network->capability & WLAN_CAPABILITY_IBSS))
2290 ipw_remove_current_network(priv);
2291
2292 if (ipw_up(priv)) {
2293 IPW_ERROR("Failed to up device\n");
2294 return;
2295 }
2296}
2297
2298static void ipw_bg_adapter_restart(struct work_struct *work)
2299{
2300 struct ipw_priv *priv =
2301 container_of(work, struct ipw_priv, adapter_restart);
2302 mutex_lock(&priv->mutex);
2303 ipw_adapter_restart(priv);
2304 mutex_unlock(&priv->mutex);
2305}
2306
2307#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2308
2309static void ipw_scan_check(void *data)
2310{
2311 struct ipw_priv *priv = data;
2312 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
2313 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2314 "adapter after (%dms).\n",
2315 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2316 queue_work(priv->workqueue, &priv->adapter_restart);
2317 }
2318}
2319
2320static void ipw_bg_scan_check(struct work_struct *work)
2321{
2322 struct ipw_priv *priv =
2323 container_of(work, struct ipw_priv, scan_check.work);
2324 mutex_lock(&priv->mutex);
2325 ipw_scan_check(priv);
2326 mutex_unlock(&priv->mutex);
2327}
2328
2329static int ipw_send_scan_request_ext(struct ipw_priv *priv,
2330 struct ipw_scan_request_ext *request)
2331{
2332 return ipw_send_cmd_pdu(priv, IPW_CMD_SCAN_REQUEST_EXT,
2333 sizeof(*request), request);
2334}
2335
2336static int ipw_send_scan_abort(struct ipw_priv *priv)
2337{
2338 if (!priv) {
2339 IPW_ERROR("Invalid args\n");
2340 return -1;
2341 }
2342
2343 return ipw_send_cmd_simple(priv, IPW_CMD_SCAN_ABORT);
2344}
2345
2346static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
2347{
2348 struct ipw_sensitivity_calib calib = {
2349 .beacon_rssi_raw = cpu_to_le16(sens),
2350 };
2351
2352 return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
2353 &calib);
2354}
2355
2356static int ipw_send_associate(struct ipw_priv *priv,
2357 struct ipw_associate *associate)
2358{
2359 if (!priv || !associate) {
2360 IPW_ERROR("Invalid args\n");
2361 return -1;
2362 }
2363
2364 return ipw_send_cmd_pdu(priv, IPW_CMD_ASSOCIATE, sizeof(*associate),
2365 associate);
2366}
2367
2368static int ipw_send_supported_rates(struct ipw_priv *priv,
2369 struct ipw_supported_rates *rates)
2370{
2371 if (!priv || !rates) {
2372 IPW_ERROR("Invalid args\n");
2373 return -1;
2374 }
2375
2376 return ipw_send_cmd_pdu(priv, IPW_CMD_SUPPORTED_RATES, sizeof(*rates),
2377 rates);
2378}
2379
2380static int ipw_set_random_seed(struct ipw_priv *priv)
2381{
2382 u32 val;
2383
2384 if (!priv) {
2385 IPW_ERROR("Invalid args\n");
2386 return -1;
2387 }
2388
2389 get_random_bytes(&val, sizeof(val));
2390
2391 return ipw_send_cmd_pdu(priv, IPW_CMD_SEED_NUMBER, sizeof(val), &val);
2392}
2393
2394static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
2395{
2396 __le32 v = cpu_to_le32(phy_off);
2397 if (!priv) {
2398 IPW_ERROR("Invalid args\n");
2399 return -1;
2400 }
2401
2402 return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(v), &v);
2403}
2404
2405static int ipw_send_tx_power(struct ipw_priv *priv, struct ipw_tx_power *power)
2406{
2407 if (!priv || !power) {
2408 IPW_ERROR("Invalid args\n");
2409 return -1;
2410 }
2411
2412 return ipw_send_cmd_pdu(priv, IPW_CMD_TX_POWER, sizeof(*power), power);
2413}
2414
2415static int ipw_set_tx_power(struct ipw_priv *priv)
2416{
2417 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
2418 struct ipw_tx_power tx_power;
2419 s8 max_power;
2420 int i;
2421
2422 memset(&tx_power, 0, sizeof(tx_power));
2423
2424 /* configure device for 'G' band */
2425 tx_power.ieee_mode = IPW_G_MODE;
2426 tx_power.num_channels = geo->bg_channels;
2427 for (i = 0; i < geo->bg_channels; i++) {
2428 max_power = geo->bg[i].max_power;
2429 tx_power.channels_tx_power[i].channel_number =
2430 geo->bg[i].channel;
2431 tx_power.channels_tx_power[i].tx_power = max_power ?
2432 min(max_power, priv->tx_power) : priv->tx_power;
2433 }
2434 if (ipw_send_tx_power(priv, &tx_power))
2435 return -EIO;
2436
2437 /* configure device to also handle 'B' band */
2438 tx_power.ieee_mode = IPW_B_MODE;
2439 if (ipw_send_tx_power(priv, &tx_power))
2440 return -EIO;
2441
2442 /* configure device to also handle 'A' band */
2443 if (priv->ieee->abg_true) {
2444 tx_power.ieee_mode = IPW_A_MODE;
2445 tx_power.num_channels = geo->a_channels;
2446 for (i = 0; i < tx_power.num_channels; i++) {
2447 max_power = geo->a[i].max_power;
2448 tx_power.channels_tx_power[i].channel_number =
2449 geo->a[i].channel;
2450 tx_power.channels_tx_power[i].tx_power = max_power ?
2451 min(max_power, priv->tx_power) : priv->tx_power;
2452 }
2453 if (ipw_send_tx_power(priv, &tx_power))
2454 return -EIO;
2455 }
2456 return 0;
2457}
2458
2459static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
2460{
2461 struct ipw_rts_threshold rts_threshold = {
2462 .rts_threshold = cpu_to_le16(rts),
2463 };
2464
2465 if (!priv) {
2466 IPW_ERROR("Invalid args\n");
2467 return -1;
2468 }
2469
2470 return ipw_send_cmd_pdu(priv, IPW_CMD_RTS_THRESHOLD,
2471 sizeof(rts_threshold), &rts_threshold);
2472}
2473
2474static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
2475{
2476 struct ipw_frag_threshold frag_threshold = {
2477 .frag_threshold = cpu_to_le16(frag),
2478 };
2479
2480 if (!priv) {
2481 IPW_ERROR("Invalid args\n");
2482 return -1;
2483 }
2484
2485 return ipw_send_cmd_pdu(priv, IPW_CMD_FRAG_THRESHOLD,
2486 sizeof(frag_threshold), &frag_threshold);
2487}
2488
2489static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
2490{
2491 __le32 param;
2492
2493 if (!priv) {
2494 IPW_ERROR("Invalid args\n");
2495 return -1;
2496 }
2497
2498	/* If on battery, set to index 3; if on AC, set to CAM;
2499	 * otherwise use the user-supplied level. */
2500 switch (mode) {
2501 case IPW_POWER_BATTERY:
2502 param = cpu_to_le32(IPW_POWER_INDEX_3);
2503 break;
2504 case IPW_POWER_AC:
2505 param = cpu_to_le32(IPW_POWER_MODE_CAM);
2506 break;
2507 default:
2508 param = cpu_to_le32(mode);
2509 break;
2510 }
2511
2512 return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
2513 &param);
2514}
2515
2516static int ipw_send_retry_limit(struct ipw_priv *priv, u8 slimit, u8 llimit)
2517{
2518 struct ipw_retry_limit retry_limit = {
2519 .short_retry_limit = slimit,
2520 .long_retry_limit = llimit
2521 };
2522
2523 if (!priv) {
2524 IPW_ERROR("Invalid args\n");
2525 return -1;
2526 }
2527
2528 return ipw_send_cmd_pdu(priv, IPW_CMD_RETRY_LIMIT, sizeof(retry_limit),
2529 &retry_limit);
2530}
2531
2532/*
2533 * The IPW device contains a Microwire compatible EEPROM that stores
2534 * various data like the MAC address. Usually the firmware has exclusive
2535 * access to the eeprom, but during device initialization (before the
2536 * device driver has sent the HostComplete command to the firmware) the
2537 * device driver has read access to the EEPROM by way of indirect addressing
2538 * through a couple of memory mapped registers.
2539 *
2540 * The following is a simplified implementation for pulling data out of
2541 * the eeprom, along with some helper functions to find information in
2542 * the per device private data's copy of the eeprom.
2543 *
2544 * NOTE: To better understand how these functions work (i.e. what is a chip
2545 * select and why do we have to keep driving the eeprom clock?), read
2546 * just about any data sheet for a Microwire compatible EEPROM.
2547 */
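
/*
 * Illustrative sketch only, not part of the driver: a host-side model of the
 * Microwire framing used by eeprom_op() and eeprom_read_u16() below.  The
 * names fake_eeprom_word and mw_read_bits are hypothetical, and the opcode
 * value 2 is only assumed to correspond to EEPROM_CMD_READ (defined in
 * ipw2200.h).  The point is the bit ordering: a start bit, two opcode bits
 * and eight address bits are clocked in MSB first, then sixteen data bits are
 * clocked out MSB first and reassembled with "r = (r << 1) | bit".
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t fake_eeprom_word = 0xBEEF;	/* pretend EEPROM contents */

/* reassemble 16 bits MSB first, exactly as eeprom_read_u16() does */
static uint16_t mw_read_bits(void)
{
	uint16_t r = 0;
	int i;

	for (i = 15; i >= 0; i--)
		r = (uint16_t)((r << 1) | ((fake_eeprom_word >> i) & 1));
	return r;
}

int main(void)
{
	uint8_t op = 2;		/* assumed READ opcode for this sketch */
	uint8_t addr = 0x21;	/* arbitrary EEPROM word address */
	int i;

	printf("header bits: 1 %d %d", (op >> 1) & 1, op & 1);
	for (i = 7; i >= 0; i--)	/* address bits, MSB first */
		printf(" %d", (addr >> i) & 1);
	printf("\ndata read back: 0x%04x\n", mw_read_bits());
	return 0;
}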
2548
2549/* write a 32 bit value into the indirect accessor register */
2550static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2551{
2552 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
2553
2554 /* the eeprom requires some time to complete the operation */
2555 udelay(p->eeprom_delay);
2556
2557 return;
2558}
2559
2560/* perform a chip select operation */
2561static void eeprom_cs(struct ipw_priv *priv)
2562{
2563 eeprom_write_reg(priv, 0);
2564 eeprom_write_reg(priv, EEPROM_BIT_CS);
2565 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2566 eeprom_write_reg(priv, EEPROM_BIT_CS);
2567}
2568
2569/* de-assert the chip select */
2570static void eeprom_disable_cs(struct ipw_priv *priv)
2571{
2572 eeprom_write_reg(priv, EEPROM_BIT_CS);
2573 eeprom_write_reg(priv, 0);
2574 eeprom_write_reg(priv, EEPROM_BIT_SK);
2575}
2576
2577/* push a single bit down to the eeprom */
2578static inline void eeprom_write_bit(struct ipw_priv *p, u8 bit)
2579{
2580 int d = (bit ? EEPROM_BIT_DI : 0);
2581 eeprom_write_reg(p, EEPROM_BIT_CS | d);
2582 eeprom_write_reg(p, EEPROM_BIT_CS | d | EEPROM_BIT_SK);
2583}
2584
2585/* push an opcode followed by an address down to the eeprom */
2586static void eeprom_op(struct ipw_priv *priv, u8 op, u8 addr)
2587{
2588 int i;
2589
2590 eeprom_cs(priv);
2591 eeprom_write_bit(priv, 1);
2592 eeprom_write_bit(priv, op & 2);
2593 eeprom_write_bit(priv, op & 1);
2594 for (i = 7; i >= 0; i--) {
2595 eeprom_write_bit(priv, addr & (1 << i));
2596 }
2597}
2598
2599/* pull 16 bits off the eeprom, one bit at a time */
2600static u16 eeprom_read_u16(struct ipw_priv *priv, u8 addr)
2601{
2602 int i;
2603 u16 r = 0;
2604
2605 /* Send READ Opcode */
2606 eeprom_op(priv, EEPROM_CMD_READ, addr);
2607
2608 /* Send dummy bit */
2609 eeprom_write_reg(priv, EEPROM_BIT_CS);
2610
2611 /* Read the byte off the eeprom one bit at a time */
2612 for (i = 0; i < 16; i++) {
2613 u32 data = 0;
2614 eeprom_write_reg(priv, EEPROM_BIT_CS | EEPROM_BIT_SK);
2615 eeprom_write_reg(priv, EEPROM_BIT_CS);
2616 data = ipw_read_reg32(priv, FW_MEM_REG_EEPROM_ACCESS);
2617 r = (r << 1) | ((data & EEPROM_BIT_DO) ? 1 : 0);
2618 }
2619
2620 /* Send another dummy bit */
2621 eeprom_write_reg(priv, 0);
2622 eeprom_disable_cs(priv);
2623
2624 return r;
2625}
2626
2627/* helper function for pulling the mac address out of the private */
2628/* data's copy of the eeprom data */
2629static void eeprom_parse_mac(struct ipw_priv *priv, u8 * mac)
2630{
2631 memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], 6);
2632}
2633
2634/*
2635 * Either the device driver (i.e. the host) or the firmware can
2636 * load eeprom data into the designated region in SRAM. If neither
2637 * happens then the FW will shutdown with a fatal error.
2638 *
2639 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
2640 * bit in the designated region of shared SRAM needs to be non-zero.
2641 */
2642static void ipw_eeprom_init_sram(struct ipw_priv *priv)
2643{
2644 int i;
2645 __le16 *eeprom = (__le16 *) priv->eeprom;
2646
2647 IPW_DEBUG_TRACE(">>\n");
2648
2649 /* read entire contents of eeprom into private buffer */
2650 for (i = 0; i < 128; i++)
2651 eeprom[i] = cpu_to_le16(eeprom_read_u16(priv, (u8) i));
2652
2653 /*
2654 If the data looks correct, then copy it to our private
2655 copy. Otherwise let the firmware know to perform the operation
2656 on its own.
2657 */
2658 if (priv->eeprom[EEPROM_VERSION] != 0) {
2659 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
2660
2661 /* write the eeprom data to sram */
2662 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
2663 ipw_write8(priv, IPW_EEPROM_DATA + i, priv->eeprom[i]);
2664
2665 /* Do not load eeprom data on fatal error or suspend */
2666 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2667 } else {
2668		IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
2669
2670 /* Load eeprom data on fatal error or suspend */
2671 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
2672 }
2673
2674 IPW_DEBUG_TRACE("<<\n");
2675}
2676
2677static void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
2678{
2679 count >>= 2;
2680 if (!count)
2681 return;
2682 _ipw_write32(priv, IPW_AUTOINC_ADDR, start);
2683 while (count--)
2684 _ipw_write32(priv, IPW_AUTOINC_DATA, 0);
2685}
2686
2687static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2688{
2689 ipw_zero_memory(priv, IPW_SHARED_SRAM_DMA_CONTROL,
2690 CB_NUMBER_OF_ELEMENTS_SMALL *
2691 sizeof(struct command_block));
2692}
2693
2694static int ipw_fw_dma_enable(struct ipw_priv *priv)
2695{ /* start dma engine but no transfers yet */
2696
2697 IPW_DEBUG_FW(">> : \n");
2698
2699 /* Start the dma */
2700 ipw_fw_dma_reset_command_blocks(priv);
2701
2702 /* Write CB base address */
2703 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2704
2705 IPW_DEBUG_FW("<< : \n");
2706 return 0;
2707}
2708
2709static void ipw_fw_dma_abort(struct ipw_priv *priv)
2710{
2711 u32 control = 0;
2712
2713 IPW_DEBUG_FW(">> :\n");
2714
2715 /* set the Stop and Abort bit */
2716 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2717 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2718 priv->sram_desc.last_cb_index = 0;
2719
2720 IPW_DEBUG_FW("<< \n");
2721}
2722
2723static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
2724 struct command_block *cb)
2725{
2726 u32 address =
2727 IPW_SHARED_SRAM_DMA_CONTROL +
2728 (sizeof(struct command_block) * index);
2729 IPW_DEBUG_FW(">> :\n");
2730
2731 ipw_write_indirect(priv, address, (u8 *) cb,
2732 (int)sizeof(struct command_block));
2733
2734 IPW_DEBUG_FW("<< :\n");
2735 return 0;
2736
2737}
2738
2739static int ipw_fw_dma_kick(struct ipw_priv *priv)
2740{
2741 u32 control = 0;
2742 u32 index = 0;
2743
2744 IPW_DEBUG_FW(">> :\n");
2745
2746 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
2747 ipw_fw_dma_write_command_block(priv, index,
2748 &priv->sram_desc.cb_list[index]);
2749
2750 /* Enable the DMA in the CSR register */
2751 ipw_clear_bit(priv, IPW_RESET_REG,
2752 IPW_RESET_REG_MASTER_DISABLED |
2753 IPW_RESET_REG_STOP_MASTER);
2754
2755 /* Set the Start bit. */
2756 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
2757 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2758
2759 IPW_DEBUG_FW("<< :\n");
2760 return 0;
2761}
2762
2763static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2764{
2765 u32 address;
2766 u32 register_value = 0;
2767 u32 cb_fields_address = 0;
2768
2769 IPW_DEBUG_FW(">> :\n");
2770 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2771 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address);
2772
2773	/* Read the DMA Controller register */
2774 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2775 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value);
2776
2777 /* Print the CB values */
2778 cb_fields_address = address;
2779 register_value = ipw_read_reg32(priv, cb_fields_address);
2780 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value);
2781
2782 cb_fields_address += sizeof(u32);
2783 register_value = ipw_read_reg32(priv, cb_fields_address);
2784 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value);
2785
2786 cb_fields_address += sizeof(u32);
2787 register_value = ipw_read_reg32(priv, cb_fields_address);
2788 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
2789 register_value);
2790
2791 cb_fields_address += sizeof(u32);
2792 register_value = ipw_read_reg32(priv, cb_fields_address);
2793 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value);
2794
2795	IPW_DEBUG_FW("<< :\n");
2796}
2797
2798static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2799{
2800 u32 current_cb_address = 0;
2801 u32 current_cb_index = 0;
2802
2803	IPW_DEBUG_FW(">> :\n");
2804 current_cb_address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2805
2806 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2807 sizeof(struct command_block);
2808
2809 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
2810 current_cb_index, current_cb_address);
2811
2812	IPW_DEBUG_FW("<< :\n");
2813 return current_cb_index;
2814
2815}
2816
2817static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
2818 u32 src_address,
2819 u32 dest_address,
2820 u32 length,
2821 int interrupt_enabled, int is_last)
2822{
2823
2824 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
2825 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
2826 CB_DEST_SIZE_LONG;
2827 struct command_block *cb;
2828 u32 last_cb_element = 0;
2829
2830 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
2831 src_address, dest_address, length);
2832
2833 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
2834 return -1;
2835
2836 last_cb_element = priv->sram_desc.last_cb_index;
2837 cb = &priv->sram_desc.cb_list[last_cb_element];
2838 priv->sram_desc.last_cb_index++;
2839
2840 /* Calculate the new CB control word */
2841 if (interrupt_enabled)
2842 control |= CB_INT_ENABLED;
2843
2844 if (is_last)
2845 control |= CB_LAST_VALID;
2846
2847 control |= length;
2848
2849 /* Calculate the CB Element's checksum value */
2850 cb->status = control ^ src_address ^ dest_address;
2851
2852 /* Copy the Source and Destination addresses */
2853 cb->dest_addr = dest_address;
2854 cb->source_addr = src_address;
2855
2856 /* Copy the Control Word last */
2857 cb->control = control;
2858
2859 return 0;
2860}
2861
2862static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
2863 u32 src_phys, u32 dest_address, u32 length)
2864{
2865 u32 bytes_left = length;
2866 u32 src_offset = 0;
2867 u32 dest_offset = 0;
2868 int status = 0;
2869 IPW_DEBUG_FW(">> \n");
2870 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
2871 src_phys, dest_address, length);
2872 while (bytes_left > CB_MAX_LENGTH) {
2873 status = ipw_fw_dma_add_command_block(priv,
2874 src_phys + src_offset,
2875 dest_address +
2876 dest_offset,
2877 CB_MAX_LENGTH, 0, 0);
2878 if (status) {
2879 IPW_DEBUG_FW_INFO(": Failed\n");
2880 return -1;
2881 } else
2882 IPW_DEBUG_FW_INFO(": Added new cb\n");
2883
2884 src_offset += CB_MAX_LENGTH;
2885 dest_offset += CB_MAX_LENGTH;
2886 bytes_left -= CB_MAX_LENGTH;
2887 }
2888
2889 /* add the buffer tail */
2890 if (bytes_left > 0) {
2891 status =
2892 ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
2893 dest_address + dest_offset,
2894 bytes_left, 0, 0);
2895 if (status) {
2896 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
2897 return -1;
2898 } else
2899 IPW_DEBUG_FW_INFO
2900 (": Adding new cb - the buffer tail\n");
2901 }
2902
2903 IPW_DEBUG_FW("<< \n");
2904 return 0;
2905}
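
/*
 * Illustrative sketch only, not part of the driver: the chunking performed by
 * ipw_fw_dma_add_buffer() above, modelled in plain C.  CB_MAX_LEN below is a
 * stand-in for the driver's CB_MAX_LENGTH (the real value lives in
 * ipw2200.h); the point is the offset arithmetic, full-size command blocks
 * first and then one trailing block for the remainder.
 */
#include <stdint.h>
#include <stdio.h>

#define CB_MAX_LEN 0x2000	/* hypothetical chunk limit for this example */

static void split_into_blocks(uint32_t length)
{
	uint32_t offset = 0, left = length;

	while (left > CB_MAX_LEN) {
		printf("block: offset 0x%x len 0x%x\n",
		       (unsigned)offset, (unsigned)CB_MAX_LEN);
		offset += CB_MAX_LEN;
		left -= CB_MAX_LEN;
	}
	if (left > 0)		/* the buffer tail, as in the driver code */
		printf("tail:  offset 0x%x len 0x%x\n",
		       (unsigned)offset, (unsigned)left);
}

int main(void)
{
	split_into_blocks(0x5123);	/* two full blocks plus a tail */
	return 0;
}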
2906
2907static int ipw_fw_dma_wait(struct ipw_priv *priv)
2908{
2909 u32 current_index = 0, previous_index;
2910 u32 watchdog = 0;
2911
2912 IPW_DEBUG_FW(">> : \n");
2913
2914 current_index = ipw_fw_dma_command_block_index(priv);
2915 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
2916 (int)priv->sram_desc.last_cb_index);
2917
2918 while (current_index < priv->sram_desc.last_cb_index) {
2919 udelay(50);
2920 previous_index = current_index;
2921 current_index = ipw_fw_dma_command_block_index(priv);
2922
2923 if (previous_index < current_index) {
2924 watchdog = 0;
2925 continue;
2926 }
2927 if (++watchdog > 400) {
2928 IPW_DEBUG_FW_INFO("Timeout\n");
2929 ipw_fw_dma_dump_command_block(priv);
2930 ipw_fw_dma_abort(priv);
2931 return -1;
2932 }
2933 }
2934
2935 ipw_fw_dma_abort(priv);
2936
2937 /*Disable the DMA in the CSR register */
2938 ipw_set_bit(priv, IPW_RESET_REG,
2939 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2940
2941 IPW_DEBUG_FW("<< dmaWaitSync \n");
2942 return 0;
2943}
2944
2945static void ipw_remove_current_network(struct ipw_priv *priv)
2946{
2947 struct list_head *element, *safe;
2948 struct ieee80211_network *network = NULL;
2949 unsigned long flags;
2950
2951 spin_lock_irqsave(&priv->ieee->lock, flags);
2952 list_for_each_safe(element, safe, &priv->ieee->network_list) {
2953 network = list_entry(element, struct ieee80211_network, list);
2954 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
2955 list_del(element);
2956 list_add_tail(&network->list,
2957 &priv->ieee->network_free_list);
2958 }
2959 }
2960 spin_unlock_irqrestore(&priv->ieee->lock, flags);
2961}
2962
2963/**
2964 * Check that the card is still alive.
2965 * Reads a debug register from domain 0.
2966 * If the card is present, a pre-defined value should
2967 * be found there.
2968 *
2969 * @param priv
2970 * @return 1 if card is present, 0 otherwise
2971 */
2972static inline int ipw_alive(struct ipw_priv *priv)
2973{
2974 return ipw_read32(priv, 0x90) == 0xd55555d5;
2975}
2976
2977/* timeout in msec, attempted in 10-msec quanta */
2978static int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
2979 int timeout)
2980{
2981 int i = 0;
2982
2983 do {
2984 if ((ipw_read32(priv, addr) & mask) == mask)
2985 return i;
2986 mdelay(10);
2987 i += 10;
2988 } while (i < timeout);
2989
2990 return -ETIME;
2991}
2992
2993 /* These functions load the firmware and microcode for the operation of
2994  * the ipw hardware.  They assume the buffer has all the bits for the
2995  * image and that the caller handles the memory allocation and clean up.
2996 */
2997
2998static int ipw_stop_master(struct ipw_priv *priv)
2999{
3000 int rc;
3001
3002 IPW_DEBUG_TRACE(">> \n");
3003 /* stop master. typical delay - 0 */
3004 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3005
3006 /* timeout is in msec, polled in 10-msec quanta */
3007 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3008 IPW_RESET_REG_MASTER_DISABLED, 100);
3009 if (rc < 0) {
3010 IPW_ERROR("wait for stop master failed after 100ms\n");
3011 return -1;
3012 }
3013
3014 IPW_DEBUG_INFO("stop master %dms\n", rc);
3015
3016 return rc;
3017}
3018
3019static void ipw_arc_release(struct ipw_priv *priv)
3020{
3021 IPW_DEBUG_TRACE(">> \n");
3022 mdelay(5);
3023
3024 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3025
3026	/* no one knows the timing; for safety, add some delay */
3027 mdelay(5);
3028}
3029
3030struct fw_chunk {
3031 __le32 address;
3032 __le32 length;
3033};
3034
3035static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3036{
3037 int rc = 0, i, addr;
3038 u8 cr = 0;
3039 __le16 *image;
3040
3041 image = (__le16 *) data;
3042
3043 IPW_DEBUG_TRACE(">> \n");
3044
3045 rc = ipw_stop_master(priv);
3046
3047 if (rc < 0)
3048 return rc;
3049
3050 for (addr = IPW_SHARED_LOWER_BOUND;
3051 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3052 ipw_write32(priv, addr, 0);
3053 }
3054
3055 /* no ucode (yet) */
3056 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
3057 /* destroy DMA queues */
3058 /* reset sequence */
3059
3060 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_ON);
3061 ipw_arc_release(priv);
3062 ipw_write_reg32(priv, IPW_MEM_HALT_AND_RESET, IPW_BIT_HALT_RESET_OFF);
3063 mdelay(1);
3064
3065 /* reset PHY */
3066 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, IPW_BASEBAND_POWER_DOWN);
3067 mdelay(1);
3068
3069 ipw_write_reg32(priv, IPW_INTERNAL_CMD_EVENT, 0);
3070 mdelay(1);
3071
3072 /* enable ucode store */
3073 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0x0);
3074 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_CS);
3075 mdelay(1);
3076
3077 /* write ucode */
3078 /**
3079 * @bug
3080 * Do NOT set indirect address register once and then
3081 * store data to indirect data register in the loop.
3082	 * It seems very reasonable, but in this case DINO does not
3083	 * accept the ucode. It is essential to set the address each time.
3084 */
3085 /* load new ipw uCode */
3086 for (i = 0; i < len / 2; i++)
3087 ipw_write_reg16(priv, IPW_BASEBAND_CONTROL_STORE,
3088 le16_to_cpu(image[i]));
3089
3090 /* enable DINO */
3091 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3092 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, DINO_ENABLE_SYSTEM);
3093
3094	/* this is where the igx / win driver deviates from the VAP driver. */
3095
3096 /* wait for alive response */
3097 for (i = 0; i < 100; i++) {
3098 /* poll for incoming data */
3099 cr = ipw_read_reg8(priv, IPW_BASEBAND_CONTROL_STATUS);
3100 if (cr & DINO_RXFIFO_DATA)
3101 break;
3102 mdelay(1);
3103 }
3104
3105 if (cr & DINO_RXFIFO_DATA) {
3106		/* alive_command_responce size is NOT a multiple of 4 */
3107 __le32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
3108
3109 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
3110 response_buffer[i] =
3111 cpu_to_le32(ipw_read_reg32(priv,
3112 IPW_BASEBAND_RX_FIFO_READ));
3113 memcpy(&priv->dino_alive, response_buffer,
3114 sizeof(priv->dino_alive));
3115 if (priv->dino_alive.alive_command == 1
3116 && priv->dino_alive.ucode_valid == 1) {
3117 rc = 0;
3118 IPW_DEBUG_INFO
3119 ("Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
3120 "of %02d/%02d/%02d %02d:%02d\n",
3121 priv->dino_alive.software_revision,
3122 priv->dino_alive.software_revision,
3123 priv->dino_alive.device_identifier,
3124 priv->dino_alive.device_identifier,
3125 priv->dino_alive.time_stamp[0],
3126 priv->dino_alive.time_stamp[1],
3127 priv->dino_alive.time_stamp[2],
3128 priv->dino_alive.time_stamp[3],
3129 priv->dino_alive.time_stamp[4]);
3130 } else {
3131 IPW_DEBUG_INFO("Microcode is not alive\n");
3132 rc = -EINVAL;
3133 }
3134 } else {
3135 IPW_DEBUG_INFO("No alive response from DINO\n");
3136 rc = -ETIME;
3137 }
3138
3139	/* disable DINO, otherwise for some reason
3140	   the firmware has problems getting an alive response. */
3141 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3142
3143 return rc;
3144}
3145
3146static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3147{
3148 int rc = -1;
3149 int offset = 0;
3150 struct fw_chunk *chunk;
3151 dma_addr_t shared_phys;
3152 u8 *shared_virt;
3153
3154	IPW_DEBUG_TRACE(">> : \n");
3155 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
3156
3157 if (!shared_virt)
3158 return -ENOMEM;
3159
3160 memmove(shared_virt, data, len);
3161
3162 /* Start the Dma */
3163 rc = ipw_fw_dma_enable(priv);
3164
3165 if (priv->sram_desc.last_cb_index > 0) {
3166		/* the DMA is already ready; this would be a bug. */
3167 BUG();
3168 goto out;
3169 }
3170
3171 do {
3172 chunk = (struct fw_chunk *)(data + offset);
3173 offset += sizeof(struct fw_chunk);
3174 /* build DMA packet and queue up for sending */
3175 /* dma to chunk->address, the chunk->length bytes from data +
3176		 * offset */
3177 /* Dma loading */
3178 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
3179 le32_to_cpu(chunk->address),
3180 le32_to_cpu(chunk->length));
3181 if (rc) {
3182 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
3183 goto out;
3184 }
3185
3186 offset += le32_to_cpu(chunk->length);
3187 } while (offset < len);
3188
3189 /* Run the DMA and wait for the answer */
3190 rc = ipw_fw_dma_kick(priv);
3191 if (rc) {
3192 IPW_ERROR("dmaKick Failed\n");
3193 goto out;
3194 }
3195
3196 rc = ipw_fw_dma_wait(priv);
3197 if (rc) {
3198 IPW_ERROR("dmaWaitSync Failed\n");
3199 goto out;
3200 }
3201 out:
3202 pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
3203 return rc;
3204}
3205
3206/* stop nic */
3207static int ipw_stop_nic(struct ipw_priv *priv)
3208{
3209 int rc = 0;
3210
3211 /* stop */
3212 ipw_write32(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3213
3214 rc = ipw_poll_bit(priv, IPW_RESET_REG,
3215 IPW_RESET_REG_MASTER_DISABLED, 500);
3216 if (rc < 0) {
3217 IPW_ERROR("wait for reg master disabled failed after 500ms\n");
3218 return rc;
3219 }
3220
3221 ipw_set_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
3222
3223 return rc;
3224}
3225
3226static void ipw_start_nic(struct ipw_priv *priv)
3227{
3228 IPW_DEBUG_TRACE(">>\n");
3229
3230 /* prvHwStartNic release ARC */
3231 ipw_clear_bit(priv, IPW_RESET_REG,
3232 IPW_RESET_REG_MASTER_DISABLED |
3233 IPW_RESET_REG_STOP_MASTER |
3234 CBD_RESET_REG_PRINCETON_RESET);
3235
3236 /* enable power management */
3237 ipw_set_bit(priv, IPW_GP_CNTRL_RW,
3238 IPW_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
3239
3240 IPW_DEBUG_TRACE("<<\n");
3241}
3242
3243static int ipw_init_nic(struct ipw_priv *priv)
3244{
3245 int rc;
3246
3247 IPW_DEBUG_TRACE(">>\n");
3248 /* reset */
3249 /*prvHwInitNic */
3250 /* set "initialization complete" bit to move adapter to D0 state */
3251 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3252
3253 /* low-level PLL activation */
3254 ipw_write32(priv, IPW_READ_INT_REGISTER,
3255 IPW_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
3256
3257 /* wait for clock stabilization */
3258 rc = ipw_poll_bit(priv, IPW_GP_CNTRL_RW,
3259 IPW_GP_CNTRL_BIT_CLOCK_READY, 250);
3260 if (rc < 0)
3261		IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
3262
3263 /* assert SW reset */
3264 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_SW_RESET);
3265
3266 udelay(10);
3267
3268 /* set "initialization complete" bit to move adapter to D0 state */
3269 ipw_set_bit(priv, IPW_GP_CNTRL_RW, IPW_GP_CNTRL_BIT_INIT_DONE);
3270
3271	IPW_DEBUG_TRACE("<<\n");
3272 return 0;
3273}
3274
3275/* Call this function from process context, it will sleep in request_firmware.
3276 * Probe is an ok place to call this from.
3277 */
3278static int ipw_reset_nic(struct ipw_priv *priv)
3279{
3280 int rc = 0;
3281 unsigned long flags;
3282
3283 IPW_DEBUG_TRACE(">>\n");
3284
3285 rc = ipw_init_nic(priv);
3286
3287 spin_lock_irqsave(&priv->lock, flags);
3288 /* Clear the 'host command active' bit... */
3289 priv->status &= ~STATUS_HCMD_ACTIVE;
3290 wake_up_interruptible(&priv->wait_command_queue);
3291 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3292 wake_up_interruptible(&priv->wait_state);
3293 spin_unlock_irqrestore(&priv->lock, flags);
3294
3295 IPW_DEBUG_TRACE("<<\n");
3296 return rc;
3297}
3298
3299
3300struct ipw_fw {
3301 __le32 ver;
3302 __le32 boot_size;
3303 __le32 ucode_size;
3304 __le32 fw_size;
3305 u8 data[0];
3306};
3307
3308static int ipw_get_fw(struct ipw_priv *priv,
3309 const struct firmware **raw, const char *name)
3310{
3311 struct ipw_fw *fw;
3312 int rc;
3313
3314 /* ask firmware_class module to get the boot firmware off disk */
3315 rc = request_firmware(raw, name, &priv->pci_dev->dev);
3316 if (rc < 0) {
3317 IPW_ERROR("%s request_firmware failed: Reason %d\n", name, rc);
3318 return rc;
3319 }
3320
3321 if ((*raw)->size < sizeof(*fw)) {
3322 IPW_ERROR("%s is too small (%zd)\n", name, (*raw)->size);
3323 return -EINVAL;
3324 }
3325
3326 fw = (void *)(*raw)->data;
3327
3328 if ((*raw)->size < sizeof(*fw) + le32_to_cpu(fw->boot_size) +
3329 le32_to_cpu(fw->ucode_size) + le32_to_cpu(fw->fw_size)) {
3330 IPW_ERROR("%s is too small or corrupt (%zd)\n",
3331 name, (*raw)->size);
3332 return -EINVAL;
3333 }
3334
3335 IPW_DEBUG_INFO("Read firmware '%s' image v%d.%d (%zd bytes)\n",
3336 name,
3337 le32_to_cpu(fw->ver) >> 16,
3338 le32_to_cpu(fw->ver) & 0xff,
3339 (*raw)->size - sizeof(*fw));
3340 return 0;
3341}
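
/*
 * Illustrative sketch only, not part of the driver: how the three images are
 * laid out back to back after the ipw_fw header, mirroring the pointer math
 * done later in ipw_load().  struct fw_header is a host-order stand-in for
 * struct ipw_fw, and the sizes below are made up; only the offset
 * computation is the point.
 */
#include <stdint.h>
#include <stdio.h>

struct fw_header {
	uint32_t ver, boot_size, ucode_size, fw_size;
};

int main(void)
{
	struct fw_header h = { 0x00020002, 1024, 8192, 190000 };
	size_t boot_off = 0;
	size_t ucode_off = boot_off + h.boot_size;
	size_t fw_off = ucode_off + h.ucode_size;

	printf("boot  image at data[%zu], %u bytes\n",
	       boot_off, (unsigned)h.boot_size);
	printf("ucode image at data[%zu], %u bytes\n",
	       ucode_off, (unsigned)h.ucode_size);
	printf("fw    image at data[%zu], %u bytes\n",
	       fw_off, (unsigned)h.fw_size);
	return 0;
}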
3342
3343#define IPW_RX_BUF_SIZE (3000)
3344
3345static void ipw_rx_queue_reset(struct ipw_priv *priv,
3346 struct ipw_rx_queue *rxq)
3347{
3348 unsigned long flags;
3349 int i;
3350
3351 spin_lock_irqsave(&rxq->lock, flags);
3352
3353 INIT_LIST_HEAD(&rxq->rx_free);
3354 INIT_LIST_HEAD(&rxq->rx_used);
3355
3356 /* Fill the rx_used queue with _all_ of the Rx buffers */
3357 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
3358 /* In the reset function, these buffers may have been allocated
3359 * to an SKB, so we need to unmap and free potential storage */
3360 if (rxq->pool[i].skb != NULL) {
3361 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3362 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
3363 dev_kfree_skb(rxq->pool[i].skb);
3364 rxq->pool[i].skb = NULL;
3365 }
3366 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3367 }
3368
3369 /* Set us so that we have processed and used all buffers, but have
3370 * not restocked the Rx queue with fresh buffers */
3371 rxq->read = rxq->write = 0;
3372 rxq->free_count = 0;
3373 spin_unlock_irqrestore(&rxq->lock, flags);
3374}
3375
3376#ifdef CONFIG_PM
3377static int fw_loaded = 0;
3378static const struct firmware *raw = NULL;
3379
3380static void free_firmware(void)
3381{
3382 if (fw_loaded) {
3383 release_firmware(raw);
3384 raw = NULL;
3385 fw_loaded = 0;
3386 }
3387}
3388#else
3389#define free_firmware() do {} while (0)
3390#endif
3391
3392static int ipw_load(struct ipw_priv *priv)
3393{
3394#ifndef CONFIG_PM
3395 const struct firmware *raw = NULL;
3396#endif
3397 struct ipw_fw *fw;
3398 u8 *boot_img, *ucode_img, *fw_img;
3399 u8 *name = NULL;
3400 int rc = 0, retries = 3;
3401
3402 switch (priv->ieee->iw_mode) {
3403 case IW_MODE_ADHOC:
3404 name = "ipw2200-ibss.fw";
3405 break;
3406#ifdef CONFIG_IPW2200_MONITOR
3407 case IW_MODE_MONITOR:
3408 name = "ipw2200-sniffer.fw";
3409 break;
3410#endif
3411 case IW_MODE_INFRA:
3412 name = "ipw2200-bss.fw";
3413 break;
3414 }
3415
3416 if (!name) {
3417 rc = -EINVAL;
3418 goto error;
3419 }
3420
3421#ifdef CONFIG_PM
3422 if (!fw_loaded) {
3423#endif
3424 rc = ipw_get_fw(priv, &raw, name);
3425 if (rc < 0)
3426 goto error;
3427#ifdef CONFIG_PM
3428 }
3429#endif
3430
3431 fw = (void *)raw->data;
3432 boot_img = &fw->data[0];
3433 ucode_img = &fw->data[le32_to_cpu(fw->boot_size)];
3434 fw_img = &fw->data[le32_to_cpu(fw->boot_size) +
3435 le32_to_cpu(fw->ucode_size)];
3436
3437 if (rc < 0)
3438 goto error;
3439
3440 if (!priv->rxq)
3441 priv->rxq = ipw_rx_queue_alloc(priv);
3442 else
3443 ipw_rx_queue_reset(priv, priv->rxq);
3444 if (!priv->rxq) {
3445 IPW_ERROR("Unable to initialize Rx queue\n");
3446 goto error;
3447 }
3448
3449 retry:
3450 /* Ensure interrupts are disabled */
3451 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3452 priv->status &= ~STATUS_INT_ENABLED;
3453
3454 /* ack pending interrupts */
3455 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3456
3457 ipw_stop_nic(priv);
3458
3459 rc = ipw_reset_nic(priv);
3460 if (rc < 0) {
3461 IPW_ERROR("Unable to reset NIC\n");
3462 goto error;
3463 }
3464
3465 ipw_zero_memory(priv, IPW_NIC_SRAM_LOWER_BOUND,
3466 IPW_NIC_SRAM_UPPER_BOUND - IPW_NIC_SRAM_LOWER_BOUND);
3467
3468 /* DMA the initial boot firmware into the device */
3469 rc = ipw_load_firmware(priv, boot_img, le32_to_cpu(fw->boot_size));
3470 if (rc < 0) {
3471 IPW_ERROR("Unable to load boot firmware: %d\n", rc);
3472 goto error;
3473 }
3474
3475 /* kick start the device */
3476 ipw_start_nic(priv);
3477
3478 /* wait for the device to finish its initial startup sequence */
3479 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3480 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3481 if (rc < 0) {
3482 IPW_ERROR("device failed to boot initial fw image\n");
3483 goto error;
3484 }
3485 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
3486
3487 /* ack fw init done interrupt */
3488 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3489
3490 /* DMA the ucode into the device */
3491 rc = ipw_load_ucode(priv, ucode_img, le32_to_cpu(fw->ucode_size));
3492 if (rc < 0) {
3493 IPW_ERROR("Unable to load ucode: %d\n", rc);
3494 goto error;
3495 }
3496
3497 /* stop nic */
3498 ipw_stop_nic(priv);
3499
3500 /* DMA bss firmware into the device */
3501 rc = ipw_load_firmware(priv, fw_img, le32_to_cpu(fw->fw_size));
3502 if (rc < 0) {
3503 IPW_ERROR("Unable to load firmware: %d\n", rc);
3504 goto error;
3505 }
3506#ifdef CONFIG_PM
3507 fw_loaded = 1;
3508#endif
3509
3510 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
3511
3512 rc = ipw_queue_reset(priv);
3513 if (rc < 0) {
3514 IPW_ERROR("Unable to initialize queues\n");
3515 goto error;
3516 }
3517
3518 /* Ensure interrupts are disabled */
3519 ipw_write32(priv, IPW_INTA_MASK_R, ~IPW_INTA_MASK_ALL);
3520 /* ack pending interrupts */
3521 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3522
3523 /* kick start the device */
3524 ipw_start_nic(priv);
3525
3526 if (ipw_read32(priv, IPW_INTA_RW) & IPW_INTA_BIT_PARITY_ERROR) {
3527 if (retries > 0) {
3528 IPW_WARNING("Parity error. Retrying init.\n");
3529 retries--;
3530 goto retry;
3531 }
3532
3533 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
3534 rc = -EIO;
3535 goto error;
3536 }
3537
3538 /* wait for the device */
3539 rc = ipw_poll_bit(priv, IPW_INTA_RW,
3540 IPW_INTA_BIT_FW_INITIALIZATION_DONE, 500);
3541 if (rc < 0) {
3542 IPW_ERROR("device failed to start within 500ms\n");
3543 goto error;
3544 }
3545 IPW_DEBUG_INFO("device response after %dms\n", rc);
3546
3547 /* ack fw init done interrupt */
3548 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_BIT_FW_INITIALIZATION_DONE);
3549
3550 /* read eeprom data and initialize the eeprom region of sram */
3551 priv->eeprom_delay = 1;
3552 ipw_eeprom_init_sram(priv);
3553
3554 /* enable interrupts */
3555 ipw_enable_interrupts(priv);
3556
3557 /* Ensure our queue has valid packets */
3558 ipw_rx_queue_replenish(priv);
3559
3560 ipw_write32(priv, IPW_RX_READ_INDEX, priv->rxq->read);
3561
3562 /* ack pending interrupts */
3563 ipw_write32(priv, IPW_INTA_RW, IPW_INTA_MASK_ALL);
3564
3565#ifndef CONFIG_PM
3566 release_firmware(raw);
3567#endif
3568 return 0;
3569
3570 error:
3571 if (priv->rxq) {
3572 ipw_rx_queue_free(priv, priv->rxq);
3573 priv->rxq = NULL;
3574 }
3575 ipw_tx_queue_free(priv);
3576 if (raw)
3577 release_firmware(raw);
3578#ifdef CONFIG_PM
3579 fw_loaded = 0;
3580 raw = NULL;
3581#endif
3582
3583 return rc;
3584}
3585
3586/**
3587 * DMA services
3588 *
3589 * Theory of operation
3590 *
3591 * A queue is a circular buffer with 'Read' and 'Write' pointers.
3592 * Two empty entries are always kept in the buffer to protect against overflow.
3593 *
3594 * For the Tx queues, there are low mark and high mark limits. If, after
3595 * queuing a packet for Tx, the free space becomes < low mark, the Tx queue
3596 * is stopped. When packets are reclaimed (on the 'tx done' IRQ) and the free
3597 * space becomes > high mark, the Tx queue is resumed.
3598 *
3599 * The IPW operates with six queues, one receive queue in the device's
3600 * sram, one transmit queue for sending commands to the device firmware,
3601 * and four transmit queues for data.
3602 *
3603 * The four transmit queues allow for performing quality of service (qos)
3604 * transmissions as per the 802.11 protocol. Currently Linux does not
3605 * provide a mechanism to the user for utilizing prioritized queues, so
3606 * we only utilize the first data transmit queue (queue1).
3607 */
3608
3609/**
3610 * Driver allocates buffers of this size for Rx
3611 */
3612
3613/**
3614 * ipw_rx_queue_space - Return number of free slots available in queue.
3615 */
3616static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
3617{
3618 int s = q->read - q->write;
3619 if (s <= 0)
3620 s += RX_QUEUE_SIZE;
3621	/* keep some headroom so a full queue is not confused with an empty one */
3622 s -= 2;
3623 if (s < 0)
3624 s = 0;
3625 return s;
3626}
3627
3628static inline int ipw_tx_queue_space(const struct clx2_queue *q)
3629{
3630 int s = q->last_used - q->first_empty;
3631 if (s <= 0)
3632 s += q->n_bd;
3633 s -= 2; /* keep some reserve to not confuse empty and full situations */
3634 if (s < 0)
3635 s = 0;
3636 return s;
3637}
3638
3639static inline int ipw_queue_inc_wrap(int index, int n_bd)
3640{
3641 return (++index == n_bd) ? 0 : index;
3642}
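
/*
 * Illustrative sketch only, not part of the driver: the space calculation
 * used by the two *_queue_space() helpers above, in isolation.  With the
 * two-slot reserve, a completely drained queue and a queue that can no
 * longer accept entries report different values, which is the whole point
 * of keeping the reserve.  All names here are hypothetical.
 */
#include <stdio.h>

static int queue_space(int read, int write, int n_bd)
{
	int s = read - write;

	if (s <= 0)
		s += n_bd;
	s -= 2;			/* reserve, as in the driver */
	if (s < 0)
		s = 0;
	return s;
}

int main(void)
{
	int n_bd = 8;

	printf("empty queue (r=0, w=0): space %d\n", queue_space(0, 0, n_bd));
	printf("one queued  (r=0, w=1): space %d\n", queue_space(0, 1, n_bd));
	printf("full queue  (r=3, w=1): space %d\n", queue_space(3, 1, n_bd));
	return 0;
}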
3643
3644/**
3645 * Initialize common DMA queue structure
3646 *
3647 * @param q queue to init
3648 * @param count Number of BD's to allocate. Should be power of 2
3649 * @param read_register Address for 'read' register
3650 * (not offset within BAR, full address)
3651 * @param write_register Address for 'write' register
3652 * (not offset within BAR, full address)
3653 * @param base_register Address for 'base' register
3654 * (not offset within BAR, full address)
3655 * @param size Address for 'size' register
3656 * (not offset within BAR, full address)
3657 */
3658static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
3659 int count, u32 read, u32 write, u32 base, u32 size)
3660{
3661 q->n_bd = count;
3662
3663 q->low_mark = q->n_bd / 4;
3664 if (q->low_mark < 4)
3665 q->low_mark = 4;
3666
3667 q->high_mark = q->n_bd / 8;
3668 if (q->high_mark < 2)
3669 q->high_mark = 2;
3670
3671 q->first_empty = q->last_used = 0;
3672 q->reg_r = read;
3673 q->reg_w = write;
3674
3675 ipw_write32(priv, base, q->dma_addr);
3676 ipw_write32(priv, size, count);
3677 ipw_write32(priv, read, 0);
3678 ipw_write32(priv, write, 0);
3679
3680 _ipw_read32(priv, 0x90);
3681}
3682
3683static int ipw_queue_tx_init(struct ipw_priv *priv,
3684 struct clx2_tx_queue *q,
3685 int count, u32 read, u32 write, u32 base, u32 size)
3686{
3687 struct pci_dev *dev = priv->pci_dev;
3688
3689 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
3690 if (!q->txb) {
3691		IPW_ERROR("kmalloc for auxiliary BD structures failed\n");
3692 return -ENOMEM;
3693 }
3694
3695 q->bd =
3696 pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
3697 if (!q->bd) {
3698 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
3699 sizeof(q->bd[0]) * count);
3700 kfree(q->txb);
3701 q->txb = NULL;
3702 return -ENOMEM;
3703 }
3704
3705 ipw_queue_init(priv, &q->q, count, read, write, base, size);
3706 return 0;
3707}
3708
3709/**
3710 * Free one TFD, the one at index [txq->q.last_used].
3711 * Do NOT advance any indexes
3712 *
3713 * @param dev
3714 * @param txq
3715 */
3716static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
3717 struct clx2_tx_queue *txq)
3718{
3719 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
3720 struct pci_dev *dev = priv->pci_dev;
3721 int i;
3722
3723 /* classify bd */
3724 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
3725 /* nothing to cleanup after for host commands */
3726 return;
3727
3728 /* sanity check */
3729 if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
3730 IPW_ERROR("Too many chunks: %i\n",
3731 le32_to_cpu(bd->u.data.num_chunks));
3732 /** @todo issue fatal error, it is quite serious situation */
3733 return;
3734 }
3735
3736 /* unmap chunks if any */
3737 for (i = 0; i < le32_to_cpu(bd->u.data.num_chunks); i++) {
3738 pci_unmap_single(dev, le32_to_cpu(bd->u.data.chunk_ptr[i]),
3739 le16_to_cpu(bd->u.data.chunk_len[i]),
3740 PCI_DMA_TODEVICE);
3741 if (txq->txb[txq->q.last_used]) {
3742 ieee80211_txb_free(txq->txb[txq->q.last_used]);
3743 txq->txb[txq->q.last_used] = NULL;
3744 }
3745 }
3746}
3747
3748/**
3749 * Deallocate DMA queue.
3750 *
3751 * Empty queue by removing and destroying all BD's.
3752 * Free all buffers.
3753 *
3754 * @param dev
3755 * @param q
3756 */
3757static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
3758{
3759 struct clx2_queue *q = &txq->q;
3760 struct pci_dev *dev = priv->pci_dev;
3761
3762 if (q->n_bd == 0)
3763 return;
3764
3765 /* first, empty all BD's */
3766 for (; q->first_empty != q->last_used;
3767 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3768 ipw_queue_tx_free_tfd(priv, txq);
3769 }
3770
3771 /* free buffers belonging to queue itself */
3772 pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
3773 q->dma_addr);
3774 kfree(txq->txb);
3775
3776 /* 0 fill whole structure */
3777 memset(txq, 0, sizeof(*txq));
3778}
3779
3780/**
3781 * Destroy all DMA queues and structures
3782 *
3783 * @param priv
3784 */
3785static void ipw_tx_queue_free(struct ipw_priv *priv)
3786{
3787 /* Tx CMD queue */
3788 ipw_queue_tx_free(priv, &priv->txq_cmd);
3789
3790 /* Tx queues */
3791 ipw_queue_tx_free(priv, &priv->txq[0]);
3792 ipw_queue_tx_free(priv, &priv->txq[1]);
3793 ipw_queue_tx_free(priv, &priv->txq[2]);
3794 ipw_queue_tx_free(priv, &priv->txq[3]);
3795}
3796
3797static void ipw_create_bssid(struct ipw_priv *priv, u8 * bssid)
3798{
3799 /* First 3 bytes are manufacturer */
3800 bssid[0] = priv->mac_addr[0];
3801 bssid[1] = priv->mac_addr[1];
3802 bssid[2] = priv->mac_addr[2];
3803
3804 /* Last bytes are random */
3805 get_random_bytes(&bssid[3], ETH_ALEN - 3);
3806
3807 bssid[0] &= 0xfe; /* clear multicast bit */
3808 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
3809}
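
/*
 * Illustrative sketch only, not part of the driver: generating a locally
 * administered, non-multicast address the same way ipw_create_bssid() does,
 * but as a standalone program using rand() instead of get_random_bytes().
 * The first three OUI bytes are just an example value; the point is clearing
 * bit 0 (unicast) and setting bit 1 (locally administered) in the first octet.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
	unsigned char mac[6] = { 0x00, 0x0e, 0x35, 0, 0, 0 };	/* example OUI */
	int i;

	srand((unsigned)time(NULL));
	for (i = 3; i < 6; i++)		/* last three bytes are random */
		mac[i] = (unsigned char)(rand() & 0xff);

	mac[0] &= 0xfe;			/* clear multicast bit */
	mac[0] |= 0x02;			/* set locally administered bit */

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}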
3810
3811static u8 ipw_add_station(struct ipw_priv *priv, u8 * bssid)
3812{
3813 struct ipw_station_entry entry;
3814 int i;
3815
3816 for (i = 0; i < priv->num_stations; i++) {
3817 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
3818			/* Another node is active in the network */
3819 priv->missed_adhoc_beacons = 0;
3820 if (!(priv->config & CFG_STATIC_CHANNEL))
3821 /* when other nodes drop out, we drop out */
3822 priv->config &= ~CFG_ADHOC_PERSIST;
3823
3824 return i;
3825 }
3826 }
3827
3828 if (i == MAX_STATIONS)
3829 return IPW_INVALID_STATION;
3830
3831 IPW_DEBUG_SCAN("Adding AdHoc station: %pM\n", bssid);
3832
3833 entry.reserved = 0;
3834 entry.support_mode = 0;
3835 memcpy(entry.mac_addr, bssid, ETH_ALEN);
3836 memcpy(priv->stations[i], bssid, ETH_ALEN);
3837 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
3838 &entry, sizeof(entry));
3839 priv->num_stations++;
3840
3841 return i;
3842}
3843
3844static u8 ipw_find_station(struct ipw_priv *priv, u8 * bssid)
3845{
3846 int i;
3847
3848 for (i = 0; i < priv->num_stations; i++)
3849 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
3850 return i;
3851
3852 return IPW_INVALID_STATION;
3853}
3854
3855static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
3856{
3857 int err;
3858
3859 if (priv->status & STATUS_ASSOCIATING) {
3860 IPW_DEBUG_ASSOC("Disassociating while associating.\n");
3861 queue_work(priv->workqueue, &priv->disassociate);
3862 return;
3863 }
3864
3865 if (!(priv->status & STATUS_ASSOCIATED)) {
3866 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
3867 return;
3868 }
3869
3870	IPW_DEBUG_ASSOC("Disassociation attempt from %pM "
3871 "on channel %d.\n",
3872 priv->assoc_request.bssid,
3873 priv->assoc_request.channel);
3874
3875 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
3876 priv->status |= STATUS_DISASSOCIATING;
3877
3878 if (quiet)
3879 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
3880 else
3881 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
3882
3883 err = ipw_send_associate(priv, &priv->assoc_request);
3884 if (err) {
3885 IPW_DEBUG_HC("Attempt to send [dis]associate command "
3886 "failed.\n");
3887 return;
3888 }
3889
3890}
3891
3892static int ipw_disassociate(void *data)
3893{
3894 struct ipw_priv *priv = data;
3895 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
3896 return 0;
3897 ipw_send_disassociate(data, 0);
3898 return 1;
3899}
3900
3901static void ipw_bg_disassociate(struct work_struct *work)
3902{
3903 struct ipw_priv *priv =
3904 container_of(work, struct ipw_priv, disassociate);
3905 mutex_lock(&priv->mutex);
3906 ipw_disassociate(priv);
3907 mutex_unlock(&priv->mutex);
3908}
3909
3910static void ipw_system_config(struct work_struct *work)
3911{
3912 struct ipw_priv *priv =
3913 container_of(work, struct ipw_priv, system_config);
3914
3915#ifdef CONFIG_IPW2200_PROMISCUOUS
3916 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
3917 priv->sys_config.accept_all_data_frames = 1;
3918 priv->sys_config.accept_non_directed_frames = 1;
3919 priv->sys_config.accept_all_mgmt_bcpr = 1;
3920 priv->sys_config.accept_all_mgmt_frames = 1;
3921 }
3922#endif
3923
3924 ipw_send_system_config(priv);
3925}
3926
3927struct ipw_status_code {
3928 u16 status;
3929 const char *reason;
3930};
3931
3932static const struct ipw_status_code ipw_status_codes[] = {
3933 {0x00, "Successful"},
3934 {0x01, "Unspecified failure"},
3935 {0x0A, "Cannot support all requested capabilities in the "
3936 "Capability information field"},
3937 {0x0B, "Reassociation denied due to inability to confirm that "
3938 "association exists"},
3939 {0x0C, "Association denied due to reason outside the scope of this "
3940 "standard"},
3941 {0x0D,
3942 "Responding station does not support the specified authentication "
3943 "algorithm"},
3944 {0x0E,
3945 "Received an Authentication frame with authentication sequence "
3946 "transaction sequence number out of expected sequence"},
3947 {0x0F, "Authentication rejected because of challenge failure"},
3948 {0x10, "Authentication rejected due to timeout waiting for next "
3949 "frame in sequence"},
3950 {0x11, "Association denied because AP is unable to handle additional "
3951 "associated stations"},
3952 {0x12,
3953 "Association denied due to requesting station not supporting all "
3954 "of the datarates in the BSSBasicServiceSet Parameter"},
3955 {0x13,
3956 "Association denied due to requesting station not supporting "
3957 "short preamble operation"},
3958 {0x14,
3959 "Association denied due to requesting station not supporting "
3960 "PBCC encoding"},
3961 {0x15,
3962 "Association denied due to requesting station not supporting "
3963 "channel agility"},
3964 {0x19,
3965 "Association denied due to requesting station not supporting "
3966 "short slot operation"},
3967 {0x1A,
3968 "Association denied due to requesting station not supporting "
3969 "DSSS-OFDM operation"},
3970 {0x28, "Invalid Information Element"},
3971 {0x29, "Group Cipher is not valid"},
3972 {0x2A, "Pairwise Cipher is not valid"},
3973 {0x2B, "AKMP is not valid"},
3974 {0x2C, "Unsupported RSN IE version"},
3975 {0x2D, "Invalid RSN IE Capabilities"},
3976 {0x2E, "Cipher suite is rejected per security policy"},
3977};
3978
3979static const char *ipw_get_status_code(u16 status)
3980{
3981 int i;
3982 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
3983 if (ipw_status_codes[i].status == (status & 0xff))
3984 return ipw_status_codes[i].reason;
3985 return "Unknown status value.";
3986}
3987
3988static inline void average_init(struct average *avg)
3989{
3990 memset(avg, 0, sizeof(*avg));
3991}
3992
3993#define DEPTH_RSSI 8
3994#define DEPTH_NOISE 16
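/* For reference: exponential_average() below is an integer exponentially
 * weighted moving average; each new sample is folded in with weight
 * 1/depth.  E.g. with DEPTH_RSSI == 8, prev_avg == -60 and a new reading
 * of -52, the result is (7 * -60 + -52) / 8 == -59 (integer division). */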
3995static s16 exponential_average(s16 prev_avg, s16 val, u8 depth)
3996{
3997 return ((depth-1)*prev_avg + val)/depth;
3998}
3999
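/* average_add()/average_value() keep a sliding-window mean over the last
 * AVG_ENTRIES samples using a circular buffer plus a running sum; avg->init
 * is set once the buffer has wrapped, after which the divisor is the full
 * window size rather than the number of samples collected so far. */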
4000static void average_add(struct average *avg, s16 val)
4001{
4002 avg->sum -= avg->entries[avg->pos];
4003 avg->sum += val;
4004 avg->entries[avg->pos++] = val;
4005 if (unlikely(avg->pos == AVG_ENTRIES)) {
4006 avg->init = 1;
4007 avg->pos = 0;
4008 }
4009}
4010
4011static s16 average_value(struct average *avg)
4012{
4013 if (!unlikely(avg->init)) {
4014 if (avg->pos)
4015 return avg->sum / avg->pos;
4016 return 0;
4017 }
4018
4019 return avg->sum / AVG_ENTRIES;
4020}
4021
4022static void ipw_reset_stats(struct ipw_priv *priv)
4023{
4024 u32 len = sizeof(u32);
4025
4026 priv->quality = 0;
4027
4028 average_init(&priv->average_missed_beacons);
4029 priv->exp_avg_rssi = -60;
4030 priv->exp_avg_noise = -85 + 0x100;
4031
4032 priv->last_rate = 0;
4033 priv->last_missed_beacons = 0;
4034 priv->last_rx_packets = 0;
4035 priv->last_tx_packets = 0;
4036 priv->last_tx_failures = 0;
4037
4038 /* Firmware managed, reset only when NIC is restarted, so we have to
4039 * normalize on the current value */
4040 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
4041 &priv->last_rx_err, &len);
4042 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
4043 &priv->last_tx_failures, &len);
4044
4045 /* Driver managed, reset with each association */
4046 priv->missed_adhoc_beacons = 0;
4047 priv->missed_beacons = 0;
4048 priv->tx_packets = 0;
4049 priv->rx_packets = 0;
4050
4051}
4052
4053static u32 ipw_get_max_rate(struct ipw_priv *priv)
4054{
4055 u32 i = 0x80000000;
4056 u32 mask = priv->rates_mask;
4057 /* If currently associated in B mode, restrict the maximum
4058 * rate match to B rates */
4059 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
4060 mask &= IEEE80211_CCK_RATES_MASK;
4061
4062 /* TODO: Verify that the rate is supported by the current rates
4063 * list. */
4064
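	/* Walk down from the MSB to find the highest rate bit set in the
	 * mask; the switch below maps that bit to a rate in bits/sec. */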
4065 while (i && !(mask & i))
4066 i >>= 1;
4067 switch (i) {
4068 case IEEE80211_CCK_RATE_1MB_MASK:
4069 return 1000000;
4070 case IEEE80211_CCK_RATE_2MB_MASK:
4071 return 2000000;
4072 case IEEE80211_CCK_RATE_5MB_MASK:
4073 return 5500000;
4074 case IEEE80211_OFDM_RATE_6MB_MASK:
4075 return 6000000;
4076 case IEEE80211_OFDM_RATE_9MB_MASK:
4077 return 9000000;
4078 case IEEE80211_CCK_RATE_11MB_MASK:
4079 return 11000000;
4080 case IEEE80211_OFDM_RATE_12MB_MASK:
4081 return 12000000;
4082 case IEEE80211_OFDM_RATE_18MB_MASK:
4083 return 18000000;
4084 case IEEE80211_OFDM_RATE_24MB_MASK:
4085 return 24000000;
4086 case IEEE80211_OFDM_RATE_36MB_MASK:
4087 return 36000000;
4088 case IEEE80211_OFDM_RATE_48MB_MASK:
4089 return 48000000;
4090 case IEEE80211_OFDM_RATE_54MB_MASK:
4091 return 54000000;
4092 }
4093
4094 if (priv->ieee->mode == IEEE_B)
4095 return 11000000;
4096 else
4097 return 54000000;
4098}
4099
4100static u32 ipw_get_current_rate(struct ipw_priv *priv)
4101{
4102 u32 rate, len = sizeof(rate);
4103 int err;
4104
4105 if (!(priv->status & STATUS_ASSOCIATED))
4106 return 0;
4107
4108 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
4109 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
4110 &len);
4111 if (err) {
4112 IPW_DEBUG_INFO("failed querying ordinals.\n");
4113 return 0;
4114 }
4115 } else
4116 return ipw_get_max_rate(priv);
4117
4118 switch (rate) {
4119 case IPW_TX_RATE_1MB:
4120 return 1000000;
4121 case IPW_TX_RATE_2MB:
4122 return 2000000;
4123 case IPW_TX_RATE_5MB:
4124 return 5500000;
4125 case IPW_TX_RATE_6MB:
4126 return 6000000;
4127 case IPW_TX_RATE_9MB:
4128 return 9000000;
4129 case IPW_TX_RATE_11MB:
4130 return 11000000;
4131 case IPW_TX_RATE_12MB:
4132 return 12000000;
4133 case IPW_TX_RATE_18MB:
4134 return 18000000;
4135 case IPW_TX_RATE_24MB:
4136 return 24000000;
4137 case IPW_TX_RATE_36MB:
4138 return 36000000;
4139 case IPW_TX_RATE_48MB:
4140 return 48000000;
4141 case IPW_TX_RATE_54MB:
4142 return 54000000;
4143 }
4144
4145 return 0;
4146}
4147
4148#define IPW_STATS_INTERVAL (2 * HZ)
4149static void ipw_gather_stats(struct ipw_priv *priv)
4150{
4151 u32 rx_err, rx_err_delta, rx_packets_delta;
4152 u32 tx_failures, tx_failures_delta, tx_packets_delta;
4153 u32 missed_beacons_percent, missed_beacons_delta;
4154 u32 quality = 0;
4155 u32 len = sizeof(u32);
4156 s16 rssi;
4157 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
4158 rate_quality;
4159 u32 max_rate;
4160
4161 if (!(priv->status & STATUS_ASSOCIATED)) {
4162 priv->quality = 0;
4163 return;
4164 }
4165
4166 /* Update the statistics */
4167 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
4168 &priv->missed_beacons, &len);
4169 missed_beacons_delta = priv->missed_beacons - priv->last_missed_beacons;
4170 priv->last_missed_beacons = priv->missed_beacons;
4171 if (priv->assoc_request.beacon_interval) {
4172 missed_beacons_percent = missed_beacons_delta *
4173 (HZ * le16_to_cpu(priv->assoc_request.beacon_interval)) /
4174 (IPW_STATS_INTERVAL * 10);
4175 } else {
4176 missed_beacons_percent = 0;
4177 }
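	/* Rough worked example (assuming a typical ~100 TU, i.e. ~102 ms,
	 * beacon interval): about 20 beacons are expected per 2 second stats
	 * interval, so each missed beacon adds roughly 5% here. */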
4178 average_add(&priv->average_missed_beacons, missed_beacons_percent);
4179
4180 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
4181 rx_err_delta = rx_err - priv->last_rx_err;
4182 priv->last_rx_err = rx_err;
4183
4184 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
4185 tx_failures_delta = tx_failures - priv->last_tx_failures;
4186 priv->last_tx_failures = tx_failures;
4187
4188 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
4189 priv->last_rx_packets = priv->rx_packets;
4190
4191 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
4192 priv->last_tx_packets = priv->tx_packets;
4193
4194 /* Calculate quality based on the following:
4195 *
4196 * Missed beacon: 100% = 0, 0% = 70% missed
4197 * Rate: 60% = 1Mbs, 100% = Max
4198 * Rx and Tx errors represent a straight % of total Rx/Tx
4199 * RSSI: 100% = > -50, 0% = < -80
4200 * Rx errors: 100% = 0, 0% = 50% missed
4201 *
4202 * The lowest computed quality is used.
4203 *
4204 */
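	/* Worked example of the beacon term below: with 20% of beacons
	 * missed, beacon_quality starts at 80; values at or below the 5%
	 * threshold map to 0 and the rest is rescaled to 0..100, giving
	 * (80 - 5) * 100 / 95 ~= 78%. */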
4205#define BEACON_THRESHOLD 5
4206 beacon_quality = 100 - missed_beacons_percent;
4207 if (beacon_quality < BEACON_THRESHOLD)
4208 beacon_quality = 0;
4209 else
4210 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
4211 (100 - BEACON_THRESHOLD);
4212 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
4213 beacon_quality, missed_beacons_percent);
4214
4215 priv->last_rate = ipw_get_current_rate(priv);
4216 max_rate = ipw_get_max_rate(priv);
4217 rate_quality = priv->last_rate * 40 / max_rate + 60;
4218 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
4219 rate_quality, priv->last_rate / 1000000);
4220
4221 if (rx_packets_delta > 100 && rx_packets_delta + rx_err_delta)
4222 rx_quality = 100 - (rx_err_delta * 100) /
4223 (rx_packets_delta + rx_err_delta);
4224 else
4225 rx_quality = 100;
4226 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
4227 rx_quality, rx_err_delta, rx_packets_delta);
4228
4229 if (tx_packets_delta > 100 && tx_packets_delta + tx_failures_delta)
4230 tx_quality = 100 - (tx_failures_delta * 100) /
4231 (tx_packets_delta + tx_failures_delta);
4232 else
4233 tx_quality = 100;
4234 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
4235 tx_quality, tx_failures_delta, tx_packets_delta);
4236
4237 rssi = priv->exp_avg_rssi;
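	/* The expression below is integer arithmetic for roughly
	 * 100 - 15*f - 62*f^2, where f = (perfect_rssi - rssi) /
	 * (perfect_rssi - worst_rssi): 100% at perfect_rssi, falling
	 * off quadratically toward worst_rssi, then clamped to 0..100. */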
4238 signal_quality =
4239 (100 *
4240 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4241 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) -
4242 (priv->ieee->perfect_rssi - rssi) *
4243 (15 * (priv->ieee->perfect_rssi - priv->ieee->worst_rssi) +
4244 62 * (priv->ieee->perfect_rssi - rssi))) /
4245 ((priv->ieee->perfect_rssi - priv->ieee->worst_rssi) *
4246 (priv->ieee->perfect_rssi - priv->ieee->worst_rssi));
4247 if (signal_quality > 100)
4248 signal_quality = 100;
4249 else if (signal_quality < 1)
4250 signal_quality = 0;
4251
4252 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
4253 signal_quality, rssi);
4254
4255 quality = min(beacon_quality,
4256 min(rate_quality,
4257 min(tx_quality, min(rx_quality, signal_quality))));
4258 if (quality == beacon_quality)
4259 IPW_DEBUG_STATS("Quality (%d%%): Clamped to missed beacons.\n",
4260 quality);
4261 if (quality == rate_quality)
4262 IPW_DEBUG_STATS("Quality (%d%%): Clamped to rate quality.\n",
4263 quality);
4264 if (quality == tx_quality)
4265 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Tx quality.\n",
4266 quality);
4267 if (quality == rx_quality)
4268 IPW_DEBUG_STATS("Quality (%d%%): Clamped to Rx quality.\n",
4269 quality);
4270 if (quality == signal_quality)
4271 IPW_DEBUG_STATS("Quality (%d%%): Clamped to signal quality.\n",
4272 quality);
4273
4274 priv->quality = quality;
4275
4276 queue_delayed_work(priv->workqueue, &priv->gather_stats,
4277 IPW_STATS_INTERVAL);
4278}
4279
4280static void ipw_bg_gather_stats(struct work_struct *work)
4281{
4282 struct ipw_priv *priv =
4283 container_of(work, struct ipw_priv, gather_stats.work);
4284 mutex_lock(&priv->mutex);
4285 ipw_gather_stats(priv);
4286 mutex_unlock(&priv->mutex);
4287}
4288
4289/* Missed beacon behavior:
4290 * 1st missed -> roaming_threshold, just wait, don't do any scan/roam.
4291 * roaming_threshold -> disassociate_threshold, scan and roam for better signal.
4292 * Above disassociate threshold, give up and stop scanning.
4293 * Roaming is disabled if disassociate_threshold <= roaming_threshold */
4294static void ipw_handle_missed_beacon(struct ipw_priv *priv,
4295 int missed_count)
4296{
4297 priv->notif_missed_beacons = missed_count;
4298
4299 if (missed_count > priv->disassociate_threshold &&
4300 priv->status & STATUS_ASSOCIATED) {
4301 /* If associated and we've hit the missed
4302 * beacon threshold, disassociate, turn
4303 * off roaming, and abort any active scans */
4304 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4305 IPW_DL_STATE | IPW_DL_ASSOC,
4306 "Missed beacon: %d - disassociate\n", missed_count);
4307 priv->status &= ~STATUS_ROAMING;
4308 if (priv->status & STATUS_SCANNING) {
4309 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
4310 IPW_DL_STATE,
4311 "Aborting scan with missed beacon.\n");
4312 queue_work(priv->workqueue, &priv->abort_scan);
4313 }
4314
4315 queue_work(priv->workqueue, &priv->disassociate);
4316 return;
4317 }
4318
4319 if (priv->status & STATUS_ROAMING) {
4320 /* If we are currently roaming, then just
4321 * print a debug statement... */
4322 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4323 "Missed beacon: %d - roam in progress\n",
4324 missed_count);
4325 return;
4326 }
4327
4328 if (roaming &&
4329 (missed_count > priv->roaming_threshold &&
4330 missed_count <= priv->disassociate_threshold)) {
4331 /* If we are not already roaming, set the ROAM
4332 * bit in the status and kick off a scan.
4333 * This can happen several times before we reach
4334 * disassociate_threshold. */
4335 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4336 "Missed beacon: %d - initiate "
4337 "roaming\n", missed_count);
4338 if (!(priv->status & STATUS_ROAMING)) {
4339 priv->status |= STATUS_ROAMING;
4340 if (!(priv->status & STATUS_SCANNING))
4341 queue_delayed_work(priv->workqueue,
4342 &priv->request_scan, 0);
4343 }
4344 return;
4345 }
4346
4347 if (priv->status & STATUS_SCANNING) {
4348 /* Stop scan to keep fw from getting
4349 * stuck (only if we aren't roaming --
4350 * otherwise we'll never scan more than 2 or 3
4351 * channels..) */
4352 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF | IPW_DL_STATE,
4353 "Aborting scan with missed beacon.\n");
4354 queue_work(priv->workqueue, &priv->abort_scan);
4355 }
4356
4357 IPW_DEBUG_NOTIF("Missed beacon: %d\n", missed_count);
4358}
4359
4360static void ipw_scan_event(struct work_struct *work)
4361{
4362 union iwreq_data wrqu;
4363
4364 struct ipw_priv *priv =
4365 container_of(work, struct ipw_priv, scan_event.work);
4366
4367 wrqu.data.length = 0;
4368 wrqu.data.flags = 0;
4369 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4370}
4371
4372static void handle_scan_event(struct ipw_priv *priv)
4373{
4374 /* Only userspace-requested scan completion events go out immediately */
4375 if (!priv->user_requested_scan) {
4376 if (!delayed_work_pending(&priv->scan_event))
4377 queue_delayed_work(priv->workqueue, &priv->scan_event,
4378 round_jiffies_relative(msecs_to_jiffies(4000)));
4379 } else {
4380 union iwreq_data wrqu;
4381
4382 priv->user_requested_scan = 0;
4383 cancel_delayed_work(&priv->scan_event);
4384
4385 wrqu.data.length = 0;
4386 wrqu.data.flags = 0;
4387 wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
4388 }
4389}
4390
4391/**
4392 * Handle host notification packet.
4393 * Called from interrupt routine
4394 */
4395static void ipw_rx_notification(struct ipw_priv *priv,
4396 struct ipw_rx_notification *notif)
4397{
4398 DECLARE_SSID_BUF(ssid);
4399 u16 size = le16_to_cpu(notif->size);
4400 notif->size = le16_to_cpu(notif->size);
4401
4402 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n", notif->subtype, size);
4403
4404 switch (notif->subtype) {
4405 case HOST_NOTIFICATION_STATUS_ASSOCIATED:{
4406 struct notif_association *assoc = &notif->u.assoc;
4407
4408 switch (assoc->state) {
4409 case CMAS_ASSOCIATED:{
4410 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4411 IPW_DL_ASSOC,
4412 "associated: '%s' %pM \n",
4413 print_ssid(ssid, priv->essid,
4414 priv->essid_len),
4415 priv->bssid);
4416
4417 switch (priv->ieee->iw_mode) {
4418 case IW_MODE_INFRA:
4419 memcpy(priv->ieee->bssid,
4420 priv->bssid, ETH_ALEN);
4421 break;
4422
4423 case IW_MODE_ADHOC:
4424 memcpy(priv->ieee->bssid,
4425 priv->bssid, ETH_ALEN);
4426
4427 /* clear out the station table */
4428 priv->num_stations = 0;
4429
4430 IPW_DEBUG_ASSOC
4431 ("queueing adhoc check\n");
4432 queue_delayed_work(priv->
4433 workqueue,
4434 &priv->
4435 adhoc_check,
4436 le16_to_cpu(priv->
4437 assoc_request.
4438 beacon_interval));
4439 break;
4440 }
4441
4442 priv->status &= ~STATUS_ASSOCIATING;
4443 priv->status |= STATUS_ASSOCIATED;
4444 queue_work(priv->workqueue,
4445 &priv->system_config);
4446
4447#ifdef CONFIG_IPW2200_QOS
4448#define IPW_GET_PACKET_STYPE(x) WLAN_FC_GET_STYPE( \
4449 le16_to_cpu(((struct ieee80211_hdr *)(x))->frame_control))
4450 if ((priv->status & STATUS_AUTH) &&
4451 (IPW_GET_PACKET_STYPE(&notif->u.raw)
4452 == IEEE80211_STYPE_ASSOC_RESP)) {
4453 if ((sizeof
4454 (struct
4455 ieee80211_assoc_response)
4456 <= size)
4457 && (size <= 2314)) {
4458 struct
4459 ieee80211_rx_stats
4460 stats = {
4461 .len = size - 1,
4462 };
4463
4464 IPW_DEBUG_QOS
4465 ("QoS Associate "
4466 "size %d\n", size);
4467 ieee80211_rx_mgt(priv->
4468 ieee,
4469 (struct
4470 ieee80211_hdr_4addr
4471 *)
4472 &notif->u.raw, &stats);
4473 }
4474 }
4475#endif
4476
4477 schedule_work(&priv->link_up);
4478
4479 break;
4480 }
4481
4482 case CMAS_AUTHENTICATED:{
4483 if (priv->
4484 status & (STATUS_ASSOCIATED |
4485 STATUS_AUTH)) {
4486 struct notif_authenticate *auth
4487 = &notif->u.auth;
4488 IPW_DEBUG(IPW_DL_NOTIF |
4489 IPW_DL_STATE |
4490 IPW_DL_ASSOC,
4491 "deauthenticated: '%s' "
4492 "%pM"
4493 ": (0x%04X) - %s \n",
4494 print_ssid(ssid,
4495 priv->
4496 essid,
4497 priv->
4498 essid_len),
4499 priv->bssid,
4500 le16_to_cpu(auth->status),
4501 ipw_get_status_code
4502 (le16_to_cpu
4503 (auth->status)));
4504
4505 priv->status &=
4506 ~(STATUS_ASSOCIATING |
4507 STATUS_AUTH |
4508 STATUS_ASSOCIATED);
4509
4510 schedule_work(&priv->link_down);
4511 break;
4512 }
4513
4514 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4515 IPW_DL_ASSOC,
4516 "authenticated: '%s' %pM\n",
4517 print_ssid(ssid, priv->essid,
4518 priv->essid_len),
4519 priv->bssid);
4520 break;
4521 }
4522
4523 case CMAS_INIT:{
4524 if (priv->status & STATUS_AUTH) {
4525 struct
4526 ieee80211_assoc_response
4527 *resp;
4528 resp =
4529 (struct
4530 ieee80211_assoc_response
4531 *)&notif->u.raw;
4532 IPW_DEBUG(IPW_DL_NOTIF |
4533 IPW_DL_STATE |
4534 IPW_DL_ASSOC,
4535 "association failed (0x%04X): %s\n",
4536 le16_to_cpu(resp->status),
4537 ipw_get_status_code
4538 (le16_to_cpu
4539 (resp->status)));
4540 }
4541
4542 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4543 IPW_DL_ASSOC,
4544 "disassociated: '%s' %pM \n",
4545 print_ssid(ssid, priv->essid,
4546 priv->essid_len),
4547 priv->bssid);
4548
4549 priv->status &=
4550 ~(STATUS_DISASSOCIATING |
4551 STATUS_ASSOCIATING |
4552 STATUS_ASSOCIATED | STATUS_AUTH);
4553 if (priv->assoc_network
4554 && (priv->assoc_network->
4555 capability &
4556 WLAN_CAPABILITY_IBSS))
4557 ipw_remove_current_network
4558 (priv);
4559
4560 schedule_work(&priv->link_down);
4561
4562 break;
4563 }
4564
4565 case CMAS_RX_ASSOC_RESP:
4566 break;
4567
4568 default:
4569 IPW_ERROR("assoc: unknown (%d)\n",
4570 assoc->state);
4571 break;
4572 }
4573
4574 break;
4575 }
4576
4577 case HOST_NOTIFICATION_STATUS_AUTHENTICATE:{
4578 struct notif_authenticate *auth = &notif->u.auth;
4579 switch (auth->state) {
4580 case CMAS_AUTHENTICATED:
4581 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4582 "authenticated: '%s' %pM \n",
4583 print_ssid(ssid, priv->essid,
4584 priv->essid_len),
4585 priv->bssid);
4586 priv->status |= STATUS_AUTH;
4587 break;
4588
4589 case CMAS_INIT:
4590 if (priv->status & STATUS_AUTH) {
4591 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4592 IPW_DL_ASSOC,
4593 "authentication failed (0x%04X): %s\n",
4594 le16_to_cpu(auth->status),
4595 ipw_get_status_code(le16_to_cpu
4596 (auth->
4597 status)));
4598 }
4599 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4600 IPW_DL_ASSOC,
4601 "deauthenticated: '%s' %pM\n",
4602 print_ssid(ssid, priv->essid,
4603 priv->essid_len),
4604 priv->bssid);
4605
4606 priv->status &= ~(STATUS_ASSOCIATING |
4607 STATUS_AUTH |
4608 STATUS_ASSOCIATED);
4609
4610 schedule_work(&priv->link_down);
4611 break;
4612
4613 case CMAS_TX_AUTH_SEQ_1:
4614 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4615 IPW_DL_ASSOC, "AUTH_SEQ_1\n");
4616 break;
4617 case CMAS_RX_AUTH_SEQ_2:
4618 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4619 IPW_DL_ASSOC, "AUTH_SEQ_2\n");
4620 break;
4621 case CMAS_AUTH_SEQ_1_PASS:
4622 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4623 IPW_DL_ASSOC, "AUTH_SEQ_1_PASS\n");
4624 break;
4625 case CMAS_AUTH_SEQ_1_FAIL:
4626 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4627 IPW_DL_ASSOC, "AUTH_SEQ_1_FAIL\n");
4628 break;
4629 case CMAS_TX_AUTH_SEQ_3:
4630 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4631 IPW_DL_ASSOC, "AUTH_SEQ_3\n");
4632 break;
4633 case CMAS_RX_AUTH_SEQ_4:
4634 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4635 IPW_DL_ASSOC, "RX_AUTH_SEQ_4\n");
4636 break;
4637 case CMAS_AUTH_SEQ_2_PASS:
4638 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4639 IPW_DL_ASSOC, "AUTH_SEQ_2_PASS\n");
4640 break;
4641 case CMAS_AUTH_SEQ_2_FAIL:
4642 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4643				  IPW_DL_ASSOC, "AUTH_SEQ_2_FAIL\n");
4644 break;
4645 case CMAS_TX_ASSOC:
4646 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4647 IPW_DL_ASSOC, "TX_ASSOC\n");
4648 break;
4649 case CMAS_RX_ASSOC_RESP:
4650 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4651 IPW_DL_ASSOC, "RX_ASSOC_RESP\n");
4652
4653 break;
4654 case CMAS_ASSOCIATED:
4655 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4656 IPW_DL_ASSOC, "ASSOCIATED\n");
4657 break;
4658 default:
4659 IPW_DEBUG_NOTIF("auth: failure - %d\n",
4660 auth->state);
4661 break;
4662 }
4663 break;
4664 }
4665
4666 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT:{
4667 struct notif_channel_result *x =
4668 &notif->u.channel_result;
4669
4670 if (size == sizeof(*x)) {
4671 IPW_DEBUG_SCAN("Scan result for channel %d\n",
4672 x->channel_num);
4673 } else {
4674 IPW_DEBUG_SCAN("Scan result of wrong size %d "
4675 "(should be %zd)\n",
4676 size, sizeof(*x));
4677 }
4678 break;
4679 }
4680
4681 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED:{
4682 struct notif_scan_complete *x = &notif->u.scan_complete;
4683 if (size == sizeof(*x)) {
4684 IPW_DEBUG_SCAN
4685 ("Scan completed: type %d, %d channels, "
4686 "%d status\n", x->scan_type,
4687 x->num_channels, x->status);
4688 } else {
4689 IPW_ERROR("Scan completed of wrong size %d "
4690 "(should be %zd)\n",
4691 size, sizeof(*x));
4692 }
4693
4694 priv->status &=
4695 ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
4696
4697 wake_up_interruptible(&priv->wait_state);
4698 cancel_delayed_work(&priv->scan_check);
4699
4700 if (priv->status & STATUS_EXIT_PENDING)
4701 break;
4702
4703 priv->ieee->scans++;
4704
4705#ifdef CONFIG_IPW2200_MONITOR
4706 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4707 priv->status |= STATUS_SCAN_FORCED;
4708 queue_delayed_work(priv->workqueue,
4709 &priv->request_scan, 0);
4710 break;
4711 }
4712 priv->status &= ~STATUS_SCAN_FORCED;
4713#endif /* CONFIG_IPW2200_MONITOR */
4714
4715 /* Do queued direct scans first */
4716 if (priv->status & STATUS_DIRECT_SCAN_PENDING) {
4717 queue_delayed_work(priv->workqueue,
4718 &priv->request_direct_scan, 0);
4719 }
4720
4721 if (!(priv->status & (STATUS_ASSOCIATED |
4722 STATUS_ASSOCIATING |
4723 STATUS_ROAMING |
4724 STATUS_DISASSOCIATING)))
4725 queue_work(priv->workqueue, &priv->associate);
4726 else if (priv->status & STATUS_ROAMING) {
4727 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4728 /* If a scan completed and we are in roam mode, then
4729 * the scan that completed was the one requested as a
4730 * result of entering roam... so, schedule the
4731 * roam work */
4732 queue_work(priv->workqueue,
4733 &priv->roam);
4734 else
4735 /* Don't schedule if we aborted the scan */
4736 priv->status &= ~STATUS_ROAMING;
4737 } else if (priv->status & STATUS_SCAN_PENDING)
4738 queue_delayed_work(priv->workqueue,
4739 &priv->request_scan, 0);
4740 else if (priv->config & CFG_BACKGROUND_SCAN
4741 && priv->status & STATUS_ASSOCIATED)
4742 queue_delayed_work(priv->workqueue,
4743 &priv->request_scan,
4744 round_jiffies_relative(HZ));
4745
4746 /* Send an empty event to user space.
4747 * We don't send the received data on the event because
4748 * it would require us to do complex transcoding, and
4749			 * we want to minimise the work done in the irq handler.
4750			 * Use a request to extract the data.
4751			 * Also, we generate this even for any scan, regardless
4752			 * of how the scan was initiated. User space can just
4753 * sync on periodic scan to get fresh data...
4754 * Jean II */
4755 if (x->status == SCAN_COMPLETED_STATUS_COMPLETE)
4756 handle_scan_event(priv);
4757 break;
4758 }
4759
4760 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH:{
4761 struct notif_frag_length *x = &notif->u.frag_len;
4762
4763 if (size == sizeof(*x))
4764 IPW_ERROR("Frag length: %d\n",
4765 le16_to_cpu(x->frag_length));
4766 else
4767 IPW_ERROR("Frag length of wrong size %d "
4768 "(should be %zd)\n",
4769 size, sizeof(*x));
4770 break;
4771 }
4772
4773 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION:{
4774 struct notif_link_deterioration *x =
4775 &notif->u.link_deterioration;
4776
4777 if (size == sizeof(*x)) {
4778 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4779 "link deterioration: type %d, cnt %d\n",
4780 x->silence_notification_type,
4781 x->silence_count);
4782 memcpy(&priv->last_link_deterioration, x,
4783 sizeof(*x));
4784 } else {
4785 IPW_ERROR("Link Deterioration of wrong size %d "
4786 "(should be %zd)\n",
4787 size, sizeof(*x));
4788 }
4789 break;
4790 }
4791
4792 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE:{
4793 IPW_ERROR("Dino config\n");
4794 if (priv->hcmd
4795 && priv->hcmd->cmd != HOST_CMD_DINO_CONFIG)
4796 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
4797
4798 break;
4799 }
4800
4801 case HOST_NOTIFICATION_STATUS_BEACON_STATE:{
4802 struct notif_beacon_state *x = &notif->u.beacon_state;
4803 if (size != sizeof(*x)) {
4804 IPW_ERROR
4805 ("Beacon state of wrong size %d (should "
4806 "be %zd)\n", size, sizeof(*x));
4807 break;
4808 }
4809
4810 if (le32_to_cpu(x->state) ==
4811 HOST_NOTIFICATION_STATUS_BEACON_MISSING)
4812 ipw_handle_missed_beacon(priv,
4813 le32_to_cpu(x->
4814 number));
4815
4816 break;
4817 }
4818
4819 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY:{
4820 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
4821 if (size == sizeof(*x)) {
4822 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
4823 "0x%02x station %d\n",
4824 x->key_state, x->security_type,
4825 x->station_index);
4826 break;
4827 }
4828
4829 IPW_ERROR
4830 ("TGi Tx Key of wrong size %d (should be %zd)\n",
4831 size, sizeof(*x));
4832 break;
4833 }
4834
4835 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS:{
4836 struct notif_calibration *x = &notif->u.calibration;
4837
4838 if (size == sizeof(*x)) {
4839 memcpy(&priv->calib, x, sizeof(*x));
4840 IPW_DEBUG_INFO("TODO: Calibration\n");
4841 break;
4842 }
4843
4844 IPW_ERROR
4845 ("Calibration of wrong size %d (should be %zd)\n",
4846 size, sizeof(*x));
4847 break;
4848 }
4849
4850 case HOST_NOTIFICATION_NOISE_STATS:{
4851 if (size == sizeof(u32)) {
4852 priv->exp_avg_noise =
4853 exponential_average(priv->exp_avg_noise,
4854 (u8) (le32_to_cpu(notif->u.noise.value) & 0xff),
4855 DEPTH_NOISE);
4856 break;
4857 }
4858
4859 IPW_ERROR
4860 ("Noise stat is wrong size %d (should be %zd)\n",
4861 size, sizeof(u32));
4862 break;
4863 }
4864
4865 default:
4866 IPW_DEBUG_NOTIF("Unknown notification: "
4867 "subtype=%d,flags=0x%2x,size=%d\n",
4868 notif->subtype, notif->flags, size);
4869 }
4870}
4871
4872/**
4873 * Destroys all DMA structures and initialises them again
4874 *
4875 * @param priv
4876 * @return error code
4877 */
4878static int ipw_queue_reset(struct ipw_priv *priv)
4879{
4880 int rc = 0;
4881 /** @todo customize queue sizes */
4882 int nTx = 64, nTxCmd = 8;
4883 ipw_tx_queue_free(priv);
4884 /* Tx CMD queue */
4885 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
4886 IPW_TX_CMD_QUEUE_READ_INDEX,
4887 IPW_TX_CMD_QUEUE_WRITE_INDEX,
4888 IPW_TX_CMD_QUEUE_BD_BASE,
4889 IPW_TX_CMD_QUEUE_BD_SIZE);
4890 if (rc) {
4891 IPW_ERROR("Tx Cmd queue init failed\n");
4892 goto error;
4893 }
4894 /* Tx queue(s) */
4895 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
4896 IPW_TX_QUEUE_0_READ_INDEX,
4897 IPW_TX_QUEUE_0_WRITE_INDEX,
4898 IPW_TX_QUEUE_0_BD_BASE, IPW_TX_QUEUE_0_BD_SIZE);
4899 if (rc) {
4900 IPW_ERROR("Tx 0 queue init failed\n");
4901 goto error;
4902 }
4903 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
4904 IPW_TX_QUEUE_1_READ_INDEX,
4905 IPW_TX_QUEUE_1_WRITE_INDEX,
4906 IPW_TX_QUEUE_1_BD_BASE, IPW_TX_QUEUE_1_BD_SIZE);
4907 if (rc) {
4908 IPW_ERROR("Tx 1 queue init failed\n");
4909 goto error;
4910 }
4911 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
4912 IPW_TX_QUEUE_2_READ_INDEX,
4913 IPW_TX_QUEUE_2_WRITE_INDEX,
4914 IPW_TX_QUEUE_2_BD_BASE, IPW_TX_QUEUE_2_BD_SIZE);
4915 if (rc) {
4916 IPW_ERROR("Tx 2 queue init failed\n");
4917 goto error;
4918 }
4919 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
4920 IPW_TX_QUEUE_3_READ_INDEX,
4921 IPW_TX_QUEUE_3_WRITE_INDEX,
4922 IPW_TX_QUEUE_3_BD_BASE, IPW_TX_QUEUE_3_BD_SIZE);
4923 if (rc) {
4924 IPW_ERROR("Tx 3 queue init failed\n");
4925 goto error;
4926 }
4927 /* statistics */
4928 priv->rx_bufs_min = 0;
4929 priv->rx_pend_max = 0;
4930 return rc;
4931
4932 error:
4933 ipw_tx_queue_free(priv);
4934 return rc;
4935}
4936
4937/**
4938 * Reclaim Tx queue entries no longer used by the NIC.
4939 *
4940 * When FW advances 'R' index, all entries between old and
4941 * new 'R' index need to be reclaimed. As a result, some free space
4942 * forms. If there is enough free space (> low mark), wake Tx queue.
4943 *
4944 * @note Need to protect against garbage in 'R' index
4945 * @param priv
4946 * @param txq
4947 * @param qindex
4948 * @return Number of used entries remaining in the queue
4949 */
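/* Worked example: with n_bd == 64, last_used == 60 and a hardware read
 * index of 3, the loop below frees TFDs 60..63 and then 0..2 -- the
 * index wraps modulo n_bd via ipw_queue_inc_wrap(). */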
4950static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
4951 struct clx2_tx_queue *txq, int qindex)
4952{
4953 u32 hw_tail;
4954 int used;
4955 struct clx2_queue *q = &txq->q;
4956
4957 hw_tail = ipw_read32(priv, q->reg_r);
4958 if (hw_tail >= q->n_bd) {
4959 IPW_ERROR
4960 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
4961 hw_tail, q->n_bd);
4962 goto done;
4963 }
4964 for (; q->last_used != hw_tail;
4965 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
4966 ipw_queue_tx_free_tfd(priv, txq);
4967 priv->tx_packets++;
4968 }
4969 done:
4970 if ((ipw_tx_queue_space(q) > q->low_mark) &&
4971 (qindex >= 0))
4972 netif_wake_queue(priv->net_dev);
4973 used = q->first_empty - q->last_used;
4974 if (used < 0)
4975 used += q->n_bd;
4976
4977 return used;
4978}
4979
4980static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
4981 int len, int sync)
4982{
4983 struct clx2_tx_queue *txq = &priv->txq_cmd;
4984 struct clx2_queue *q = &txq->q;
4985 struct tfd_frame *tfd;
4986
4987 if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
4988 IPW_ERROR("No space for Tx\n");
4989 return -EBUSY;
4990 }
4991
4992 tfd = &txq->bd[q->first_empty];
4993 txq->txb[q->first_empty] = NULL;
4994
4995 memset(tfd, 0, sizeof(*tfd));
4996 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
4997 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
4998 priv->hcmd_seq++;
4999 tfd->u.cmd.index = hcmd;
5000 tfd->u.cmd.length = len;
5001 memcpy(tfd->u.cmd.payload, buf, len);
5002 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
5003 ipw_write32(priv, q->reg_w, q->first_empty);
5004 _ipw_read32(priv, 0x90);
5005
5006 return 0;
5007}
5008
5009/*
5010 * Rx theory of operation
5011 *
5012 * The host allocates 32 DMA target addresses and passes the host address
5013 * to the firmware at register IPW_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
5014 * 0 to 31
5015 *
5016 * Rx Queue Indexes
5017 * The host/firmware share two index registers for managing the Rx buffers.
5018 *
5019 * The READ index maps to the first position that the firmware may be writing
5020 * to -- the driver can read up to (but not including) this position and get
5021 * good data.
5022 * The READ index is managed by the firmware once the card is enabled.
5023 *
5024 * The WRITE index maps to the last position the driver has read from -- the
5025 * position preceding WRITE is the last slot the firmware can place a packet.
5026 *
5027 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
5028 * WRITE = READ.
5029 *
5030 * During initialization the host sets up the READ queue position to the first
5031 * INDEX position, and WRITE to the last (READ - 1 wrapped)
5032 *
5033 * When the firmware places a packet in a buffer it will advance the READ index
5034 * and fire the RX interrupt. The driver can then query the READ index and
5035 * process as many packets as possible, moving the WRITE index forward as it
5036 * resets the Rx queue buffers with new memory.
5037 *
5038 * The management in the driver is as follows:
5039 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
5040 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
5041 * to replenish the ipw->rxq->rx_free.
5042 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
5043 * ipw->rxq is replenished and the READ INDEX is updated (updating the
5044 * 'processed' and 'read' driver indexes as well)
5045 * + A received packet is processed and handed to the kernel network stack,
5046 * detached from the ipw->rxq. The driver 'processed' index is updated.
5047 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
5048 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
5049 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
5050 * were enough free buffers and RX_STALLED is set it is cleared.
5051 *
5052 *
5053 * Driver sequence:
5054 *
5055 * ipw_rx_queue_alloc() Allocates rx_free
5056 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
5057 * ipw_rx_queue_restock
5058 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
5059 * queue, updates firmware pointers, and updates
5060 * the WRITE index. If insufficient rx_free buffers
5061 * are available, schedules ipw_rx_queue_replenish
5062 *
5063 * -- enable interrupts --
5064 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
5065 * READ INDEX, detaching the SKB from the pool.
5066 * Moves the packet buffer from queue to rx_used.
5067 * Calls ipw_rx_queue_restock to refill any empty
5068 * slots.
5069 * ...
5070 *
5071 */
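/* Quick illustration of the index rules above: with the 32 entry queue,
 * READ == 10 and WRITE == 9 means the queue is empty, while
 * READ == WRITE == 10 means it is full; one slot is always left unused
 * so the two states remain distinguishable. */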
5072
5073/*
5074 * If there are slots in the RX queue that need to be restocked,
5075 * and we have free pre-allocated buffers, fill the ranks as much
5076 * as we can pulling from rx_free.
5077 *
5078 * This moves the 'write' index forward to catch up with 'processed', and
5079 * also updates the memory address in the firmware to reference the new
5080 * target buffer.
5081 */
5082static void ipw_rx_queue_restock(struct ipw_priv *priv)
5083{
5084 struct ipw_rx_queue *rxq = priv->rxq;
5085 struct list_head *element;
5086 struct ipw_rx_mem_buffer *rxb;
5087 unsigned long flags;
5088 int write;
5089
5090 spin_lock_irqsave(&rxq->lock, flags);
5091 write = rxq->write;
5092 while ((ipw_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
5093 element = rxq->rx_free.next;
5094 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5095 list_del(element);
5096
5097 ipw_write32(priv, IPW_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
5098 rxb->dma_addr);
5099 rxq->queue[rxq->write] = rxb;
5100 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
5101 rxq->free_count--;
5102 }
5103 spin_unlock_irqrestore(&rxq->lock, flags);
5104
5105 /* If the pre-allocated buffer pool is dropping low, schedule to
5106 * refill it */
5107 if (rxq->free_count <= RX_LOW_WATERMARK)
5108 queue_work(priv->workqueue, &priv->rx_replenish);
5109
5110 /* If we've added more space for the firmware to place data, tell it */
5111 if (write != rxq->write)
5112 ipw_write32(priv, IPW_RX_WRITE_INDEX, rxq->write);
5113}
5114
5115/*
5116 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
5117 * Also restock the Rx queue via ipw_rx_queue_restock.
5118 *
5119 * This is called as a scheduled work item (except during initialization)
5120 */
5121static void ipw_rx_queue_replenish(void *data)
5122{
5123 struct ipw_priv *priv = data;
5124 struct ipw_rx_queue *rxq = priv->rxq;
5125 struct list_head *element;
5126 struct ipw_rx_mem_buffer *rxb;
5127 unsigned long flags;
5128
5129 spin_lock_irqsave(&rxq->lock, flags);
5130 while (!list_empty(&rxq->rx_used)) {
5131 element = rxq->rx_used.next;
5132 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
5133 rxb->skb = alloc_skb(IPW_RX_BUF_SIZE, GFP_ATOMIC);
5134 if (!rxb->skb) {
5135 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
5136 priv->net_dev->name);
5137 /* We don't reschedule replenish work here -- we will
5138 * call the restock method and if it still needs
5139 * more buffers it will schedule replenish */
5140 break;
5141 }
5142 list_del(element);
5143
5144 rxb->dma_addr =
5145 pci_map_single(priv->pci_dev, rxb->skb->data,
5146 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5147
5148 list_add_tail(&rxb->list, &rxq->rx_free);
5149 rxq->free_count++;
5150 }
5151 spin_unlock_irqrestore(&rxq->lock, flags);
5152
5153 ipw_rx_queue_restock(priv);
5154}
5155
5156static void ipw_bg_rx_queue_replenish(struct work_struct *work)
5157{
5158 struct ipw_priv *priv =
5159 container_of(work, struct ipw_priv, rx_replenish);
5160 mutex_lock(&priv->mutex);
5161 ipw_rx_queue_replenish(priv);
5162 mutex_unlock(&priv->mutex);
5163}
5164
5165/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
5166 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
5167 * This free routine walks the list of POOL entries and if SKB is set to
5168 * non-NULL it is unmapped and freed.
5169 */
5170static void ipw_rx_queue_free(struct ipw_priv *priv, struct ipw_rx_queue *rxq)
5171{
5172 int i;
5173
5174 if (!rxq)
5175 return;
5176
5177 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
5178 if (rxq->pool[i].skb != NULL) {
5179 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
5180 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5181 dev_kfree_skb(rxq->pool[i].skb);
5182 }
5183 }
5184
5185 kfree(rxq);
5186}
5187
5188static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
5189{
5190 struct ipw_rx_queue *rxq;
5191 int i;
5192
5193 rxq = kzalloc(sizeof(*rxq), GFP_KERNEL);
5194 if (unlikely(!rxq)) {
5195 IPW_ERROR("memory allocation failed\n");
5196 return NULL;
5197 }
5198 spin_lock_init(&rxq->lock);
5199 INIT_LIST_HEAD(&rxq->rx_free);
5200 INIT_LIST_HEAD(&rxq->rx_used);
5201
5202 /* Fill the rx_used queue with _all_ of the Rx buffers */
5203 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
5204 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
5205
5206	/* Set up so that we have processed and used all buffers, but have
5207	 * not restocked the Rx queue with fresh buffers */
5208 rxq->read = rxq->write = 0;
5209 rxq->free_count = 0;
5210
5211 return rxq;
5212}
5213
5214static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
5215{
5216 rate &= ~IEEE80211_BASIC_RATE_MASK;
5217 if (ieee_mode == IEEE_A) {
5218 switch (rate) {
5219 case IEEE80211_OFDM_RATE_6MB:
5220 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
5221 1 : 0;
5222 case IEEE80211_OFDM_RATE_9MB:
5223 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
5224 1 : 0;
5225 case IEEE80211_OFDM_RATE_12MB:
5226 return priv->
5227 rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5228 case IEEE80211_OFDM_RATE_18MB:
5229 return priv->
5230 rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5231 case IEEE80211_OFDM_RATE_24MB:
5232 return priv->
5233 rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5234 case IEEE80211_OFDM_RATE_36MB:
5235 return priv->
5236 rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5237 case IEEE80211_OFDM_RATE_48MB:
5238 return priv->
5239 rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5240 case IEEE80211_OFDM_RATE_54MB:
5241 return priv->
5242 rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5243 default:
5244 return 0;
5245 }
5246 }
5247
5248 /* B and G mixed */
5249 switch (rate) {
5250 case IEEE80211_CCK_RATE_1MB:
5251 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
5252 case IEEE80211_CCK_RATE_2MB:
5253 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
5254 case IEEE80211_CCK_RATE_5MB:
5255 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
5256 case IEEE80211_CCK_RATE_11MB:
5257 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
5258 }
5259
5260 /* If we are limited to B modulations, bail at this point */
5261 if (ieee_mode == IEEE_B)
5262 return 0;
5263
5264 /* G */
5265 switch (rate) {
5266 case IEEE80211_OFDM_RATE_6MB:
5267 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
5268 case IEEE80211_OFDM_RATE_9MB:
5269 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
5270 case IEEE80211_OFDM_RATE_12MB:
5271 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
5272 case IEEE80211_OFDM_RATE_18MB:
5273 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
5274 case IEEE80211_OFDM_RATE_24MB:
5275 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
5276 case IEEE80211_OFDM_RATE_36MB:
5277 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
5278 case IEEE80211_OFDM_RATE_48MB:
5279 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
5280 case IEEE80211_OFDM_RATE_54MB:
5281 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
5282 }
5283
5284 return 0;
5285}
5286
5287static int ipw_compatible_rates(struct ipw_priv *priv,
5288 const struct ieee80211_network *network,
5289 struct ipw_supported_rates *rates)
5290{
5291 int num_rates, i;
5292
5293 memset(rates, 0, sizeof(*rates));
5294 num_rates = min(network->rates_len, (u8) IPW_MAX_RATES);
5295 rates->num_rates = 0;
5296 for (i = 0; i < num_rates; i++) {
5297 if (!ipw_is_rate_in_mask(priv, network->mode,
5298 network->rates[i])) {
5299
5300 if (network->rates[i] & IEEE80211_BASIC_RATE_MASK) {
5301 IPW_DEBUG_SCAN("Adding masked mandatory "
5302 "rate %02X\n",
5303 network->rates[i]);
5304 rates->supported_rates[rates->num_rates++] =
5305 network->rates[i];
5306 continue;
5307 }
5308
5309 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5310 network->rates[i], priv->rates_mask);
5311 continue;
5312 }
5313
5314 rates->supported_rates[rates->num_rates++] = network->rates[i];
5315 }
5316
5317 num_rates = min(network->rates_ex_len,
5318 (u8) (IPW_MAX_RATES - num_rates));
5319 for (i = 0; i < num_rates; i++) {
5320 if (!ipw_is_rate_in_mask(priv, network->mode,
5321 network->rates_ex[i])) {
5322 if (network->rates_ex[i] & IEEE80211_BASIC_RATE_MASK) {
5323 IPW_DEBUG_SCAN("Adding masked mandatory "
5324 "rate %02X\n",
5325 network->rates_ex[i]);
5326 rates->supported_rates[rates->num_rates++] =
5327				    network->rates_ex[i];
5328 continue;
5329 }
5330
5331 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
5332 network->rates_ex[i], priv->rates_mask);
5333 continue;
5334 }
5335
5336 rates->supported_rates[rates->num_rates++] =
5337 network->rates_ex[i];
5338 }
5339
5340 return 1;
5341}
5342
5343static void ipw_copy_rates(struct ipw_supported_rates *dest,
5344 const struct ipw_supported_rates *src)
5345{
5346 u8 i;
5347 for (i = 0; i < src->num_rates; i++)
5348 dest->supported_rates[i] = src->supported_rates[i];
5349 dest->num_rates = src->num_rates;
5350}
5351
5352/* TODO: Look at sniffed packets in the air to determine if the basic rate
5353 * mask should ever be used -- right now all callers to add the scan rates are
5354 * set with the modulation = CCK, so BASIC_RATE_MASK is never set... */
5355static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
5356 u8 modulation, u32 rate_mask)
5357{
5358 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5359 IEEE80211_BASIC_RATE_MASK : 0;
5360
5361 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
5362 rates->supported_rates[rates->num_rates++] =
5363 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
5364
5365 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
5366 rates->supported_rates[rates->num_rates++] =
5367 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
5368
5369 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
5370 rates->supported_rates[rates->num_rates++] = basic_mask |
5371 IEEE80211_CCK_RATE_5MB;
5372
5373 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
5374 rates->supported_rates[rates->num_rates++] = basic_mask |
5375 IEEE80211_CCK_RATE_11MB;
5376}
5377
5378static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
5379 u8 modulation, u32 rate_mask)
5380{
5381 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
5382 IEEE80211_BASIC_RATE_MASK : 0;
5383
5384 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
5385 rates->supported_rates[rates->num_rates++] = basic_mask |
5386 IEEE80211_OFDM_RATE_6MB;
5387
5388 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
5389 rates->supported_rates[rates->num_rates++] =
5390 IEEE80211_OFDM_RATE_9MB;
5391
5392 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
5393 rates->supported_rates[rates->num_rates++] = basic_mask |
5394 IEEE80211_OFDM_RATE_12MB;
5395
5396 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
5397 rates->supported_rates[rates->num_rates++] =
5398 IEEE80211_OFDM_RATE_18MB;
5399
5400 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
5401 rates->supported_rates[rates->num_rates++] = basic_mask |
5402 IEEE80211_OFDM_RATE_24MB;
5403
5404 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
5405 rates->supported_rates[rates->num_rates++] =
5406 IEEE80211_OFDM_RATE_36MB;
5407
5408 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
5409 rates->supported_rates[rates->num_rates++] =
5410 IEEE80211_OFDM_RATE_48MB;
5411
5412 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
5413 rates->supported_rates[rates->num_rates++] =
5414 IEEE80211_OFDM_RATE_54MB;
5415}
5416
5417struct ipw_network_match {
5418 struct ieee80211_network *network;
5419 struct ipw_supported_rates rates;
5420};
5421
5422static int ipw_find_adhoc_network(struct ipw_priv *priv,
5423 struct ipw_network_match *match,
5424 struct ieee80211_network *network,
5425 int roaming)
5426{
5427 struct ipw_supported_rates rates;
5428 DECLARE_SSID_BUF(ssid);
5429
5430 /* Verify that this network's capability is compatible with the
5431 * current mode (AdHoc or Infrastructure) */
5432 if ((priv->ieee->iw_mode == IW_MODE_ADHOC &&
5433 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5434 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded due to "
5435 "capability mismatch.\n",
5436 print_ssid(ssid, network->ssid,
5437 network->ssid_len),
5438 network->bssid);
5439 return 0;
5440 }
5441
5442 if (unlikely(roaming)) {
5443		/* If we are roaming, then check whether this is a valid
5444 * network to try and roam to */
5445 if ((network->ssid_len != match->network->ssid_len) ||
5446 memcmp(network->ssid, match->network->ssid,
5447 network->ssid_len)) {
5448 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5449 "because of non-network ESSID.\n",
5450 print_ssid(ssid, network->ssid,
5451 network->ssid_len),
5452 network->bssid);
5453 return 0;
5454 }
5455 } else {
5456 /* If an ESSID has been configured then compare the broadcast
5457 * ESSID to ours */
5458 if ((priv->config & CFG_STATIC_ESSID) &&
5459 ((network->ssid_len != priv->essid_len) ||
5460 memcmp(network->ssid, priv->essid,
5461 min(network->ssid_len, priv->essid_len)))) {
5462 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5463
5464 strncpy(escaped,
5465 print_ssid(ssid, network->ssid,
5466 network->ssid_len),
5467 sizeof(escaped));
5468 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5469 "because of ESSID mismatch: '%s'.\n",
5470 escaped, network->bssid,
5471 print_ssid(ssid, priv->essid,
5472 priv->essid_len));
5473 return 0;
5474 }
5475 }
5476
5477	/* If the candidate network's TSF timestamp is older than the current
5478	 * match's, don't bother testing everything else. */
5479
5480 if (network->time_stamp[0] < match->network->time_stamp[0]) {
5481		IPW_DEBUG_MERGE("Candidate excluded: current network '%s' "
5482				"has a newer timestamp.\n",
5483 print_ssid(ssid, match->network->ssid,
5484 match->network->ssid_len));
5485 return 0;
5486 } else if (network->time_stamp[1] < match->network->time_stamp[1]) {
5487		IPW_DEBUG_MERGE("Candidate excluded: current network '%s' "
5488				"has a newer timestamp.\n",
5489 print_ssid(ssid, match->network->ssid,
5490 match->network->ssid_len));
5491 return 0;
5492 }
5493
5494 /* Now go through and see if the requested network is valid... */
5495 if (priv->ieee->scan_age != 0 &&
5496 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5497 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5498 "because of age: %ums.\n",
5499 print_ssid(ssid, network->ssid,
5500 network->ssid_len),
5501 network->bssid,
5502 jiffies_to_msecs(jiffies -
5503 network->last_scanned));
5504 return 0;
5505 }
5506
5507 if ((priv->config & CFG_STATIC_CHANNEL) &&
5508 (network->channel != priv->channel)) {
5509 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5510 "because of channel mismatch: %d != %d.\n",
5511 print_ssid(ssid, network->ssid,
5512 network->ssid_len),
5513 network->bssid,
5514 network->channel, priv->channel);
5515 return 0;
5516 }
5517
5518	/* Verify privacy compatibility */
5519 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5520 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5521 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5522 "because of privacy mismatch: %s != %s.\n",
5523 print_ssid(ssid, network->ssid,
5524 network->ssid_len),
5525 network->bssid,
5526 priv->
5527 capability & CAP_PRIVACY_ON ? "on" : "off",
5528 network->
5529 capability & WLAN_CAPABILITY_PRIVACY ? "on" :
5530 "off");
5531 return 0;
5532 }
5533
5534 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5535 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5536 "because of the same BSSID match: %pM"
5537 ".\n", print_ssid(ssid, network->ssid,
5538 network->ssid_len),
5539 network->bssid,
5540 priv->bssid);
5541 return 0;
5542 }
5543
5544 /* Filter out any incompatible freq / mode combinations */
5545 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5546 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5547 "because of invalid frequency/mode "
5548 "combination.\n",
5549 print_ssid(ssid, network->ssid,
5550 network->ssid_len),
5551 network->bssid);
5552 return 0;
5553 }
5554
5555 /* Ensure that the rates supported by the driver are compatible with
5556 * this AP, including verification of basic rates (mandatory) */
5557 if (!ipw_compatible_rates(priv, network, &rates)) {
5558 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5559 "because configured rate mask excludes "
5560 "AP mandatory rate.\n",
5561 print_ssid(ssid, network->ssid,
5562 network->ssid_len),
5563 network->bssid);
5564 return 0;
5565 }
5566
5567 if (rates.num_rates == 0) {
5568 IPW_DEBUG_MERGE("Network '%s (%pM)' excluded "
5569 "because of no compatible rates.\n",
5570 print_ssid(ssid, network->ssid,
5571 network->ssid_len),
5572 network->bssid);
5573 return 0;
5574 }
5575
5576	/* TODO: Perform any further minimal comparative tests. We do not
5577 * want to put too much policy logic here; intelligent scan selection
5578 * should occur within a generic IEEE 802.11 user space tool. */
5579
5580 /* Set up 'new' AP to this network */
5581 ipw_copy_rates(&match->rates, &rates);
5582 match->network = network;
5583 IPW_DEBUG_MERGE("Network '%s (%pM)' is a viable match.\n",
5584 print_ssid(ssid, network->ssid, network->ssid_len),
5585 network->bssid);
5586
5587 return 1;
5588}
5589
5590static void ipw_merge_adhoc_network(struct work_struct *work)
5591{
5592 DECLARE_SSID_BUF(ssid);
5593 struct ipw_priv *priv =
5594 container_of(work, struct ipw_priv, merge_networks);
5595 struct ieee80211_network *network = NULL;
5596 struct ipw_network_match match = {
5597 .network = priv->assoc_network
5598 };
5599
5600 if ((priv->status & STATUS_ASSOCIATED) &&
5601 (priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5602 /* First pass through ROAM process -- look for a better
5603 * network */
5604 unsigned long flags;
5605
5606 spin_lock_irqsave(&priv->ieee->lock, flags);
5607 list_for_each_entry(network, &priv->ieee->network_list, list) {
5608 if (network != priv->assoc_network)
5609 ipw_find_adhoc_network(priv, &match, network,
5610 1);
5611 }
5612 spin_unlock_irqrestore(&priv->ieee->lock, flags);
5613
5614 if (match.network == priv->assoc_network) {
5615 IPW_DEBUG_MERGE("No better ADHOC in this network to "
5616 "merge to.\n");
5617 return;
5618 }
5619
5620 mutex_lock(&priv->mutex);
5621 if ((priv->ieee->iw_mode == IW_MODE_ADHOC)) {
5622 IPW_DEBUG_MERGE("remove network %s\n",
5623 print_ssid(ssid, priv->essid,
5624 priv->essid_len));
5625 ipw_remove_current_network(priv);
5626 }
5627
5628 ipw_disassociate(priv);
5629 priv->assoc_network = match.network;
5630 mutex_unlock(&priv->mutex);
5631 return;
5632 }
5633}
5634
5635static int ipw_best_network(struct ipw_priv *priv,
5636 struct ipw_network_match *match,
5637 struct ieee80211_network *network, int roaming)
5638{
5639 struct ipw_supported_rates rates;
5640 DECLARE_SSID_BUF(ssid);
5641
5642 /* Verify that this network's capability is compatible with the
5643 * current mode (AdHoc or Infrastructure) */
5644 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
5645 !(network->capability & WLAN_CAPABILITY_ESS)) ||
5646 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
5647 !(network->capability & WLAN_CAPABILITY_IBSS))) {
5648 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded due to "
5649 "capability mismatch.\n",
5650 print_ssid(ssid, network->ssid,
5651 network->ssid_len),
5652 network->bssid);
5653 return 0;
5654 }
5655
5656 if (unlikely(roaming)) {
5657		/* If we are roaming, then check whether this is a valid
5658 * network to try and roam to */
5659 if ((network->ssid_len != match->network->ssid_len) ||
5660 memcmp(network->ssid, match->network->ssid,
5661 network->ssid_len)) {
5662 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5663 "because of non-network ESSID.\n",
5664 print_ssid(ssid, network->ssid,
5665 network->ssid_len),
5666 network->bssid);
5667 return 0;
5668 }
5669 } else {
5670 /* If an ESSID has been configured then compare the broadcast
5671 * ESSID to ours */
5672 if ((priv->config & CFG_STATIC_ESSID) &&
5673 ((network->ssid_len != priv->essid_len) ||
5674 memcmp(network->ssid, priv->essid,
5675 min(network->ssid_len, priv->essid_len)))) {
5676 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5677 strncpy(escaped,
5678 print_ssid(ssid, network->ssid,
5679 network->ssid_len),
5680 sizeof(escaped));
5681 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5682 "because of ESSID mismatch: '%s'.\n",
5683 escaped, network->bssid,
5684 print_ssid(ssid, priv->essid,
5685 priv->essid_len));
5686 return 0;
5687 }
5688 }
5689
5690 /* If the old network rate is better than this one, don't bother
5691 * testing everything else. */
5692 if (match->network && match->network->stats.rssi > network->stats.rssi) {
5693 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
5694 strncpy(escaped,
5695 print_ssid(ssid, network->ssid, network->ssid_len),
5696 sizeof(escaped));
5697 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded because "
5698 "'%s (%pM)' has a stronger signal.\n",
5699 escaped, network->bssid,
5700 print_ssid(ssid, match->network->ssid,
5701 match->network->ssid_len),
5702 match->network->bssid);
5703 return 0;
5704 }
5705
5706	/* If this network has already had an association attempt within the
5707	 * last 3 seconds, do not try to associate again... */
5708 if (network->last_associate &&
5709 time_after(network->last_associate + (HZ * 3UL), jiffies)) {
5710 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5711 "because of storming (%ums since last "
5712 "assoc attempt).\n",
5713 print_ssid(ssid, network->ssid,
5714 network->ssid_len),
5715 network->bssid,
5716 jiffies_to_msecs(jiffies -
5717 network->last_associate));
5718 return 0;
5719 }
5720
5721 /* Now go through and see if the requested network is valid... */
5722 if (priv->ieee->scan_age != 0 &&
5723 time_after(jiffies, network->last_scanned + priv->ieee->scan_age)) {
5724 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5725 "because of age: %ums.\n",
5726 print_ssid(ssid, network->ssid,
5727 network->ssid_len),
5728 network->bssid,
5729 jiffies_to_msecs(jiffies -
5730 network->last_scanned));
5731 return 0;
5732 }
5733
5734 if ((priv->config & CFG_STATIC_CHANNEL) &&
5735 (network->channel != priv->channel)) {
5736 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5737 "because of channel mismatch: %d != %d.\n",
5738 print_ssid(ssid, network->ssid,
5739 network->ssid_len),
5740 network->bssid,
5741 network->channel, priv->channel);
5742 return 0;
5743 }
5744
5745	/* Verify privacy compatibility */
5746 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
5747 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
5748 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5749 "because of privacy mismatch: %s != %s.\n",
5750 print_ssid(ssid, network->ssid,
5751 network->ssid_len),
5752 network->bssid,
5753 priv->capability & CAP_PRIVACY_ON ? "on" :
5754 "off",
5755 network->capability &
5756 WLAN_CAPABILITY_PRIVACY ? "on" : "off");
5757 return 0;
5758 }
5759
5760 if ((priv->config & CFG_STATIC_BSSID) &&
5761 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
5762 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5763 "because of BSSID mismatch: %pM.\n",
5764 print_ssid(ssid, network->ssid,
5765 network->ssid_len),
5766 network->bssid, priv->bssid);
5767 return 0;
5768 }
5769
5770 /* Filter out any incompatible freq / mode combinations */
5771 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
5772 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5773 "because of invalid frequency/mode "
5774 "combination.\n",
5775 print_ssid(ssid, network->ssid,
5776 network->ssid_len),
5777 network->bssid);
5778 return 0;
5779 }
5780
5781 /* Filter out invalid channel in current GEO */
5782 if (!ieee80211_is_valid_channel(priv->ieee, network->channel)) {
5783 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5784 "because of invalid channel in current GEO\n",
5785 print_ssid(ssid, network->ssid,
5786 network->ssid_len),
5787 network->bssid);
5788 return 0;
5789 }
5790
5791 /* Ensure that the rates supported by the driver are compatible with
5792 * this AP, including verification of basic rates (mandatory) */
5793 if (!ipw_compatible_rates(priv, network, &rates)) {
5794 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5795 "because configured rate mask excludes "
5796 "AP mandatory rate.\n",
5797 print_ssid(ssid, network->ssid,
5798 network->ssid_len),
5799 network->bssid);
5800 return 0;
5801 }
5802
5803 if (rates.num_rates == 0) {
5804 IPW_DEBUG_ASSOC("Network '%s (%pM)' excluded "
5805 "because of no compatible rates.\n",
5806 print_ssid(ssid, network->ssid,
5807 network->ssid_len),
5808 network->bssid);
5809 return 0;
5810 }
5811
5812	/* TODO: Perform any further minimal comparative tests. We do not
5813 * want to put too much policy logic here; intelligent scan selection
5814 * should occur within a generic IEEE 802.11 user space tool. */
5815
5816 /* Set up 'new' AP to this network */
5817 ipw_copy_rates(&match->rates, &rates);
5818 match->network = network;
5819
5820 IPW_DEBUG_ASSOC("Network '%s (%pM)' is a viable match.\n",
5821 print_ssid(ssid, network->ssid, network->ssid_len),
5822 network->bssid);
5823
5824 return 1;
5825}
5826
5827static void ipw_adhoc_create(struct ipw_priv *priv,
5828 struct ieee80211_network *network)
5829{
5830 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
5831 int i;
5832
5833 /*
5834 * For the purposes of scanning, we can set our wireless mode
5835 * to trigger scans across combinations of bands, but when it
5836	 * comes to creating a new ad-hoc network, we have to tell the FW
5837 * exactly which band to use.
5838 *
5839 * We also have the possibility of an invalid channel for the
5840	 * chosen band. Attempting to create a new ad-hoc network
5841 * with an invalid channel for wireless mode will trigger a
5842 * FW fatal error.
5843 *
5844 */
5845 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
5846 case IEEE80211_52GHZ_BAND:
5847 network->mode = IEEE_A;
5848 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5849 BUG_ON(i == -1);
5850 if (geo->a[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5851 IPW_WARNING("Overriding invalid channel\n");
5852 priv->channel = geo->a[0].channel;
5853 }
5854 break;
5855
5856 case IEEE80211_24GHZ_BAND:
5857 if (priv->ieee->mode & IEEE_G)
5858 network->mode = IEEE_G;
5859 else
5860 network->mode = IEEE_B;
5861 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
5862 BUG_ON(i == -1);
5863 if (geo->bg[i].flags & IEEE80211_CH_PASSIVE_ONLY) {
5864 IPW_WARNING("Overriding invalid channel\n");
5865 priv->channel = geo->bg[0].channel;
5866 }
5867 break;
5868
5869 default:
5870 IPW_WARNING("Overriding invalid channel\n");
5871 if (priv->ieee->mode & IEEE_A) {
5872 network->mode = IEEE_A;
5873 priv->channel = geo->a[0].channel;
5874 } else if (priv->ieee->mode & IEEE_G) {
5875 network->mode = IEEE_G;
5876 priv->channel = geo->bg[0].channel;
5877 } else {
5878 network->mode = IEEE_B;
5879 priv->channel = geo->bg[0].channel;
5880 }
5881 break;
5882 }
5883
5884 network->channel = priv->channel;
5885 priv->config |= CFG_ADHOC_PERSIST;
5886 ipw_create_bssid(priv, network->bssid);
5887 network->ssid_len = priv->essid_len;
5888 memcpy(network->ssid, priv->essid, priv->essid_len);
5889 memset(&network->stats, 0, sizeof(network->stats));
5890 network->capability = WLAN_CAPABILITY_IBSS;
5891 if (!(priv->config & CFG_PREAMBLE_LONG))
5892 network->capability |= WLAN_CAPABILITY_SHORT_PREAMBLE;
5893 if (priv->capability & CAP_PRIVACY_ON)
5894 network->capability |= WLAN_CAPABILITY_PRIVACY;
5895 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
5896 memcpy(network->rates, priv->rates.supported_rates, network->rates_len);
5897 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
5898 memcpy(network->rates_ex,
5899 &priv->rates.supported_rates[network->rates_len],
5900 network->rates_ex_len);
5901 network->last_scanned = 0;
5902 network->flags = 0;
5903 network->last_associate = 0;
5904 network->time_stamp[0] = 0;
5905 network->time_stamp[1] = 0;
5906 network->beacon_interval = 100; /* Default */
5907 network->listen_interval = 10; /* Default */
5908 network->atim_window = 0; /* Default */
5909 network->wpa_ie_len = 0;
5910 network->rsn_ie_len = 0;
5911}
5912
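/* Summary of the routine below: send the active TKIP/CCMP temporal key for
 * the given key slot to the firmware (IPW_CMD_TGI_TX_KEY).  Nothing is sent
 * unless a key is actually installed in that slot, and the TX counter is
 * reset to zero, which matches a freshly installed key. */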
5913static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
5914{
5915 struct ipw_tgi_tx_key key;
5916
5917 if (!(priv->ieee->sec.flags & (1 << index)))
5918 return;
5919
5920 key.key_id = index;
5921 memcpy(key.key, priv->ieee->sec.keys[index], SCM_TEMPORAL_KEY_LENGTH);
5922 key.security_type = type;
5923 key.station_index = 0; /* always 0 for BSS */
5924 key.flags = 0;
5925 /* 0 for new key; previous value of counter (after fatal error) */
5926 key.tx_counter[0] = cpu_to_le32(0);
5927 key.tx_counter[1] = cpu_to_le32(0);
5928
5929 ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
5930}
5931
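/* Download the four WEP/group key slots to the firmware as DINO WEP-key
 * commands.  Slots with no installed key are skipped; 'type' selects the
 * security type encoded into the key index (WEP or CCM group keys). */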
5932static void ipw_send_wep_keys(struct ipw_priv *priv, int type)
5933{
5934 struct ipw_wep_key key;
5935 int i;
5936
5937 key.cmd_id = DINO_CMD_WEP_KEY;
5938 key.seq_num = 0;
5939
5940	/* Note: AES keys cannot be set multiple times.
5941	 * Only set them the first time. */
5942 for (i = 0; i < 4; i++) {
5943 key.key_index = i | type;
5944 if (!(priv->ieee->sec.flags & (1 << i))) {
5945 key.key_size = 0;
5946 continue;
5947 }
5948
5949 key.key_size = priv->ieee->sec.key_sizes[i];
5950 memcpy(key.key, priv->ieee->sec.keys[i], key.key_size);
5951
5952 ipw_send_cmd_pdu(priv, IPW_CMD_WEP_KEY, sizeof(key), &key);
5953 }
5954}
5955
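/* Select whether unicast frames are decrypted by the firmware or by the
 * host for the negotiated security level: CCMP (level 3) and WEP (level 1)
 * stay in hardware, TKIP (level 2) is decrypted on the host.  This is a
 * no-op when host encryption is in use. */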
5956static void ipw_set_hw_decrypt_unicast(struct ipw_priv *priv, int level)
5957{
5958 if (priv->ieee->host_encrypt)
5959 return;
5960
5961 switch (level) {
5962 case SEC_LEVEL_3:
5963 priv->sys_config.disable_unicast_decryption = 0;
5964 priv->ieee->host_decrypt = 0;
5965 break;
5966 case SEC_LEVEL_2:
5967 priv->sys_config.disable_unicast_decryption = 1;
5968 priv->ieee->host_decrypt = 1;
5969 break;
5970 case SEC_LEVEL_1:
5971 priv->sys_config.disable_unicast_decryption = 0;
5972 priv->ieee->host_decrypt = 0;
5973 break;
5974 case SEC_LEVEL_0:
5975 priv->sys_config.disable_unicast_decryption = 1;
5976 break;
5977 default:
5978 break;
5979 }
5980}
5981
5982static void ipw_set_hw_decrypt_multicast(struct ipw_priv *priv, int level)
5983{
5984 if (priv->ieee->host_encrypt)
5985 return;
5986
5987 switch (level) {
5988 case SEC_LEVEL_3:
5989 priv->sys_config.disable_multicast_decryption = 0;
5990 break;
5991 case SEC_LEVEL_2:
5992 priv->sys_config.disable_multicast_decryption = 1;
5993 break;
5994 case SEC_LEVEL_1:
5995 priv->sys_config.disable_multicast_decryption = 0;
5996 break;
5997 case SEC_LEVEL_0:
5998 priv->sys_config.disable_multicast_decryption = 1;
5999 break;
6000 default:
6001 break;
6002 }
6003}
6004
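/* Program the firmware with whatever keys are installed for the active
 * security level: CCMP gets the TX key and, unless multicast decryption is
 * done on the host, the group key slots; TKIP gets only the TX key (the MIC
 * is handled by the host); WEP gets the four WEP key slots plus the
 * hardware decrypt settings. */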
6005static void ipw_set_hwcrypto_keys(struct ipw_priv *priv)
6006{
6007 switch (priv->ieee->sec.level) {
6008 case SEC_LEVEL_3:
6009 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6010 ipw_send_tgi_tx_key(priv,
6011 DCT_FLAG_EXT_SECURITY_CCM,
6012 priv->ieee->sec.active_key);
6013
6014 if (!priv->ieee->host_mc_decrypt)
6015 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_CCM);
6016 break;
6017 case SEC_LEVEL_2:
6018 if (priv->ieee->sec.flags & SEC_ACTIVE_KEY)
6019 ipw_send_tgi_tx_key(priv,
6020 DCT_FLAG_EXT_SECURITY_TKIP,
6021 priv->ieee->sec.active_key);
6022 break;
6023 case SEC_LEVEL_1:
6024 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
6025 ipw_set_hw_decrypt_unicast(priv, priv->ieee->sec.level);
6026 ipw_set_hw_decrypt_multicast(priv, priv->ieee->sec.level);
6027 break;
6028 case SEC_LEVEL_0:
6029 default:
6030 break;
6031 }
6032}
6033
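/* Periodic IBSS watchdog.  If more beacons have been missed than the
 * disassociate threshold and the ad-hoc network is not marked persistent,
 * drop the network and disassociate; otherwise re-arm the check after the
 * association's beacon interval. */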
6034static void ipw_adhoc_check(void *data)
6035{
6036 struct ipw_priv *priv = data;
6037
6038 if (priv->missed_adhoc_beacons++ > priv->disassociate_threshold &&
6039 !(priv->config & CFG_ADHOC_PERSIST)) {
6040 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
6041 IPW_DL_STATE | IPW_DL_ASSOC,
6042 "Missed beacon: %d - disassociate\n",
6043 priv->missed_adhoc_beacons);
6044 ipw_remove_current_network(priv);
6045 ipw_disassociate(priv);
6046 return;
6047 }
6048
6049 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
6050 le16_to_cpu(priv->assoc_request.beacon_interval));
6051}
6052
6053static void ipw_bg_adhoc_check(struct work_struct *work)
6054{
6055 struct ipw_priv *priv =
6056 container_of(work, struct ipw_priv, adhoc_check.work);
6057 mutex_lock(&priv->mutex);
6058 ipw_adhoc_check(priv);
6059 mutex_unlock(&priv->mutex);
6060}
6061
6062static void ipw_debug_config(struct ipw_priv *priv)
6063{
6064 DECLARE_SSID_BUF(ssid);
6065 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
6066 "[CFG 0x%08X]\n", priv->config);
6067 if (priv->config & CFG_STATIC_CHANNEL)
6068 IPW_DEBUG_INFO("Channel locked to %d\n", priv->channel);
6069 else
6070 IPW_DEBUG_INFO("Channel unlocked.\n");
6071 if (priv->config & CFG_STATIC_ESSID)
6072 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
6073 print_ssid(ssid, priv->essid, priv->essid_len));
6074 else
6075 IPW_DEBUG_INFO("ESSID unlocked.\n");
6076 if (priv->config & CFG_STATIC_BSSID)
6077 IPW_DEBUG_INFO("BSSID locked to %pM\n", priv->bssid);
6078 else
6079 IPW_DEBUG_INFO("BSSID unlocked.\n");
6080 if (priv->capability & CAP_PRIVACY_ON)
6081 IPW_DEBUG_INFO("PRIVACY on\n");
6082 else
6083 IPW_DEBUG_INFO("PRIVACY off\n");
6084 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
6085}
6086
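/* Build the fixed-rate override structure from the configured rate mask and
 * write it into the firmware's fixed-override area (whose address is read
 * from IPW_MEM_FIXED_OVERRIDE).  The 6/9/12 Mb OFDM bits are shifted down
 * by one, apparently to match the firmware's rate bit layout for mixed
 * B/G operation. */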
6087static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
6088{
6089 /* TODO: Verify that this works... */
6090 struct ipw_fixed_rate fr = {
6091 .tx_rates = priv->rates_mask
6092 };
6093 u32 reg;
6094 u16 mask = 0;
6095
6096 /* Identify 'current FW band' and match it with the fixed
6097 * Tx rates */
6098
6099 switch (priv->ieee->freq_band) {
6100 case IEEE80211_52GHZ_BAND: /* A only */
6101 /* IEEE_A */
6102 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
6103 /* Invalid fixed rate mask */
6104 IPW_DEBUG_WX
6105 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6106 fr.tx_rates = 0;
6107 break;
6108 }
6109
6110 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
6111 break;
6112
6113	default:		/* 2.4 GHz or Mixed */
6114 /* IEEE_B */
6115 if (mode == IEEE_B) {
6116 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
6117 /* Invalid fixed rate mask */
6118 IPW_DEBUG_WX
6119 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6120 fr.tx_rates = 0;
6121 }
6122 break;
6123 }
6124
6125 /* IEEE_G */
6126 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
6127 IEEE80211_OFDM_RATES_MASK)) {
6128 /* Invalid fixed rate mask */
6129 IPW_DEBUG_WX
6130 ("invalid fixed rate mask in ipw_set_fixed_rate\n");
6131 fr.tx_rates = 0;
6132 break;
6133 }
6134
6135 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
6136 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
6137 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
6138 }
6139
6140 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
6141 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
6142 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
6143 }
6144
6145 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
6146 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
6147 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
6148 }
6149
6150 fr.tx_rates |= mask;
6151 break;
6152 }
6153
6154 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
6155 ipw_write_reg32(priv, reg, *(u32 *) & fr);
6156}
6157
6158static void ipw_abort_scan(struct ipw_priv *priv)
6159{
6160 int err;
6161
6162 if (priv->status & STATUS_SCAN_ABORTING) {
6163 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
6164 return;
6165 }
6166 priv->status |= STATUS_SCAN_ABORTING;
6167
6168 err = ipw_send_scan_abort(priv);
6169 if (err)
6170 IPW_DEBUG_HC("Request to abort scan failed.\n");
6171}
6172
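/* Fill in the channel list of a scan request.  Channels are grouped into
 * per-band runs: the first byte of each run encodes the band (IPW_A_MODE or
 * IPW_B_MODE in the upper bits) together with the number of channels that
 * follow.  The channel we are currently associated on is skipped, and
 * passive-only channels are forced to a passive full-dwell scan. */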
6173static void ipw_add_scan_channels(struct ipw_priv *priv,
6174 struct ipw_scan_request_ext *scan,
6175 int scan_type)
6176{
6177 int channel_index = 0;
6178 const struct ieee80211_geo *geo;
6179 int i;
6180
6181 geo = ieee80211_get_geo(priv->ieee);
6182
6183 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
6184 int start = channel_index;
6185 for (i = 0; i < geo->a_channels; i++) {
6186 if ((priv->status & STATUS_ASSOCIATED) &&
6187 geo->a[i].channel == priv->channel)
6188 continue;
6189 channel_index++;
6190 scan->channels_list[channel_index] = geo->a[i].channel;
6191 ipw_set_scan_type(scan, channel_index,
6192 geo->a[i].
6193 flags & IEEE80211_CH_PASSIVE_ONLY ?
6194 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN :
6195 scan_type);
6196 }
6197
6198 if (start != channel_index) {
6199 scan->channels_list[start] = (u8) (IPW_A_MODE << 6) |
6200 (channel_index - start);
6201 channel_index++;
6202 }
6203 }
6204
6205 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
6206 int start = channel_index;
6207 if (priv->config & CFG_SPEED_SCAN) {
6208 int index;
6209 u8 channels[IEEE80211_24GHZ_CHANNELS] = {
6210 /* nop out the list */
6211 [0] = 0
6212 };
6213
6214 u8 channel;
6215 while (channel_index < IPW_SCAN_CHANNELS) {
6216 channel =
6217 priv->speed_scan[priv->speed_scan_pos];
6218 if (channel == 0) {
6219 priv->speed_scan_pos = 0;
6220 channel = priv->speed_scan[0];
6221 }
6222 if ((priv->status & STATUS_ASSOCIATED) &&
6223 channel == priv->channel) {
6224 priv->speed_scan_pos++;
6225 continue;
6226 }
6227
6228				/* If this channel has already been
6229				 * added to the scan, break out of the
6230				 * loop; it will then be the first
6231				 * channel in the next scan.
6232				 */
6233 if (channels[channel - 1] != 0)
6234 break;
6235
6236 channels[channel - 1] = 1;
6237 priv->speed_scan_pos++;
6238 channel_index++;
6239 scan->channels_list[channel_index] = channel;
6240 index =
6241 ieee80211_channel_to_index(priv->ieee, channel);
6242 ipw_set_scan_type(scan, channel_index,
6243 geo->bg[index].
6244 flags &
6245 IEEE80211_CH_PASSIVE_ONLY ?
6246 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6247 : scan_type);
6248 }
6249 } else {
6250 for (i = 0; i < geo->bg_channels; i++) {
6251 if ((priv->status & STATUS_ASSOCIATED) &&
6252 geo->bg[i].channel == priv->channel)
6253 continue;
6254 channel_index++;
6255 scan->channels_list[channel_index] =
6256 geo->bg[i].channel;
6257 ipw_set_scan_type(scan, channel_index,
6258 geo->bg[i].
6259 flags &
6260 IEEE80211_CH_PASSIVE_ONLY ?
6261 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN
6262 : scan_type);
6263 }
6264 }
6265
6266 if (start != channel_index) {
6267 scan->channels_list[start] = (u8) (IPW_B_MODE << 6) |
6268 (channel_index - start);
6269 }
6270 }
6271}
6272
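/* Common scan path for active, passive and directed scans.  If a scan is
 * already running, an abort is pending or RF-kill is asserted, the request
 * is queued (STATUS_SCAN_PENDING / STATUS_DIRECT_SCAN_PENDING) and retried
 * later.  Directed scans first download the SSID to scan for, and a
 * watchdog is armed to catch scans that never complete. */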
6273static int ipw_request_scan_helper(struct ipw_priv *priv, int type, int direct)
6274{
6275 struct ipw_scan_request_ext scan;
6276 int err = 0, scan_type;
6277
6278 if (!(priv->status & STATUS_INIT) ||
6279 (priv->status & STATUS_EXIT_PENDING))
6280 return 0;
6281
6282 mutex_lock(&priv->mutex);
6283
6284 if (direct && (priv->direct_scan_ssid_len == 0)) {
6285 IPW_DEBUG_HC("Direct scan requested but no SSID to scan for\n");
6286 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6287 goto done;
6288 }
6289
6290 if (priv->status & STATUS_SCANNING) {
6291 IPW_DEBUG_HC("Concurrent scan requested. Queuing.\n");
6292 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6293 STATUS_SCAN_PENDING;
6294 goto done;
6295 }
6296
6297 if (!(priv->status & STATUS_SCAN_FORCED) &&
6298 priv->status & STATUS_SCAN_ABORTING) {
6299 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
6300 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6301 STATUS_SCAN_PENDING;
6302 goto done;
6303 }
6304
6305 if (priv->status & STATUS_RF_KILL_MASK) {
6306 IPW_DEBUG_HC("Queuing scan due to RF Kill activation\n");
6307 priv->status |= direct ? STATUS_DIRECT_SCAN_PENDING :
6308 STATUS_SCAN_PENDING;
6309 goto done;
6310 }
6311
6312 memset(&scan, 0, sizeof(scan));
6313 scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
6314
6315 if (type == IW_SCAN_TYPE_PASSIVE) {
6316 IPW_DEBUG_WX("use passive scanning\n");
6317 scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
6318 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6319 cpu_to_le16(120);
6320 ipw_add_scan_channels(priv, &scan, scan_type);
6321 goto send_request;
6322 }
6323
6324 /* Use active scan by default. */
6325 if (priv->config & CFG_SPEED_SCAN)
6326 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6327 cpu_to_le16(30);
6328 else
6329 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
6330 cpu_to_le16(20);
6331
6332 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
6333 cpu_to_le16(20);
6334
6335 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
6336 scan.dwell_time[IPW_SCAN_ACTIVE_DIRECT_SCAN] = cpu_to_le16(20);
6337
6338#ifdef CONFIG_IPW2200_MONITOR
6339 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
6340 u8 channel;
6341 u8 band = 0;
6342
6343 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
6344 case IEEE80211_52GHZ_BAND:
6345 band = (u8) (IPW_A_MODE << 6) | 1;
6346 channel = priv->channel;
6347 break;
6348
6349 case IEEE80211_24GHZ_BAND:
6350 band = (u8) (IPW_B_MODE << 6) | 1;
6351 channel = priv->channel;
6352 break;
6353
6354 default:
6355 band = (u8) (IPW_B_MODE << 6) | 1;
6356 channel = 9;
6357 break;
6358 }
6359
6360 scan.channels_list[0] = band;
6361 scan.channels_list[1] = channel;
6362 ipw_set_scan_type(&scan, 1, IPW_SCAN_PASSIVE_FULL_DWELL_SCAN);
6363
6364 /* NOTE: The card will sit on this channel for this time
6365 * period. Scan aborts are timing sensitive and frequently
6366 * result in firmware restarts. As such, it is best to
6367 * set a small dwell_time here and just keep re-issuing
6368 * scans. Otherwise fast channel hopping will not actually
6369 * hop channels.
6370 *
6371 * TODO: Move SPEED SCAN support to all modes and bands */
6372 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
6373 cpu_to_le16(2000);
6374 } else {
6375#endif /* CONFIG_IPW2200_MONITOR */
6376 /* Honor direct scans first, otherwise if we are roaming make
6377 * this a direct scan for the current network. Finally,
6378 * ensure that every other scan is a fast channel hop scan */
6379 if (direct) {
6380 err = ipw_send_ssid(priv, priv->direct_scan_ssid,
6381 priv->direct_scan_ssid_len);
6382 if (err) {
6383 IPW_DEBUG_HC("Attempt to send SSID command "
6384 "failed\n");
6385 goto done;
6386 }
6387
6388 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6389 } else if ((priv->status & STATUS_ROAMING)
6390 || (!(priv->status & STATUS_ASSOCIATED)
6391 && (priv->config & CFG_STATIC_ESSID)
6392 && (le32_to_cpu(scan.full_scan_index) % 2))) {
6393 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
6394 if (err) {
6395 IPW_DEBUG_HC("Attempt to send SSID command "
6396 "failed.\n");
6397 goto done;
6398 }
6399
6400 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
6401 } else
6402 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
6403
6404 ipw_add_scan_channels(priv, &scan, scan_type);
6405#ifdef CONFIG_IPW2200_MONITOR
6406 }
6407#endif
6408
6409send_request:
6410 err = ipw_send_scan_request_ext(priv, &scan);
6411 if (err) {
6412 IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
6413 goto done;
6414 }
6415
6416 priv->status |= STATUS_SCANNING;
6417 if (direct) {
6418 priv->status &= ~STATUS_DIRECT_SCAN_PENDING;
6419 priv->direct_scan_ssid_len = 0;
6420 } else
6421 priv->status &= ~STATUS_SCAN_PENDING;
6422
6423 queue_delayed_work(priv->workqueue, &priv->scan_check,
6424 IPW_SCAN_CHECK_WATCHDOG);
6425done:
6426 mutex_unlock(&priv->mutex);
6427 return err;
6428}
6429
6430static void ipw_request_passive_scan(struct work_struct *work)
6431{
6432 struct ipw_priv *priv =
6433 container_of(work, struct ipw_priv, request_passive_scan.work);
6434 ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE, 0);
6435}
6436
6437static void ipw_request_scan(struct work_struct *work)
6438{
6439 struct ipw_priv *priv =
6440 container_of(work, struct ipw_priv, request_scan.work);
6441 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 0);
6442}
6443
6444static void ipw_request_direct_scan(struct work_struct *work)
6445{
6446 struct ipw_priv *priv =
6447 container_of(work, struct ipw_priv, request_direct_scan.work);
6448 ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE, 1);
6449}
6450
6451static void ipw_bg_abort_scan(struct work_struct *work)
6452{
6453 struct ipw_priv *priv =
6454 container_of(work, struct ipw_priv, abort_scan);
6455 mutex_lock(&priv->mutex);
6456 ipw_abort_scan(priv);
6457 mutex_unlock(&priv->mutex);
6458}
6459
6460static int ipw_wpa_enable(struct ipw_priv *priv, int value)
6461{
6462 /* This is called when wpa_supplicant loads and closes the driver
6463 * interface. */
6464 priv->ieee->wpa_enabled = value;
6465 return 0;
6466}
6467
6468static int ipw_wpa_set_auth_algs(struct ipw_priv *priv, int value)
6469{
6470 struct ieee80211_device *ieee = priv->ieee;
6471 struct ieee80211_security sec = {
6472 .flags = SEC_AUTH_MODE,
6473 };
6474 int ret = 0;
6475
6476 if (value & IW_AUTH_ALG_SHARED_KEY) {
6477 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
6478 ieee->open_wep = 0;
6479 } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) {
6480 sec.auth_mode = WLAN_AUTH_OPEN;
6481 ieee->open_wep = 1;
6482 } else if (value & IW_AUTH_ALG_LEAP) {
6483 sec.auth_mode = WLAN_AUTH_LEAP;
6484 ieee->open_wep = 1;
6485 } else
6486 return -EINVAL;
6487
6488 if (ieee->set_security)
6489 ieee->set_security(ieee->dev, &sec);
6490 else
6491 ret = -EOPNOTSUPP;
6492
6493 return ret;
6494}
6495
6496static void ipw_wpa_assoc_frame(struct ipw_priv *priv, char *wpa_ie,
6497 int wpa_ie_len)
6498{
6499 /* make sure WPA is enabled */
6500 ipw_wpa_enable(priv, 1);
6501}
6502
6503static int ipw_set_rsn_capa(struct ipw_priv *priv,
6504 char *capabilities, int length)
6505{
6506 IPW_DEBUG_HC("HOST_CMD_RSN_CAPABILITIES\n");
6507
6508 return ipw_send_cmd_pdu(priv, IPW_CMD_RSN_CAPABILITIES, length,
6509 capabilities);
6510}
6511
6512/*
6513 * WE-18 support
6514 */
6515
6516/* SIOCSIWGENIE */
6517static int ipw_wx_set_genie(struct net_device *dev,
6518 struct iw_request_info *info,
6519 union iwreq_data *wrqu, char *extra)
6520{
6521 struct ipw_priv *priv = ieee80211_priv(dev);
6522 struct ieee80211_device *ieee = priv->ieee;
6523 u8 *buf;
6524 int err = 0;
6525
6526 if (wrqu->data.length > MAX_WPA_IE_LEN ||
6527 (wrqu->data.length && extra == NULL))
6528 return -EINVAL;
6529
6530 if (wrqu->data.length) {
6531 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6532 if (buf == NULL) {
6533 err = -ENOMEM;
6534 goto out;
6535 }
6536
6537 memcpy(buf, extra, wrqu->data.length);
6538 kfree(ieee->wpa_ie);
6539 ieee->wpa_ie = buf;
6540 ieee->wpa_ie_len = wrqu->data.length;
6541 } else {
6542 kfree(ieee->wpa_ie);
6543 ieee->wpa_ie = NULL;
6544 ieee->wpa_ie_len = 0;
6545 }
6546
6547 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6548 out:
6549 return err;
6550}
6551
6552/* SIOCGIWGENIE */
6553static int ipw_wx_get_genie(struct net_device *dev,
6554 struct iw_request_info *info,
6555 union iwreq_data *wrqu, char *extra)
6556{
6557 struct ipw_priv *priv = ieee80211_priv(dev);
6558 struct ieee80211_device *ieee = priv->ieee;
6559 int err = 0;
6560
6561 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6562 wrqu->data.length = 0;
6563 goto out;
6564 }
6565
6566 if (wrqu->data.length < ieee->wpa_ie_len) {
6567 err = -E2BIG;
6568 goto out;
6569 }
6570
6571 wrqu->data.length = ieee->wpa_ie_len;
6572 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6573
6574 out:
6575 return err;
6576}
6577
6578static int wext_cipher2level(int cipher)
6579{
6580 switch (cipher) {
6581 case IW_AUTH_CIPHER_NONE:
6582 return SEC_LEVEL_0;
6583 case IW_AUTH_CIPHER_WEP40:
6584 case IW_AUTH_CIPHER_WEP104:
6585 return SEC_LEVEL_1;
6586 case IW_AUTH_CIPHER_TKIP:
6587 return SEC_LEVEL_2;
6588 case IW_AUTH_CIPHER_CCMP:
6589 return SEC_LEVEL_3;
6590 default:
6591 return -1;
6592 }
6593}
6594
6595/* SIOCSIWAUTH */
6596static int ipw_wx_set_auth(struct net_device *dev,
6597 struct iw_request_info *info,
6598 union iwreq_data *wrqu, char *extra)
6599{
6600 struct ipw_priv *priv = ieee80211_priv(dev);
6601 struct ieee80211_device *ieee = priv->ieee;
6602 struct iw_param *param = &wrqu->param;
6603 struct lib80211_crypt_data *crypt;
6604 unsigned long flags;
6605 int ret = 0;
6606
6607 switch (param->flags & IW_AUTH_INDEX) {
6608 case IW_AUTH_WPA_VERSION:
6609 break;
6610 case IW_AUTH_CIPHER_PAIRWISE:
6611 ipw_set_hw_decrypt_unicast(priv,
6612 wext_cipher2level(param->value));
6613 break;
6614 case IW_AUTH_CIPHER_GROUP:
6615 ipw_set_hw_decrypt_multicast(priv,
6616 wext_cipher2level(param->value));
6617 break;
6618 case IW_AUTH_KEY_MGMT:
6619 /*
6620 * ipw2200 does not use these parameters
6621 */
6622 break;
6623
6624 case IW_AUTH_TKIP_COUNTERMEASURES:
6625 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6626 if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
6627 break;
6628
6629 flags = crypt->ops->get_flags(crypt->priv);
6630
6631 if (param->value)
6632 flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6633 else
6634 flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
6635
6636 crypt->ops->set_flags(flags, crypt->priv);
6637
6638 break;
6639
6640 case IW_AUTH_DROP_UNENCRYPTED:{
6641 /* HACK:
6642 *
6643 * wpa_supplicant calls set_wpa_enabled when the driver
6644			 * is loaded and unloaded, regardless of whether WPA is
6645			 * being used. No other calls are made that can be used
6646			 * to determine whether encryption will be used before
6647			 * association is expected. If encryption is not being
6648 * used, drop_unencrypted is set to false, else true -- we
6649 * can use this to determine if the CAP_PRIVACY_ON bit should
6650 * be set.
6651 */
6652 struct ieee80211_security sec = {
6653 .flags = SEC_ENABLED,
6654 .enabled = param->value,
6655 };
6656 priv->ieee->drop_unencrypted = param->value;
6657 /* We only change SEC_LEVEL for open mode. Others
6658 * are set by ipw_wpa_set_encryption.
6659 */
6660 if (!param->value) {
6661 sec.flags |= SEC_LEVEL;
6662 sec.level = SEC_LEVEL_0;
6663 } else {
6664 sec.flags |= SEC_LEVEL;
6665 sec.level = SEC_LEVEL_1;
6666 }
6667 if (priv->ieee->set_security)
6668 priv->ieee->set_security(priv->ieee->dev, &sec);
6669 break;
6670 }
6671
6672 case IW_AUTH_80211_AUTH_ALG:
6673 ret = ipw_wpa_set_auth_algs(priv, param->value);
6674 break;
6675
6676 case IW_AUTH_WPA_ENABLED:
6677 ret = ipw_wpa_enable(priv, param->value);
6678 ipw_disassociate(priv);
6679 break;
6680
6681 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6682 ieee->ieee802_1x = param->value;
6683 break;
6684
6685 case IW_AUTH_PRIVACY_INVOKED:
6686 ieee->privacy_invoked = param->value;
6687 break;
6688
6689 default:
6690 return -EOPNOTSUPP;
6691 }
6692 return ret;
6693}
6694
6695/* SIOCGIWAUTH */
6696static int ipw_wx_get_auth(struct net_device *dev,
6697 struct iw_request_info *info,
6698 union iwreq_data *wrqu, char *extra)
6699{
6700 struct ipw_priv *priv = ieee80211_priv(dev);
6701 struct ieee80211_device *ieee = priv->ieee;
6702 struct lib80211_crypt_data *crypt;
6703 struct iw_param *param = &wrqu->param;
6704 int ret = 0;
6705
6706 switch (param->flags & IW_AUTH_INDEX) {
6707 case IW_AUTH_WPA_VERSION:
6708 case IW_AUTH_CIPHER_PAIRWISE:
6709 case IW_AUTH_CIPHER_GROUP:
6710 case IW_AUTH_KEY_MGMT:
6711 /*
6712 * wpa_supplicant will control these internally
6713 */
6714 ret = -EOPNOTSUPP;
6715 break;
6716
6717 case IW_AUTH_TKIP_COUNTERMEASURES:
6718 crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx];
6719 if (!crypt || !crypt->ops->get_flags)
6720 break;
6721
6722 param->value = (crypt->ops->get_flags(crypt->priv) &
6723 IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 1 : 0;
6724
6725 break;
6726
6727 case IW_AUTH_DROP_UNENCRYPTED:
6728 param->value = ieee->drop_unencrypted;
6729 break;
6730
6731 case IW_AUTH_80211_AUTH_ALG:
6732 param->value = ieee->sec.auth_mode;
6733 break;
6734
6735 case IW_AUTH_WPA_ENABLED:
6736 param->value = ieee->wpa_enabled;
6737 break;
6738
6739 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
6740 param->value = ieee->ieee802_1x;
6741 break;
6742
6743 case IW_AUTH_ROAMING_CONTROL:
6744 case IW_AUTH_PRIVACY_INVOKED:
6745 param->value = ieee->privacy_invoked;
6746 break;
6747
6748 default:
6749 return -EOPNOTSUPP;
6750 }
6751 return 0;
6752}
6753
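/* With hardware crypto enabled, TKIP still needs help from the host: the
 * firmware cannot generate the TKIP MIC, so MSDU encryption and decryption
 * stay on the host (and group-key decryption when a group key is set),
 * while WEP and CCMP are handled fully in hardware. */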
6754/* SIOCSIWENCODEEXT */
6755static int ipw_wx_set_encodeext(struct net_device *dev,
6756 struct iw_request_info *info,
6757 union iwreq_data *wrqu, char *extra)
6758{
6759 struct ipw_priv *priv = ieee80211_priv(dev);
6760 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
6761
6762 if (hwcrypto) {
6763 if (ext->alg == IW_ENCODE_ALG_TKIP) {
6764 /* IPW HW can't build TKIP MIC,
6765 host decryption still needed */
6766 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY)
6767 priv->ieee->host_mc_decrypt = 1;
6768 else {
6769 priv->ieee->host_encrypt = 0;
6770 priv->ieee->host_encrypt_msdu = 1;
6771 priv->ieee->host_decrypt = 1;
6772 }
6773 } else {
6774 priv->ieee->host_encrypt = 0;
6775 priv->ieee->host_encrypt_msdu = 0;
6776 priv->ieee->host_decrypt = 0;
6777 priv->ieee->host_mc_decrypt = 0;
6778 }
6779 }
6780
6781 return ieee80211_wx_set_encodeext(priv->ieee, info, wrqu, extra);
6782}
6783
6784/* SIOCGIWENCODEEXT */
6785static int ipw_wx_get_encodeext(struct net_device *dev,
6786 struct iw_request_info *info,
6787 union iwreq_data *wrqu, char *extra)
6788{
6789 struct ipw_priv *priv = ieee80211_priv(dev);
6790 return ieee80211_wx_get_encodeext(priv->ieee, info, wrqu, extra);
6791}
6792
6793/* SIOCSIWMLME */
6794static int ipw_wx_set_mlme(struct net_device *dev,
6795 struct iw_request_info *info,
6796 union iwreq_data *wrqu, char *extra)
6797{
6798 struct ipw_priv *priv = ieee80211_priv(dev);
6799 struct iw_mlme *mlme = (struct iw_mlme *)extra;
6800 __le16 reason;
6801
6802 reason = cpu_to_le16(mlme->reason_code);
6803
6804 switch (mlme->cmd) {
6805 case IW_MLME_DEAUTH:
6806 /* silently ignore */
6807 break;
6808
6809 case IW_MLME_DISASSOC:
6810 ipw_disassociate(priv);
6811 break;
6812
6813 default:
6814 return -EOPNOTSUPP;
6815 }
6816 return 0;
6817}
6818
6819#ifdef CONFIG_IPW2200_QOS
6820
6821/* QoS */
6822/*
6823* get the modulation type of the current network or
6824* the card's current mode
6825*/
6826static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6827{
6828 u8 mode = 0;
6829
6830 if (priv->status & STATUS_ASSOCIATED) {
6831 unsigned long flags;
6832
6833 spin_lock_irqsave(&priv->ieee->lock, flags);
6834 mode = priv->assoc_network->mode;
6835 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6836 } else {
6837 mode = priv->ieee->mode;
6838 }
6839 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode);
6840 return mode;
6841}
6842
6843/*
6844* Handle beacon and probe response management frames
6845*/
6846static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6847 int active_network,
6848 struct ieee80211_network *network)
6849{
6850 u32 size = sizeof(struct ieee80211_qos_parameters);
6851
6852 if (network->capability & WLAN_CAPABILITY_IBSS)
6853 network->qos_data.active = network->qos_data.supported;
6854
6855 if (network->flags & NETWORK_HAS_QOS_MASK) {
6856 if (active_network &&
6857 (network->flags & NETWORK_HAS_QOS_PARAMETERS))
6858 network->qos_data.active = network->qos_data.supported;
6859
6860 if ((network->qos_data.active == 1) && (active_network == 1) &&
6861 (network->flags & NETWORK_HAS_QOS_PARAMETERS) &&
6862 (network->qos_data.old_param_count !=
6863 network->qos_data.param_count)) {
6864 network->qos_data.old_param_count =
6865 network->qos_data.param_count;
6866 schedule_work(&priv->qos_activate);
6867			IPW_DEBUG_QOS("QoS parameters changed, calling "
6868				      "qos_activate\n");
6869 }
6870 } else {
6871 if ((priv->ieee->mode == IEEE_B) || (network->mode == IEEE_B))
6872 memcpy(&network->qos_data.parameters,
6873 &def_parameters_CCK, size);
6874 else
6875 memcpy(&network->qos_data.parameters,
6876 &def_parameters_OFDM, size);
6877
6878 if ((network->qos_data.active == 1) && (active_network == 1)) {
6879			IPW_DEBUG_QOS("QoS was disabled, calling qos_activate\n");
6880 schedule_work(&priv->qos_activate);
6881 }
6882
6883 network->qos_data.active = 0;
6884 network->qos_data.supported = 0;
6885 }
6886 if ((priv->status & STATUS_ASSOCIATED) &&
6887 (priv->ieee->iw_mode == IW_MODE_ADHOC) && (active_network == 0)) {
6888 if (memcmp(network->bssid, priv->bssid, ETH_ALEN))
6889 if (network->capability & WLAN_CAPABILITY_IBSS)
6890 if ((network->ssid_len ==
6891 priv->assoc_network->ssid_len) &&
6892 !memcmp(network->ssid,
6893 priv->assoc_network->ssid,
6894 network->ssid_len)) {
6895 queue_work(priv->workqueue,
6896 &priv->merge_networks);
6897 }
6898 }
6899
6900 return 0;
6901}
6902
6903/*
6904* This function sets up the firmware to support QoS. It sends
6905* IPW_CMD_QOS_PARAMETERS and IPW_CMD_WME_INFO
6906*/
6907static int ipw_qos_activate(struct ipw_priv *priv,
6908 struct ieee80211_qos_data *qos_network_data)
6909{
6910 int err;
6911 struct ieee80211_qos_parameters qos_parameters[QOS_QOS_SETS];
6912 struct ieee80211_qos_parameters *active_one = NULL;
6913 u32 size = sizeof(struct ieee80211_qos_parameters);
6914 u32 burst_duration;
6915 int i;
6916 u8 type;
6917
6918 type = ipw_qos_current_mode(priv);
6919
6920 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_CCK]);
6921 memcpy(active_one, priv->qos_data.def_qos_parm_CCK, size);
6922 active_one = &(qos_parameters[QOS_PARAM_SET_DEF_OFDM]);
6923 memcpy(active_one, priv->qos_data.def_qos_parm_OFDM, size);
6924
6925 if (qos_network_data == NULL) {
6926 if (type == IEEE_B) {
6927 IPW_DEBUG_QOS("QoS activate network mode %d\n", type);
6928 active_one = &def_parameters_CCK;
6929 } else
6930 active_one = &def_parameters_OFDM;
6931
6932 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6933 burst_duration = ipw_qos_get_burst_duration(priv);
6934 for (i = 0; i < QOS_QUEUE_NUM; i++)
6935 qos_parameters[QOS_PARAM_SET_ACTIVE].tx_op_limit[i] =
6936 cpu_to_le16(burst_duration);
6937 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
6938 if (type == IEEE_B) {
6939			IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
6940 type);
6941 if (priv->qos_data.qos_enable == 0)
6942 active_one = &def_parameters_CCK;
6943 else
6944 active_one = priv->qos_data.def_qos_parm_CCK;
6945 } else {
6946 if (priv->qos_data.qos_enable == 0)
6947 active_one = &def_parameters_OFDM;
6948 else
6949 active_one = priv->qos_data.def_qos_parm_OFDM;
6950 }
6951 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6952 } else {
6953 unsigned long flags;
6954 int active;
6955
6956 spin_lock_irqsave(&priv->ieee->lock, flags);
6957 active_one = &(qos_network_data->parameters);
6958 qos_network_data->old_param_count =
6959 qos_network_data->param_count;
6960 memcpy(&qos_parameters[QOS_PARAM_SET_ACTIVE], active_one, size);
6961 active = qos_network_data->supported;
6962 spin_unlock_irqrestore(&priv->ieee->lock, flags);
6963
6964 if (active == 0) {
6965 burst_duration = ipw_qos_get_burst_duration(priv);
6966 for (i = 0; i < QOS_QUEUE_NUM; i++)
6967 qos_parameters[QOS_PARAM_SET_ACTIVE].
6968 tx_op_limit[i] = cpu_to_le16(burst_duration);
6969 }
6970 }
6971
6972 IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
6973 err = ipw_send_qos_params_command(priv,
6974 (struct ieee80211_qos_parameters *)
6975 &(qos_parameters[0]));
6976 if (err)
6977 IPW_DEBUG_QOS("QoS IPW_CMD_QOS_PARAMETERS failed\n");
6978
6979 return err;
6980}
6981
6982/*
6983* send IPW_CMD_WME_INFO to the firmware
6984*/
6985static int ipw_qos_set_info_element(struct ipw_priv *priv)
6986{
6987 int ret = 0;
6988 struct ieee80211_qos_information_element qos_info;
6989
6990 if (priv == NULL)
6991 return -1;
6992
6993 qos_info.elementID = QOS_ELEMENT_ID;
6994 qos_info.length = sizeof(struct ieee80211_qos_information_element) - 2;
6995
6996 qos_info.version = QOS_VERSION_1;
6997 qos_info.ac_info = 0;
6998
6999 memcpy(qos_info.qui, qos_oui, QOS_OUI_LEN);
7000 qos_info.qui_type = QOS_OUI_TYPE;
7001 qos_info.qui_subtype = QOS_OUI_INFO_SUB_TYPE;
7002
7003 ret = ipw_send_qos_info_command(priv, &qos_info);
7004 if (ret != 0) {
7005 IPW_DEBUG_QOS("QoS error calling ipw_send_qos_info_command\n");
7006 }
7007 return ret;
7008}
7009
7010/*
7011* Set the QoS parameter with the association request structure
7012*/
7013static int ipw_qos_association(struct ipw_priv *priv,
7014 struct ieee80211_network *network)
7015{
7016 int err = 0;
7017 struct ieee80211_qos_data *qos_data = NULL;
7018 struct ieee80211_qos_data ibss_data = {
7019 .supported = 1,
7020 .active = 1,
7021 };
7022
7023 switch (priv->ieee->iw_mode) {
7024 case IW_MODE_ADHOC:
7025 BUG_ON(!(network->capability & WLAN_CAPABILITY_IBSS));
7026
7027 qos_data = &ibss_data;
7028 break;
7029
7030 case IW_MODE_INFRA:
7031 qos_data = &network->qos_data;
7032 break;
7033
7034 default:
7035 BUG();
7036 break;
7037 }
7038
7039 err = ipw_qos_activate(priv, qos_data);
7040 if (err) {
7041 priv->assoc_request.policy_support &= ~HC_QOS_SUPPORT_ASSOC;
7042 return err;
7043 }
7044
7045 if (priv->qos_data.qos_enable && qos_data->supported) {
7046 IPW_DEBUG_QOS("QoS will be enabled for this association\n");
7047 priv->assoc_request.policy_support |= HC_QOS_SUPPORT_ASSOC;
7048 return ipw_qos_set_info_element(priv);
7049 }
7050
7051 return 0;
7052}
7053
7054/*
7055* Handle the association response. If the QoS settings advertised by
7056* the network differ from the settings we associated with, adjust the
7057* QoS settings accordingly.
7058*/
7059static int ipw_qos_association_resp(struct ipw_priv *priv,
7060 struct ieee80211_network *network)
7061{
7062 int ret = 0;
7063 unsigned long flags;
7064 u32 size = sizeof(struct ieee80211_qos_parameters);
7065 int set_qos_param = 0;
7066
7067 if ((priv == NULL) || (network == NULL) ||
7068 (priv->assoc_network == NULL))
7069 return ret;
7070
7071 if (!(priv->status & STATUS_ASSOCIATED))
7072 return ret;
7073
7074	if (priv->ieee->iw_mode != IW_MODE_INFRA)
7075 return ret;
7076
7077 spin_lock_irqsave(&priv->ieee->lock, flags);
7078 if (network->flags & NETWORK_HAS_QOS_PARAMETERS) {
7079 memcpy(&priv->assoc_network->qos_data, &network->qos_data,
7080 sizeof(struct ieee80211_qos_data));
7081 priv->assoc_network->qos_data.active = 1;
7082 if ((network->qos_data.old_param_count !=
7083 network->qos_data.param_count)) {
7084 set_qos_param = 1;
7085 network->qos_data.old_param_count =
7086 network->qos_data.param_count;
7087 }
7088
7089 } else {
7090 if ((network->mode == IEEE_B) || (priv->ieee->mode == IEEE_B))
7091 memcpy(&priv->assoc_network->qos_data.parameters,
7092 &def_parameters_CCK, size);
7093 else
7094 memcpy(&priv->assoc_network->qos_data.parameters,
7095 &def_parameters_OFDM, size);
7096 priv->assoc_network->qos_data.active = 0;
7097 priv->assoc_network->qos_data.supported = 0;
7098 set_qos_param = 1;
7099 }
7100
7101 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7102
7103 if (set_qos_param == 1)
7104 schedule_work(&priv->qos_activate);
7105
7106 return ret;
7107}
7108
7109static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv)
7110{
7111 u32 ret = 0;
7112
7113	if (priv == NULL)
7114 return 0;
7115
7116 if (!(priv->ieee->modulation & IEEE80211_OFDM_MODULATION))
7117 ret = priv->qos_data.burst_duration_CCK;
7118 else
7119 ret = priv->qos_data.burst_duration_OFDM;
7120
7121 return ret;
7122}
7123
7124/*
7125* Initialize the global QoS settings
7126*/
7127static void ipw_qos_init(struct ipw_priv *priv, int enable,
7128 int burst_enable, u32 burst_duration_CCK,
7129 u32 burst_duration_OFDM)
7130{
7131 priv->qos_data.qos_enable = enable;
7132
7133 if (priv->qos_data.qos_enable) {
7134 priv->qos_data.def_qos_parm_CCK = &def_qos_parameters_CCK;
7135 priv->qos_data.def_qos_parm_OFDM = &def_qos_parameters_OFDM;
7136 IPW_DEBUG_QOS("QoS is enabled\n");
7137 } else {
7138 priv->qos_data.def_qos_parm_CCK = &def_parameters_CCK;
7139 priv->qos_data.def_qos_parm_OFDM = &def_parameters_OFDM;
7140 IPW_DEBUG_QOS("QoS is not enabled\n");
7141 }
7142
7143 priv->qos_data.burst_enable = burst_enable;
7144
7145 if (burst_enable) {
7146 priv->qos_data.burst_duration_CCK = burst_duration_CCK;
7147 priv->qos_data.burst_duration_OFDM = burst_duration_OFDM;
7148 } else {
7149 priv->qos_data.burst_duration_CCK = 0;
7150 priv->qos_data.burst_duration_OFDM = 0;
7151 }
7152}
7153
7154/*
7155* map the packet priority to the right TX Queue
7156*/
7157static int ipw_get_tx_queue_number(struct ipw_priv *priv, u16 priority)
7158{
7159 if (priority > 7 || !priv->qos_data.qos_enable)
7160 priority = 0;
7161
7162 return from_priority_to_tx_queue[priority] - 1;
7163}
7164
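/* Decide whether an outgoing frame should be sent as a QoS frame: only when
 * we are associated, QoS is enabled locally, and the current network has
 * QoS active.  In IBSS, multicast frames force QoS inactive while unicast
 * frames follow the network's advertised support. */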
7165static int ipw_is_qos_active(struct net_device *dev,
7166 struct sk_buff *skb)
7167{
7168 struct ipw_priv *priv = ieee80211_priv(dev);
7169 struct ieee80211_qos_data *qos_data = NULL;
7170 int active, supported;
7171 u8 *daddr = skb->data + ETH_ALEN;
7172 int unicast = !is_multicast_ether_addr(daddr);
7173
7174 if (!(priv->status & STATUS_ASSOCIATED))
7175 return 0;
7176
7177 qos_data = &priv->assoc_network->qos_data;
7178
7179 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7180 if (unicast == 0)
7181 qos_data->active = 0;
7182 else
7183 qos_data->active = qos_data->supported;
7184 }
7185 active = qos_data->active;
7186 supported = qos_data->supported;
7187 IPW_DEBUG_QOS("QoS %d network is QoS active %d supported %d "
7188 "unicast %d\n",
7189 priv->qos_data.qos_enable, active, supported, unicast);
7190 if (active && priv->qos_data.qos_enable)
7191 return 1;
7192
7193 return 0;
7194
7195}
7196/*
7197* add QoS parameter to the TX command
7198*/
7199static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
7200 u16 priority,
7201 struct tfd_data *tfd)
7202{
7203 int tx_queue_id = 0;
7204
7205
7206 tx_queue_id = from_priority_to_tx_queue[priority] - 1;
7207 tfd->tx_flags_ext |= DCT_FLAG_EXT_QOS_ENABLED;
7208
7209 if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
7210 tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
7211 tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
7212 }
7213 return 0;
7214}
7215
7216/*
7217* background support to run QoS activate functionality
7218*/
7219static void ipw_bg_qos_activate(struct work_struct *work)
7220{
7221 struct ipw_priv *priv =
7222 container_of(work, struct ipw_priv, qos_activate);
7223
7224 if (priv == NULL)
7225 return;
7226
7227 mutex_lock(&priv->mutex);
7228
7229 if (priv->status & STATUS_ASSOCIATED)
7230 ipw_qos_activate(priv, &(priv->assoc_network->qos_data));
7231
7232 mutex_unlock(&priv->mutex);
7233}
7234
7235static int ipw_handle_probe_response(struct net_device *dev,
7236 struct ieee80211_probe_response *resp,
7237 struct ieee80211_network *network)
7238{
7239 struct ipw_priv *priv = ieee80211_priv(dev);
7240 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7241 (network == priv->assoc_network));
7242
7243 ipw_qos_handle_probe_response(priv, active_network, network);
7244
7245 return 0;
7246}
7247
7248static int ipw_handle_beacon(struct net_device *dev,
7249 struct ieee80211_beacon *resp,
7250 struct ieee80211_network *network)
7251{
7252 struct ipw_priv *priv = ieee80211_priv(dev);
7253 int active_network = ((priv->status & STATUS_ASSOCIATED) &&
7254 (network == priv->assoc_network));
7255
7256 ipw_qos_handle_probe_response(priv, active_network, network);
7257
7258 return 0;
7259}
7260
7261static int ipw_handle_assoc_response(struct net_device *dev,
7262 struct ieee80211_assoc_response *resp,
7263 struct ieee80211_network *network)
7264{
7265 struct ipw_priv *priv = ieee80211_priv(dev);
7266 ipw_qos_association_resp(priv, network);
7267 return 0;
7268}
7269
7270static int ipw_send_qos_params_command(struct ipw_priv *priv, struct ieee80211_qos_parameters
7271 *qos_param)
7272{
7273 return ipw_send_cmd_pdu(priv, IPW_CMD_QOS_PARAMETERS,
7274 sizeof(*qos_param) * 3, qos_param);
7275}
7276
7277static int ipw_send_qos_info_command(struct ipw_priv *priv, struct ieee80211_qos_information_element
7278 *qos_param)
7279{
7280 return ipw_send_cmd_pdu(priv, IPW_CMD_WME_INFO, sizeof(*qos_param),
7281 qos_param);
7282}
7283
7284#endif /* CONFIG_IPW2200_QOS */
7285
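/* Build and send the association request for the selected network.  This
 * fills in priv->assoc_request (channel, authentication type and key, IEEE
 * mode, preamble, capability bits, TSF and BSSID), then pushes the SSID,
 * the supported-rates set, the system configuration and the RX sensitivity
 * before finally issuing the associate command.  On an IBSS with a zero
 * timestamp the firmware is told to start the IBSS instead of joining it
 * (HC_IBSS_START). */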
7286static int ipw_associate_network(struct ipw_priv *priv,
7287 struct ieee80211_network *network,
7288 struct ipw_supported_rates *rates, int roaming)
7289{
7290 int err;
7291 DECLARE_SSID_BUF(ssid);
7292
7293 if (priv->config & CFG_FIXED_RATE)
7294 ipw_set_fixed_rate(priv, network->mode);
7295
7296 if (!(priv->config & CFG_STATIC_ESSID)) {
7297 priv->essid_len = min(network->ssid_len,
7298 (u8) IW_ESSID_MAX_SIZE);
7299 memcpy(priv->essid, network->ssid, priv->essid_len);
7300 }
7301
7302 network->last_associate = jiffies;
7303
7304 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
7305 priv->assoc_request.channel = network->channel;
7306 priv->assoc_request.auth_key = 0;
7307
7308 if ((priv->capability & CAP_PRIVACY_ON) &&
7309 (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY)) {
7310 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
7311 priv->assoc_request.auth_key = priv->ieee->sec.active_key;
7312
7313 if (priv->ieee->sec.level == SEC_LEVEL_1)
7314 ipw_send_wep_keys(priv, DCW_WEP_KEY_SEC_TYPE_WEP);
7315
7316 } else if ((priv->capability & CAP_PRIVACY_ON) &&
7317 (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP))
7318 priv->assoc_request.auth_type = AUTH_LEAP;
7319 else
7320 priv->assoc_request.auth_type = AUTH_OPEN;
7321
7322 if (priv->ieee->wpa_ie_len) {
7323 priv->assoc_request.policy_support = cpu_to_le16(0x02); /* RSN active */
7324 ipw_set_rsn_capa(priv, priv->ieee->wpa_ie,
7325 priv->ieee->wpa_ie_len);
7326 }
7327
7328 /*
7329 * It is valid for our ieee device to support multiple modes, but
7330 * when it comes to associating to a given network we have to choose
7331 * just one mode.
7332 */
7333 if (network->mode & priv->ieee->mode & IEEE_A)
7334 priv->assoc_request.ieee_mode = IPW_A_MODE;
7335 else if (network->mode & priv->ieee->mode & IEEE_G)
7336 priv->assoc_request.ieee_mode = IPW_G_MODE;
7337 else if (network->mode & priv->ieee->mode & IEEE_B)
7338 priv->assoc_request.ieee_mode = IPW_B_MODE;
7339
7340 priv->assoc_request.capability = cpu_to_le16(network->capability);
7341 if ((network->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
7342 && !(priv->config & CFG_PREAMBLE_LONG)) {
7343 priv->assoc_request.preamble_length = DCT_FLAG_SHORT_PREAMBLE;
7344 } else {
7345 priv->assoc_request.preamble_length = DCT_FLAG_LONG_PREAMBLE;
7346
7347 /* Clear the short preamble if we won't be supporting it */
7348 priv->assoc_request.capability &=
7349 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
7350 }
7351
7352 /* Clear capability bits that aren't used in Ad Hoc */
7353 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7354 priv->assoc_request.capability &=
7355 ~cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
7356
7357	IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
7358 "802.11%c [%d], %s[:%s], enc=%s%s%s%c%c\n",
7359 roaming ? "Rea" : "A",
7360 print_ssid(ssid, priv->essid, priv->essid_len),
7361 network->channel,
7362 ipw_modes[priv->assoc_request.ieee_mode],
7363 rates->num_rates,
7364 (priv->assoc_request.preamble_length ==
7365 DCT_FLAG_LONG_PREAMBLE) ? "long" : "short",
7366 network->capability &
7367 WLAN_CAPABILITY_SHORT_PREAMBLE ? "short" : "long",
7368 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
7369 priv->capability & CAP_PRIVACY_ON ?
7370 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
7371 "(open)") : "",
7372 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
7373 priv->capability & CAP_PRIVACY_ON ?
7374 '1' + priv->ieee->sec.active_key : '.',
7375 priv->capability & CAP_PRIVACY_ON ? '.' : ' ');
7376
7377 priv->assoc_request.beacon_interval = cpu_to_le16(network->beacon_interval);
7378 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
7379 (network->time_stamp[0] == 0) && (network->time_stamp[1] == 0)) {
7380 priv->assoc_request.assoc_type = HC_IBSS_START;
7381 priv->assoc_request.assoc_tsf_msw = 0;
7382 priv->assoc_request.assoc_tsf_lsw = 0;
7383 } else {
7384 if (unlikely(roaming))
7385 priv->assoc_request.assoc_type = HC_REASSOCIATE;
7386 else
7387 priv->assoc_request.assoc_type = HC_ASSOCIATE;
7388 priv->assoc_request.assoc_tsf_msw = cpu_to_le32(network->time_stamp[1]);
7389 priv->assoc_request.assoc_tsf_lsw = cpu_to_le32(network->time_stamp[0]);
7390 }
7391
7392 memcpy(priv->assoc_request.bssid, network->bssid, ETH_ALEN);
7393
7394 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7395 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
7396 priv->assoc_request.atim_window = cpu_to_le16(network->atim_window);
7397 } else {
7398 memcpy(priv->assoc_request.dest, network->bssid, ETH_ALEN);
7399 priv->assoc_request.atim_window = 0;
7400 }
7401
7402 priv->assoc_request.listen_interval = cpu_to_le16(network->listen_interval);
7403
7404 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
7405 if (err) {
7406 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
7407 return err;
7408 }
7409
7410 rates->ieee_mode = priv->assoc_request.ieee_mode;
7411 rates->purpose = IPW_RATE_CONNECT;
7412 ipw_send_supported_rates(priv, rates);
7413
7414 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
7415 priv->sys_config.dot11g_auto_detection = 1;
7416 else
7417 priv->sys_config.dot11g_auto_detection = 0;
7418
7419 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
7420 priv->sys_config.answer_broadcast_ssid_probe = 1;
7421 else
7422 priv->sys_config.answer_broadcast_ssid_probe = 0;
7423
7424 err = ipw_send_system_config(priv);
7425 if (err) {
7426 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
7427 return err;
7428 }
7429
7430 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
7431 err = ipw_set_sensitivity(priv, network->stats.rssi + IPW_RSSI_TO_DBM);
7432 if (err) {
7433		IPW_DEBUG_HC("Attempt to send sensitivity command failed.\n");
7434 return err;
7435 }
7436
7437 /*
7438 * If preemption is enabled, it is possible for the association
7439 * to complete before we return from ipw_send_associate. Therefore
7440	 * we have to be sure to update our private data first.
7441 */
7442 priv->channel = network->channel;
7443 memcpy(priv->bssid, network->bssid, ETH_ALEN);
7444 priv->status |= STATUS_ASSOCIATING;
7445 priv->status &= ~STATUS_SECURITY_UPDATED;
7446
7447 priv->assoc_network = network;
7448
7449#ifdef CONFIG_IPW2200_QOS
7450 ipw_qos_association(priv, network);
7451#endif
7452
7453 err = ipw_send_associate(priv, &priv->assoc_request);
7454 if (err) {
7455 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
7456 return err;
7457 }
7458
7459 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM \n",
7460 print_ssid(ssid, priv->essid, priv->essid_len),
7461 priv->bssid);
7462
7463 return 0;
7464}
7465
7466static void ipw_roam(void *data)
7467{
7468 struct ipw_priv *priv = data;
7469 struct ieee80211_network *network = NULL;
7470 struct ipw_network_match match = {
7471 .network = priv->assoc_network
7472 };
7473
7474 /* The roaming process is as follows:
7475 *
7476 * 1. Missed beacon threshold triggers the roaming process by
7477 * setting the status ROAM bit and requesting a scan.
7478 * 2. When the scan completes, it schedules the ROAM work
7479 * 3. The ROAM work looks at all of the known networks for one that
7480 * is a better network than the currently associated. If none
7481 * found, the ROAM process is over (ROAM bit cleared)
7482 * 4. If a better network is found, a disassociation request is
7483 * sent.
7484 * 5. When the disassociation completes, the roam work is again
7485 * scheduled. The second time through, the driver is no longer
7486 * associated, and the newly selected network is sent an
7487 * association request.
7488	 * 6. At this point, the roaming process is complete and the ROAM
7489 * status bit is cleared.
7490 */
7491
7492 /* If we are no longer associated, and the roaming bit is no longer
7493 * set, then we are not actively roaming, so just return */
7494 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
7495 return;
7496
7497 if (priv->status & STATUS_ASSOCIATED) {
7498 /* First pass through ROAM process -- look for a better
7499 * network */
7500 unsigned long flags;
7501 u8 rssi = priv->assoc_network->stats.rssi;
7502 priv->assoc_network->stats.rssi = -128;
7503 spin_lock_irqsave(&priv->ieee->lock, flags);
7504 list_for_each_entry(network, &priv->ieee->network_list, list) {
7505 if (network != priv->assoc_network)
7506 ipw_best_network(priv, &match, network, 1);
7507 }
7508 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7509 priv->assoc_network->stats.rssi = rssi;
7510
7511 if (match.network == priv->assoc_network) {
7512 IPW_DEBUG_ASSOC("No better APs in this network to "
7513 "roam to.\n");
7514 priv->status &= ~STATUS_ROAMING;
7515 ipw_debug_config(priv);
7516 return;
7517 }
7518
7519 ipw_send_disassociate(priv, 1);
7520 priv->assoc_network = match.network;
7521
7522 return;
7523 }
7524
7525 /* Second pass through ROAM process -- request association */
7526 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
7527 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
7528 priv->status &= ~STATUS_ROAMING;
7529}
7530
7531static void ipw_bg_roam(struct work_struct *work)
7532{
7533 struct ipw_priv *priv =
7534 container_of(work, struct ipw_priv, roam);
7535 mutex_lock(&priv->mutex);
7536 ipw_roam(priv);
7537 mutex_unlock(&priv->mutex);
7538}
7539
7540static int ipw_associate(void *data)
7541{
7542 struct ipw_priv *priv = data;
7543
7544 struct ieee80211_network *network = NULL;
7545 struct ipw_network_match match = {
7546 .network = NULL
7547 };
7548 struct ipw_supported_rates *rates;
7549 struct list_head *element;
7550 unsigned long flags;
7551 DECLARE_SSID_BUF(ssid);
7552
7553 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7554 IPW_DEBUG_ASSOC("Not attempting association (monitor mode)\n");
7555 return 0;
7556 }
7557
7558 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
7559 IPW_DEBUG_ASSOC("Not attempting association (already in "
7560 "progress)\n");
7561 return 0;
7562 }
7563
7564 if (priv->status & STATUS_DISASSOCIATING) {
7565 IPW_DEBUG_ASSOC("Not attempting association (disassociation "
7566 "in progress)\n");
7567 queue_work(priv->workqueue, &priv->associate);
7568 return 0;
7569 }
7570
7571 if (!ipw_is_init(priv) || (priv->status & STATUS_SCANNING)) {
7572 IPW_DEBUG_ASSOC("Not attempting association (scanning or not "
7573 "initialized)\n");
7574 return 0;
7575 }
7576
7577 if (!(priv->config & CFG_ASSOCIATE) &&
7578 !(priv->config & (CFG_STATIC_ESSID | CFG_STATIC_BSSID))) {
7579 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
7580 return 0;
7581 }
7582
7583 /* Protect our use of the network_list */
7584 spin_lock_irqsave(&priv->ieee->lock, flags);
7585 list_for_each_entry(network, &priv->ieee->network_list, list)
7586 ipw_best_network(priv, &match, network, 0);
7587
7588 network = match.network;
7589 rates = &match.rates;
7590
7591 if (network == NULL &&
7592 priv->ieee->iw_mode == IW_MODE_ADHOC &&
7593 priv->config & CFG_ADHOC_CREATE &&
7594 priv->config & CFG_STATIC_ESSID &&
7595 priv->config & CFG_STATIC_CHANNEL) {
7596 /* Use oldest network if the free list is empty */
7597 if (list_empty(&priv->ieee->network_free_list)) {
7598 struct ieee80211_network *oldest = NULL;
7599 struct ieee80211_network *target;
7600
7601 list_for_each_entry(target, &priv->ieee->network_list, list) {
7602 if ((oldest == NULL) ||
7603 (target->last_scanned < oldest->last_scanned))
7604 oldest = target;
7605 }
7606
7607 /* If there are no more slots, expire the oldest */
7608 list_del(&oldest->list);
7609 target = oldest;
7610 IPW_DEBUG_ASSOC("Expired '%s' (%pM) from "
7611 "network list.\n",
7612 print_ssid(ssid, target->ssid,
7613 target->ssid_len),
7614 target->bssid);
7615 list_add_tail(&target->list,
7616 &priv->ieee->network_free_list);
7617 }
7618
7619 element = priv->ieee->network_free_list.next;
7620 network = list_entry(element, struct ieee80211_network, list);
7621 ipw_adhoc_create(priv, network);
7622 rates = &priv->rates;
7623 list_del(element);
7624 list_add_tail(&network->list, &priv->ieee->network_list);
7625 }
7626 spin_unlock_irqrestore(&priv->ieee->lock, flags);
7627
7628 /* If we reached the end of the list, then we don't have any valid
7629 * matching APs */
7630 if (!network) {
7631 ipw_debug_config(priv);
7632
7633 if (!(priv->status & STATUS_SCANNING)) {
7634 if (!(priv->config & CFG_SPEED_SCAN))
7635 queue_delayed_work(priv->workqueue,
7636 &priv->request_scan,
7637 SCAN_INTERVAL);
7638 else
7639 queue_delayed_work(priv->workqueue,
7640 &priv->request_scan, 0);
7641 }
7642
7643 return 0;
7644 }
7645
7646 ipw_associate_network(priv, network, rates, 0);
7647
7648 return 1;
7649}
7650
7651static void ipw_bg_associate(struct work_struct *work)
7652{
7653 struct ipw_priv *priv =
7654 container_of(work, struct ipw_priv, associate);
7655 mutex_lock(&priv->mutex);
7656 ipw_associate(priv);
7657 mutex_unlock(&priv->mutex);
7658}
7659
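/* Hardware decryption leaves the protected-frame bit, the security header
 * (IV or CCMP header) and the trailer (ICV or MIC) in place; strip them so
 * the frame reaches the 802.11 stack looking like a cleartext frame. */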
7660static void ipw_rebuild_decrypted_skb(struct ipw_priv *priv,
7661 struct sk_buff *skb)
7662{
7663 struct ieee80211_hdr *hdr;
7664 u16 fc;
7665
7666 hdr = (struct ieee80211_hdr *)skb->data;
7667 fc = le16_to_cpu(hdr->frame_control);
7668 if (!(fc & IEEE80211_FCTL_PROTECTED))
7669 return;
7670
7671 fc &= ~IEEE80211_FCTL_PROTECTED;
7672 hdr->frame_control = cpu_to_le16(fc);
7673 switch (priv->ieee->sec.level) {
7674 case SEC_LEVEL_3:
7675 /* Remove CCMP HDR */
7676 memmove(skb->data + IEEE80211_3ADDR_LEN,
7677 skb->data + IEEE80211_3ADDR_LEN + 8,
7678 skb->len - IEEE80211_3ADDR_LEN - 8);
7679 skb_trim(skb, skb->len - 16); /* CCMP_HDR_LEN + CCMP_MIC_LEN */
7680 break;
7681 case SEC_LEVEL_2:
7682 break;
7683 case SEC_LEVEL_1:
7684 /* Remove IV */
7685 memmove(skb->data + IEEE80211_3ADDR_LEN,
7686 skb->data + IEEE80211_3ADDR_LEN + 4,
7687 skb->len - IEEE80211_3ADDR_LEN - 4);
7688 skb_trim(skb, skb->len - 8); /* IV + ICV */
7689 break;
7690 case SEC_LEVEL_0:
7691 break;
7692 default:
7693 printk(KERN_ERR "Unknown security level %d\n",
7694 priv->ieee->sec.level);
7695 break;
7696 }
7697}
7698
7699static void ipw_handle_data_packet(struct ipw_priv *priv,
7700 struct ipw_rx_mem_buffer *rxb,
7701 struct ieee80211_rx_stats *stats)
7702{
7703 struct ieee80211_hdr_4addr *hdr;
7704 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7705
7706 /* We received data from the HW, so stop the watchdog */
7707 priv->net_dev->trans_start = jiffies;
7708
7709 /* We only process data packets if the
7710 * interface is open */
7711 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7712 skb_tailroom(rxb->skb))) {
7713 priv->ieee->stats.rx_errors++;
7714 priv->wstats.discard.misc++;
7715 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7716 return;
7717 } else if (unlikely(!netif_running(priv->net_dev))) {
7718 priv->ieee->stats.rx_dropped++;
7719 priv->wstats.discard.misc++;
7720 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7721 return;
7722 }
7723
7724 /* Advance skb->data to the start of the actual payload */
7725 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
7726
7727 /* Set the size of the skb to the size of the frame */
7728 skb_put(rxb->skb, le16_to_cpu(pkt->u.frame.length));
7729
7730 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7731
7732 /* HW decrypt will not clear the WEP bit, MIC, PN, etc. */
7733 hdr = (struct ieee80211_hdr_4addr *)rxb->skb->data;
7734 if (priv->ieee->iw_mode != IW_MODE_MONITOR &&
7735 (is_multicast_ether_addr(hdr->addr1) ?
7736 !priv->ieee->host_mc_decrypt : !priv->ieee->host_decrypt))
7737 ipw_rebuild_decrypted_skb(priv, rxb->skb);
7738
7739 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7740 priv->ieee->stats.rx_errors++;
7741 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7742 rxb->skb = NULL;
7743 __ipw_led_activity_on(priv);
7744 }
7745}
7746
7747#ifdef CONFIG_IPW2200_RADIOTAP
7748static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
7749 struct ipw_rx_mem_buffer *rxb,
7750 struct ieee80211_rx_stats *stats)
7751{
7752 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7753 struct ipw_rx_frame *frame = &pkt->u.frame;
7754
7755 /* initial pull of some data */
7756 u16 received_channel = frame->received_channel;
7757 u8 antennaAndPhy = frame->antennaAndPhy;
7758 s8 antsignal = frame->rssi_dbm - IPW_RSSI_TO_DBM; /* call it signed anyhow */
7759 u16 pktrate = frame->rate;
7760
7761 /* Magic struct that slots into the radiotap header -- no reason
7762 * to build this manually element by element, we can write it much
7763 * more efficiently than we can parse it. ORDER MATTERS HERE */
7764 struct ipw_rt_hdr *ipw_rt;
7765
7766 short len = le16_to_cpu(pkt->u.frame.length);
7767
7768 /* We received data from the HW, so stop the watchdog */
7769 priv->net_dev->trans_start = jiffies;
7770
7771 /* We only process data packets if the
7772 * interface is open */
7773 if (unlikely((le16_to_cpu(pkt->u.frame.length) + IPW_RX_FRAME_SIZE) >
7774 skb_tailroom(rxb->skb))) {
7775 priv->ieee->stats.rx_errors++;
7776 priv->wstats.discard.misc++;
7777 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7778 return;
7779 } else if (unlikely(!netif_running(priv->net_dev))) {
7780 priv->ieee->stats.rx_dropped++;
7781 priv->wstats.discard.misc++;
7782 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7783 return;
7784 }
7785
7786 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7787 * that now */
7788 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7789 /* FIXME: Should alloc bigger skb instead */
7790 priv->ieee->stats.rx_dropped++;
7791 priv->wstats.discard.misc++;
7792 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7793 return;
7794 }
7795
7796 /* copy the frame itself */
7797 memmove(rxb->skb->data + sizeof(struct ipw_rt_hdr),
7798 rxb->skb->data + IPW_RX_FRAME_SIZE, len);
7799
7800 /* Zero the radiotap static buffer ... We only need to zero the bytes NOT
7801 * part of our real header, saves a little time.
7802 *
7803 * No longer necessary since we fill in all our data. Purge before merging
7804 * patch officially.
7805 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
7806 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
7807 */
7808
7809 ipw_rt = (struct ipw_rt_hdr *)rxb->skb->data;
7810
7811 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
7812 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
7813 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr)); /* total radiotap header length */
7814
7815 /* Big bitfield of all the fields we provide in radiotap */
7816 ipw_rt->rt_hdr.it_present = cpu_to_le32(
7817 (1 << IEEE80211_RADIOTAP_TSFT) |
7818 (1 << IEEE80211_RADIOTAP_FLAGS) |
7819 (1 << IEEE80211_RADIOTAP_RATE) |
7820 (1 << IEEE80211_RADIOTAP_CHANNEL) |
7821 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
7822 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
7823 (1 << IEEE80211_RADIOTAP_ANTENNA));
7824
7825 /* Zero the flags, we'll add to them as we go */
7826 ipw_rt->rt_flags = 0;
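 /* Assemble the four little-endian TSF bytes supplied by the firmware
 * into the low 32 bits of the 64-bit radiotap TSF field; the upper
 * 32 bits are left zero. */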
7827 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
7828 frame->parent_tsf[2] << 16 |
7829 frame->parent_tsf[1] << 8 |
7830 frame->parent_tsf[0]);
7831
7832 /* Convert signal to DBM */
7833 ipw_rt->rt_dbmsignal = antsignal;
7834 ipw_rt->rt_dbmnoise = frame->noise;
7835
7836 /* Convert the channel data and set the flags */
7837 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(received_channel));
7838 if (received_channel > 14) { /* 802.11a */
7839 ipw_rt->rt_chbitmask =
7840 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
7841 } else if (antennaAndPhy & 32) { /* 802.11b */
7842 ipw_rt->rt_chbitmask =
7843 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
7844 } else { /* 802.11g */
7845 ipw_rt->rt_chbitmask =
7846 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
7847 }
7848
7849 /* set the rate in multiples of 500k/s */
7850 switch (pktrate) {
7851 case IPW_TX_RATE_1MB:
7852 ipw_rt->rt_rate = 2;
7853 break;
7854 case IPW_TX_RATE_2MB:
7855 ipw_rt->rt_rate = 4;
7856 break;
7857 case IPW_TX_RATE_5MB:
7858 ipw_rt->rt_rate = 10;
7859 break;
7860 case IPW_TX_RATE_6MB:
7861 ipw_rt->rt_rate = 12;
7862 break;
7863 case IPW_TX_RATE_9MB:
7864 ipw_rt->rt_rate = 18;
7865 break;
7866 case IPW_TX_RATE_11MB:
7867 ipw_rt->rt_rate = 22;
7868 break;
7869 case IPW_TX_RATE_12MB:
7870 ipw_rt->rt_rate = 24;
7871 break;
7872 case IPW_TX_RATE_18MB:
7873 ipw_rt->rt_rate = 36;
7874 break;
7875 case IPW_TX_RATE_24MB:
7876 ipw_rt->rt_rate = 48;
7877 break;
7878 case IPW_TX_RATE_36MB:
7879 ipw_rt->rt_rate = 72;
7880 break;
7881 case IPW_TX_RATE_48MB:
7882 ipw_rt->rt_rate = 96;
7883 break;
7884 case IPW_TX_RATE_54MB:
7885 ipw_rt->rt_rate = 108;
7886 break;
7887 default:
7888 ipw_rt->rt_rate = 0;
7889 break;
7890 }
7891
7892 /* antenna number */
7893 ipw_rt->rt_antenna = (antennaAndPhy & 3); /* Is this right? */
7894
7895 /* set the preamble flag if we have it */
7896 if ((antennaAndPhy & 64))
7897 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
7898
7899 /* Set the size of the skb to the size of the frame */
7900 skb_put(rxb->skb, len + sizeof(struct ipw_rt_hdr));
7901
7902 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
7903
7904 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
7905 priv->ieee->stats.rx_errors++;
7906 else { /* ieee80211_rx succeeded, so it now owns the SKB */
7907 rxb->skb = NULL;
7908 /* no LED during capture */
7909 }
7910}
7911#endif
7912
7913#ifdef CONFIG_IPW2200_PROMISCUOUS
7914#define ieee80211_is_probe_response(fc) \
7915 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT && \
7916 (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_PROBE_RESP )
7917
7918#define ieee80211_is_management(fc) \
7919 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT)
7920
7921#define ieee80211_is_control(fc) \
7922 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_CTL)
7923
7924#define ieee80211_is_data(fc) \
7925 ((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA)
7926
7927#define ieee80211_is_assoc_request(fc) \
7928 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_ASSOC_REQ)
7929
7930#define ieee80211_is_reassoc_request(fc) \
7931 ((fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_REASSOC_REQ)
7932
7933static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
7934 struct ipw_rx_mem_buffer *rxb,
7935 struct ieee80211_rx_stats *stats)
7936{
7937 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
7938 struct ipw_rx_frame *frame = &pkt->u.frame;
7939 struct ipw_rt_hdr *ipw_rt;
7940
7941 /* First cache any information we need before we overwrite
7942 * the information provided in the skb from the hardware */
7943 struct ieee80211_hdr *hdr;
7944 u16 channel = frame->received_channel;
7945 u8 phy_flags = frame->antennaAndPhy;
7946 s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
7947 s8 noise = frame->noise;
7948 u8 rate = frame->rate;
7949 short len = le16_to_cpu(pkt->u.frame.length);
7950 struct sk_buff *skb;
7951 int hdr_only = 0;
7952 u16 filter = priv->prom_priv->filter;
7953
7954 /* If the filter is set to not include Rx frames then return */
7955 if (filter & IPW_PROM_NO_RX)
7956 return;
7957
7958 /* We received data from the HW, so stop the watchdog */
7959 priv->prom_net_dev->trans_start = jiffies;
7960
7961 if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
7962 priv->prom_priv->ieee->stats.rx_errors++;
7963 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
7964 return;
7965 }
7966
7967 /* We only process data packets if the interface is open */
7968 if (unlikely(!netif_running(priv->prom_net_dev))) {
7969 priv->prom_priv->ieee->stats.rx_dropped++;
7970 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
7971 return;
7972 }
7973
7974 /* Libpcap 0.9.3+ can handle variable length radiotap, so we'll use
7975 * that now */
7976 if (len > IPW_RX_BUF_SIZE - sizeof(struct ipw_rt_hdr)) {
7977 /* FIXME: Should alloc bigger skb instead */
7978 priv->prom_priv->ieee->stats.rx_dropped++;
7979 IPW_DEBUG_DROP("Dropping too large packet in monitor\n");
7980 return;
7981 }
7982
7983 hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
7984 if (ieee80211_is_management(le16_to_cpu(hdr->frame_control))) {
7985 if (filter & IPW_PROM_NO_MGMT)
7986 return;
7987 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
7988 hdr_only = 1;
7989 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_control))) {
7990 if (filter & IPW_PROM_NO_CTL)
7991 return;
7992 if (filter & IPW_PROM_CTL_HEADER_ONLY)
7993 hdr_only = 1;
7994 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_control))) {
7995 if (filter & IPW_PROM_NO_DATA)
7996 return;
7997 if (filter & IPW_PROM_DATA_HEADER_ONLY)
7998 hdr_only = 1;
7999 }
8000
8001 /* Copy the SKB since this is for the promiscuous side */
8002 skb = skb_copy(rxb->skb, GFP_ATOMIC);
8003 if (skb == NULL) {
8004 IPW_ERROR("skb_copy failed for promiscuous copy.\n");
8005 return;
8006 }
8007
8008 /* the frame data is copied in just past where the radiotap header will go */
8009 ipw_rt = (void *)skb->data;
8010
8011 if (hdr_only)
8012 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
8013
8014 memcpy(ipw_rt->payload, hdr, len);
8015
8016 /* Zero the radiotap static buffer ... We only need to zero the bytes
8017 * NOT part of our real header, saves a little time.
8018 *
8019 * No longer necessary since we fill in all our data. Purge before
8020 * merging patch officially.
8021 * memset(rxb->skb->data + sizeof(struct ipw_rt_hdr), 0,
8022 * IEEE80211_RADIOTAP_HDRLEN - sizeof(struct ipw_rt_hdr));
8023 */
8024
8025 ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
8026 ipw_rt->rt_hdr.it_pad = 0; /* always good to zero */
8027 ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(*ipw_rt)); /* total radiotap header length */
8028
8029 /* Set the size of the skb to the size of the frame */
8030 skb_put(skb, sizeof(*ipw_rt) + len);
8031
8032 /* Big bitfield of all the fields we provide in radiotap */
8033 ipw_rt->rt_hdr.it_present = cpu_to_le32(
8034 (1 << IEEE80211_RADIOTAP_TSFT) |
8035 (1 << IEEE80211_RADIOTAP_FLAGS) |
8036 (1 << IEEE80211_RADIOTAP_RATE) |
8037 (1 << IEEE80211_RADIOTAP_CHANNEL) |
8038 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
8039 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
8040 (1 << IEEE80211_RADIOTAP_ANTENNA));
8041
8042 /* Zero the flags, we'll add to them as we go */
8043 ipw_rt->rt_flags = 0;
8044 ipw_rt->rt_tsf = (u64)(frame->parent_tsf[3] << 24 |
8045 frame->parent_tsf[2] << 16 |
8046 frame->parent_tsf[1] << 8 |
8047 frame->parent_tsf[0]);
8048
8049 /* Convert to DBM */
8050 ipw_rt->rt_dbmsignal = signal;
8051 ipw_rt->rt_dbmnoise = noise;
8052
8053 /* Convert the channel data and set the flags */
8054 ipw_rt->rt_channel = cpu_to_le16(ieee80211chan2mhz(channel));
8055 if (channel > 14) { /* 802.11a */
8056 ipw_rt->rt_chbitmask =
8057 cpu_to_le16((IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ));
8058 } else if (phy_flags & (1 << 5)) { /* 802.11b */
8059 ipw_rt->rt_chbitmask =
8060 cpu_to_le16((IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ));
8061 } else { /* 802.11g */
8062 ipw_rt->rt_chbitmask =
8063 cpu_to_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ);
8064 }
8065
8066 /* set the rate in multiples of 500k/s */
8067 switch (rate) {
8068 case IPW_TX_RATE_1MB:
8069 ipw_rt->rt_rate = 2;
8070 break;
8071 case IPW_TX_RATE_2MB:
8072 ipw_rt->rt_rate = 4;
8073 break;
8074 case IPW_TX_RATE_5MB:
8075 ipw_rt->rt_rate = 10;
8076 break;
8077 case IPW_TX_RATE_6MB:
8078 ipw_rt->rt_rate = 12;
8079 break;
8080 case IPW_TX_RATE_9MB:
8081 ipw_rt->rt_rate = 18;
8082 break;
8083 case IPW_TX_RATE_11MB:
8084 ipw_rt->rt_rate = 22;
8085 break;
8086 case IPW_TX_RATE_12MB:
8087 ipw_rt->rt_rate = 24;
8088 break;
8089 case IPW_TX_RATE_18MB:
8090 ipw_rt->rt_rate = 36;
8091 break;
8092 case IPW_TX_RATE_24MB:
8093 ipw_rt->rt_rate = 48;
8094 break;
8095 case IPW_TX_RATE_36MB:
8096 ipw_rt->rt_rate = 72;
8097 break;
8098 case IPW_TX_RATE_48MB:
8099 ipw_rt->rt_rate = 96;
8100 break;
8101 case IPW_TX_RATE_54MB:
8102 ipw_rt->rt_rate = 108;
8103 break;
8104 default:
8105 ipw_rt->rt_rate = 0;
8106 break;
8107 }
8108
8109 /* antenna number */
8110 ipw_rt->rt_antenna = (phy_flags & 3);
8111
8112 /* set the preamble flag if we have it */
8113 if (phy_flags & (1 << 6))
8114 ipw_rt->rt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
8115
8116 IPW_DEBUG_RX("Rx packet of %d bytes.\n", skb->len);
8117
8118 if (!ieee80211_rx(priv->prom_priv->ieee, skb, stats)) {
8119 priv->prom_priv->ieee->stats.rx_errors++;
8120 dev_kfree_skb_any(skb);
8121 }
8122}
8123#endif
8124
8125static int is_network_packet(struct ipw_priv *priv,
8126 struct ieee80211_hdr_4addr *header)
8127{
8128 /* Filter incoming packets to determine if they are targeted toward
8129 * this network, discarding packets coming from ourselves */
8130 switch (priv->ieee->iw_mode) {
8131 case IW_MODE_ADHOC: /* Header: Dest. | Source | BSSID */
8132 /* packets from our adapter are dropped (echo) */
8133 if (!memcmp(header->addr2, priv->net_dev->dev_addr, ETH_ALEN))
8134 return 0;
8135
8136 /* {broad,multi}cast packets to our BSSID go through */
8137 if (is_multicast_ether_addr(header->addr1))
8138 return !memcmp(header->addr3, priv->bssid, ETH_ALEN);
8139
8140 /* packets to our adapter go through */
8141 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8142 ETH_ALEN);
8143
8144 case IW_MODE_INFRA: /* Header: Dest. | BSSID | Source */
8145 /* packets from our adapter are dropped (echo) */
8146 if (!memcmp(header->addr3, priv->net_dev->dev_addr, ETH_ALEN))
8147 return 0;
8148
8149 /* {broad,multi}cast packets to our BSS go through */
8150 if (is_multicast_ether_addr(header->addr1))
8151 return !memcmp(header->addr2, priv->bssid, ETH_ALEN);
8152
8153 /* packets to our adapter go through */
8154 return !memcmp(header->addr1, priv->net_dev->dev_addr,
8155 ETH_ALEN);
8156 }
8157
8158 return 1;
8159}
8160
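/* Frames carrying the same sequence number that arrive within this window
 * (one second worth of jiffies) are treated as retransmissions. */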
8161#define IPW_PACKET_RETRY_TIME HZ
8162
8163static int is_duplicate_packet(struct ipw_priv *priv,
8164 struct ieee80211_hdr_4addr *header)
8165{
8166 u16 sc = le16_to_cpu(header->seq_ctl);
8167 u16 seq = WLAN_GET_SEQ_SEQ(sc);
8168 u16 frag = WLAN_GET_SEQ_FRAG(sc);
8169 u16 *last_seq, *last_frag;
8170 unsigned long *last_time;
8171
8172 switch (priv->ieee->iw_mode) {
8173 case IW_MODE_ADHOC:
8174 {
8175 struct list_head *p;
8176 struct ipw_ibss_seq *entry = NULL;
8177 u8 *mac = header->addr2;
8178 int index = mac[5] % IPW_IBSS_MAC_HASH_SIZE;
8179
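 /* Look up this transmitter's sequence/fragment tracking entry
 * in its hash bucket; allocate one on first contact. */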
8180 __list_for_each(p, &priv->ibss_mac_hash[index]) {
8181 entry =
8182 list_entry(p, struct ipw_ibss_seq, list);
8183 if (!memcmp(entry->mac, mac, ETH_ALEN))
8184 break;
8185 }
8186 if (p == &priv->ibss_mac_hash[index]) {
8187 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
8188 if (!entry) {
8189 IPW_ERROR
8190 ("Cannot malloc new mac entry\n");
8191 return 0;
8192 }
8193 memcpy(entry->mac, mac, ETH_ALEN);
8194 entry->seq_num = seq;
8195 entry->frag_num = frag;
8196 entry->packet_time = jiffies;
8197 list_add(&entry->list,
8198 &priv->ibss_mac_hash[index]);
8199 return 0;
8200 }
8201 last_seq = &entry->seq_num;
8202 last_frag = &entry->frag_num;
8203 last_time = &entry->packet_time;
8204 break;
8205 }
8206 case IW_MODE_INFRA:
8207 last_seq = &priv->last_seq_num;
8208 last_frag = &priv->last_frag_num;
8209 last_time = &priv->last_packet_time;
8210 break;
8211 default:
8212 return 0;
8213 }
8214 if ((*last_seq == seq) &&
8215 time_after(*last_time + IPW_PACKET_RETRY_TIME, jiffies)) {
8216 if (*last_frag == frag)
8217 goto drop;
8218 if (*last_frag + 1 != frag)
8219 /* out-of-order fragment */
8220 goto drop;
8221 } else
8222 *last_seq = seq;
8223
8224 *last_frag = frag;
8225 *last_time = jiffies;
8226 return 0;
8227
8228 drop:
8229 /* This BUG_ON is commented out because the card has been observed to
8230 * receive duplicate packets without the FCTL_RETRY bit set in IBSS
8231 * mode with fragmentation enabled.
8232 BUG_ON(!(le16_to_cpu(header->frame_control) & IEEE80211_FCTL_RETRY)); */
8233 return 1;
8234}
8235
8236static void ipw_handle_mgmt_packet(struct ipw_priv *priv,
8237 struct ipw_rx_mem_buffer *rxb,
8238 struct ieee80211_rx_stats *stats)
8239{
8240 struct sk_buff *skb = rxb->skb;
8241 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)skb->data;
8242 struct ieee80211_hdr_4addr *header = (struct ieee80211_hdr_4addr *)
8243 (skb->data + IPW_RX_FRAME_SIZE);
8244
8245 ieee80211_rx_mgt(priv->ieee, header, stats);
8246
8247 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
8248 ((WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8249 IEEE80211_STYPE_PROBE_RESP) ||
8250 (WLAN_FC_GET_STYPE(le16_to_cpu(header->frame_ctl)) ==
8251 IEEE80211_STYPE_BEACON))) {
8252 if (!memcmp(header->addr3, priv->bssid, ETH_ALEN))
8253 ipw_add_station(priv, header->addr2);
8254 }
8255
8256 if (priv->config & CFG_NET_STATS) {
8257 IPW_DEBUG_HC("sending stat packet\n");
8258
8259 /* Set the size of the skb to the size of the full
8260 * ipw header and 802.11 frame */
8261 skb_put(skb, le16_to_cpu(pkt->u.frame.length) +
8262 IPW_RX_FRAME_SIZE);
8263
8264 /* Advance past the ipw packet header to the 802.11 frame */
8265 skb_pull(skb, IPW_RX_FRAME_SIZE);
8266
8267 /* Push the ieee80211_rx_stats before the 802.11 frame */
8268 memcpy(skb_push(skb, sizeof(*stats)), stats, sizeof(*stats));
8269
8270 skb->dev = priv->ieee->dev;
8271
8272 /* Point raw at the ieee80211_stats */
8273 skb_reset_mac_header(skb);
8274
8275 skb->pkt_type = PACKET_OTHERHOST;
8276 skb->protocol = __constant_htons(ETH_P_80211_STATS);
8277 memset(skb->cb, 0, sizeof(rxb->skb->cb));
8278 netif_rx(skb);
8279 rxb->skb = NULL;
8280 }
8281}
8282
8283/*
8284 * Main entry function for receiving a packet with 802.11 headers. This
8285 * should be called whenever the FW has notified us that there is a new
8286 * skb in the receive queue.
8287 */
8288static void ipw_rx(struct ipw_priv *priv)
8289{
8290 struct ipw_rx_mem_buffer *rxb;
8291 struct ipw_rx_packet *pkt;
8292 struct ieee80211_hdr_4addr *header;
8293 u32 r, w, i;
8294 u8 network_packet;
8295 u8 fill_rx = 0;
8296
8297 r = ipw_read32(priv, IPW_RX_READ_INDEX);
8298 w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
8299 i = priv->rxq->read;
8300
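 /* If more than half of the ring is empty, replenish Rx buffers as we
 * go instead of waiting until all pending frames are processed. */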
8301 if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
8302 fill_rx = 1;
8303
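 /* Walk the Rx ring from our last read position up to the index
 * reported by the hardware. */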
8304 while (i != r) {
8305 rxb = priv->rxq->queue[i];
8306 if (unlikely(rxb == NULL)) {
8307 printk(KERN_CRIT "Queue not allocated!\n");
8308 break;
8309 }
8310 priv->rxq->queue[i] = NULL;
8311
8312 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
8313 IPW_RX_BUF_SIZE,
8314 PCI_DMA_FROMDEVICE);
8315
8316 pkt = (struct ipw_rx_packet *)rxb->skb->data;
8317 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
8318 pkt->header.message_type,
8319 pkt->header.rx_seq_num, pkt->header.control_bits);
8320
8321 switch (pkt->header.message_type) {
8322 case RX_FRAME_TYPE: /* 802.11 frame */ {
8323 struct ieee80211_rx_stats stats = {
8324 .rssi = pkt->u.frame.rssi_dbm -
8325 IPW_RSSI_TO_DBM,
8326 .signal =
8327 le16_to_cpu(pkt->u.frame.rssi_dbm) -
8328 IPW_RSSI_TO_DBM + 0x100,
8329 .noise =
8330 le16_to_cpu(pkt->u.frame.noise),
8331 .rate = pkt->u.frame.rate,
8332 .mac_time = jiffies,
8333 .received_channel =
8334 pkt->u.frame.received_channel,
8335 .freq =
8336 (pkt->u.frame.
8337 control & (1 << 0)) ?
8338 IEEE80211_24GHZ_BAND :
8339 IEEE80211_52GHZ_BAND,
8340 .len = le16_to_cpu(pkt->u.frame.length),
8341 };
8342
8343 if (stats.rssi != 0)
8344 stats.mask |= IEEE80211_STATMASK_RSSI;
8345 if (stats.signal != 0)
8346 stats.mask |= IEEE80211_STATMASK_SIGNAL;
8347 if (stats.noise != 0)
8348 stats.mask |= IEEE80211_STATMASK_NOISE;
8349 if (stats.rate != 0)
8350 stats.mask |= IEEE80211_STATMASK_RATE;
8351
8352 priv->rx_packets++;
8353
8354#ifdef CONFIG_IPW2200_PROMISCUOUS
8355 if (priv->prom_net_dev && netif_running(priv->prom_net_dev))
8356 ipw_handle_promiscuous_rx(priv, rxb, &stats);
8357#endif
8358
8359#ifdef CONFIG_IPW2200_MONITOR
8360 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8361#ifdef CONFIG_IPW2200_RADIOTAP
8362
8363 ipw_handle_data_packet_monitor(priv,
8364 rxb,
8365 &stats);
8366#else
8367 ipw_handle_data_packet(priv, rxb,
8368 &stats);
8369#endif
8370 break;
8371 }
8372#endif
8373
8374 header =
8375 (struct ieee80211_hdr_4addr *)(rxb->skb->
8376 data +
8377 IPW_RX_FRAME_SIZE);
8378 /* TODO: Check Ad-Hoc dest/source and make sure
8379 * that we are actually parsing these packets
8380 * correctly -- we should probably use the
8381 * frame control of the packet and disregard
8382 * the current iw_mode */
8383
8384 network_packet =
8385 is_network_packet(priv, header);
8386 if (network_packet && priv->assoc_network) {
8387 priv->assoc_network->stats.rssi =
8388 stats.rssi;
8389 priv->exp_avg_rssi =
8390 exponential_average(priv->exp_avg_rssi,
8391 stats.rssi, DEPTH_RSSI);
8392 }
8393
8394 IPW_DEBUG_RX("Frame: len=%u\n",
8395 le16_to_cpu(pkt->u.frame.length));
8396
8397 if (le16_to_cpu(pkt->u.frame.length) <
8398 ieee80211_get_hdrlen(le16_to_cpu(
8399 header->frame_ctl))) {
8400 IPW_DEBUG_DROP
8401 ("Received packet is too small. "
8402 "Dropping.\n");
8403 priv->ieee->stats.rx_errors++;
8404 priv->wstats.discard.misc++;
8405 break;
8406 }
8407
8408 switch (WLAN_FC_GET_TYPE
8409 (le16_to_cpu(header->frame_ctl))) {
8410
8411 case IEEE80211_FTYPE_MGMT:
8412 ipw_handle_mgmt_packet(priv, rxb,
8413 &stats);
8414 break;
8415
8416 case IEEE80211_FTYPE_CTL:
8417 break;
8418
8419 case IEEE80211_FTYPE_DATA:
8420 if (unlikely(!network_packet ||
8421 is_duplicate_packet(priv,
8422 header)))
8423 {
8424 IPW_DEBUG_DROP("Dropping: "
8425 "%pM, "
8426 "%pM, "
8427 "%pM\n",
8428 header->addr1,
8429 header->addr2,
8430 header->addr3);
8431 break;
8432 }
8433
8434 ipw_handle_data_packet(priv, rxb,
8435 &stats);
8436
8437 break;
8438 }
8439 break;
8440 }
8441
8442 case RX_HOST_NOTIFICATION_TYPE:{
8443 IPW_DEBUG_RX
8444 ("Notification: subtype=%02X flags=%02X size=%d\n",
8445 pkt->u.notification.subtype,
8446 pkt->u.notification.flags,
8447 le16_to_cpu(pkt->u.notification.size));
8448 ipw_rx_notification(priv, &pkt->u.notification);
8449 break;
8450 }
8451
8452 default:
8453 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
8454 pkt->header.message_type);
8455 break;
8456 }
8457
8458 /* For now we just don't re-use anything. We can tweak this
8459 * later to try and re-use notification packets and SKBs that
8460 * fail to Rx correctly */
8461 if (rxb->skb != NULL) {
8462 dev_kfree_skb_any(rxb->skb);
8463 rxb->skb = NULL;
8464 }
8465
8466 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
8467 IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
8468 list_add_tail(&rxb->list, &priv->rxq->rx_used);
8469
8470 i = (i + 1) % RX_QUEUE_SIZE;
8471
8472 /* If there are a lot of unused frames, restock the Rx queue
8473 * so the ucode won't assert */
8474 if (fill_rx) {
8475 priv->rxq->read = i;
8476 ipw_rx_queue_replenish(priv);
8477 }
8478 }
8479
8480 /* Backtrack one entry */
8481 priv->rxq->read = i;
8482 ipw_rx_queue_restock(priv);
8483}
8484
8485#define DEFAULT_RTS_THRESHOLD 2304U
8486#define MIN_RTS_THRESHOLD 1U
8487#define MAX_RTS_THRESHOLD 2304U
8488#define DEFAULT_BEACON_INTERVAL 100U
8489#define DEFAULT_SHORT_RETRY_LIMIT 7U
8490#define DEFAULT_LONG_RETRY_LIMIT 4U
8491
8492/**
8493 * ipw_sw_reset
8494 * @option: options to control different reset behaviour
8495 * 0 = reset everything except the 'disable' module_param
8496 * 1 = reset everything and print out driver info (for probe only)
8497 * 2 = reset everything
8498 */
8499static int ipw_sw_reset(struct ipw_priv *priv, int option)
8500{
8501 int band, modulation;
8502 int old_mode = priv->ieee->iw_mode;
8503
8504 /* Initialize module parameter values here */
8505 priv->config = 0;
8506
8507 /* We default to disabling the LED code as right now it causes
8508 * too many systems to lock up... */
8509 if (!led)
8510 priv->config |= CFG_NO_LED;
8511
8512 if (associate)
8513 priv->config |= CFG_ASSOCIATE;
8514 else
8515 IPW_DEBUG_INFO("Auto associate disabled.\n");
8516
8517 if (auto_create)
8518 priv->config |= CFG_ADHOC_CREATE;
8519 else
8520 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
8521
8522 priv->config &= ~CFG_STATIC_ESSID;
8523 priv->essid_len = 0;
8524 memset(priv->essid, 0, IW_ESSID_MAX_SIZE);
8525
8526 if (disable && option) {
8527 priv->status |= STATUS_RF_KILL_SW;
8528 IPW_DEBUG_INFO("Radio disabled.\n");
8529 }
8530
8531 if (channel != 0) {
8532 priv->config |= CFG_STATIC_CHANNEL;
8533 priv->channel = channel;
8534 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
8535 /* TODO: Validate that provided channel is in range */
8536 }
8537#ifdef CONFIG_IPW2200_QOS
8538 ipw_qos_init(priv, qos_enable, qos_burst_enable,
8539 burst_duration_CCK, burst_duration_OFDM);
8540#endif /* CONFIG_IPW2200_QOS */
8541
8542 switch (mode) {
8543 case 1:
8544 priv->ieee->iw_mode = IW_MODE_ADHOC;
8545 priv->net_dev->type = ARPHRD_ETHER;
8546
8547 break;
8548#ifdef CONFIG_IPW2200_MONITOR
8549 case 2:
8550 priv->ieee->iw_mode = IW_MODE_MONITOR;
8551#ifdef CONFIG_IPW2200_RADIOTAP
8552 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8553#else
8554 priv->net_dev->type = ARPHRD_IEEE80211;
8555#endif
8556 break;
8557#endif
8558 default:
8559 case 0:
8560 priv->net_dev->type = ARPHRD_ETHER;
8561 priv->ieee->iw_mode = IW_MODE_INFRA;
8562 break;
8563 }
8564
8565 if (hwcrypto) {
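 /* Leave encryption and decryption to the adapter; disable all
 * host-side crypto paths in the ieee80211 layer. */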
8566 priv->ieee->host_encrypt = 0;
8567 priv->ieee->host_encrypt_msdu = 0;
8568 priv->ieee->host_decrypt = 0;
8569 priv->ieee->host_mc_decrypt = 0;
8570 }
8571 IPW_DEBUG_INFO("Hardware crypto [%s]\n", hwcrypto ? "on" : "off");
8572
8573 /* The IPW2200/2915 is able to do hardware fragmentation. */
8574 priv->ieee->host_open_frag = 0;
8575
8576 if ((priv->pci_dev->device == 0x4223) ||
8577 (priv->pci_dev->device == 0x4224)) {
8578 if (option == 1)
8579 printk(KERN_INFO DRV_NAME
8580 ": Detected Intel PRO/Wireless 2915ABG Network "
8581 "Connection\n");
8582 priv->ieee->abg_true = 1;
8583 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
8584 modulation = IEEE80211_OFDM_MODULATION |
8585 IEEE80211_CCK_MODULATION;
8586 priv->adapter = IPW_2915ABG;
8587 priv->ieee->mode = IEEE_A | IEEE_G | IEEE_B;
8588 } else {
8589 if (option == 1)
8590 printk(KERN_INFO DRV_NAME
8591 ": Detected Intel PRO/Wireless 2200BG Network "
8592 "Connection\n");
8593
8594 priv->ieee->abg_true = 0;
8595 band = IEEE80211_24GHZ_BAND;
8596 modulation = IEEE80211_OFDM_MODULATION |
8597 IEEE80211_CCK_MODULATION;
8598 priv->adapter = IPW_2200BG;
8599 priv->ieee->mode = IEEE_G | IEEE_B;
8600 }
8601
8602 priv->ieee->freq_band = band;
8603 priv->ieee->modulation = modulation;
8604
8605 priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
8606
8607 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
8608 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
8609
8610 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
8611 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
8612 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
8613
8614 /* If power management is turned on, default to AC mode */
8615 priv->power_mode = IPW_POWER_AC;
8616 priv->tx_power = IPW_TX_POWER_DEFAULT;
8617
8618 return old_mode == priv->ieee->iw_mode;
8619}
8620
8621/*
8622 * This file defines the Wireless Extension handlers. It does not
8623 * define any methods of hardware manipulation and relies on the
8624 * functions defined in ipw_main to provide the HW interaction.
8625 *
8626 * The exception to this is ipw_get_ordinal(), which is used to poll
8627 * the hardware directly rather than making unnecessary calls.
8628 *
8629 */
8630
8631static int ipw_wx_get_name(struct net_device *dev,
8632 struct iw_request_info *info,
8633 union iwreq_data *wrqu, char *extra)
8634{
8635 struct ipw_priv *priv = ieee80211_priv(dev);
8636 mutex_lock(&priv->mutex);
8637 if (priv->status & STATUS_RF_KILL_MASK)
8638 strcpy(wrqu->name, "radio off");
8639 else if (!(priv->status & STATUS_ASSOCIATED))
8640 strcpy(wrqu->name, "unassociated");
8641 else
8642 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
8643 ipw_modes[priv->assoc_request.ieee_mode]);
8644 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
8645 mutex_unlock(&priv->mutex);
8646 return 0;
8647}
8648
8649static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
8650{
8651 if (channel == 0) {
8652 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
8653 priv->config &= ~CFG_STATIC_CHANNEL;
8654 IPW_DEBUG_ASSOC("Attempting to associate with new "
8655 "parameters.\n");
8656 ipw_associate(priv);
8657 return 0;
8658 }
8659
8660 priv->config |= CFG_STATIC_CHANNEL;
8661
8662 if (priv->channel == channel) {
8663 IPW_DEBUG_INFO("Request to set channel to current value (%d)\n",
8664 channel);
8665 return 0;
8666 }
8667
8668 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
8669 priv->channel = channel;
8670
8671#ifdef CONFIG_IPW2200_MONITOR
8672 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
8673 int i;
8674 if (priv->status & STATUS_SCANNING) {
8675 IPW_DEBUG_SCAN("Scan abort triggered due to "
8676 "channel change.\n");
8677 ipw_abort_scan(priv);
8678 }
8679
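 /* Poll in 10 us steps, for at most ~10 ms, for the scan abort to
 * take effect. */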
8680 for (i = 1000; i && (priv->status & STATUS_SCANNING); i--)
8681 udelay(10);
8682
8683 if (priv->status & STATUS_SCANNING)
8684 IPW_DEBUG_SCAN("Still scanning...\n");
8685 else
8686 IPW_DEBUG_SCAN("Took %dms to abort current scan\n",
8687 1000 - i);
8688
8689 return 0;
8690 }
8691#endif /* CONFIG_IPW2200_MONITOR */
8692
8693 /* Network configuration changed -- force [re]association */
8694 IPW_DEBUG_ASSOC("[re]association triggered due to channel change.\n");
8695 if (!ipw_disassociate(priv))
8696 ipw_associate(priv);
8697
8698 return 0;
8699}
8700
8701static int ipw_wx_set_freq(struct net_device *dev,
8702 struct iw_request_info *info,
8703 union iwreq_data *wrqu, char *extra)
8704{
8705 struct ipw_priv *priv = ieee80211_priv(dev);
8706 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8707 struct iw_freq *fwrq = &wrqu->freq;
8708 int ret = 0, i;
8709 u8 channel, flags;
8710 int band;
8711
8712 if (fwrq->m == 0) {
8713 IPW_DEBUG_WX("SET Freq/Channel -> any\n");
8714 mutex_lock(&priv->mutex);
8715 ret = ipw_set_channel(priv, 0);
8716 mutex_unlock(&priv->mutex);
8717 return ret;
8718 }
8719 /* if setting by freq convert to channel */
8720 if (fwrq->e == 1) {
8721 channel = ieee80211_freq_to_channel(priv->ieee, fwrq->m);
8722 if (channel == 0)
8723 return -EINVAL;
8724 } else
8725 channel = fwrq->m;
8726
8727 if (!(band = ieee80211_is_valid_channel(priv->ieee, channel)))
8728 return -EINVAL;
8729
8730 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
8731 i = ieee80211_channel_to_index(priv->ieee, channel);
8732 if (i == -1)
8733 return -EINVAL;
8734
8735 flags = (band == IEEE80211_24GHZ_BAND) ?
8736 geo->bg[i].flags : geo->a[i].flags;
8737 if (flags & IEEE80211_CH_PASSIVE_ONLY) {
8738 IPW_DEBUG_WX("Invalid Ad-Hoc channel for 802.11a\n");
8739 return -EINVAL;
8740 }
8741 }
8742
8743 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
8744 mutex_lock(&priv->mutex);
8745 ret = ipw_set_channel(priv, channel);
8746 mutex_unlock(&priv->mutex);
8747 return ret;
8748}
8749
8750static int ipw_wx_get_freq(struct net_device *dev,
8751 struct iw_request_info *info,
8752 union iwreq_data *wrqu, char *extra)
8753{
8754 struct ipw_priv *priv = ieee80211_priv(dev);
8755
8756 wrqu->freq.e = 0;
8757
8758 /* If we are associated, trying to associate, or have a statically
8759 * configured CHANNEL then return that; otherwise return ANY */
8760 mutex_lock(&priv->mutex);
8761 if (priv->config & CFG_STATIC_CHANNEL ||
8762 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
8763 int i;
8764
8765 i = ieee80211_channel_to_index(priv->ieee, priv->channel);
8766 BUG_ON(i == -1);
8767 wrqu->freq.e = 1;
8768
8769 switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
8770 case IEEE80211_52GHZ_BAND:
8771 wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
8772 break;
8773
8774 case IEEE80211_24GHZ_BAND:
8775 wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
8776 break;
8777
8778 default:
8779 BUG();
8780 }
8781 } else
8782 wrqu->freq.m = 0;
8783
8784 mutex_unlock(&priv->mutex);
8785 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
8786 return 0;
8787}
8788
8789static int ipw_wx_set_mode(struct net_device *dev,
8790 struct iw_request_info *info,
8791 union iwreq_data *wrqu, char *extra)
8792{
8793 struct ipw_priv *priv = ieee80211_priv(dev);
8794 int err = 0;
8795
8796 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
8797
8798 switch (wrqu->mode) {
8799#ifdef CONFIG_IPW2200_MONITOR
8800 case IW_MODE_MONITOR:
8801#endif
8802 case IW_MODE_ADHOC:
8803 case IW_MODE_INFRA:
8804 break;
8805 case IW_MODE_AUTO:
8806 wrqu->mode = IW_MODE_INFRA;
8807 break;
8808 default:
8809 return -EINVAL;
8810 }
8811 if (wrqu->mode == priv->ieee->iw_mode)
8812 return 0;
8813
8814 mutex_lock(&priv->mutex);
8815
8816 ipw_sw_reset(priv, 0);
8817
8818#ifdef CONFIG_IPW2200_MONITOR
8819 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
8820 priv->net_dev->type = ARPHRD_ETHER;
8821
8822 if (wrqu->mode == IW_MODE_MONITOR)
8823#ifdef CONFIG_IPW2200_RADIOTAP
8824 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
8825#else
8826 priv->net_dev->type = ARPHRD_IEEE80211;
8827#endif
8828#endif /* CONFIG_IPW2200_MONITOR */
8829
8830 /* Free the existing firmware and reset the fw_loaded
8831 * flag so ipw_load() will bring in the new firmware */
8832 free_firmware();
8833
8834 priv->ieee->iw_mode = wrqu->mode;
8835
8836 queue_work(priv->workqueue, &priv->adapter_restart);
8837 mutex_unlock(&priv->mutex);
8838 return err;
8839}
8840
8841static int ipw_wx_get_mode(struct net_device *dev,
8842 struct iw_request_info *info,
8843 union iwreq_data *wrqu, char *extra)
8844{
8845 struct ipw_priv *priv = ieee80211_priv(dev);
8846 mutex_lock(&priv->mutex);
8847 wrqu->mode = priv->ieee->iw_mode;
8848 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
8849 mutex_unlock(&priv->mutex);
8850 return 0;
8851}
8852
8853/* Values are in microseconds */
8854static const s32 timeout_duration[] = {
8855 350000,
8856 250000,
8857 75000,
8858 37000,
8859 25000,
8860};
8861
8862static const s32 period_duration[] = {
8863 400000,
8864 700000,
8865 1000000,
8866 1000000,
8867 1000000
8868};
8869
8870static int ipw_wx_get_range(struct net_device *dev,
8871 struct iw_request_info *info,
8872 union iwreq_data *wrqu, char *extra)
8873{
8874 struct ipw_priv *priv = ieee80211_priv(dev);
8875 struct iw_range *range = (struct iw_range *)extra;
8876 const struct ieee80211_geo *geo = ieee80211_get_geo(priv->ieee);
8877 int i = 0, j;
8878
8879 wrqu->data.length = sizeof(*range);
8880 memset(range, 0, sizeof(*range));
8881
8882 /* 54 Mb/s == ~27 Mb/s real throughput (802.11g) */
8883 range->throughput = 27 * 1000 * 1000;
8884
8885 range->max_qual.qual = 100;
8886 /* TODO: Find real max RSSI and stick here */
8887 range->max_qual.level = 0;
8888 range->max_qual.noise = 0;
8889 range->max_qual.updated = 7; /* Updated all three */
8890
8891 range->avg_qual.qual = 70;
8892 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
8893 range->avg_qual.level = 0; /* FIXME to real average level */
8894 range->avg_qual.noise = 0;
8895 range->avg_qual.updated = 7; /* Updated all three */
8896 mutex_lock(&priv->mutex);
8897 range->num_bitrates = min(priv->rates.num_rates, (u8) IW_MAX_BITRATES);
8898
8899 for (i = 0; i < range->num_bitrates; i++)
8900 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
8901 500000;
8902
8903 range->max_rts = DEFAULT_RTS_THRESHOLD;
8904 range->min_frag = MIN_FRAG_THRESHOLD;
8905 range->max_frag = MAX_FRAG_THRESHOLD;
8906
8907 range->encoding_size[0] = 5;
8908 range->encoding_size[1] = 13;
8909 range->num_encoding_sizes = 2;
8910 range->max_encoding_tokens = WEP_KEYS;
8911
8912 /* Set the Wireless Extension versions */
8913 range->we_version_compiled = WIRELESS_EXT;
8914 range->we_version_source = 18;
8915
8916 i = 0;
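 /* In ad-hoc mode, skip channels restricted to passive scanning;
 * ipw_wx_set_freq() rejects them for IBSS use as well. */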
8917 if (priv->ieee->mode & (IEEE_B | IEEE_G)) {
8918 for (j = 0; j < geo->bg_channels && i < IW_MAX_FREQUENCIES; j++) {
8919 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8920 (geo->bg[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8921 continue;
8922
8923 range->freq[i].i = geo->bg[j].channel;
8924 range->freq[i].m = geo->bg[j].freq * 100000;
8925 range->freq[i].e = 1;
8926 i++;
8927 }
8928 }
8929
8930 if (priv->ieee->mode & IEEE_A) {
8931 for (j = 0; j < geo->a_channels && i < IW_MAX_FREQUENCIES; j++) {
8932 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
8933 (geo->a[j].flags & IEEE80211_CH_PASSIVE_ONLY))
8934 continue;
8935
8936 range->freq[i].i = geo->a[j].channel;
8937 range->freq[i].m = geo->a[j].freq * 100000;
8938 range->freq[i].e = 1;
8939 i++;
8940 }
8941 }
8942
8943 range->num_channels = i;
8944 range->num_frequency = i;
8945
8946 mutex_unlock(&priv->mutex);
8947
8948 /* Event capability (kernel + driver) */
8949 range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
8950 IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) |
8951 IW_EVENT_CAPA_MASK(SIOCGIWAP) |
8952 IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
8953 range->event_capa[1] = IW_EVENT_CAPA_K_1;
8954
8955 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
8956 IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
8957
8958 range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE;
8959
8960 IPW_DEBUG_WX("GET Range\n");
8961 return 0;
8962}
8963
8964static int ipw_wx_set_wap(struct net_device *dev,
8965 struct iw_request_info *info,
8966 union iwreq_data *wrqu, char *extra)
8967{
8968 struct ipw_priv *priv = ieee80211_priv(dev);
8969
8970 static const unsigned char any[] = {
8971 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
8972 };
8973 static const unsigned char off[] = {
8974 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
8975 };
8976
8977 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
8978 return -EINVAL;
8979 mutex_lock(&priv->mutex);
8980 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
8981 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8982 /* we disable mandatory BSSID association */
8983 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
8984 priv->config &= ~CFG_STATIC_BSSID;
8985 IPW_DEBUG_ASSOC("Attempting to associate with new "
8986 "parameters.\n");
8987 ipw_associate(priv);
8988 mutex_unlock(&priv->mutex);
8989 return 0;
8990 }
8991
8992 priv->config |= CFG_STATIC_BSSID;
8993 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
8994 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
8995 mutex_unlock(&priv->mutex);
8996 return 0;
8997 }
8998
8999 IPW_DEBUG_WX("Setting mandatory BSSID to %pM\n",
9000 wrqu->ap_addr.sa_data);
9001
9002 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
9003
9004 /* Network configuration changed -- force [re]association */
9005 IPW_DEBUG_ASSOC("[re]association triggered due to BSSID change.\n");
9006 if (!ipw_disassociate(priv))
9007 ipw_associate(priv);
9008
9009 mutex_unlock(&priv->mutex);
9010 return 0;
9011}
9012
9013static int ipw_wx_get_wap(struct net_device *dev,
9014 struct iw_request_info *info,
9015 union iwreq_data *wrqu, char *extra)
9016{
9017 struct ipw_priv *priv = ieee80211_priv(dev);
9018
9019 /* If we are associated, trying to associate, or have a statically
9020 * configured BSSID then return that; otherwise return ANY */
9021 mutex_lock(&priv->mutex);
9022 if (priv->config & CFG_STATIC_BSSID ||
9023 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9024 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
9025 memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN);
9026 } else
9027 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
9028
9029 IPW_DEBUG_WX("Getting WAP BSSID: %pM\n",
9030 wrqu->ap_addr.sa_data);
9031 mutex_unlock(&priv->mutex);
9032 return 0;
9033}
9034
9035static int ipw_wx_set_essid(struct net_device *dev,
9036 struct iw_request_info *info,
9037 union iwreq_data *wrqu, char *extra)
9038{
9039 struct ipw_priv *priv = ieee80211_priv(dev);
9040 int length;
9041 DECLARE_SSID_BUF(ssid);
9042
9043 mutex_lock(&priv->mutex);
9044
9045 if (!wrqu->essid.flags)
9046 {
9047 IPW_DEBUG_WX("Setting ESSID to ANY\n");
9048 ipw_disassociate(priv);
9049 priv->config &= ~CFG_STATIC_ESSID;
9050 ipw_associate(priv);
9051 mutex_unlock(&priv->mutex);
9052 return 0;
9053 }
9054
9055 length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
9056
9057 priv->config |= CFG_STATIC_ESSID;
9058
9059 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
9060 && (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
9061 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
9062 mutex_unlock(&priv->mutex);
9063 return 0;
9064 }
9065
9066 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n",
9067 print_ssid(ssid, extra, length), length);
9068
9069 priv->essid_len = length;
9070 memcpy(priv->essid, extra, priv->essid_len);
9071
9072 /* Network configuration changed -- force [re]association */
9073 IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
9074 if (!ipw_disassociate(priv))
9075 ipw_associate(priv);
9076
9077 mutex_unlock(&priv->mutex);
9078 return 0;
9079}
9080
9081static int ipw_wx_get_essid(struct net_device *dev,
9082 struct iw_request_info *info,
9083 union iwreq_data *wrqu, char *extra)
9084{
9085 struct ipw_priv *priv = ieee80211_priv(dev);
9086 DECLARE_SSID_BUF(ssid);
9087
9088 /* If we are associated, trying to associate, or have a statically
9089 * configured ESSID then return that; otherwise return ANY */
9090 mutex_lock(&priv->mutex);
9091 if (priv->config & CFG_STATIC_ESSID ||
9092 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
9093 IPW_DEBUG_WX("Getting essid: '%s'\n",
9094 print_ssid(ssid, priv->essid, priv->essid_len));
9095 memcpy(extra, priv->essid, priv->essid_len);
9096 wrqu->essid.length = priv->essid_len;
9097 wrqu->essid.flags = 1; /* active */
9098 } else {
9099 IPW_DEBUG_WX("Getting essid: ANY\n");
9100 wrqu->essid.length = 0;
9101 wrqu->essid.flags = 0; /* active */
9102 }
9103 mutex_unlock(&priv->mutex);
9104 return 0;
9105}
9106
9107static int ipw_wx_set_nick(struct net_device *dev,
9108 struct iw_request_info *info,
9109 union iwreq_data *wrqu, char *extra)
9110{
9111 struct ipw_priv *priv = ieee80211_priv(dev);
9112
9113 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
9114 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
9115 return -E2BIG;
9116 mutex_lock(&priv->mutex);
9117 wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick));
9118 memset(priv->nick, 0, sizeof(priv->nick));
9119 memcpy(priv->nick, extra, wrqu->data.length);
9120 IPW_DEBUG_TRACE("<<\n");
9121 mutex_unlock(&priv->mutex);
9122 return 0;
9123
9124}
9125
9126static int ipw_wx_get_nick(struct net_device *dev,
9127 struct iw_request_info *info,
9128 union iwreq_data *wrqu, char *extra)
9129{
9130 struct ipw_priv *priv = ieee80211_priv(dev);
9131 IPW_DEBUG_WX("Getting nick\n");
9132 mutex_lock(&priv->mutex);
9133 wrqu->data.length = strlen(priv->nick);
9134 memcpy(extra, priv->nick, wrqu->data.length);
9135 wrqu->data.flags = 1; /* active */
9136 mutex_unlock(&priv->mutex);
9137 return 0;
9138}
9139
9140static int ipw_wx_set_sens(struct net_device *dev,
9141 struct iw_request_info *info,
9142 union iwreq_data *wrqu, char *extra)
9143{
9144 struct ipw_priv *priv = ieee80211_priv(dev);
9145 int err = 0;
9146
9147 IPW_DEBUG_WX("Setting roaming threshold to %d\n", wrqu->sens.value);
9148 IPW_DEBUG_WX("Setting disassociate threshold to %d\n", 3*wrqu->sens.value);
9149 mutex_lock(&priv->mutex);
9150
9151 if (wrqu->sens.fixed == 0)
9152 {
9153 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
9154 priv->disassociate_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
9155 goto out;
9156 }
9157 if ((wrqu->sens.value > IPW_MB_ROAMING_THRESHOLD_MAX) ||
9158 (wrqu->sens.value < IPW_MB_ROAMING_THRESHOLD_MIN)) {
9159 err = -EINVAL;
9160 goto out;
9161 }
9162
9163 priv->roaming_threshold = wrqu->sens.value;
9164 priv->disassociate_threshold = 3*wrqu->sens.value;
9165 out:
9166 mutex_unlock(&priv->mutex);
9167 return err;
9168}
9169
9170static int ipw_wx_get_sens(struct net_device *dev,
9171 struct iw_request_info *info,
9172 union iwreq_data *wrqu, char *extra)
9173{
9174 struct ipw_priv *priv = ieee80211_priv(dev);
9175 mutex_lock(&priv->mutex);
9176 wrqu->sens.fixed = 1;
9177 wrqu->sens.value = priv->roaming_threshold;
9178 mutex_unlock(&priv->mutex);
9179
9180 IPW_DEBUG_WX("GET roaming threshold -> %s %d \n",
9181 wrqu->sens.disabled ? "OFF" : "ON", wrqu->sens.value);
9182
9183 return 0;
9184}
9185
9186static int ipw_wx_set_rate(struct net_device *dev,
9187 struct iw_request_info *info,
9188 union iwreq_data *wrqu, char *extra)
9189{
9190 /* TODO: We should use semaphores or locks for access to priv */
9191 struct ipw_priv *priv = ieee80211_priv(dev);
9192 u32 target_rate = wrqu->bitrate.value;
9193 u32 fixed, mask;
9194
9195 /* value = -1, fixed = 0 means auto only, so we should use all rates offered by AP */
9196 /* value = X, fixed = 1 means only rate X */
9197 /* value = X, fixed = 0 means all rates lower than or equal to X */
9198
9199 if (target_rate == -1) {
9200 fixed = 0;
9201 mask = IEEE80211_DEFAULT_RATES_MASK;
9202 /* Now we should reassociate */
9203 goto apply;
9204 }
9205
9206 mask = 0;
9207 fixed = wrqu->bitrate.fixed;
9208
9209 if (target_rate == 1000000 || !fixed)
9210 mask |= IEEE80211_CCK_RATE_1MB_MASK;
9211 if (target_rate == 1000000)
9212 goto apply;
9213
9214 if (target_rate == 2000000 || !fixed)
9215 mask |= IEEE80211_CCK_RATE_2MB_MASK;
9216 if (target_rate == 2000000)
9217 goto apply;
9218
9219 if (target_rate == 5500000 || !fixed)
9220 mask |= IEEE80211_CCK_RATE_5MB_MASK;
9221 if (target_rate == 5500000)
9222 goto apply;
9223
9224 if (target_rate == 6000000 || !fixed)
9225 mask |= IEEE80211_OFDM_RATE_6MB_MASK;
9226 if (target_rate == 6000000)
9227 goto apply;
9228
9229 if (target_rate == 9000000 || !fixed)
9230 mask |= IEEE80211_OFDM_RATE_9MB_MASK;
9231 if (target_rate == 9000000)
9232 goto apply;
9233
9234 if (target_rate == 11000000 || !fixed)
9235 mask |= IEEE80211_CCK_RATE_11MB_MASK;
9236 if (target_rate == 11000000)
9237 goto apply;
9238
9239 if (target_rate == 12000000 || !fixed)
9240 mask |= IEEE80211_OFDM_RATE_12MB_MASK;
9241 if (target_rate == 12000000)
9242 goto apply;
9243
9244 if (target_rate == 18000000 || !fixed)
9245 mask |= IEEE80211_OFDM_RATE_18MB_MASK;
9246 if (target_rate == 18000000)
9247 goto apply;
9248
9249 if (target_rate == 24000000 || !fixed)
9250 mask |= IEEE80211_OFDM_RATE_24MB_MASK;
9251 if (target_rate == 24000000)
9252 goto apply;
9253
9254 if (target_rate == 36000000 || !fixed)
9255 mask |= IEEE80211_OFDM_RATE_36MB_MASK;
9256 if (target_rate == 36000000)
9257 goto apply;
9258
9259 if (target_rate == 48000000 || !fixed)
9260 mask |= IEEE80211_OFDM_RATE_48MB_MASK;
9261 if (target_rate == 48000000)
9262 goto apply;
9263
9264 if (target_rate == 54000000 || !fixed)
9265 mask |= IEEE80211_OFDM_RATE_54MB_MASK;
9266 if (target_rate == 54000000)
9267 goto apply;
9268
9269 IPW_DEBUG_WX("invalid rate specified, returning error\n");
9270 return -EINVAL;
9271
9272 apply:
9273 IPW_DEBUG_WX("Setting rate mask to 0x%08X [%s]\n",
9274 mask, fixed ? "fixed" : "sub-rates");
9275 mutex_lock(&priv->mutex);
9276 if (mask == IEEE80211_DEFAULT_RATES_MASK) {
9277 priv->config &= ~CFG_FIXED_RATE;
9278 ipw_set_fixed_rate(priv, priv->ieee->mode);
9279 } else
9280 priv->config |= CFG_FIXED_RATE;
9281
9282 if (priv->rates_mask == mask) {
9283 IPW_DEBUG_WX("Mask set to current mask.\n");
9284 mutex_unlock(&priv->mutex);
9285 return 0;
9286 }
9287
9288 priv->rates_mask = mask;
9289
9290 /* Network configuration changed -- force [re]association */
9291 IPW_DEBUG_ASSOC("[re]association triggered due to rates change.\n");
9292 if (!ipw_disassociate(priv))
9293 ipw_associate(priv);
9294
9295 mutex_unlock(&priv->mutex);
9296 return 0;
9297}
9298
9299static int ipw_wx_get_rate(struct net_device *dev,
9300 struct iw_request_info *info,
9301 union iwreq_data *wrqu, char *extra)
9302{
9303 struct ipw_priv *priv = ieee80211_priv(dev);
9304 mutex_lock(&priv->mutex);
9305 wrqu->bitrate.value = priv->last_rate;
9306 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9307 mutex_unlock(&priv->mutex);
9308 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
9309 return 0;
9310}
9311
9312static int ipw_wx_set_rts(struct net_device *dev,
9313 struct iw_request_info *info,
9314 union iwreq_data *wrqu, char *extra)
9315{
9316 struct ipw_priv *priv = ieee80211_priv(dev);
9317 mutex_lock(&priv->mutex);
9318 if (wrqu->rts.disabled || !wrqu->rts.fixed)
9319 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
9320 else {
9321 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
9322 wrqu->rts.value > MAX_RTS_THRESHOLD) {
9323 mutex_unlock(&priv->mutex);
9324 return -EINVAL;
9325 }
9326 priv->rts_threshold = wrqu->rts.value;
9327 }
9328
9329 ipw_send_rts_threshold(priv, priv->rts_threshold);
9330 mutex_unlock(&priv->mutex);
9331 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
9332 return 0;
9333}
9334
9335static int ipw_wx_get_rts(struct net_device *dev,
9336 struct iw_request_info *info,
9337 union iwreq_data *wrqu, char *extra)
9338{
9339 struct ipw_priv *priv = ieee80211_priv(dev);
9340 mutex_lock(&priv->mutex);
9341 wrqu->rts.value = priv->rts_threshold;
9342 wrqu->rts.fixed = 0; /* no auto select */
9343 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9344 mutex_unlock(&priv->mutex);
9345 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
9346 return 0;
9347}
9348
9349static int ipw_wx_set_txpow(struct net_device *dev,
9350 struct iw_request_info *info,
9351 union iwreq_data *wrqu, char *extra)
9352{
9353 struct ipw_priv *priv = ieee80211_priv(dev);
9354 int err = 0;
9355
9356 mutex_lock(&priv->mutex);
9357 if (ipw_radio_kill_sw(priv, wrqu->power.disabled)) {
9358 err = -EINPROGRESS;
9359 goto out;
9360 }
9361
9362 if (!wrqu->power.fixed)
9363 wrqu->power.value = IPW_TX_POWER_DEFAULT;
9364
9365 if (wrqu->power.flags != IW_TXPOW_DBM) {
9366 err = -EINVAL;
9367 goto out;
9368 }
9369
9370 if ((wrqu->power.value > IPW_TX_POWER_MAX) ||
9371 (wrqu->power.value < IPW_TX_POWER_MIN)) {
9372 err = -EINVAL;
9373 goto out;
9374 }
9375
9376 priv->tx_power = wrqu->power.value;
9377 err = ipw_set_tx_power(priv);
9378 out:
9379 mutex_unlock(&priv->mutex);
9380 return err;
9381}
9382
9383static int ipw_wx_get_txpow(struct net_device *dev,
9384 struct iw_request_info *info,
9385 union iwreq_data *wrqu, char *extra)
9386{
9387 struct ipw_priv *priv = ieee80211_priv(dev);
9388 mutex_lock(&priv->mutex);
9389 wrqu->power.value = priv->tx_power;
9390 wrqu->power.fixed = 1;
9391 wrqu->power.flags = IW_TXPOW_DBM;
9392 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9393 mutex_unlock(&priv->mutex);
9394
9395 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
9396 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9397
9398 return 0;
9399}
9400
9401static int ipw_wx_set_frag(struct net_device *dev,
9402 struct iw_request_info *info,
9403 union iwreq_data *wrqu, char *extra)
9404{
9405 struct ipw_priv *priv = ieee80211_priv(dev);
9406 mutex_lock(&priv->mutex);
9407 if (wrqu->frag.disabled || !wrqu->frag.fixed)
9408 priv->ieee->fts = DEFAULT_FTS;
9409 else {
9410 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
9411 wrqu->frag.value > MAX_FRAG_THRESHOLD) {
9412 mutex_unlock(&priv->mutex);
9413 return -EINVAL;
9414 }
9415
9416 priv->ieee->fts = wrqu->frag.value & ~0x1;
9417 }
9418
9419 ipw_send_frag_threshold(priv, wrqu->frag.value);
9420 mutex_unlock(&priv->mutex);
9421 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
9422 return 0;
9423}
9424
9425static int ipw_wx_get_frag(struct net_device *dev,
9426 struct iw_request_info *info,
9427 union iwreq_data *wrqu, char *extra)
9428{
9429 struct ipw_priv *priv = ieee80211_priv(dev);
9430 mutex_lock(&priv->mutex);
9431 wrqu->frag.value = priv->ieee->fts;
9432 wrqu->frag.fixed = 0; /* no auto select */
9433 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9434 mutex_unlock(&priv->mutex);
9435 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
9436
9437 return 0;
9438}
9439
9440static int ipw_wx_set_retry(struct net_device *dev,
9441 struct iw_request_info *info,
9442 union iwreq_data *wrqu, char *extra)
9443{
9444 struct ipw_priv *priv = ieee80211_priv(dev);
9445
9446 if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled)
9447 return -EINVAL;
9448
9449 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
9450 return 0;
9451
9452 if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
9453 return -EINVAL;
9454
9455 mutex_lock(&priv->mutex);
9456 if (wrqu->retry.flags & IW_RETRY_SHORT)
9457 priv->short_retry_limit = (u8) wrqu->retry.value;
9458 else if (wrqu->retry.flags & IW_RETRY_LONG)
9459 priv->long_retry_limit = (u8) wrqu->retry.value;
9460 else {
9461 priv->short_retry_limit = (u8) wrqu->retry.value;
9462 priv->long_retry_limit = (u8) wrqu->retry.value;
9463 }
9464
9465 ipw_send_retry_limit(priv, priv->short_retry_limit,
9466 priv->long_retry_limit);
9467 mutex_unlock(&priv->mutex);
9468 IPW_DEBUG_WX("SET retry limit -> short:%d long:%d\n",
9469 priv->short_retry_limit, priv->long_retry_limit);
9470 return 0;
9471}
9472
9473static int ipw_wx_get_retry(struct net_device *dev,
9474 struct iw_request_info *info,
9475 union iwreq_data *wrqu, char *extra)
9476{
9477 struct ipw_priv *priv = ieee80211_priv(dev);
9478
9479 mutex_lock(&priv->mutex);
9480 wrqu->retry.disabled = 0;
9481
9482 if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
9483 mutex_unlock(&priv->mutex);
9484 return -EINVAL;
9485 }
9486
9487 if (wrqu->retry.flags & IW_RETRY_LONG) {
9488 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
9489 wrqu->retry.value = priv->long_retry_limit;
9490 } else if (wrqu->retry.flags & IW_RETRY_SHORT) {
9491 wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
9492 wrqu->retry.value = priv->short_retry_limit;
9493 } else {
9494 wrqu->retry.flags = IW_RETRY_LIMIT;
9495 wrqu->retry.value = priv->short_retry_limit;
9496 }
9497 mutex_unlock(&priv->mutex);
9498
9499 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value);
9500
9501 return 0;
9502}
9503
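/* SIOCSIWSCAN handler: queue a directed (per-ESSID), passive, or normal
 * broadcast scan work item depending on the request. */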
9504static int ipw_wx_set_scan(struct net_device *dev,
9505 struct iw_request_info *info,
9506 union iwreq_data *wrqu, char *extra)
9507{
9508 struct ipw_priv *priv = ieee80211_priv(dev);
9509 struct iw_scan_req *req = (struct iw_scan_req *)extra;
9510 struct delayed_work *work = NULL;
9511
9512 mutex_lock(&priv->mutex);
9513
9514 priv->user_requested_scan = 1;
9515
9516 if (wrqu->data.length == sizeof(struct iw_scan_req)) {
9517 if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
9518 int len = min((int)req->essid_len,
9519 (int)sizeof(priv->direct_scan_ssid));
9520 memcpy(priv->direct_scan_ssid, req->essid, len);
9521 priv->direct_scan_ssid_len = len;
9522 work = &priv->request_direct_scan;
9523 } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
9524 work = &priv->request_passive_scan;
9525 }
9526 } else {
9527 /* Normal active broadcast scan */
9528 work = &priv->request_scan;
9529 }
9530
9531 mutex_unlock(&priv->mutex);
9532
9533 IPW_DEBUG_WX("Start scan\n");
9534
9535 queue_delayed_work(priv->workqueue, work, 0);
9536
9537 return 0;
9538}
9539
9540static int ipw_wx_get_scan(struct net_device *dev,
9541 struct iw_request_info *info,
9542 union iwreq_data *wrqu, char *extra)
9543{
9544 struct ipw_priv *priv = ieee80211_priv(dev);
9545 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
9546}
9547
9548static int ipw_wx_set_encode(struct net_device *dev,
9549 struct iw_request_info *info,
9550 union iwreq_data *wrqu, char *key)
9551{
9552 struct ipw_priv *priv = ieee80211_priv(dev);
9553 int ret;
9554 u32 cap = priv->capability;
9555
9556 mutex_lock(&priv->mutex);
9557 ret = ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
9558
9559	/* In IBSS mode, we need to notify the firmware to update
9560	 * the beacon info after we change the capability. */
9561 if (cap != priv->capability &&
9562 priv->ieee->iw_mode == IW_MODE_ADHOC &&
9563 priv->status & STATUS_ASSOCIATED)
9564 ipw_disassociate(priv);
9565
9566 mutex_unlock(&priv->mutex);
9567 return ret;
9568}
9569
9570static int ipw_wx_get_encode(struct net_device *dev,
9571 struct iw_request_info *info,
9572 union iwreq_data *wrqu, char *key)
9573{
9574 struct ipw_priv *priv = ieee80211_priv(dev);
9575 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
9576}
9577
9578static int ipw_wx_set_power(struct net_device *dev,
9579 struct iw_request_info *info,
9580 union iwreq_data *wrqu, char *extra)
9581{
9582 struct ipw_priv *priv = ieee80211_priv(dev);
9583 int err;
9584 mutex_lock(&priv->mutex);
9585 if (wrqu->power.disabled) {
9586 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
9587 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
9588 if (err) {
9589 IPW_DEBUG_WX("failed setting power mode.\n");
9590 mutex_unlock(&priv->mutex);
9591 return err;
9592 }
9593 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
9594 mutex_unlock(&priv->mutex);
9595 return 0;
9596 }
9597
9598 switch (wrqu->power.flags & IW_POWER_MODE) {
9599 case IW_POWER_ON: /* If not specified */
9600 case IW_POWER_MODE: /* If set all mask */
9601 case IW_POWER_ALL_R: /* If explicitly state all */
9602 break;
9603 default: /* Otherwise we don't support it */
9604 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
9605 wrqu->power.flags);
9606 mutex_unlock(&priv->mutex);
9607 return -EOPNOTSUPP;
9608 }
9609
9610 /* If the user hasn't specified a power management mode yet, default
9611 * to BATTERY */
9612 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
9613 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
9614 else
9615 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
9616
9617 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
9618 if (err) {
9619 IPW_DEBUG_WX("failed setting power mode.\n");
9620 mutex_unlock(&priv->mutex);
9621 return err;
9622 }
9623
9624 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode);
9625 mutex_unlock(&priv->mutex);
9626 return 0;
9627}
9628
9629static int ipw_wx_get_power(struct net_device *dev,
9630 struct iw_request_info *info,
9631 union iwreq_data *wrqu, char *extra)
9632{
9633 struct ipw_priv *priv = ieee80211_priv(dev);
9634 mutex_lock(&priv->mutex);
9635 if (!(priv->power_mode & IPW_POWER_ENABLED))
9636 wrqu->power.disabled = 1;
9637 else
9638 wrqu->power.disabled = 0;
9639
9640 mutex_unlock(&priv->mutex);
9641 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
9642
9643 return 0;
9644}
9645
9646static int ipw_wx_set_powermode(struct net_device *dev,
9647 struct iw_request_info *info,
9648 union iwreq_data *wrqu, char *extra)
9649{
9650 struct ipw_priv *priv = ieee80211_priv(dev);
9651 int mode = *(int *)extra;
9652 int err;
9653
9654 mutex_lock(&priv->mutex);
9655 if ((mode < 1) || (mode > IPW_POWER_LIMIT))
9656 mode = IPW_POWER_AC;
9657
9658 if (IPW_POWER_LEVEL(priv->power_mode) != mode) {
9659 err = ipw_send_power_mode(priv, mode);
9660 if (err) {
9661 IPW_DEBUG_WX("failed setting power mode.\n");
9662 mutex_unlock(&priv->mutex);
9663 return err;
9664 }
9665 priv->power_mode = IPW_POWER_ENABLED | mode;
9666 }
9667 mutex_unlock(&priv->mutex);
9668 return 0;
9669}
9670
9671#define MAX_WX_STRING 80
9672static int ipw_wx_get_powermode(struct net_device *dev,
9673 struct iw_request_info *info,
9674 union iwreq_data *wrqu, char *extra)
9675{
9676 struct ipw_priv *priv = ieee80211_priv(dev);
9677 int level = IPW_POWER_LEVEL(priv->power_mode);
9678 char *p = extra;
9679
9680 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
9681
9682 switch (level) {
9683 case IPW_POWER_AC:
9684 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
9685 break;
9686 case IPW_POWER_BATTERY:
9687 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
9688 break;
9689 default:
9690 p += snprintf(p, MAX_WX_STRING - (p - extra),
9691 "(Timeout %dms, Period %dms)",
9692 timeout_duration[level - 1] / 1000,
9693 period_duration[level - 1] / 1000);
9694 }
9695
9696 if (!(priv->power_mode & IPW_POWER_ENABLED))
9697 p += snprintf(p, MAX_WX_STRING - (p - extra), " OFF");
9698
9699 wrqu->data.length = p - extra + 1;
9700
9701 return 0;
9702}
9703
9704static int ipw_wx_set_wireless_mode(struct net_device *dev,
9705 struct iw_request_info *info,
9706 union iwreq_data *wrqu, char *extra)
9707{
9708 struct ipw_priv *priv = ieee80211_priv(dev);
9709 int mode = *(int *)extra;
9710 u8 band = 0, modulation = 0;
9711
9712 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
9713 IPW_WARNING("Attempt to set invalid wireless mode: %d\n", mode);
9714 return -EINVAL;
9715 }
9716 mutex_lock(&priv->mutex);
9717 if (priv->adapter == IPW_2915ABG) {
9718 priv->ieee->abg_true = 1;
9719 if (mode & IEEE_A) {
9720 band |= IEEE80211_52GHZ_BAND;
9721 modulation |= IEEE80211_OFDM_MODULATION;
9722 } else
9723 priv->ieee->abg_true = 0;
9724 } else {
9725 if (mode & IEEE_A) {
9726 IPW_WARNING("Attempt to set 2200BG into "
9727 "802.11a mode\n");
9728 mutex_unlock(&priv->mutex);
9729 return -EINVAL;
9730 }
9731
9732 priv->ieee->abg_true = 0;
9733 }
9734
9735 if (mode & IEEE_B) {
9736 band |= IEEE80211_24GHZ_BAND;
9737 modulation |= IEEE80211_CCK_MODULATION;
9738 } else
9739 priv->ieee->abg_true = 0;
9740
9741 if (mode & IEEE_G) {
9742 band |= IEEE80211_24GHZ_BAND;
9743 modulation |= IEEE80211_OFDM_MODULATION;
9744 } else
9745 priv->ieee->abg_true = 0;
9746
9747 priv->ieee->mode = mode;
9748 priv->ieee->freq_band = band;
9749 priv->ieee->modulation = modulation;
9750 init_supported_rates(priv, &priv->rates);
9751
9752 /* Network configuration changed -- force [re]association */
9753 IPW_DEBUG_ASSOC("[re]association triggered due to mode change.\n");
9754 if (!ipw_disassociate(priv)) {
9755 ipw_send_supported_rates(priv, &priv->rates);
9756 ipw_associate(priv);
9757 }
9758
9759 /* Update the band LEDs */
9760 ipw_led_band_on(priv);
9761
9762 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
9763 mode & IEEE_A ? 'a' : '.',
9764 mode & IEEE_B ? 'b' : '.', mode & IEEE_G ? 'g' : '.');
9765 mutex_unlock(&priv->mutex);
9766 return 0;
9767}
9768
9769static int ipw_wx_get_wireless_mode(struct net_device *dev,
9770 struct iw_request_info *info,
9771 union iwreq_data *wrqu, char *extra)
9772{
9773 struct ipw_priv *priv = ieee80211_priv(dev);
9774 mutex_lock(&priv->mutex);
9775 switch (priv->ieee->mode) {
9776 case IEEE_A:
9777 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
9778 break;
9779 case IEEE_B:
9780 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
9781 break;
9782 case IEEE_A | IEEE_B:
9783 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
9784 break;
9785 case IEEE_G:
9786 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
9787 break;
9788 case IEEE_A | IEEE_G:
9789 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
9790 break;
9791 case IEEE_B | IEEE_G:
9792 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
9793 break;
9794 case IEEE_A | IEEE_B | IEEE_G:
9795 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
9796 break;
9797 default:
9798 strncpy(extra, "unknown", MAX_WX_STRING);
9799 break;
9800 }
9801
9802 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
9803
9804 wrqu->data.length = strlen(extra) + 1;
9805 mutex_unlock(&priv->mutex);
9806
9807 return 0;
9808}
9809
9810static int ipw_wx_set_preamble(struct net_device *dev,
9811 struct iw_request_info *info,
9812 union iwreq_data *wrqu, char *extra)
9813{
9814 struct ipw_priv *priv = ieee80211_priv(dev);
9815 int mode = *(int *)extra;
9816 mutex_lock(&priv->mutex);
9817 /* Switching from SHORT -> LONG requires a disassociation */
9818 if (mode == 1) {
9819 if (!(priv->config & CFG_PREAMBLE_LONG)) {
9820 priv->config |= CFG_PREAMBLE_LONG;
9821
9822 /* Network configuration changed -- force [re]association */
9823 IPW_DEBUG_ASSOC
9824 ("[re]association triggered due to preamble change.\n");
9825 if (!ipw_disassociate(priv))
9826 ipw_associate(priv);
9827 }
9828 goto done;
9829 }
9830
9831 if (mode == 0) {
9832 priv->config &= ~CFG_PREAMBLE_LONG;
9833 goto done;
9834 }
9835 mutex_unlock(&priv->mutex);
9836 return -EINVAL;
9837
9838 done:
9839 mutex_unlock(&priv->mutex);
9840 return 0;
9841}
9842
9843static int ipw_wx_get_preamble(struct net_device *dev,
9844 struct iw_request_info *info,
9845 union iwreq_data *wrqu, char *extra)
9846{
9847 struct ipw_priv *priv = ieee80211_priv(dev);
9848 mutex_lock(&priv->mutex);
9849 if (priv->config & CFG_PREAMBLE_LONG)
9850 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
9851 else
9852 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
9853 mutex_unlock(&priv->mutex);
9854 return 0;
9855}
9856
9857#ifdef CONFIG_IPW2200_MONITOR
9858static int ipw_wx_set_monitor(struct net_device *dev,
9859 struct iw_request_info *info,
9860 union iwreq_data *wrqu, char *extra)
9861{
9862 struct ipw_priv *priv = ieee80211_priv(dev);
9863 int *parms = (int *)extra;
9864 int enable = (parms[0] > 0);
9865 mutex_lock(&priv->mutex);
9866 IPW_DEBUG_WX("SET MONITOR: %d %d\n", enable, parms[1]);
9867 if (enable) {
9868 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9869#ifdef CONFIG_IPW2200_RADIOTAP
9870 priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
9871#else
9872 priv->net_dev->type = ARPHRD_IEEE80211;
9873#endif
9874 queue_work(priv->workqueue, &priv->adapter_restart);
9875 }
9876
9877 ipw_set_channel(priv, parms[1]);
9878 } else {
9879 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
9880 mutex_unlock(&priv->mutex);
9881 return 0;
9882 }
9883 priv->net_dev->type = ARPHRD_ETHER;
9884 queue_work(priv->workqueue, &priv->adapter_restart);
9885 }
9886 mutex_unlock(&priv->mutex);
9887 return 0;
9888}
9889
9890#endif /* CONFIG_IPW2200_MONITOR */
9891
9892static int ipw_wx_reset(struct net_device *dev,
9893 struct iw_request_info *info,
9894 union iwreq_data *wrqu, char *extra)
9895{
9896 struct ipw_priv *priv = ieee80211_priv(dev);
9897 IPW_DEBUG_WX("RESET\n");
9898 queue_work(priv->workqueue, &priv->adapter_restart);
9899 return 0;
9900}
9901
9902static int ipw_wx_sw_reset(struct net_device *dev,
9903 struct iw_request_info *info,
9904 union iwreq_data *wrqu, char *extra)
9905{
9906 struct ipw_priv *priv = ieee80211_priv(dev);
9907 union iwreq_data wrqu_sec = {
9908 .encoding = {
9909 .flags = IW_ENCODE_DISABLED,
9910 },
9911 };
9912 int ret;
9913
9914 IPW_DEBUG_WX("SW_RESET\n");
9915
9916 mutex_lock(&priv->mutex);
9917
9918 ret = ipw_sw_reset(priv, 2);
9919 if (!ret) {
9920 free_firmware();
9921 ipw_adapter_restart(priv);
9922 }
9923
9924 /* The SW reset bit might have been toggled on by the 'disable'
9925 * module parameter, so take appropriate action */
9926 ipw_radio_kill_sw(priv, priv->status & STATUS_RF_KILL_SW);
9927
9928 mutex_unlock(&priv->mutex);
9929 ieee80211_wx_set_encode(priv->ieee, info, &wrqu_sec, NULL);
9930 mutex_lock(&priv->mutex);
9931
9932 if (!(priv->status & STATUS_RF_KILL_MASK)) {
9933 /* Configuration likely changed -- force [re]association */
9934 IPW_DEBUG_ASSOC("[re]association triggered due to sw "
9935 "reset.\n");
9936 if (!ipw_disassociate(priv))
9937 ipw_associate(priv);
9938 }
9939
9940 mutex_unlock(&priv->mutex);
9941
9942 return 0;
9943}
9944
9945/* Rebase the WE IOCTLs to zero for the handler array */
9946#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
9947static iw_handler ipw_wx_handlers[] = {
9948 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
9949 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
9950 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
9951 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
9952 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
9953 IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens,
9954 IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens,
9955 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
9956 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
9957 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
9958 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
9959 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
9960 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
9961 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
9962 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
9963 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
9964 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
9965 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
9966 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
9967 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
9968 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
9969 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
9970 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
9971 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
9972 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
9973 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
9974 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
9975 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
9976 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
9977 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
9978 IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy,
9979 IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy,
9980 IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy,
9981 IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy,
9982 IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie,
9983 IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie,
9984 IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme,
9985 IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth,
9986 IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth,
9987 IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext,
9988 IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext,
9989};
9990
9991enum {
9992 IPW_PRIV_SET_POWER = SIOCIWFIRSTPRIV,
9993 IPW_PRIV_GET_POWER,
9994 IPW_PRIV_SET_MODE,
9995 IPW_PRIV_GET_MODE,
9996 IPW_PRIV_SET_PREAMBLE,
9997 IPW_PRIV_GET_PREAMBLE,
9998 IPW_PRIV_RESET,
9999 IPW_PRIV_SW_RESET,
10000#ifdef CONFIG_IPW2200_MONITOR
10001 IPW_PRIV_SET_MONITOR,
10002#endif
10003};
10004
10005static struct iw_priv_args ipw_priv_args[] = {
10006 {
10007 .cmd = IPW_PRIV_SET_POWER,
10008 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10009 .name = "set_power"},
10010 {
10011 .cmd = IPW_PRIV_GET_POWER,
10012 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10013 .name = "get_power"},
10014 {
10015 .cmd = IPW_PRIV_SET_MODE,
10016 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10017 .name = "set_mode"},
10018 {
10019 .cmd = IPW_PRIV_GET_MODE,
10020 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
10021 .name = "get_mode"},
10022 {
10023 .cmd = IPW_PRIV_SET_PREAMBLE,
10024 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
10025 .name = "set_preamble"},
10026 {
10027 .cmd = IPW_PRIV_GET_PREAMBLE,
10028 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ,
10029 .name = "get_preamble"},
10030 {
10031 IPW_PRIV_RESET,
10032 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"},
10033 {
10034 IPW_PRIV_SW_RESET,
10035 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "sw_reset"},
10036#ifdef CONFIG_IPW2200_MONITOR
10037 {
10038 IPW_PRIV_SET_MONITOR,
10039 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"},
10040#endif /* CONFIG_IPW2200_MONITOR */
10041};
10042
10043static iw_handler ipw_priv_handler[] = {
10044 ipw_wx_set_powermode,
10045 ipw_wx_get_powermode,
10046 ipw_wx_set_wireless_mode,
10047 ipw_wx_get_wireless_mode,
10048 ipw_wx_set_preamble,
10049 ipw_wx_get_preamble,
10050 ipw_wx_reset,
10051 ipw_wx_sw_reset,
10052#ifdef CONFIG_IPW2200_MONITOR
10053 ipw_wx_set_monitor,
10054#endif
10055};
10056
10057static struct iw_handler_def ipw_wx_handler_def = {
10058 .standard = ipw_wx_handlers,
10059 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
10060 .num_private = ARRAY_SIZE(ipw_priv_handler),
10061 .num_private_args = ARRAY_SIZE(ipw_priv_args),
10062 .private = ipw_priv_handler,
10063 .private_args = ipw_priv_args,
10064 .get_wireless_stats = ipw_get_wireless_stats,
10065};
10066
10067/*
10068 * Get wireless statistics.
10069 * Called by /proc/net/wireless
10070 * Also called by SIOCGIWSTATS
10071 */
10072static struct iw_statistics *ipw_get_wireless_stats(struct net_device *dev)
10073{
10074 struct ipw_priv *priv = ieee80211_priv(dev);
10075 struct iw_statistics *wstats;
10076
10077 wstats = &priv->wstats;
10078
10079 /* if hw is disabled, then ipw_get_ordinal() can't be called.
10080 * netdev->get_wireless_stats seems to be called before fw is
10081 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
10082	 * and associated; if not associated, the values are all meaningless
10083	 * anyway, so zero them and flag them as INVALID */
10084 if (!(priv->status & STATUS_ASSOCIATED)) {
10085 wstats->miss.beacon = 0;
10086 wstats->discard.retries = 0;
10087 wstats->qual.qual = 0;
10088 wstats->qual.level = 0;
10089 wstats->qual.noise = 0;
10090 wstats->qual.updated = 7;
10091 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
10092 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
10093 return wstats;
10094 }
10095
10096 wstats->qual.qual = priv->quality;
10097 wstats->qual.level = priv->exp_avg_rssi;
10098 wstats->qual.noise = priv->exp_avg_noise;
10099 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
10100 IW_QUAL_NOISE_UPDATED | IW_QUAL_DBM;
10101
10102 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
10103 wstats->discard.retries = priv->last_tx_failures;
10104 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
10105
10106/* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
10107 goto fail_get_ordinal;
10108 wstats->discard.retries += tx_retry; */
10109
10110 return wstats;
10111}
10112
10113/* net device stuff */
10114
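/* Fill the host->firmware system configuration block with driver defaults;
 * an out-of-range 'antenna' module parameter falls back to
 * CFG_SYS_ANTENNA_BOTH. */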
10115static void init_sys_config(struct ipw_sys_config *sys_config)
10116{
10117 memset(sys_config, 0, sizeof(struct ipw_sys_config));
10118 sys_config->bt_coexistence = 0;
10119 sys_config->answer_broadcast_ssid_probe = 0;
10120 sys_config->accept_all_data_frames = 0;
10121 sys_config->accept_non_directed_frames = 1;
10122 sys_config->exclude_unicast_unencrypted = 0;
10123 sys_config->disable_unicast_decryption = 1;
10124 sys_config->exclude_multicast_unencrypted = 0;
10125 sys_config->disable_multicast_decryption = 1;
10126 if (antenna < CFG_SYS_ANTENNA_BOTH || antenna > CFG_SYS_ANTENNA_B)
10127 antenna = CFG_SYS_ANTENNA_BOTH;
10128 sys_config->antenna_diversity = antenna;
10129 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
10130 sys_config->dot11g_auto_detection = 0;
10131 sys_config->enable_cts_to_self = 0;
10132 sys_config->bt_coexist_collision_thr = 0;
10133 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10134 sys_config->silence_threshold = 0x1e;
10135}
10136
10137static int ipw_net_open(struct net_device *dev)
10138{
10139 IPW_DEBUG_INFO("dev->open\n");
10140 netif_start_queue(dev);
10141 return 0;
10142}
10143
10144static int ipw_net_stop(struct net_device *dev)
10145{
10146 IPW_DEBUG_INFO("dev->close\n");
10147 netif_stop_queue(dev);
10148 return 0;
10149}
10150
10151/*
10152 * TODO:
10153 *
10154 * Modify to send one TFD per fragment instead of using chunking;
10155 * otherwise we need to heavily modify ieee80211_skb_to_txb().
10156 */
10157
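/* Build a TX frame descriptor (TFD) for the fragments in @txb: resolve the
 * destination station, copy in the 802.11 header, set rate/preamble/security
 * flags, DMA-map each fragment as a chunk, then advance the queue write
 * index and kick the DMA engine. */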
10158static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
10159 int pri)
10160{
10161 struct ieee80211_hdr_3addrqos *hdr = (struct ieee80211_hdr_3addrqos *)
10162 txb->fragments[0]->data;
10163 int i = 0;
10164 struct tfd_frame *tfd;
10165#ifdef CONFIG_IPW2200_QOS
10166 int tx_id = ipw_get_tx_queue_number(priv, pri);
10167 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10168#else
10169 struct clx2_tx_queue *txq = &priv->txq[0];
10170#endif
10171 struct clx2_queue *q = &txq->q;
10172 u8 id, hdr_len, unicast;
10173 u16 remaining_bytes;
10174 int fc;
10175
10176 hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
10177 switch (priv->ieee->iw_mode) {
10178 case IW_MODE_ADHOC:
10179 unicast = !is_multicast_ether_addr(hdr->addr1);
10180 id = ipw_find_station(priv, hdr->addr1);
10181 if (id == IPW_INVALID_STATION) {
10182 id = ipw_add_station(priv, hdr->addr1);
10183 if (id == IPW_INVALID_STATION) {
10184 IPW_WARNING("Attempt to send data to "
10185 "invalid cell: %pM\n",
10186 hdr->addr1);
10187 goto drop;
10188 }
10189 }
10190 break;
10191
10192 case IW_MODE_INFRA:
10193 default:
10194 unicast = !is_multicast_ether_addr(hdr->addr3);
10195 id = 0;
10196 break;
10197 }
10198
10199 tfd = &txq->bd[q->first_empty];
10200 txq->txb[q->first_empty] = txb;
10201 memset(tfd, 0, sizeof(*tfd));
10202 tfd->u.data.station_number = id;
10203
10204 tfd->control_flags.message_type = TX_FRAME_TYPE;
10205 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
10206
10207 tfd->u.data.cmd_id = DINO_CMD_TX;
10208 tfd->u.data.len = cpu_to_le16(txb->payload_size);
10209 remaining_bytes = txb->payload_size;
10210
10211 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
10212 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_CCK;
10213 else
10214 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_MODE_OFDM;
10215
10216 if (priv->assoc_request.preamble_length == DCT_FLAG_SHORT_PREAMBLE)
10217 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREAMBLE;
10218
10219 fc = le16_to_cpu(hdr->frame_ctl);
10220 hdr->frame_ctl = cpu_to_le16(fc & ~IEEE80211_FCTL_MOREFRAGS);
10221
10222 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
10223
10224 if (likely(unicast))
10225 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10226
10227 if (txb->encrypted && !priv->ieee->host_encrypt) {
10228 switch (priv->ieee->sec.level) {
10229 case SEC_LEVEL_3:
10230 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10231 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10232 /* XXX: ACK flag must be set for CCMP even if it
10233 * is a multicast/broadcast packet, because CCMP
10234 * group communication encrypted by GTK is
10235 * actually done by the AP. */
10236 if (!unicast)
10237 tfd->u.data.tx_flags |= DCT_FLAG_ACK_REQD;
10238
10239 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10240 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_CCM;
10241 tfd->u.data.key_index = 0;
10242 tfd->u.data.key_index |= DCT_WEP_INDEX_USE_IMMEDIATE;
10243 break;
10244 case SEC_LEVEL_2:
10245 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10246 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10247 tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
10248 tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
10249 tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
10250 break;
10251 case SEC_LEVEL_1:
10252 tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
10253 cpu_to_le16(IEEE80211_FCTL_PROTECTED);
10254 tfd->u.data.key_index = priv->ieee->crypt_info.tx_keyidx;
10255 if (priv->ieee->sec.key_sizes[priv->ieee->crypt_info.tx_keyidx] <=
10256 40)
10257 tfd->u.data.key_index |= DCT_WEP_KEY_64Bit;
10258 else
10259 tfd->u.data.key_index |= DCT_WEP_KEY_128Bit;
10260 break;
10261 case SEC_LEVEL_0:
10262 break;
10263 default:
10264			printk(KERN_ERR "Unknown security level %d\n",
10265 priv->ieee->sec.level);
10266 break;
10267 }
10268 } else
10269 /* No hardware encryption */
10270 tfd->u.data.tx_flags |= DCT_FLAG_NO_WEP;
10271
10272#ifdef CONFIG_IPW2200_QOS
10273 if (fc & IEEE80211_STYPE_QOS_DATA)
10274 ipw_qos_set_tx_queue_command(priv, pri, &(tfd->u.data));
10275#endif /* CONFIG_IPW2200_QOS */
10276
10277 /* payload */
10278 tfd->u.data.num_chunks = cpu_to_le32(min((u8) (NUM_TFD_CHUNKS - 2),
10279 txb->nr_frags));
10280 IPW_DEBUG_FRAG("%i fragments being sent as %i chunks.\n",
10281 txb->nr_frags, le32_to_cpu(tfd->u.data.num_chunks));
10282 for (i = 0; i < le32_to_cpu(tfd->u.data.num_chunks); i++) {
10283 IPW_DEBUG_FRAG("Adding fragment %i of %i (%d bytes).\n",
10284 i, le32_to_cpu(tfd->u.data.num_chunks),
10285 txb->fragments[i]->len - hdr_len);
10286 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
10287 i, tfd->u.data.num_chunks,
10288 txb->fragments[i]->len - hdr_len);
10289 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
10290 txb->fragments[i]->len - hdr_len);
10291
10292 tfd->u.data.chunk_ptr[i] =
10293 cpu_to_le32(pci_map_single
10294 (priv->pci_dev,
10295 txb->fragments[i]->data + hdr_len,
10296 txb->fragments[i]->len - hdr_len,
10297 PCI_DMA_TODEVICE));
10298 tfd->u.data.chunk_len[i] =
10299 cpu_to_le16(txb->fragments[i]->len - hdr_len);
10300 }
10301
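	/* Fragments that did not fit into the available TFD chunk slots are
	 * copied into one newly allocated skb and sent as the final chunk. */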
10302 if (i != txb->nr_frags) {
10303 struct sk_buff *skb;
10304 u16 remaining_bytes = 0;
10305 int j;
10306
10307 for (j = i; j < txb->nr_frags; j++)
10308 remaining_bytes += txb->fragments[j]->len - hdr_len;
10309
10310 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
10311 remaining_bytes);
10312 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
10313 if (skb != NULL) {
10314 tfd->u.data.chunk_len[i] = cpu_to_le16(remaining_bytes);
10315 for (j = i; j < txb->nr_frags; j++) {
10316 int size = txb->fragments[j]->len - hdr_len;
10317
10318 printk(KERN_INFO "Adding frag %d %d...\n",
10319 j, size);
10320 memcpy(skb_put(skb, size),
10321 txb->fragments[j]->data + hdr_len, size);
10322 }
10323 dev_kfree_skb_any(txb->fragments[i]);
10324 txb->fragments[i] = skb;
10325 tfd->u.data.chunk_ptr[i] =
10326 cpu_to_le32(pci_map_single
10327 (priv->pci_dev, skb->data,
10328 remaining_bytes,
10329 PCI_DMA_TODEVICE));
10330
10331 le32_add_cpu(&tfd->u.data.num_chunks, 1);
10332 }
10333 }
10334
10335 /* kick DMA */
10336 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
10337 ipw_write32(priv, q->reg_w, q->first_empty);
10338
10339 if (ipw_tx_queue_space(q) < q->high_mark)
10340 netif_stop_queue(priv->net_dev);
10341
10342 return NETDEV_TX_OK;
10343
10344 drop:
10345 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
10346 ieee80211_txb_free(txb);
10347 return NETDEV_TX_OK;
10348}
10349
10350static int ipw_net_is_queue_full(struct net_device *dev, int pri)
10351{
10352 struct ipw_priv *priv = ieee80211_priv(dev);
10353#ifdef CONFIG_IPW2200_QOS
10354 int tx_id = ipw_get_tx_queue_number(priv, pri);
10355 struct clx2_tx_queue *txq = &priv->txq[tx_id];
10356#else
10357 struct clx2_tx_queue *txq = &priv->txq[0];
10358#endif /* CONFIG_IPW2200_QOS */
10359
10360 if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
10361 return 1;
10362
10363 return 0;
10364}
10365
10366#ifdef CONFIG_IPW2200_PROMISCUOUS
10367static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
10368 struct ieee80211_txb *txb)
10369{
10370 struct ieee80211_rx_stats dummystats;
10371 struct ieee80211_hdr *hdr;
10372 u8 n;
10373 u16 filter = priv->prom_priv->filter;
10374 int hdr_only = 0;
10375
10376 if (filter & IPW_PROM_NO_TX)
10377 return;
10378
10379 memset(&dummystats, 0, sizeof(dummystats));
10380
10381	/* Filtering of fragment chains is done against the first fragment */
10382 hdr = (void *)txb->fragments[0]->data;
10383 if (ieee80211_is_management(le16_to_cpu(hdr->frame_control))) {
10384 if (filter & IPW_PROM_NO_MGMT)
10385 return;
10386 if (filter & IPW_PROM_MGMT_HEADER_ONLY)
10387 hdr_only = 1;
10388 } else if (ieee80211_is_control(le16_to_cpu(hdr->frame_control))) {
10389 if (filter & IPW_PROM_NO_CTL)
10390 return;
10391 if (filter & IPW_PROM_CTL_HEADER_ONLY)
10392 hdr_only = 1;
10393 } else if (ieee80211_is_data(le16_to_cpu(hdr->frame_control))) {
10394 if (filter & IPW_PROM_NO_DATA)
10395 return;
10396 if (filter & IPW_PROM_DATA_HEADER_ONLY)
10397 hdr_only = 1;
10398 }
10399
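	/* Duplicate each fragment (or only its 802.11 header, per the filter),
	 * prepend a minimal radiotap header carrying just the channel, and
	 * feed the copy to the promiscuous rtap interface. */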
10400 for(n=0; n<txb->nr_frags; ++n) {
10401 struct sk_buff *src = txb->fragments[n];
10402 struct sk_buff *dst;
10403 struct ieee80211_radiotap_header *rt_hdr;
10404 int len;
10405
10406 if (hdr_only) {
10407 hdr = (void *)src->data;
10408 len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
10409 } else
10410 len = src->len;
10411
10412 dst = alloc_skb(
10413 len + IEEE80211_RADIOTAP_HDRLEN, GFP_ATOMIC);
10414 if (!dst) continue;
10415
10416 rt_hdr = (void *)skb_put(dst, sizeof(*rt_hdr));
10417
10418 rt_hdr->it_version = PKTHDR_RADIOTAP_VERSION;
10419 rt_hdr->it_pad = 0;
10420 rt_hdr->it_present = 0; /* after all, it's just an idea */
10421 rt_hdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_CHANNEL);
10422
10423 *(__le16*)skb_put(dst, sizeof(u16)) = cpu_to_le16(
10424 ieee80211chan2mhz(priv->channel));
10425 if (priv->channel > 14) /* 802.11a */
10426 *(__le16*)skb_put(dst, sizeof(u16)) =
10427 cpu_to_le16(IEEE80211_CHAN_OFDM |
10428 IEEE80211_CHAN_5GHZ);
10429 else if (priv->ieee->mode == IEEE_B) /* 802.11b */
10430 *(__le16*)skb_put(dst, sizeof(u16)) =
10431 cpu_to_le16(IEEE80211_CHAN_CCK |
10432 IEEE80211_CHAN_2GHZ);
10433 else /* 802.11g */
10434 *(__le16*)skb_put(dst, sizeof(u16)) =
10435 cpu_to_le16(IEEE80211_CHAN_OFDM |
10436 IEEE80211_CHAN_2GHZ);
10437
10438 rt_hdr->it_len = cpu_to_le16(dst->len);
10439
10440 skb_copy_from_linear_data(src, skb_put(dst, len), len);
10441
10442 if (!ieee80211_rx(priv->prom_priv->ieee, dst, &dummystats))
10443 dev_kfree_skb_any(dst);
10444 }
10445}
10446#endif
10447
10448static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
10449 struct net_device *dev, int pri)
10450{
10451 struct ipw_priv *priv = ieee80211_priv(dev);
10452 unsigned long flags;
10453 int ret;
10454
10455 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
10456 spin_lock_irqsave(&priv->lock, flags);
10457
10458#ifdef CONFIG_IPW2200_PROMISCUOUS
10459 if (rtap_iface && netif_running(priv->prom_net_dev))
10460 ipw_handle_promiscuous_tx(priv, txb);
10461#endif
10462
10463 ret = ipw_tx_skb(priv, txb, pri);
10464 if (ret == NETDEV_TX_OK)
10465 __ipw_led_activity_on(priv);
10466 spin_unlock_irqrestore(&priv->lock, flags);
10467
10468 return ret;
10469}
10470
10471static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
10472{
10473 struct ipw_priv *priv = ieee80211_priv(dev);
10474
10475 priv->ieee->stats.tx_packets = priv->tx_packets;
10476 priv->ieee->stats.rx_packets = priv->rx_packets;
10477 return &priv->ieee->stats;
10478}
10479
10480static void ipw_net_set_multicast_list(struct net_device *dev)
10481{
10482
10483}
10484
10485static int ipw_net_set_mac_address(struct net_device *dev, void *p)
10486{
10487 struct ipw_priv *priv = ieee80211_priv(dev);
10488 struct sockaddr *addr = p;
10489
10490 if (!is_valid_ether_addr(addr->sa_data))
10491 return -EADDRNOTAVAIL;
10492 mutex_lock(&priv->mutex);
10493 priv->config |= CFG_CUSTOM_MAC;
10494 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
10495 printk(KERN_INFO "%s: Setting MAC to %pM\n",
10496 priv->net_dev->name, priv->mac_addr);
10497 queue_work(priv->workqueue, &priv->adapter_restart);
10498 mutex_unlock(&priv->mutex);
10499 return 0;
10500}
10501
10502static void ipw_ethtool_get_drvinfo(struct net_device *dev,
10503 struct ethtool_drvinfo *info)
10504{
10505 struct ipw_priv *p = ieee80211_priv(dev);
10506 char vers[64];
10507 char date[32];
10508 u32 len;
10509
10510 strcpy(info->driver, DRV_NAME);
10511 strcpy(info->version, DRV_VERSION);
10512
10513 len = sizeof(vers);
10514 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
10515 len = sizeof(date);
10516 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
10517
10518 snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
10519 vers, date);
10520 strcpy(info->bus_info, pci_name(p->pci_dev));
10521 info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
10522}
10523
10524static u32 ipw_ethtool_get_link(struct net_device *dev)
10525{
10526 struct ipw_priv *priv = ieee80211_priv(dev);
10527 return (priv->status & STATUS_ASSOCIATED) != 0;
10528}
10529
10530static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
10531{
10532 return IPW_EEPROM_IMAGE_SIZE;
10533}
10534
10535static int ipw_ethtool_get_eeprom(struct net_device *dev,
10536 struct ethtool_eeprom *eeprom, u8 * bytes)
10537{
10538 struct ipw_priv *p = ieee80211_priv(dev);
10539
10540 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10541 return -EINVAL;
10542 mutex_lock(&p->mutex);
10543 memcpy(bytes, &p->eeprom[eeprom->offset], eeprom->len);
10544 mutex_unlock(&p->mutex);
10545 return 0;
10546}
10547
10548static int ipw_ethtool_set_eeprom(struct net_device *dev,
10549 struct ethtool_eeprom *eeprom, u8 * bytes)
10550{
10551 struct ipw_priv *p = ieee80211_priv(dev);
10552 int i;
10553
10554 if (eeprom->offset + eeprom->len > IPW_EEPROM_IMAGE_SIZE)
10555 return -EINVAL;
10556 mutex_lock(&p->mutex);
10557 memcpy(&p->eeprom[eeprom->offset], bytes, eeprom->len);
10558 for (i = 0; i < IPW_EEPROM_IMAGE_SIZE; i++)
10559 ipw_write8(p, i + IPW_EEPROM_DATA, p->eeprom[i]);
10560 mutex_unlock(&p->mutex);
10561 return 0;
10562}
10563
10564static const struct ethtool_ops ipw_ethtool_ops = {
10565 .get_link = ipw_ethtool_get_link,
10566 .get_drvinfo = ipw_ethtool_get_drvinfo,
10567 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
10568 .get_eeprom = ipw_ethtool_get_eeprom,
10569 .set_eeprom = ipw_ethtool_set_eeprom,
10570};
10571
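/* Hard interrupt handler: acknowledge the asserted INTA bits, mask further
 * interrupts, and defer the real work to the irq tasklet. */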
10572static irqreturn_t ipw_isr(int irq, void *data)
10573{
10574 struct ipw_priv *priv = data;
10575 u32 inta, inta_mask;
10576
10577 if (!priv)
10578 return IRQ_NONE;
10579
10580 spin_lock(&priv->irq_lock);
10581
10582 if (!(priv->status & STATUS_INT_ENABLED)) {
10583 /* IRQ is disabled */
10584 goto none;
10585 }
10586
10587 inta = ipw_read32(priv, IPW_INTA_RW);
10588 inta_mask = ipw_read32(priv, IPW_INTA_MASK_R);
10589
10590 if (inta == 0xFFFFFFFF) {
10591 /* Hardware disappeared */
10592 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
10593 goto none;
10594 }
10595
10596 if (!(inta & (IPW_INTA_MASK_ALL & inta_mask))) {
10597 /* Shared interrupt */
10598 goto none;
10599 }
10600
10601 /* tell the device to stop sending interrupts */
10602 __ipw_disable_interrupts(priv);
10603
10604 /* ack current interrupts */
10605 inta &= (IPW_INTA_MASK_ALL & inta_mask);
10606 ipw_write32(priv, IPW_INTA_RW, inta);
10607
10608 /* Cache INTA value for our tasklet */
10609 priv->isr_inta = inta;
10610
10611 tasklet_schedule(&priv->irq_tasklet);
10612
10613 spin_unlock(&priv->irq_lock);
10614
10615 return IRQ_HANDLED;
10616 none:
10617 spin_unlock(&priv->irq_lock);
10618 return IRQ_NONE;
10619}
10620
10621static void ipw_rf_kill(void *adapter)
10622{
10623 struct ipw_priv *priv = adapter;
10624 unsigned long flags;
10625
10626 spin_lock_irqsave(&priv->lock, flags);
10627
10628 if (rf_kill_active(priv)) {
10629 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
10630 if (priv->workqueue)
10631 queue_delayed_work(priv->workqueue,
10632 &priv->rf_kill, 2 * HZ);
10633 goto exit_unlock;
10634 }
10635
10636 /* RF Kill is now disabled, so bring the device back up */
10637
10638 if (!(priv->status & STATUS_RF_KILL_MASK)) {
10639 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
10640 "device\n");
10641
10642		/* we cannot do an adapter restart while inside an irq lock */
10643 queue_work(priv->workqueue, &priv->adapter_restart);
10644 } else
10645 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
10646 "enabled\n");
10647
10648 exit_unlock:
10649 spin_unlock_irqrestore(&priv->lock, flags);
10650}
10651
10652static void ipw_bg_rf_kill(struct work_struct *work)
10653{
10654 struct ipw_priv *priv =
10655 container_of(work, struct ipw_priv, rf_kill.work);
10656 mutex_lock(&priv->mutex);
10657 ipw_rf_kill(priv);
10658 mutex_unlock(&priv->mutex);
10659}
10660
10661static void ipw_link_up(struct ipw_priv *priv)
10662{
10663 priv->last_seq_num = -1;
10664 priv->last_frag_num = -1;
10665 priv->last_packet_time = 0;
10666
10667 netif_carrier_on(priv->net_dev);
10668
10669 cancel_delayed_work(&priv->request_scan);
10670 cancel_delayed_work(&priv->request_direct_scan);
10671 cancel_delayed_work(&priv->request_passive_scan);
10672 cancel_delayed_work(&priv->scan_event);
10673 ipw_reset_stats(priv);
10674 /* Ensure the rate is updated immediately */
10675 priv->last_rate = ipw_get_current_rate(priv);
10676 ipw_gather_stats(priv);
10677 ipw_led_link_up(priv);
10678 notify_wx_assoc_event(priv);
10679
10680 if (priv->config & CFG_BACKGROUND_SCAN)
10681 queue_delayed_work(priv->workqueue, &priv->request_scan, HZ);
10682}
10683
10684static void ipw_bg_link_up(struct work_struct *work)
10685{
10686 struct ipw_priv *priv =
10687 container_of(work, struct ipw_priv, link_up);
10688 mutex_lock(&priv->mutex);
10689 ipw_link_up(priv);
10690 mutex_unlock(&priv->mutex);
10691}
10692
10693static void ipw_link_down(struct ipw_priv *priv)
10694{
10695 ipw_led_link_down(priv);
10696 netif_carrier_off(priv->net_dev);
10697 notify_wx_assoc_event(priv);
10698
10699 /* Cancel any queued work ... */
10700 cancel_delayed_work(&priv->request_scan);
10701 cancel_delayed_work(&priv->request_direct_scan);
10702 cancel_delayed_work(&priv->request_passive_scan);
10703 cancel_delayed_work(&priv->adhoc_check);
10704 cancel_delayed_work(&priv->gather_stats);
10705
10706 ipw_reset_stats(priv);
10707
10708 if (!(priv->status & STATUS_EXIT_PENDING)) {
10709 /* Queue up another scan... */
10710 queue_delayed_work(priv->workqueue, &priv->request_scan, 0);
10711 } else
10712 cancel_delayed_work(&priv->scan_event);
10713}
10714
10715static void ipw_bg_link_down(struct work_struct *work)
10716{
10717 struct ipw_priv *priv =
10718 container_of(work, struct ipw_priv, link_down);
10719 mutex_lock(&priv->mutex);
10720 ipw_link_down(priv);
10721 mutex_unlock(&priv->mutex);
10722}
10723
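/* Create the driver workqueue and wire up all deferred work items and the
 * interrupt tasklet. */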
10724static int __devinit ipw_setup_deferred_work(struct ipw_priv *priv)
10725{
10726 int ret = 0;
10727
10728 priv->workqueue = create_workqueue(DRV_NAME);
10729 init_waitqueue_head(&priv->wait_command_queue);
10730 init_waitqueue_head(&priv->wait_state);
10731
10732 INIT_DELAYED_WORK(&priv->adhoc_check, ipw_bg_adhoc_check);
10733 INIT_WORK(&priv->associate, ipw_bg_associate);
10734 INIT_WORK(&priv->disassociate, ipw_bg_disassociate);
10735 INIT_WORK(&priv->system_config, ipw_system_config);
10736 INIT_WORK(&priv->rx_replenish, ipw_bg_rx_queue_replenish);
10737 INIT_WORK(&priv->adapter_restart, ipw_bg_adapter_restart);
10738 INIT_DELAYED_WORK(&priv->rf_kill, ipw_bg_rf_kill);
10739 INIT_WORK(&priv->up, ipw_bg_up);
10740 INIT_WORK(&priv->down, ipw_bg_down);
10741 INIT_DELAYED_WORK(&priv->request_scan, ipw_request_scan);
10742 INIT_DELAYED_WORK(&priv->request_direct_scan, ipw_request_direct_scan);
10743 INIT_DELAYED_WORK(&priv->request_passive_scan, ipw_request_passive_scan);
10744 INIT_DELAYED_WORK(&priv->scan_event, ipw_scan_event);
10745 INIT_DELAYED_WORK(&priv->gather_stats, ipw_bg_gather_stats);
10746 INIT_WORK(&priv->abort_scan, ipw_bg_abort_scan);
10747 INIT_WORK(&priv->roam, ipw_bg_roam);
10748 INIT_DELAYED_WORK(&priv->scan_check, ipw_bg_scan_check);
10749 INIT_WORK(&priv->link_up, ipw_bg_link_up);
10750 INIT_WORK(&priv->link_down, ipw_bg_link_down);
10751 INIT_DELAYED_WORK(&priv->led_link_on, ipw_bg_led_link_on);
10752 INIT_DELAYED_WORK(&priv->led_link_off, ipw_bg_led_link_off);
10753 INIT_DELAYED_WORK(&priv->led_act_off, ipw_bg_led_activity_off);
10754 INIT_WORK(&priv->merge_networks, ipw_merge_adhoc_network);
10755
10756#ifdef CONFIG_IPW2200_QOS
10757 INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
10758#endif /* CONFIG_IPW2200_QOS */
10759
10760 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
10761 ipw_irq_tasklet, (unsigned long)priv);
10762
10763 return ret;
10764}
10765
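/* Callback from the ieee80211 layer: mirror the supplied security settings
 * into priv->ieee->sec, update the privacy/shared-key capability bits, and
 * set STATUS_SECURITY_UPDATED so the new settings take effect. */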
10766static void shim__set_security(struct net_device *dev,
10767 struct ieee80211_security *sec)
10768{
10769 struct ipw_priv *priv = ieee80211_priv(dev);
10770 int i;
10771 for (i = 0; i < 4; i++) {
10772 if (sec->flags & (1 << i)) {
10773 priv->ieee->sec.encode_alg[i] = sec->encode_alg[i];
10774 priv->ieee->sec.key_sizes[i] = sec->key_sizes[i];
10775 if (sec->key_sizes[i] == 0)
10776 priv->ieee->sec.flags &= ~(1 << i);
10777 else {
10778 memcpy(priv->ieee->sec.keys[i], sec->keys[i],
10779 sec->key_sizes[i]);
10780 priv->ieee->sec.flags |= (1 << i);
10781 }
10782 priv->status |= STATUS_SECURITY_UPDATED;
10783 } else if (sec->level != SEC_LEVEL_1)
10784 priv->ieee->sec.flags &= ~(1 << i);
10785 }
10786
10787 if (sec->flags & SEC_ACTIVE_KEY) {
10788 if (sec->active_key <= 3) {
10789 priv->ieee->sec.active_key = sec->active_key;
10790 priv->ieee->sec.flags |= SEC_ACTIVE_KEY;
10791 } else
10792 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10793 priv->status |= STATUS_SECURITY_UPDATED;
10794 } else
10795 priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY;
10796
10797 if ((sec->flags & SEC_AUTH_MODE) &&
10798 (priv->ieee->sec.auth_mode != sec->auth_mode)) {
10799 priv->ieee->sec.auth_mode = sec->auth_mode;
10800 priv->ieee->sec.flags |= SEC_AUTH_MODE;
10801 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
10802 priv->capability |= CAP_SHARED_KEY;
10803 else
10804 priv->capability &= ~CAP_SHARED_KEY;
10805 priv->status |= STATUS_SECURITY_UPDATED;
10806 }
10807
10808 if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) {
10809 priv->ieee->sec.flags |= SEC_ENABLED;
10810 priv->ieee->sec.enabled = sec->enabled;
10811 priv->status |= STATUS_SECURITY_UPDATED;
10812 if (sec->enabled)
10813 priv->capability |= CAP_PRIVACY_ON;
10814 else
10815 priv->capability &= ~CAP_PRIVACY_ON;
10816 }
10817
10818 if (sec->flags & SEC_ENCRYPT)
10819 priv->ieee->sec.encrypt = sec->encrypt;
10820
10821 if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) {
10822 priv->ieee->sec.level = sec->level;
10823 priv->ieee->sec.flags |= SEC_LEVEL;
10824 priv->status |= STATUS_SECURITY_UPDATED;
10825 }
10826
10827 if (!priv->ieee->host_encrypt && (sec->flags & SEC_ENCRYPT))
10828 ipw_set_hwcrypto_keys(priv);
10829
10830	/* To match the current functionality of ipw2100 (which works well with
10831	 * various supplicants), we don't force a disassociation if the
10832	 * privacy capability changes ... */
10833#if 0
10834 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
10835 (((priv->assoc_request.capability &
10836 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && !sec->enabled) ||
10837 (!(priv->assoc_request.capability &
10838 cpu_to_le16(WLAN_CAPABILITY_PRIVACY)) && sec->enabled))) {
10839 IPW_DEBUG_ASSOC("Disassociating due to capability "
10840 "change.\n");
10841 ipw_disassociate(priv);
10842 }
10843#endif
10844}
10845
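/* Build the supported-rates element sent to the firmware, based on the
 * configured frequency band and modulation. */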
10846static int init_supported_rates(struct ipw_priv *priv,
10847 struct ipw_supported_rates *rates)
10848{
10849 /* TODO: Mask out rates based on priv->rates_mask */
10850
10851 memset(rates, 0, sizeof(*rates));
10852 /* configure supported rates */
10853 switch (priv->ieee->freq_band) {
10854 case IEEE80211_52GHZ_BAND:
10855 rates->ieee_mode = IPW_A_MODE;
10856 rates->purpose = IPW_RATE_CAPABILITIES;
10857 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10858 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10859 break;
10860
10861 default: /* Mixed or 2.4Ghz */
10862 rates->ieee_mode = IPW_G_MODE;
10863 rates->purpose = IPW_RATE_CAPABILITIES;
10864 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
10865 IEEE80211_CCK_DEFAULT_RATES_MASK);
10866 if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
10867 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
10868 IEEE80211_OFDM_DEFAULT_RATES_MASK);
10869 }
10870 break;
10871 }
10872
10873 return 0;
10874}
10875
10876static int ipw_config(struct ipw_priv *priv)
10877{
10878	/* This is only called from ipw_up, which resets/reloads the firmware,
10879	   so we don't need to first disable the card before we configure
10880	   it */
10881 if (ipw_set_tx_power(priv))
10882 goto error;
10883
10884 /* initialize adapter address */
10885 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
10886 goto error;
10887
10888 /* set basic system config settings */
10889 init_sys_config(&priv->sys_config);
10890
10891	/* Support Bluetooth if we have BT h/w on board and the user wants it.
10892	 * Does not support BT priority yet (don't abort or defer our Tx) */
10893 if (bt_coexist) {
10894 unsigned char bt_caps = priv->eeprom[EEPROM_SKU_CAPABILITY];
10895
10896 if (bt_caps & EEPROM_SKU_CAP_BT_CHANNEL_SIG)
10897 priv->sys_config.bt_coexistence
10898 |= CFG_BT_COEXISTENCE_SIGNAL_CHNL;
10899 if (bt_caps & EEPROM_SKU_CAP_BT_OOB)
10900 priv->sys_config.bt_coexistence
10901 |= CFG_BT_COEXISTENCE_OOB;
10902 }
10903
10904#ifdef CONFIG_IPW2200_PROMISCUOUS
10905 if (priv->prom_net_dev && netif_running(priv->prom_net_dev)) {
10906 priv->sys_config.accept_all_data_frames = 1;
10907 priv->sys_config.accept_non_directed_frames = 1;
10908 priv->sys_config.accept_all_mgmt_bcpr = 1;
10909 priv->sys_config.accept_all_mgmt_frames = 1;
10910 }
10911#endif
10912
10913 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
10914 priv->sys_config.answer_broadcast_ssid_probe = 1;
10915 else
10916 priv->sys_config.answer_broadcast_ssid_probe = 0;
10917
10918 if (ipw_send_system_config(priv))
10919 goto error;
10920
10921 init_supported_rates(priv, &priv->rates);
10922 if (ipw_send_supported_rates(priv, &priv->rates))
10923 goto error;
10924
10925 /* Set request-to-send threshold */
10926 if (priv->rts_threshold) {
10927 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
10928 goto error;
10929 }
10930#ifdef CONFIG_IPW2200_QOS
10931 IPW_DEBUG_QOS("QoS: call ipw_qos_activate\n");
10932 ipw_qos_activate(priv, NULL);
10933#endif /* CONFIG_IPW2200_QOS */
10934
10935 if (ipw_set_random_seed(priv))
10936 goto error;
10937
10938 /* final state transition to the RUN state */
10939 if (ipw_send_host_complete(priv))
10940 goto error;
10941
10942 priv->status |= STATUS_INIT;
10943
10944 ipw_led_init(priv);
10945 ipw_led_radio_on(priv);
10946 priv->notif_missed_beacons = 0;
10947
10948 /* Set hardware WEP key if it is configured. */
10949 if ((priv->capability & CAP_PRIVACY_ON) &&
10950 (priv->ieee->sec.level == SEC_LEVEL_1) &&
10951 !(priv->ieee->host_encrypt || priv->ieee->host_decrypt))
10952 ipw_set_hwcrypto_keys(priv);
10953
10954 return 0;
10955
10956 error:
10957 return -EIO;
10958}
10959
10960/*
10961 * NOTE:
10962 *
10963 * These tables have been tested in conjunction with the
10964 * Intel PRO/Wireless 2200BG and 2915ABG Network Connection Adapters.
10965 *
10966 * Altering these values, using them on other hardware, or using them in
10967 * geographies not intended for resale of the above-mentioned Intel adapters
10968 * has not been tested.
10969 *
10970 * Remember to update the table in README.ipw2200 when changing this
10971 * table.
10972 *
10973 */
10974static const struct ieee80211_geo ipw_geos[] = {
10975 { /* Restricted */
10976 "---",
10977 .bg_channels = 11,
10978 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10979 {2427, 4}, {2432, 5}, {2437, 6},
10980 {2442, 7}, {2447, 8}, {2452, 9},
10981 {2457, 10}, {2462, 11}},
10982 },
10983
10984 { /* Custom US/Canada */
10985 "ZZF",
10986 .bg_channels = 11,
10987 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
10988 {2427, 4}, {2432, 5}, {2437, 6},
10989 {2442, 7}, {2447, 8}, {2452, 9},
10990 {2457, 10}, {2462, 11}},
10991 .a_channels = 8,
10992 .a = {{5180, 36},
10993 {5200, 40},
10994 {5220, 44},
10995 {5240, 48},
10996 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
10997 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
10998 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
10999 {5320, 64, IEEE80211_CH_PASSIVE_ONLY}},
11000 },
11001
11002 { /* Rest of World */
11003 "ZZD",
11004 .bg_channels = 13,
11005 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11006 {2427, 4}, {2432, 5}, {2437, 6},
11007 {2442, 7}, {2447, 8}, {2452, 9},
11008 {2457, 10}, {2462, 11}, {2467, 12},
11009 {2472, 13}},
11010 },
11011
11012 { /* Custom USA & Europe & High */
11013 "ZZA",
11014 .bg_channels = 11,
11015 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11016 {2427, 4}, {2432, 5}, {2437, 6},
11017 {2442, 7}, {2447, 8}, {2452, 9},
11018 {2457, 10}, {2462, 11}},
11019 .a_channels = 13,
11020 .a = {{5180, 36},
11021 {5200, 40},
11022 {5220, 44},
11023 {5240, 48},
11024 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11025 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11026 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11027 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11028 {5745, 149},
11029 {5765, 153},
11030 {5785, 157},
11031 {5805, 161},
11032 {5825, 165}},
11033 },
11034
11035 { /* Custom NA & Europe */
11036 "ZZB",
11037 .bg_channels = 11,
11038 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11039 {2427, 4}, {2432, 5}, {2437, 6},
11040 {2442, 7}, {2447, 8}, {2452, 9},
11041 {2457, 10}, {2462, 11}},
11042 .a_channels = 13,
11043 .a = {{5180, 36},
11044 {5200, 40},
11045 {5220, 44},
11046 {5240, 48},
11047 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11048 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11049 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11050 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11051 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11052 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11053 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11054 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11055 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11056 },
11057
11058 { /* Custom Japan */
11059 "ZZC",
11060 .bg_channels = 11,
11061 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11062 {2427, 4}, {2432, 5}, {2437, 6},
11063 {2442, 7}, {2447, 8}, {2452, 9},
11064 {2457, 10}, {2462, 11}},
11065 .a_channels = 4,
11066 .a = {{5170, 34}, {5190, 38},
11067 {5210, 42}, {5230, 46}},
11068 },
11069
11070 { /* Custom */
11071 "ZZM",
11072 .bg_channels = 11,
11073 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11074 {2427, 4}, {2432, 5}, {2437, 6},
11075 {2442, 7}, {2447, 8}, {2452, 9},
11076 {2457, 10}, {2462, 11}},
11077 },
11078
11079 { /* Europe */
11080 "ZZE",
11081 .bg_channels = 13,
11082 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11083 {2427, 4}, {2432, 5}, {2437, 6},
11084 {2442, 7}, {2447, 8}, {2452, 9},
11085 {2457, 10}, {2462, 11}, {2467, 12},
11086 {2472, 13}},
11087 .a_channels = 19,
11088 .a = {{5180, 36},
11089 {5200, 40},
11090 {5220, 44},
11091 {5240, 48},
11092 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11093 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11094 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11095 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11096 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
11097 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
11098 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
11099 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11100 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11101 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11102 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11103 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11104 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11105 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11106 {5700, 140, IEEE80211_CH_PASSIVE_ONLY}},
11107 },
11108
11109 { /* Custom Japan */
11110 "ZZJ",
11111 .bg_channels = 14,
11112 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11113 {2427, 4}, {2432, 5}, {2437, 6},
11114 {2442, 7}, {2447, 8}, {2452, 9},
11115 {2457, 10}, {2462, 11}, {2467, 12},
11116 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY}},
11117 .a_channels = 4,
11118 .a = {{5170, 34}, {5190, 38},
11119 {5210, 42}, {5230, 46}},
11120 },
11121
11122 { /* Rest of World */
11123 "ZZR",
11124 .bg_channels = 14,
11125 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11126 {2427, 4}, {2432, 5}, {2437, 6},
11127 {2442, 7}, {2447, 8}, {2452, 9},
11128 {2457, 10}, {2462, 11}, {2467, 12},
11129 {2472, 13}, {2484, 14, IEEE80211_CH_B_ONLY |
11130 IEEE80211_CH_PASSIVE_ONLY}},
11131 },
11132
11133 { /* High Band */
11134 "ZZH",
11135 .bg_channels = 13,
11136 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11137 {2427, 4}, {2432, 5}, {2437, 6},
11138 {2442, 7}, {2447, 8}, {2452, 9},
11139 {2457, 10}, {2462, 11},
11140 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11141 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11142 .a_channels = 4,
11143 .a = {{5745, 149}, {5765, 153},
11144 {5785, 157}, {5805, 161}},
11145 },
11146
11147 { /* Custom Europe */
11148 "ZZG",
11149 .bg_channels = 13,
11150 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11151 {2427, 4}, {2432, 5}, {2437, 6},
11152 {2442, 7}, {2447, 8}, {2452, 9},
11153 {2457, 10}, {2462, 11},
11154 {2467, 12}, {2472, 13}},
11155 .a_channels = 4,
11156 .a = {{5180, 36}, {5200, 40},
11157 {5220, 44}, {5240, 48}},
11158 },
11159
11160 { /* Europe */
11161 "ZZK",
11162 .bg_channels = 13,
11163 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11164 {2427, 4}, {2432, 5}, {2437, 6},
11165 {2442, 7}, {2447, 8}, {2452, 9},
11166 {2457, 10}, {2462, 11},
11167 {2467, 12, IEEE80211_CH_PASSIVE_ONLY},
11168 {2472, 13, IEEE80211_CH_PASSIVE_ONLY}},
11169 .a_channels = 24,
11170 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11171 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11172 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11173 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11174 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11175 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11176 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11177 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11178 {5500, 100, IEEE80211_CH_PASSIVE_ONLY},
11179 {5520, 104, IEEE80211_CH_PASSIVE_ONLY},
11180 {5540, 108, IEEE80211_CH_PASSIVE_ONLY},
11181 {5560, 112, IEEE80211_CH_PASSIVE_ONLY},
11182 {5580, 116, IEEE80211_CH_PASSIVE_ONLY},
11183 {5600, 120, IEEE80211_CH_PASSIVE_ONLY},
11184 {5620, 124, IEEE80211_CH_PASSIVE_ONLY},
11185 {5640, 128, IEEE80211_CH_PASSIVE_ONLY},
11186 {5660, 132, IEEE80211_CH_PASSIVE_ONLY},
11187 {5680, 136, IEEE80211_CH_PASSIVE_ONLY},
11188 {5700, 140, IEEE80211_CH_PASSIVE_ONLY},
11189 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11190 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11191 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11192 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11193 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11194 },
11195
11196 { /* Europe */
11197 "ZZL",
11198 .bg_channels = 11,
11199 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
11200 {2427, 4}, {2432, 5}, {2437, 6},
11201 {2442, 7}, {2447, 8}, {2452, 9},
11202 {2457, 10}, {2462, 11}},
11203 .a_channels = 13,
11204 .a = {{5180, 36, IEEE80211_CH_PASSIVE_ONLY},
11205 {5200, 40, IEEE80211_CH_PASSIVE_ONLY},
11206 {5220, 44, IEEE80211_CH_PASSIVE_ONLY},
11207 {5240, 48, IEEE80211_CH_PASSIVE_ONLY},
11208 {5260, 52, IEEE80211_CH_PASSIVE_ONLY},
11209 {5280, 56, IEEE80211_CH_PASSIVE_ONLY},
11210 {5300, 60, IEEE80211_CH_PASSIVE_ONLY},
11211 {5320, 64, IEEE80211_CH_PASSIVE_ONLY},
11212 {5745, 149, IEEE80211_CH_PASSIVE_ONLY},
11213 {5765, 153, IEEE80211_CH_PASSIVE_ONLY},
11214 {5785, 157, IEEE80211_CH_PASSIVE_ONLY},
11215 {5805, 161, IEEE80211_CH_PASSIVE_ONLY},
11216 {5825, 165, IEEE80211_CH_PASSIVE_ONLY}},
11217 }
11218};
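/*
 * Editor's note: the helper below is an illustrative sketch only and is
 * not part of the original driver.  It shows how an entry of the table
 * above can be selected by the 3-character SKU read from the EEPROM,
 * mirroring the lookup loop in ipw_up() further down.  The function name
 * is hypothetical, and the element type is assumed to be
 * struct ieee80211_geo, the type consumed by ieee80211_set_geo() below.
 */
static const struct ieee80211_geo *ipw_geo_lookup_example(const u8 *sku)
{
	int j;

	for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
		/* Compare the 3-byte country/SKU code ("ZZE", "ZZJ", ...) */
		if (!memcmp(sku, ipw_geos[j].name, 3))
			return &ipw_geos[j];
	}

	/* Unknown SKU: fall back to the first table entry, as ipw_up() does */
	return &ipw_geos[0];
}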
11219
11220#define MAX_HW_RESTARTS 5
11221static int ipw_up(struct ipw_priv *priv)
11222{
11223 int rc, i, j;
11224
11225 if (priv->status & STATUS_EXIT_PENDING)
11226 return -EIO;
11227
11228 if (cmdlog && !priv->cmdlog) {
11229 priv->cmdlog = kcalloc(cmdlog, sizeof(*priv->cmdlog),
11230 GFP_KERNEL);
11231 if (priv->cmdlog == NULL) {
11232 IPW_ERROR("Error allocating %d command log entries.\n",
11233 cmdlog);
11234 return -ENOMEM;
11235 } else {
11236 priv->cmdlog_len = cmdlog;
11237 }
11238 }
11239
11240 for (i = 0; i < MAX_HW_RESTARTS; i++) {
11241 /* Load the microcode, firmware, and eeprom.
11242 * Also start the clocks. */
11243 rc = ipw_load(priv);
11244 if (rc) {
11245 IPW_ERROR("Unable to load firmware: %d\n", rc);
11246 return rc;
11247 }
11248
11249 ipw_init_ordinals(priv);
11250 if (!(priv->config & CFG_CUSTOM_MAC))
11251 eeprom_parse_mac(priv, priv->mac_addr);
11252 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11253
11254 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11255 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
11256 ipw_geos[j].name, 3))
11257 break;
11258 }
11259 if (j == ARRAY_SIZE(ipw_geos)) {
11260 IPW_WARNING("SKU [%c%c%c] not recognized.\n",
11261 priv->eeprom[EEPROM_COUNTRY_CODE + 0],
11262 priv->eeprom[EEPROM_COUNTRY_CODE + 1],
11263 priv->eeprom[EEPROM_COUNTRY_CODE + 2]);
11264 j = 0;
11265 }
11266 if (ieee80211_set_geo(priv->ieee, &ipw_geos[j])) {
11267			IPW_WARNING("Could not set geography.\n");
11268 return 0;
11269 }
11270
11271 if (priv->status & STATUS_RF_KILL_SW) {
11272 IPW_WARNING("Radio disabled by module parameter.\n");
11273 return 0;
11274 } else if (rf_kill_active(priv)) {
11275 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
11276 "Kill switch must be turned off for "
11277 "wireless networking to work.\n");
11278 queue_delayed_work(priv->workqueue, &priv->rf_kill,
11279 2 * HZ);
11280 return 0;
11281 }
11282
11283 rc = ipw_config(priv);
11284 if (!rc) {
11285 IPW_DEBUG_INFO("Configured device on count %i\n", i);
11286
11287			/* If configured to try and auto-associate, kick
11288 * off a scan. */
11289 queue_delayed_work(priv->workqueue,
11290 &priv->request_scan, 0);
11291
11292 return 0;
11293 }
11294
11295 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n", rc);
11296 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
11297 i, MAX_HW_RESTARTS);
11298
11299 /* We had an error bringing up the hardware, so take it
11300 * all the way back down so we can try again */
11301 ipw_down(priv);
11302 }
11303
11304 /* tried to restart and config the device for as long as our
11305 * patience could withstand */
11306 IPW_ERROR("Unable to initialize device after %d attempts.\n", i);
11307
11308 return -EIO;
11309}
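/*
 * Editor's note: illustrative sketch only, not the driver's own logging
 * code.  ipw_up() above sizes priv->cmdlog with kcalloc() when the
 * "cmdlog" module parameter is set; such a fixed-size log is typically
 * consumed as a ring buffer, overwriting the oldest slot once full.  The
 * entry layout and helper below are hypothetical and do not reflect the
 * real struct ipw_cmd_log.
 */
struct example_cmd_log_entry {
	unsigned long when;	/* jiffies at the time the command was sent */
	u8 cmd;			/* command opcode */
};

static void example_cmd_log_append(struct example_cmd_log_entry *log,
				   int len, int *pos, u8 cmd)
{
	log[*pos].when = jiffies;
	log[*pos].cmd = cmd;
	/* Wrap so the newest entry replaces the oldest one */
	*pos = (*pos + 1) % len;
}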
11310
11311static void ipw_bg_up(struct work_struct *work)
11312{
11313 struct ipw_priv *priv =
11314 container_of(work, struct ipw_priv, up);
11315 mutex_lock(&priv->mutex);
11316 ipw_up(priv);
11317 mutex_unlock(&priv->mutex);
11318}
11319
11320static void ipw_deinit(struct ipw_priv *priv)
11321{
11322 int i;
11323
11324 if (priv->status & STATUS_SCANNING) {
11325 IPW_DEBUG_INFO("Aborting scan during shutdown.\n");
11326 ipw_abort_scan(priv);
11327 }
11328
11329 if (priv->status & STATUS_ASSOCIATED) {
11330 IPW_DEBUG_INFO("Disassociating during shutdown.\n");
11331 ipw_disassociate(priv);
11332 }
11333
11334 ipw_led_shutdown(priv);
11335
11336 /* Wait up to 1s for status to change to not scanning and not
11337	 * associated (disassociation can take a while for a full 802.11
11338	 * exchange) */
11339 for (i = 1000; i && (priv->status &
11340 (STATUS_DISASSOCIATING |
11341 STATUS_ASSOCIATED | STATUS_SCANNING)); i--)
11342 udelay(10);
11343
11344 if (priv->status & (STATUS_DISASSOCIATING |
11345 STATUS_ASSOCIATED | STATUS_SCANNING))
11346 IPW_DEBUG_INFO("Still associated or scanning...\n");
11347 else
11348 IPW_DEBUG_INFO("Took %dms to de-init\n", 1000 - i);
11349
11350 /* Attempt to disable the card */
11351 ipw_send_card_disable(priv, 0);
11352
11353 priv->status &= ~STATUS_INIT;
11354}
11355
11356static void ipw_down(struct ipw_priv *priv)
11357{
11358 int exit_pending = priv->status & STATUS_EXIT_PENDING;
11359
11360 priv->status |= STATUS_EXIT_PENDING;
11361
11362 if (ipw_is_init(priv))
11363 ipw_deinit(priv);
11364
11365 /* Wipe out the EXIT_PENDING status bit if we are not actually
11366 * exiting the module */
11367 if (!exit_pending)
11368 priv->status &= ~STATUS_EXIT_PENDING;
11369
11370 /* tell the device to stop sending interrupts */
11371 ipw_disable_interrupts(priv);
11372
11373 /* Clear all bits but the RF Kill */
11374 priv->status &= STATUS_RF_KILL_MASK | STATUS_EXIT_PENDING;
11375 netif_carrier_off(priv->net_dev);
11376
11377 ipw_stop_nic(priv);
11378
11379 ipw_led_radio_off(priv);
11380}
11381
11382static void ipw_bg_down(struct work_struct *work)
11383{
11384 struct ipw_priv *priv =
11385 container_of(work, struct ipw_priv, down);
11386 mutex_lock(&priv->mutex);
11387 ipw_down(priv);
11388 mutex_unlock(&priv->mutex);
11389}
11390
11391/* Called by register_netdev() */
11392static int ipw_net_init(struct net_device *dev)
11393{
11394 struct ipw_priv *priv = ieee80211_priv(dev);
11395 mutex_lock(&priv->mutex);
11396
11397 if (ipw_up(priv)) {
11398 mutex_unlock(&priv->mutex);
11399 return -EIO;
11400 }
11401
11402 mutex_unlock(&priv->mutex);
11403 return 0;
11404}
11405
11406/* PCI driver stuff */
11407static struct pci_device_id card_ids[] = {
11408 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
11409 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
11410 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
11411 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
11412 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
11413 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
11414 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
11415 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
11416 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
11417 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
11418 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
11419 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
11420 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
11421 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
11422 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
11423 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
11424 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
11425 {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
11426 {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
11427 {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
11428 {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
11429 {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
11430
11431 /* required last entry */
11432 {0,}
11433};
11434
11435MODULE_DEVICE_TABLE(pci, card_ids);
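/*
 * MODULE_DEVICE_TABLE() exports the PCI ID list above through the module's
 * alias table, so user space (udev/modprobe) can autoload this driver when
 * a matching 2200BG/2915ABG device shows up at boot or hot-plug time.
 */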
11436
11437static struct attribute *ipw_sysfs_entries[] = {
11438 &dev_attr_rf_kill.attr,
11439 &dev_attr_direct_dword.attr,
11440 &dev_attr_indirect_byte.attr,
11441 &dev_attr_indirect_dword.attr,
11442 &dev_attr_mem_gpio_reg.attr,
11443 &dev_attr_command_event_reg.attr,
11444 &dev_attr_nic_type.attr,
11445 &dev_attr_status.attr,
11446 &dev_attr_cfg.attr,
11447 &dev_attr_error.attr,
11448 &dev_attr_event_log.attr,
11449 &dev_attr_cmd_log.attr,
11450 &dev_attr_eeprom_delay.attr,
11451 &dev_attr_ucode_version.attr,
11452 &dev_attr_rtc.attr,
11453 &dev_attr_scan_age.attr,
11454 &dev_attr_led.attr,
11455 &dev_attr_speed_scan.attr,
11456 &dev_attr_net_stats.attr,
11457 &dev_attr_channels.attr,
11458#ifdef CONFIG_IPW2200_PROMISCUOUS
11459 &dev_attr_rtap_iface.attr,
11460 &dev_attr_rtap_filter.attr,
11461#endif
11462 NULL
11463};
11464
11465static struct attribute_group ipw_attribute_group = {
11466 .name = NULL, /* put in device directory */
11467 .attrs = ipw_sysfs_entries,
11468};
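/* This attribute group is registered against the PCI device's kobject in
 * ipw_pci_probe() via sysfs_create_group(), and removed again both in the
 * probe error path and in ipw_pci_remove() via sysfs_remove_group(). */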
11469
11470#ifdef CONFIG_IPW2200_PROMISCUOUS
11471static int ipw_prom_open(struct net_device *dev)
11472{
11473 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11474 struct ipw_priv *priv = prom_priv->priv;
11475
11476 IPW_DEBUG_INFO("prom dev->open\n");
11477 netif_carrier_off(dev);
11478
11479 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11480 priv->sys_config.accept_all_data_frames = 1;
11481 priv->sys_config.accept_non_directed_frames = 1;
11482 priv->sys_config.accept_all_mgmt_bcpr = 1;
11483 priv->sys_config.accept_all_mgmt_frames = 1;
11484
11485 ipw_send_system_config(priv);
11486 }
11487
11488 return 0;
11489}
11490
11491static int ipw_prom_stop(struct net_device *dev)
11492{
11493 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11494 struct ipw_priv *priv = prom_priv->priv;
11495
11496 IPW_DEBUG_INFO("prom dev->stop\n");
11497
11498 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
11499 priv->sys_config.accept_all_data_frames = 0;
11500 priv->sys_config.accept_non_directed_frames = 0;
11501 priv->sys_config.accept_all_mgmt_bcpr = 0;
11502 priv->sys_config.accept_all_mgmt_frames = 0;
11503
11504 ipw_send_system_config(priv);
11505 }
11506
11507 return 0;
11508}
11509
11510static int ipw_prom_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
11511{
11512 IPW_DEBUG_INFO("prom dev->xmit\n");
11513 return -EOPNOTSUPP;
11514}
11515
11516static struct net_device_stats *ipw_prom_get_stats(struct net_device *dev)
11517{
11518 struct ipw_prom_priv *prom_priv = ieee80211_priv(dev);
11519 return &prom_priv->ieee->stats;
11520}
11521
11522static int ipw_prom_alloc(struct ipw_priv *priv)
11523{
11524 int rc = 0;
11525
11526 if (priv->prom_net_dev)
11527 return -EPERM;
11528
11529 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
11530 if (priv->prom_net_dev == NULL)
11531 return -ENOMEM;
11532
11533 priv->prom_priv = ieee80211_priv(priv->prom_net_dev);
11534 priv->prom_priv->ieee = netdev_priv(priv->prom_net_dev);
11535 priv->prom_priv->priv = priv;
11536
11537 strcpy(priv->prom_net_dev->name, "rtap%d");
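	/* register_netdev() below fills in the "%d", so the interfaces
	 * appear as rtap0, rtap1, ... */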
11538 memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11539
11540 priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
11541 priv->prom_net_dev->open = ipw_prom_open;
11542 priv->prom_net_dev->stop = ipw_prom_stop;
11543 priv->prom_net_dev->get_stats = ipw_prom_get_stats;
11544 priv->prom_net_dev->hard_start_xmit = ipw_prom_hard_start_xmit;
11545
11546 priv->prom_priv->ieee->iw_mode = IW_MODE_MONITOR;
11547 SET_NETDEV_DEV(priv->prom_net_dev, &priv->pci_dev->dev);
11548
11549 rc = register_netdev(priv->prom_net_dev);
11550 if (rc) {
11551 free_ieee80211(priv->prom_net_dev);
11552 priv->prom_net_dev = NULL;
11553 return rc;
11554 }
11555
11556 return 0;
11557}
11558
11559static void ipw_prom_free(struct ipw_priv *priv)
11560{
11561 if (!priv->prom_net_dev)
11562 return;
11563
11564 unregister_netdev(priv->prom_net_dev);
11565 free_ieee80211(priv->prom_net_dev);
11566
11567 priv->prom_net_dev = NULL;
11568}
11569
11570#endif
11571
11572
11573static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11574 const struct pci_device_id *ent)
11575{
11576 int err = 0;
11577 struct net_device *net_dev;
11578 void __iomem *base;
11579 u32 length, val;
11580 struct ipw_priv *priv;
11581 int i;
11582
11583 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
11584 if (net_dev == NULL) {
11585 err = -ENOMEM;
11586 goto out;
11587 }
11588
11589 priv = ieee80211_priv(net_dev);
11590 priv->ieee = netdev_priv(net_dev);
11591
11592 priv->net_dev = net_dev;
11593 priv->pci_dev = pdev;
11594 ipw_debug_level = debug;
11595 spin_lock_init(&priv->irq_lock);
11596 spin_lock_init(&priv->lock);
11597 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
11598 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
11599
11600 mutex_init(&priv->mutex);
11601 if (pci_enable_device(pdev)) {
11602 err = -ENODEV;
11603 goto out_free_ieee80211;
11604 }
11605
11606 pci_set_master(pdev);
11607
11608 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
11609 if (!err)
11610 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
11611 if (err) {
11612 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
11613 goto out_pci_disable_device;
11614 }
11615
11616 pci_set_drvdata(pdev, priv);
11617
11618 err = pci_request_regions(pdev, DRV_NAME);
11619 if (err)
11620 goto out_pci_disable_device;
11621
11622 /* We disable the RETRY_TIMEOUT register (0x41) to keep
11623 * PCI Tx retries from interfering with C3 CPU state */
11624 pci_read_config_dword(pdev, 0x40, &val);
11625 if ((val & 0x0000ff00) != 0)
11626 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
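	/* RETRY_TIMEOUT (0x41) is the second byte of the dword read at 0x40,
	 * hence the 0x0000ff00 test and the 0xffff00ff mask that clears
	 * only that byte. */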
11627
11628 length = pci_resource_len(pdev, 0);
11629 priv->hw_len = length;
11630
11631 base = pci_ioremap_bar(pdev, 0);
11632 if (!base) {
11633 err = -ENODEV;
11634 goto out_pci_release_regions;
11635 }
11636
11637 priv->hw_base = base;
11638 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
11639 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
11640
11641 err = ipw_setup_deferred_work(priv);
11642 if (err) {
11643 IPW_ERROR("Unable to setup deferred work\n");
11644 goto out_iounmap;
11645 }
11646
11647 ipw_sw_reset(priv, 1);
11648
11649 err = request_irq(pdev->irq, ipw_isr, IRQF_SHARED, DRV_NAME, priv);
11650 if (err) {
11651 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
11652 goto out_destroy_workqueue;
11653 }
11654
11655 SET_NETDEV_DEV(net_dev, &pdev->dev);
11656
11657 mutex_lock(&priv->mutex);
11658
11659 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
11660 priv->ieee->set_security = shim__set_security;
11661 priv->ieee->is_queue_full = ipw_net_is_queue_full;
11662
11663#ifdef CONFIG_IPW2200_QOS
11664 priv->ieee->is_qos_active = ipw_is_qos_active;
11665 priv->ieee->handle_probe_response = ipw_handle_beacon;
11666 priv->ieee->handle_beacon = ipw_handle_probe_response;
11667 priv->ieee->handle_assoc_response = ipw_handle_assoc_response;
11668#endif /* CONFIG_IPW2200_QOS */
11669
11670 priv->ieee->perfect_rssi = -20;
11671 priv->ieee->worst_rssi = -85;
11672
11673 net_dev->open = ipw_net_open;
11674 net_dev->stop = ipw_net_stop;
11675 net_dev->init = ipw_net_init;
11676 net_dev->get_stats = ipw_net_get_stats;
11677 net_dev->set_multicast_list = ipw_net_set_multicast_list;
11678 net_dev->set_mac_address = ipw_net_set_mac_address;
11679 priv->wireless_data.spy_data = &priv->ieee->spy_data;
11680 net_dev->wireless_data = &priv->wireless_data;
11681 net_dev->wireless_handlers = &ipw_wx_handler_def;
11682 net_dev->ethtool_ops = &ipw_ethtool_ops;
11683 net_dev->irq = pdev->irq;
11684 net_dev->base_addr = (unsigned long)priv->hw_base;
11685 net_dev->mem_start = pci_resource_start(pdev, 0);
11686 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11687
11688 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11689 if (err) {
11690 IPW_ERROR("failed to create sysfs device attributes\n");
11691 mutex_unlock(&priv->mutex);
11692 goto out_release_irq;
11693 }
11694
11695 mutex_unlock(&priv->mutex);
11696 err = register_netdev(net_dev);
11697 if (err) {
11698 IPW_ERROR("failed to register network device\n");
11699 goto out_remove_sysfs;
11700 }
11701
11702#ifdef CONFIG_IPW2200_PROMISCUOUS
11703 if (rtap_iface) {
11704 err = ipw_prom_alloc(priv);
11705 if (err) {
11706 IPW_ERROR("Failed to register promiscuous network "
11707 "device (error %d).\n", err);
11708 unregister_netdev(priv->net_dev);
11709 goto out_remove_sysfs;
11710 }
11711 }
11712#endif
11713
11714 printk(KERN_INFO DRV_NAME ": Detected geography %s (%d 802.11bg "
11715 "channels, %d 802.11a channels)\n",
11716 priv->ieee->geo.name, priv->ieee->geo.bg_channels,
11717 priv->ieee->geo.a_channels);
11718
11719 return 0;
11720
11721 out_remove_sysfs:
11722 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11723 out_release_irq:
11724 free_irq(pdev->irq, priv);
11725 out_destroy_workqueue:
11726 destroy_workqueue(priv->workqueue);
11727 priv->workqueue = NULL;
11728 out_iounmap:
11729 iounmap(priv->hw_base);
11730 out_pci_release_regions:
11731 pci_release_regions(pdev);
11732 out_pci_disable_device:
11733 pci_disable_device(pdev);
11734 pci_set_drvdata(pdev, NULL);
11735 out_free_ieee80211:
11736 free_ieee80211(priv->net_dev);
11737 out:
11738 return err;
11739}
11740
11741static void __devexit ipw_pci_remove(struct pci_dev *pdev)
11742{
11743 struct ipw_priv *priv = pci_get_drvdata(pdev);
11744 struct list_head *p, *q;
11745 int i;
11746
11747 if (!priv)
11748 return;
11749
11750 mutex_lock(&priv->mutex);
11751
11752 priv->status |= STATUS_EXIT_PENDING;
11753 ipw_down(priv);
11754 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
11755
11756 mutex_unlock(&priv->mutex);
11757
11758 unregister_netdev(priv->net_dev);
11759
11760 if (priv->rxq) {
11761 ipw_rx_queue_free(priv, priv->rxq);
11762 priv->rxq = NULL;
11763 }
11764 ipw_tx_queue_free(priv);
11765
11766 if (priv->cmdlog) {
11767 kfree(priv->cmdlog);
11768 priv->cmdlog = NULL;
11769 }
11770 /* ipw_down will ensure that there is no more pending work
11771	 * in the workqueues, so we can safely remove them now. */
11772 cancel_delayed_work(&priv->adhoc_check);
11773 cancel_delayed_work(&priv->gather_stats);
11774 cancel_delayed_work(&priv->request_scan);
11775 cancel_delayed_work(&priv->request_direct_scan);
11776 cancel_delayed_work(&priv->request_passive_scan);
11777 cancel_delayed_work(&priv->scan_event);
11778 cancel_delayed_work(&priv->rf_kill);
11779 cancel_delayed_work(&priv->scan_check);
11780 destroy_workqueue(priv->workqueue);
11781 priv->workqueue = NULL;
11782
11783 /* Free MAC hash list for ADHOC */
11784 for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++) {
11785 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
11786 list_del(p);
11787 kfree(list_entry(p, struct ipw_ibss_seq, list));
11788 }
11789 }
11790
11791 kfree(priv->error);
11792 priv->error = NULL;
11793
11794#ifdef CONFIG_IPW2200_PROMISCUOUS
11795 ipw_prom_free(priv);
11796#endif
11797
11798 free_irq(pdev->irq, priv);
11799 iounmap(priv->hw_base);
11800 pci_release_regions(pdev);
11801 pci_disable_device(pdev);
11802 pci_set_drvdata(pdev, NULL);
11803 free_ieee80211(priv->net_dev);
11804 free_firmware();
11805}
11806
11807#ifdef CONFIG_PM
11808static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
11809{
11810 struct ipw_priv *priv = pci_get_drvdata(pdev);
11811 struct net_device *dev = priv->net_dev;
11812
11813 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
11814
11815 /* Take down the device; powers it off, etc. */
11816 ipw_down(priv);
11817
11818 /* Remove the PRESENT state of the device */
11819 netif_device_detach(dev);
11820
11821 pci_save_state(pdev);
11822 pci_disable_device(pdev);
11823 pci_set_power_state(pdev, pci_choose_state(pdev, state));
11824
11825 return 0;
11826}
11827
11828static int ipw_pci_resume(struct pci_dev *pdev)
11829{
11830 struct ipw_priv *priv = pci_get_drvdata(pdev);
11831 struct net_device *dev = priv->net_dev;
11832 int err;
11833 u32 val;
11834
11835 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
11836
11837 pci_set_power_state(pdev, PCI_D0);
11838 err = pci_enable_device(pdev);
11839 if (err) {
11840 printk(KERN_ERR "%s: pci_enable_device failed on resume\n",
11841 dev->name);
11842 return err;
11843 }
11844 pci_restore_state(pdev);
11845
11846 /*
11847 * Suspend/Resume resets the PCI configuration space, so we have to
11848 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
11849 * from interfering with C3 CPU state. pci_restore_state won't help
11850	 * here since it only restores the first 64 bytes of the PCI config header.
11851 */
11852 pci_read_config_dword(pdev, 0x40, &val);
11853 if ((val & 0x0000ff00) != 0)
11854 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
11855
11856 /* Set the device back into the PRESENT state; this will also wake
11857	 * the queue if needed */
11858 netif_device_attach(dev);
11859
11860 /* Bring the device back up */
11861 queue_work(priv->workqueue, &priv->up);
11862
11863 return 0;
11864}
11865#endif
11866
11867static void ipw_pci_shutdown(struct pci_dev *pdev)
11868{
11869 struct ipw_priv *priv = pci_get_drvdata(pdev);
11870
11871 /* Take down the device; powers it off, etc. */
11872 ipw_down(priv);
11873
11874 pci_disable_device(pdev);
11875}
11876
11877/* driver initialization stuff */
11878static struct pci_driver ipw_driver = {
11879 .name = DRV_NAME,
11880 .id_table = card_ids,
11881 .probe = ipw_pci_probe,
11882 .remove = __devexit_p(ipw_pci_remove),
11883#ifdef CONFIG_PM
11884 .suspend = ipw_pci_suspend,
11885 .resume = ipw_pci_resume,
11886#endif
11887 .shutdown = ipw_pci_shutdown,
11888};
11889
11890static int __init ipw_init(void)
11891{
11892 int ret;
11893
11894 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
11895 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
11896
11897 ret = pci_register_driver(&ipw_driver);
11898 if (ret) {
11899 IPW_ERROR("Unable to initialize PCI module\n");
11900 return ret;
11901 }
11902
11903 ret = driver_create_file(&ipw_driver.driver, &driver_attr_debug_level);
11904 if (ret) {
11905 IPW_ERROR("Unable to create driver sysfs file\n");
11906 pci_unregister_driver(&ipw_driver);
11907 return ret;
11908 }
11909
11910 return ret;
11911}
11912
11913static void __exit ipw_exit(void)
11914{
11915 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
11916 pci_unregister_driver(&ipw_driver);
11917}
11918
11919module_param(disable, int, 0444);
11920MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
11921
11922module_param(associate, int, 0444);
11923MODULE_PARM_DESC(associate, "auto associate when scanning (default off)");
11924
11925module_param(auto_create, int, 0444);
11926MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
11927
11928module_param(led, int, 0444);
11929MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)");
11930
11931module_param(debug, int, 0444);
11932MODULE_PARM_DESC(debug, "debug output mask");
11933
11934module_param(channel, int, 0444);
11935MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
11936
11937#ifdef CONFIG_IPW2200_PROMISCUOUS
11938module_param(rtap_iface, int, 0444);
11939MODULE_PARM_DESC(rtap_iface, "create the rtap interface (1 - create, default 0)");
11940#endif
11941
11942#ifdef CONFIG_IPW2200_QOS
11943module_param(qos_enable, int, 0444);
11944MODULE_PARM_DESC(qos_enable, "enable all QoS functionalities");
11945
11946module_param(qos_burst_enable, int, 0444);
11947MODULE_PARM_DESC(qos_burst_enable, "enable QoS burst mode");
11948
11949module_param(qos_no_ack_mask, int, 0444);
11950MODULE_PARM_DESC(qos_no_ack_mask, "mask Tx_Queue to no ack");
11951
11952module_param(burst_duration_CCK, int, 0444);
11953MODULE_PARM_DESC(burst_duration_CCK, "set CCK burst value");
11954
11955module_param(burst_duration_OFDM, int, 0444);
11956MODULE_PARM_DESC(burst_duration_OFDM, "set OFDM burst value");
11957#endif /* CONFIG_IPW2200_QOS */
11958
11959#ifdef CONFIG_IPW2200_MONITOR
11960module_param(mode, int, 0444);
11961MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
11962#else
11963module_param(mode, int, 0444);
11964MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
11965#endif
11966
11967module_param(bt_coexist, int, 0444);
11968MODULE_PARM_DESC(bt_coexist, "enable bluetooth coexistence (default off)");
11969
11970module_param(hwcrypto, int, 0444);
11971MODULE_PARM_DESC(hwcrypto, "enable hardware crypto (default off)");
11972
11973module_param(cmdlog, int, 0444);
11974MODULE_PARM_DESC(cmdlog,
11975 "allocate a ring buffer for logging firmware commands");
11976
11977module_param(roaming, int, 0444);
11978MODULE_PARM_DESC(roaming, "enable roaming support (default on)");
11979
11980module_param(antenna, int, 0444);
11981MODULE_PARM_DESC(antenna, "select antenna 1=Main, 3=Aux, default 0 [both], 2=slow_diversity (choose the one with lower background noise)");
11982
11983module_exit(ipw_exit);
11984module_init(ipw_init);
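/*
 * Editor's note: illustrative usage only.  With the parameters declared
 * above, the driver can for example be loaded in monitor mode (mode=2,
 * available with CONFIG_IPW2200_MONITOR) and with the promiscuous rtap
 * interface (rtap_iface=1, CONFIG_IPW2200_PROMISCUOUS):
 *
 *   modprobe ipw2200 mode=2 rtap_iface=1
 *
 * Any other parameter described above (debug, channel, led, ...) can be
 * appended the same way.
 */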